% (non-LaTeX dataset-export artifact removed)
\begin{document}
\title{\textbf{Well-posedness of the fractional Zener wave equation for heterogeneous viscoelastic materials}} \author{\sc{Ljubica Oparnica and Endre S\"uli}\\~\\}
\date{~} \maketitle
\begin{abstract} {\color{black} The Zener model for viscoelastic solids replaces Hooke's law} $\boldsymbol{\sigma} = 2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I}$, relating the stress tensor $\boldsymbol{\sigma}$ to the strain tensor $\boldsymbol{\varepsilon}(\mathbf{u})$, where $\mathbf{u}$ is the displacement vector, $\mu>0$ is the shear modulus, and $\lambda\geq 0$ is the first Lam\'{e} coefficient, with the constitutive law
$ (1 + \tau D_t) \boldsymbol{\sigma} = (1 + \rho D_t)[2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I}]$,
where $\tau>0$ is the characteristic relaxation time and $\rho \geq \tau$ is the characteristic retardation time. It is the simplest model that predicts creep/recovery and stress relaxation phenomena. We explore the well-posedness of the {\color{black} fractional version of the} model, where the first-order time-derivative $D_t$ in the constitutive law is replaced by the {\color{black} Caputo} time-derivative $D_t^\alpha$, with $\alpha \in (0,1)$, and where $\mu$ and $\lambda$ belong to $\mathrm{L}^\infty(\Omega)$, $\mu$ is bounded below by a positive constant, and $\lambda$ is nonnegative. We show that, when coupled with the equation of motion $\varrho \ddot{\mathbf{u}} = \Div \boldsymbol{\sigma} + \mathbf{f}$, considered in a bounded open Lipschitz domain $\Omega$ in $\mathbb{R}^3$ and over a time interval $(0,T]$, where $\varrho \in \mathrm{L}^\infty(\Omega)$ is the density of the material, assumed to be bounded below by a positive constant, and $\mathbf{f}$ is a specified load vector, the resulting model is well-posed in the sense that the associated initial-boundary-value problem, {\color{black} with initial conditions $\mathbf{u}(0,\mathbf{x}) = \mathbf{g}(\mathbf{x})$, $\dot{\mathbf{u}}(0,\mathbf{x}) = \mathbf{h}(\mathbf{x})$, $\boldsymbol{\sigma}(0,\mathbf{x}) = \mathbf{S}(\mathbf{x})$, for $\mathbf{x} \in \Omega$, and a homogeneous Dirichlet boundary condition, possesses a unique weak solution for any choice of $\mathbf{g} \in [\mathrm{H}^1_0(\Omega)]^3$, $\mathbf{h} \in [\mathrm{L}^2(\Omega)]^3$, and $\mathbf{S} = \mathbf{S}^{\rm T} \in [\mathrm{L}^2(\Omega)]^{3 \times 3}$, and any load vector $\mathbf{f} \in \mathrm{L}^2(0,T;[\mathrm{L}^2(\Omega)]^3)$, and that this unique weak solution depends continuously on the initial data and the load vector.} \end{abstract}
\section{Statement of the model} Suppose that $\Omega \subset \mathbb{R}^3$ is a bounded, open, simply-connected Lipschitz domain, with boundary $\partial \Omega$, occupied by a viscoelastic material, and let $T>0$. Consider the equation of motion \begin{align}\label{eq:1} \varrho \ddot{\mathbf{u}} = \Div{\boldsymbol{\sigma}} + \mathbf{f}\qquad \mbox{in $(0,T] \times \Omega$}, \end{align} with $\varrho>0$ signifying the density of the material, $\mathbf{u}$ the displacement vector, $\boldsymbol{\sigma}$ the stress tensor, and $\mathbf{f}$ the load vector, with the material being considered subject to the initial conditions
\begin{align}\label{eq:2} \mathbf{u}(0,\mathbf{x}) = \mathbf{g}(\mathbf{x}),\qquad \dot{\mathbf{u}}(0,\mathbf{x}) = \mathbf{h}(\mathbf{x}),\qquad \boldsymbol{\sigma}(0,\mathbf{x}) = \mathbf{S}(\mathbf{x}),\qquad \mbox{for $\mathbf{x} \in \Omega$}, \end{align}
and a suitable boundary condition, which for the sake of simplicity of the exposition we shall assume to be the homogeneous Dirichlet boundary condition
\begin{align}\label{eq:3} \mathbf{u}(t,\mathbf{x}) = \mathbf{0} \qquad \mbox{for all $(t,\mathbf{x}) \in (0,T] \times \partial \Omega$}. \end{align}
The discussion below trivially extends to the case of a {\color{black} mixed homogeneous Dirichlet/ nonhomoge\-neous} Neumann boundary condition provided that the Dirichlet part of $\partial\Omega$ has positive two-dimensional surface measure {\color{black} (cf. the concluding remarks at the end of the paper for further comments in this direction)}. In the case of a classical linear (Hookean) elastic body the stress tensor $\boldsymbol{\sigma}$ is related to the strain tensor (symmetric displacement gradient)
\[ \boldsymbol{\varepsilon}(\mathbf{u}):= \frac{1}{2}(\nabla \mathbf{u} + (\nabla \mathbf{u})^{\rm T}) \]
through \textit{Hooke's law}
$\boldsymbol{\sigma} = 2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I}$,
where $\mu>0$ is the \textit{shear modulus} and $\lambda\geq 0$ is the \textit{first Lam\'{e} coefficient}. In this case the initial value $\mathbf{S}=\boldsymbol{\sigma}|_{t=0}$ of $\boldsymbol{\sigma}$ is automatically equal to $2\mu \boldsymbol{\varepsilon}(\mathbf{g}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}$, by Hooke's law, and need not (or, more precisely, should not) be specified independently, as otherwise the resulting initial-boundary-value problem will be over-determined and will have no solution in general. However for Zener's model under consideration here the situation is different: the constitutive law relating the stress tensor $\boldsymbol{\sigma}$ to the strain tensor $\boldsymbol{\varepsilon}(\mathbf{u})$ involves the time-derivative of order $\alpha \in (0,1]$ of $\boldsymbol{\sigma}$:
\begin{align*} (1 + \tau^\alpha D^\alpha_t) \boldsymbol{\sigma} = (1 + \rho^\alpha D^\alpha_t)[2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I}], \end{align*}
with $\tau>0$ signifying the \textit{characteristic relaxation time} and $\rho \geq \tau$ the \textit{characteristic retardation time} --- which then necessitates the specification of an initial datum $\mathbf{S}$ for $\boldsymbol{\sigma}$.
In the case of $\alpha=1$ the model was proposed by Zener \cite{Zener} (with $\lambda = 0$). The fractional version of Zener's model was introduced (in one space dimension and, again, with $\lambda=0$) by Caputo and Mainardi (cf. \cite{CM}, and eq. (13) in \cite{FD}); in the context of the present paper a natural generalization of the model from \cite{CM} to the case of three space-dimensions would be
\[ (1 + \tau^\alpha D^\alpha_t) \boldsymbol{\sigma} = E (1 +\, \rho^\alpha D^\alpha_t) \boldsymbol{\varepsilon}(\mathbf{u}), \quad \mbox{with $\boldsymbol{\sigma}(0,\cdot) = E
\left(\frac{\rho}{\tau}\right)^\alpha \boldsymbol{\varepsilon}(\mathbf{u}(0,\cdot))$},\]
where, following Bagley and Torvik \cite{BT}, $E>0$ is referred to as the \textit{rubbery modulus}, $E(\rho/\tau)^\alpha$ is called the \textit{glassy modulus}, and $\alpha \in (0,1)$ is the \textit{fractional order of evolution}. As has been noted by Freed and Diethelm \cite{FD}, this model allows for a finite discontinuity in the stress-strain response at time zero (cf. Remark \ref{re:1} below for further comments on this observation in the context of our well-posedness analysis). Bagley and Torvik \cite{BT} have demonstrated that the fractional orders of evolution in stress and strain must be the same, as originally proposed in the work of Caputo and Mainardi \cite{CM}, in order that a material model of fractional order comply with the second law of thermodynamics; Bagley and Calico \cite{BC} have also shown that the differential orders need to be the same for the stress and the strain in order to ensure that sound waves in the material propagate at finite speed. For further motivation from the point of view of continuum thermodynamics for considering fractional-order constitutive laws of this kind we refer to \cite{A}, \cite{BC}, \cite{BT}, and \cite{P}, for example.
As the actual value of the characteristic retardation time $\rho ~(\geq \tau > 0)$ is of no relevance in the discussion that follows, for the sake of simplicity of the exposition we have fixed $\rho=1$, resulting in the constitutive law
\begin{align}\label{eq:4} (1 + \tau^\alpha D^\alpha_t) \boldsymbol{\sigma} = (1 + D^\alpha_t)[2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I}], \qquad \tau \in (0,1], \quad \alpha \in (0,1). \end{align}
As will be seen in what follows, the relation $(1=) \rho \geq \tau>0$ is crucial for ensuring the well-posedness of the resulting model, in agreement with the discussion in \cite{BT} (particularly eqs. (14) and (22)--(25) therein with $\alpha=\beta$) concerning the relevant thermodynamical conditions to ensure nonnegativity of the internal work and guarantee a nonnegative rate of energy dissipation. The constitutive law \eqref{eq:4} generalizes the one proposed by Caputo and Mainardi in \cite{CM} in that we admit $\lambda \geq 0$, motivated by the fact that formally setting $\alpha=0$ in \eqref{eq:4} reduces it to Hooke's constitutive law. As a matter of fact, we shall assume, more generally, that
\begin{alignat}{2}\label{coeff-ass} \begin{aligned} \varrho \in \mathrm{L}^\infty(\Omega), \qquad \mbox{and there exists a positive constant $\varrho_0$ such that $\varrho(\mathbf{x}) \geq \varrho_0$ a.e. in $\Omega$},\\ \mu \in \mathrm{L}^\infty(\Omega), \qquad \mbox{and there exists a positive constant $\mu_0$ such that $\mu(\mathbf{x}) \geq \mu_0$ a.e. in $\Omega$},\\ \lambda \in \mathrm{L}^\infty(\Omega), \qquad \mbox{and $\lambda(\mathbf{x}) \geq 0$ a.e. in $\Omega$}, \end{aligned} \end{alignat}
so as to admit spatially heterogeneous viscoelastic materials. With straightforward modifications all of our results extend to the case of Hooke's model corresponding to $\alpha=0$ and the classical Zener model corresponding to $\alpha=1$; we shall therefore confine ourselves to the, technically more involved, fractional-order setting, when $\alpha \in (0,1)$.
Zener's constitutive law aims to overcome some of the shortcomings of the Maxwell and Kelvin--Voigt models: the Maxwell model does not describe creep or recovery, and the Kelvin--Voigt model does not describe stress relaxation. Zener's constitutive law is the simplest model that predicts both phenomena. Our aim here is to explore the well-posedness of the model, focusing in particular on its refinement, where the first time-derivative $D_t$ featuring in the constitutive law is replaced by a fractional-order time-derivative $D_t^\alpha$, with $\alpha \in (0,1)$. We emphasize that the equation of motion \eqref{eq:1}, expressing balance of the linear momentum in terms of the Cauchy stress, remains unchanged: it is only the constitutive law relating the stress tensor to the strain tensor, which encodes the specific properties of the material, that is altered here by admitting the fractional range $\alpha \in (0,1)$.
The fractional derivative $D^\alpha_t$ of order $\alpha \in (0,1)$ appearing in \eqref{eq:4} is in the sense of Caputo. It is understood to be acting on $3$-component vector-functions and $3 \times 3$-matrix-valued functions componentwise. In particular, for a scalar-valued function $f \in \mbox{AC}([0,T])$,
\[ (D^\alpha_t f)(t):= \frac{1}{\Gamma(1-\alpha)} \int_0^t \frac{\dot{f}(s)}{(t-s)^\alpha} \,\mathrm{d} s,\qquad t \in (0,T].\]
{\color{black} The partial differential equation \eqref{eq:1} coupled with the constitutive law \eqref{eq:4} is referred to as the \textit{fractional Zener wave equation}. Wave propagation in viscoelastic media governed by the fractional Zener constitutive law in one space dimension was first considered by Caputo and Mainardi \cite{CM}. The existence and uniqueness of the fundamental solution of a generalized Cauchy problem for the fractional Zener wave equation were proved in \cite{KOZ2010}, and an explicit expression for the solution was also given (cf. Theorem 4.2 in \cite{KOZ2010}). The existence and uniqueness of solutions for a generalization of the fractional Zener wave equation proposed by Enelund and Josefson \cite{EJ}, in the case of mixed homogeneous Dirichlet/nonhomogeneous Neumann boundary conditions on bounded polytopal domains in two and three space dimensions, were proved by Saedpanah in \cite{FS2014}; and, under suitable restrictions on the domain $\Omega$ and the data, weak solutions of the model were shown in \cite{FS2014} to possess additional regularity. In an earlier work, Larsson and Saedpanah \cite{LS} showed the well-posedness of the homogeneous Dirichlet problem for this model using techniques from linear semigroup theory. The weak formulation of the evolution equation \eqref{eq:8} that we study here differs from the one considered in \cite{FS2014}; indeed, equation (2.7)$_1$ in \cite{FS2014} was arrived at by using Laplace transform techniques on the constitutive law to obtain an explicit expression for the stress tensor in terms of the strain tensor, which was then substituted into the equation of motion to eliminate the stress tensor; whereas, as we shall explain below, we Laplace transform the equation of motion as well as the constitutive law and we then eliminate the Laplace transform of the stress tensor from the transformed equation of motion. 
Furthermore, in both \cite{LS} and \cite{FS2014} the fractional derivative featuring in the constitutive law was the left Riemann--Liouville derivative rather than the Caputo derivative considered here, and the initial response for the stress tensor was assumed to follow Hooke's law.
The aim of the present work is to explore the question of existence and uniqueness of weak solutions to the initial-boundary-value problem \eqref{eq:1}--\eqref{eq:4} without the additional assumption that the initial response for the stress follows Hooke's law. In the absence of this extra assumption on the initial stress the analysis of the model is considerably more complicated; nevertheless, we are able to show (cf. Theorem \ref{th:1} below) that the model \eqref{eq:1}--\eqref{eq:4} admits a unique weak solution for any $\mathbf{f} \in \mathrm{L}^2(0,T;[\mathrm{L}^2(\Omega)]^3)$, and arbitrary initial data $\mathbf{g} \in [\mathrm{H}^1_0(\Omega)]^3$, $\mathbf{h} \in [\mathrm{L}^2(\Omega)]^3$, and $\mathbf{S} = \mathbf{S}^{\rm T} \in [\mathrm{L}^2(\Omega)]^{3 \times 3}$, without any additional restrictions on the choice of $\mathbf{S}$.
To this end, our first objective is to transform the fractional Zener model \eqref{eq:1}--\eqref{eq:4} to a form in which it is amenable to mathematical analysis. We shall therefore Laplace-transform the equation of motion \eqref{eq:1} (where it will be understood that the source term $\mathbf{f}$ is extended by $\mathbf{0}$ from $(0,T] \times \Omega$ to $(0,\infty)\times \Omega$), as well as the constitutive law \eqref{eq:4} with respect to the temporal variable $t$ (again with the understanding that, for the moment, $t \in (0,\infty)$ rather than $t\in (0,T]$ with $T<\infty$). This will enable us to eliminate the stress tensor $\boldsymbol{\sigma}$ from the equation of motion in terms of the strain tensor $\boldsymbol{\varepsilon}(\mathbf{u})$, resulting in a second-order nonlocal evolution equation (cf. \eqref{eq:8} below), which will then be the focus of our subsequent analysis. We shall concentrate on the proof of existence and uniqueness of weak solutions, and the continuous dependence of weak solutions on the data. Specifically, we shall show that the constitutive law \eqref{eq:4}, when coupled with \eqref{eq:1}--\eqref{eq:3}, gives rise to a well-posed mathematical model: by using a compactness argument we shall prove the existence of a weak solution to the model and will prove that weak solutions thus constructed satisfy an energy inequality, which bounds appropriate norms of the solution in terms of norms of the initial data and the source term; we shall also show that weak solutions are unique. }
\section{Zener's model as a fractional evolution equation} The aim of this section is to merge the equation of motion \eqref{eq:1} and the constitutive law \eqref{eq:4} into a single evolution equation, which we shall then subject to mathematical analysis. We proceed by eliminating the stress tensor $\boldsymbol{\sigma}$ from \eqref{eq:1} by Laplace transforming both \eqref{eq:1} and the constitutive law \eqref{eq:4}.
The Laplace transform with respect to the variable $t$ of a function $f$ defined on $(0,\infty)$ such that
$\int_0^\infty |f(t)|\, \mathrm{e}^{-at} \,\mathrm{d} t < \infty$ for some $a \in \mathbb{R}$, is defined by
\[ \mathcal{L}(f)(p) = \tilde{f}(p) := \int_0^\infty f(t)\, \mathrm{e}^{-pt} \,\mathrm{d} t,\qquad \mbox{for $p \in \mathbb{C}$ with $\mathrm{Re}\,p \geq a$}.\]
Then, for any $f \in \mathrm{C}([0,\infty))\cap \mathrm{C}^1((0,\infty))$ such that $\int_0^\infty (|\dot{f}(t)| + |f(t)|)\, \mathrm{e}^{-at} \,\mathrm{d} t< \infty$ for some $a \in \mathbb{R}$, straightforward calculations yield that \begin{align*} \mathcal{L}(\dot{f})(p) &= p \tilde{f}(p) - f(0),\qquad \mathrm{Re}\,p \geq a, \end{align*}
where the symbol $\cdot$ over a $t$-dependent function denotes its derivative with respect to $t$, and, similarly, $\cdot\cdot$ over a $t$-dependent function denotes its second derivative with respect to $t$. As
\[\mathcal{L}((\cdot)^{-\alpha})(p) = \Gamma(1-\alpha) p^{\alpha - 1},\qquad \!\mathrm{Re}\,p > 0,\quad \alpha \in (0,1),\]
by noting that
\[ D^\alpha_t f = \frac{1}{\Gamma(1-\alpha)} \left[ \dot f \ast_t (\cdot)^{-\alpha}\right], \]
where the convolution $\ast_t$ is defined by $(f\ast_t g)(t):= \int_0^t f(s) g(t-s) \,\mathrm{d} s$, we have that
\begin{align*} \mathcal{L}(D^\alpha_t f)(p) &= \frac{1}{\Gamma(1-\alpha)} \mathcal{L}\left[ \dot{f} \ast_t (\cdot)^{-\alpha}\right](p) = \frac{1}{\Gamma(1-\alpha)} \mathcal{L}(\dot{f})(p)\, \mathcal{L}({(\cdot)}^{-\alpha})(p)\\ &= p^\alpha \tilde{f}(p) - p^{\alpha-1} f(0),\qquad \mathrm{Re}\,p \geq a, \quad \alpha \in (0,1). \end{align*}
Consider the Mittag--Leffler function
\[ E_{\alpha,\beta}(z):= \sum_{k=0}^\infty \frac{z^k}{\Gamma(\alpha k + \beta)}, \qquad z \in \mathbb{C}, \quad \alpha>0, \quad \beta >0. \]
Letting
\[ e_\alpha(t,\gamma):= E_{\alpha,1}(-\gamma t^\alpha),\qquad t \in [0,\infty),\quad \gamma>0,\]
one has that
\begin{align}\label{eq:5} \mathcal{L}(e_\alpha(\cdot,\gamma))(p) = \frac{p^{\alpha-1}}{p^\alpha + \gamma} \qquad \mbox{for $\mathrm{Re}\, p > \gamma^{\frac{1}{\alpha}}$.} \end{align}
Henceforth, for the sake of simplicity, we shall write $e_{\alpha,\gamma}(t)$ instead of $e_\alpha(t,\gamma)$, and restrict ourselves to the range $\alpha \in (0,1)$ of relevance to us in the present context. As $e_{\alpha,\gamma}(0)=1$, it follows that
\[ \mathcal{L}(\dot{e}_{\alpha,\gamma})(p) = p\, \tilde{e}_{\alpha,\gamma}(p) - 1 = \frac{p^{\alpha}}{p^\alpha + \gamma} - \mathcal{L}(\delta),\qquad \mathrm{Re}\, p > \gamma^{\frac{1}{\alpha}},\] where $\delta$ is the Dirac distribution concentrated at $t=0$. Thus, now with the Laplace transform acting in the sense of tempered distributions\footnote{For a tempered distribution $f \in \mathcal{S}'$, with $\mbox{supp} (f) \subset [0,\infty)$, we define $\mathcal{L}(f)(p) = \tilde{f}(p) := \langle f , \eta\, \mathrm{e}^{-p\cdot} \rangle$, for $\mbox{Re } p > 0$, where $\eta \in C^\infty(\mathbb{R})$ is such that $\eta(t) \equiv 0$ for $t \leq -2$ and $\eta(t)\equiv 1$ for $t \geq -1$.}
\[ \mathcal{L}(\dot{e}_{\alpha,\gamma} + \delta)(p) = \frac{p^{\alpha}}{p^\alpha + \gamma}, \qquad \mathrm{Re}\, p > \gamma^{\frac{1}{\alpha}}.\]
As a consequence of this identity we have that
\begin{align}\label{eq:6} \begin{aligned} \mathcal{L}^{-1}\left(\frac{1 + \tau^\alpha p^\alpha}{1 + p^\alpha}\right) &= \mathcal{L}^{-1}\left(1 + (\tau^\alpha - 1) \frac{p^\alpha}{1+ p^\alpha}\right) = \delta + (\tau^\alpha-1)\mathcal{L}^{-1}\left(\frac{p^\alpha}{p^\alpha + 1}\right)\\ & = \delta + (\tau^\alpha-1) (\dot{e}_{\alpha,1} + \delta). \end{aligned} \end{align}
Following these preparatory considerations, we Laplace-transform the constitutive law \eqref{eq:4}, which yields
\begin{align*} \tilde{\boldsymbol{\sigma}} + \tau^\alpha \mathcal{L}(D^\alpha_t \boldsymbol{\sigma}) = 2\mu \boldsymbol{\varepsilon}(\tilde{\mathbf{u}}) + \lambda \tr(\boldsymbol{\varepsilon}(\tilde{\mathbf{u}})) \mathbf{I}\, +\, 2\mu \mathcal{L}(D^\alpha_t \boldsymbol{\varepsilon}(\mathbf{u})) + \lambda \mathcal{L}(D^\alpha_t (\tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I})),\qquad \tau \in (0,1]. \end{align*}
Hence, \begin{align*} &\tilde{\boldsymbol{\sigma}}(p) + \tau^\alpha (p^\alpha \tilde{\boldsymbol{\sigma}}(p) - p^{\alpha-1}\mathbf{S}) \\\quad &= 2\mu \boldsymbol{\varepsilon}(\tilde{\mathbf{u}}) + \lambda \tr(\boldsymbol{\varepsilon}(\tilde{\mathbf{u}})) \mathbf{I}\, +\, 2\mu (p^\alpha \boldsymbol{\varepsilon}(\tilde{\mathbf{u}}) - p^{\alpha-1}\boldsymbol{\varepsilon}(\mathbf{g})) + \lambda (p^\alpha \tr(\boldsymbol{\varepsilon}(\tilde{\mathbf{u}})) \mathbf{I} - p^{\alpha-1} \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}). \end{align*} Equivalently, \begin{align*} &(1 + \tau^\alpha p^\alpha) \tilde{\boldsymbol{\sigma}}(p) = (1+ p^\alpha)(2\mu \boldsymbol{\varepsilon}(\tilde{\mathbf{u}}) + \lambda \tr(\boldsymbol{\varepsilon}(\tilde{\mathbf{u}})) \mathbf{I})\, +\, p^{\alpha-1} (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}), \end{align*} and therefore \begin{align}\label{eq:sig-laplace} &\tilde{\boldsymbol{\sigma}}(p) = \frac{1+ p^\alpha}{1 + \tau^\alpha p^\alpha}(2\mu \boldsymbol{\varepsilon}(\tilde{\mathbf{u}}) + \lambda \tr(\boldsymbol{\varepsilon}(\tilde{\mathbf{u}})) \mathbf{I})\, +\, \frac{p^{\alpha-1}}{1 + \tau^\alpha p^\alpha} (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}). \end{align}
Consequently, and by Laplace-transforming the equation of motion \eqref{eq:1}, we deduce that
\[ \varrho \mathcal{L}(\ddot{\mathbf{u}}) = \frac{1+ p^\alpha}{1 + \tau^\alpha p^\alpha}\Div(2\mu \boldsymbol{\varepsilon}(\tilde{\mathbf{u}}) + \lambda \tr(\boldsymbol{\varepsilon}(\tilde{\mathbf{u}})) \mathbf{I})\, +\, \frac{p^{\alpha-1}}{1 + \tau^\alpha p^\alpha} \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}) + \tilde{\mathbf{f}},\]
and, upon multiplying this equality by $\frac{1+ \tau^\alpha p^\alpha}{1 + p^\alpha}$, we have that
\begin{align*}
&\varrho \frac{1+ \tau^\alpha p^\alpha}{1 + p^\alpha}\mathcal{L}(\ddot{\mathbf{u}}) = \Div(2\mu \boldsymbol{\varepsilon}(\tilde{\mathbf{u}}) + \lambda \tr(\boldsymbol{\varepsilon}(\tilde{\mathbf{u}})) \mathbf{I})\,\\
&\qquad +\, \frac{p^{\alpha-1}}{1 + p^\alpha} \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}) + \frac{1+ \tau^\alpha p^\alpha}{1 + p^\alpha} \tilde{\mathbf{f}}. \end{align*}
Hence, by inverse-Laplace-transforming this equality and applying the convolution theorem for the Laplace transform, we obtain
\begin{align*}
&\varrho \mathcal{L}^{-1}\left(\frac{1+ \tau^\alpha p^\alpha}{1 + p^\alpha}\right) \ast_t \ddot{\mathbf{u}} = \Div(2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I})\, \\
&\qquad +\, \mathcal{L}^{-1}\left(\frac{p^{\alpha-1}}{1 + p^\alpha}\right) \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}) + \mathcal{L}^{-1}\left(\frac{1+ \tau^\alpha p^\alpha}{1 + p^\alpha}\right) \ast_t \mathbf{f}. \end{align*}
Using \eqref{eq:6} and \eqref{eq:5} we then deduce that \begin{align*} \varrho (\delta + (\tau^\alpha-1) \left(\dot{e}_{\alpha,1} + \delta\right))\ast_t \ddot{\mathbf{u}} &= \Div(2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I})\, \\ &\quad +\, e_{\alpha,1}\, \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I})\\ &\quad + (\delta + (\tau^\alpha-1) \left(\dot{e}_{\alpha,1} + \delta\right))\ast_t \mathbf{f}, \end{align*}
and therefore \begin{align*} \varrho \tau^\alpha \ddot \mathbf{u} + \varrho \,(\tau^\alpha-1) \dot{e}_{\alpha,1} \ast_t \ddot{\mathbf{u}} &= \Div(2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I})\\ &\quad +\, e_{\alpha,1}\, \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I})\\ &\quad + \tau^\alpha \mathbf{f} + (\tau^\alpha-1) \dot{e}_{\alpha,1} \ast_t \mathbf{f}. \end{align*}
We now focus on the second term on the left-hand side of this equality. By noting that
\[ (f \ast_t \dot{g})(t) = \frac{\,\mathrm{d}}{\,\mathrm{d} t}(f\ast_t g)(t)- f(t)g(0)\]
we deduce (by suppressing the $\mathbf{x}$-dependence of $\mathbf{u}$ for the sake of notational simplicity) that \[ (\dot{e}_{\alpha,1} \ast_t \ddot{\mathbf{u}})(t) = \frac{\partial}{\partial t}(\dot{e}_{\alpha,1}\ast_t \dot{\mathbf{u}})(t)- \dot{e}_{\alpha,1}(t) \dot{\mathbf{u}}(0) = \frac{\partial}{\partial t}(\dot{e}_{\alpha,1}\ast_t \dot{\mathbf{u}})(t)- \dot{e}_{\alpha,1}(t) \mathbf{h}.\]
Consequently,
\begin{align*} &\varrho \tau^\alpha \ddot \mathbf{u} + \varrho (\tau^\alpha-1)\left[\frac{\partial}{\partial t}(\dot{e}_{\alpha,1}\ast_t \dot{\mathbf{u}})- \dot{e}_{\alpha,1} \mathbf{h} \right] \\ &\qquad = \Div(2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I})\, +\, e_{\alpha,1}\, \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I})\\ &\qquad \quad + \tau^\alpha \mathbf{f} + (\tau^\alpha-1) \dot{e}_{\alpha,1} \ast_t \mathbf{f}, \end{align*} which upon rearrangement yields \begin{align}\label{eq:7} \begin{aligned} &\tau^\alpha\, \varrho\ddot \mathbf{u} + (1-\tau^\alpha) \frac{\partial}{\partial t}(-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}) \\ &\qquad = \Div(2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I})\, \\ &\qquad \quad + (\tau^\alpha-1)\, \dot{e}_{\alpha,1}\, \varrho\mathbf{h} \,+\, e_{\alpha,1}\, \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I})\\ &\qquad \quad + \tau^\alpha \mathbf{f} + (\tau^\alpha-1) \dot{e}_{\alpha,1} \ast_t \mathbf{f}. \end{aligned} \end{align} By introducing the function
\[ \mathbf{b}:= (\tau^\alpha-1)\, \dot{e}_{\alpha,1}\, \varrho \mathbf{h} \,+\, e_{\alpha,1}\, \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}) + \tau^\alpha \mathbf{f} + (\tau^\alpha-1) \dot{e}_{\alpha,1} \ast_t \mathbf{f}\]
that collects the terms involving the initial data $\mathbf{g}$, $\mathbf{h}$, $\mathbf{S}$ and the load vector $\mathbf{f}$ on the right-hand side of \eqref{eq:7}, the equation \eqref{eq:7} takes the following more compact form:
\begin{align}\label{eq:8} &\tau^\alpha \varrho \ddot \mathbf{u} + (1-\tau^\alpha)\, \frac{\partial}{\partial t}(-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}})(t) = \Div(2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I})\, + \, \mathbf{b}. \end{align}
We shall refer to equation \eqref{eq:8} as the fractional Zener wave equation in three-dimensional space.
Next we shall derive a formal energy identity for the initial-boundary-value problem \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8}.
\section{Formal energy estimate for the model} We begin the analysis of the problem by establishing a formal energy inequality, which we shall later rigorously prove by means of an abstract Galerkin approximation. We shall then use the energy inequality satisfied by the sequence of Galerkin approximations in conjunction with a compactness argument to show the existence of weak solutions to the initial-boundary-value problem \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8} under consideration, and we shall also prove the uniqueness of weak solutions. For the moment, though, we shall postulate the existence of sufficiently smooth solutions in order to proceed with the formal derivation of an energy identity for the model.
To this end we shall take the scalar product of \eqref{eq:8} with $\dot{\mathbf{u}}$, integrate the resulting equality over $\Omega$, and perform partial integration with respect to the spatial variable $\mathbf{x}$, noting that $\mathbf{u}$, and therefore also $\dot{\mathbf{u}}$, satisfies a homogeneous Dirichlet boundary condition on $(0,T] \times \partial \Omega$. In order to avoid notational clutter, whenever the function $\mathbf{f}$ is extended by $\mathbf{0}$ from $(0,T] \times \Omega$ to $(0,\infty) \times \Omega$ the extended function will be denoted by the same symbol as the original function.
As will be seen below, it is significant for the derivation of the energy identity, which guarantees continuous dependence of the solution on the data, that: \begin{itemize} \item $\tau \in (0,1]$, by hypothesis; and \item $e_{\alpha,1}\geq 0$, $-\dot e_{\alpha,1} \geq 0$ and $\ddot e_{\alpha,1} \geq 0$ on $(0,T]$, with $\dot e_{\alpha,1} \in {\rm L}^1((0,T))$ and $\ddot{e}_{\alpha,1} \in {\rm L}^1_{\mathrm{loc}}((0,T))$ for all $T>0$. \end{itemize} We note in passing that by a similar reasoning the discussion below can be replicated in the case of the standard (integer-order) Zener model, corresponding to $\alpha=1$, but since the analysis of that model is much simpler we shall not include it here and will confine ourselves to the fractional-order Zener model, with $\alpha \in (0,1)$. An identical comment applies to the case of a Hookean solid, corresponding to taking $\alpha =0$ in \eqref{eq:4}.
By formally testing the equation \eqref{eq:8} with $\dot{\mathbf{u}}$ and noting that $\dot{\mathbf{u}}$ satisfies a homogeneous Dirichlet boundary condition on $(0,T] \times \partial \Omega$ we deduce, by partial integration with respect to the spatial variable $\mathbf{x}$, that, for any $t \in (0,T]$, \begin{align*}
&\frac{\tau^\alpha}{2} \frac{\,\mathrm{d}}{\,\mathrm{d} t}\int_\Omega \varrho |\dot \mathbf{u}(t,\mathbf{x})|^2 \,\mathrm{d} \mathbf{x} + (1-\tau^\alpha)\, \int_\Omega \varrho \frac{\partial}{\partial t}(-\dot{e}_{\alpha,1}\ast_t \dot{\mathbf{u}})(t,\mathbf{x})\cdot \dot{\mathbf{u}}(t,\mathbf{x}) \,\mathrm{d} \mathbf{x} \\
&\qquad + \frac{1}{2} \frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x} \, = \, \int_\Omega \mathbf{b}(t,\mathbf{x}) \cdot \dot{\mathbf{u}}(t,\mathbf{x}) \,\mathrm{d} \mathbf{x}. \end{align*} Hence, by integration over $t \in (0,T]$ and noting the initial conditions \eqref{eq:2}, we deduce that \begin{align}\label{eq:9} \begin{aligned}
&\frac{\tau^\alpha }{2} \int_\Omega \varrho |\dot \mathbf{u}(t,\mathbf{x})|^2 \,\mathrm{d} \mathbf{x} + (1-\tau^\alpha)\, \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho} \dot{\mathbf{u}})(s,\mathbf{x})\cdot \sqrt{\varrho} \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s \\
&\quad \quad + \frac{1}{2} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x} \\
&= \, \int_0^t \int_\Omega \mathbf{b} \cdot \dot{\mathbf{u}} (s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s + \frac{\tau^\alpha }{2} \int_\Omega \varrho |\mathbf{h}(\mathbf{x})|^2 \,\mathrm{d} \mathbf{x} + \frac{1}{2} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x}. \end{aligned} \end{align}
To proceed, we need to show that the second term on the left-hand side of \eqref{eq:9} is nonnegative, and that $\dot{\mathbf{u}}$ can be eliminated from the right-hand side by absorbing it into the terms appearing on the left-hand side. Once the nonnegativity of the second term on the left-hand side of \eqref{eq:9} has been verified, the identity \eqref{eq:9} can be viewed as expressing balance of the total energy. In particular, when the load vector $\mathbf{f}=\mathbf{0}$ and the initial data are such that $\mathbf{b}=\mathbf{0}$, we have that
\begin{align}\label{eq:10} \begin{aligned}
&\frac{\tau^\alpha }{2} \int_\Omega \varrho |\dot \mathbf{u}(t,\mathbf{x})|^2 \,\mathrm{d} \mathbf{x} + (1-\tau^\alpha)\, \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho} \dot{\mathbf{u}})(s,\mathbf{x})\cdot \sqrt{\varrho} \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s \\
&\qquad + \frac{1}{2}\int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x} \, \\
&= \frac{\tau^\alpha }{2} \int_\Omega \varrho \,|\mathbf{h}(\mathbf{x})|^2 \,\mathrm{d} \mathbf{x}
+ \frac{1}{2} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x}, \qquad t \in (0,T]. \end{aligned} \end{align} Even more specifically, if $\mathbf{f}=\mathbf{0}$ and $\tau=1$, and $\mathbf{S}$ is related to $\boldsymbol{\varepsilon}(\mathbf{g})$ through Hooke's law (i.e., $\mathbf{S} = 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g}))\,\mathbf{I}$), whereby also $\mathbf{b}= \mathbf{0}$, then the second term on the left-hand side of \eqref{eq:9} (which, thanks to Lemma \ref{le:1} below, can be viewed as an energy dissipation term) is absent, as is the first term on the right-hand side of \eqref{eq:9}, and we have conservation of the total energy:
\[ \mathcal{E}(t):= \frac{1}{2} \int_\Omega \varrho |\dot \mathbf{u}(t,\mathbf{x})|^2 \,\mathrm{d} \mathbf{x}
+ \frac{1}{2}\int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x} = \mathcal{E}(0) \quad \forall\,t \in [0,T].\]
Returning to the general case, to show the nonnegativity of the second term on the left-hand side of \eqref{eq:9} we invoke the following result (cf. Lemma 1.7.2 in \cite{Siskova}, whose proof is based on the identity stated in Lemma 2.3.1 in the work of Zacher \cite{Zacher}; see also identity (9) in \cite{Zacher0}).
\begin{lemma}\label{le:1}
Let $\mathcal{H}$ be a separable Hilbert space over the field of real numbers, with scalar product $(\cdot,\cdot)_{\mathcal{H}}$ and norm $\|\cdot\|_{\mathcal{H}}$, and let $T > 0$. Then, for any $k \in \mathrm{L}^1(0,T)$ such that $k \geq 0$, $\dot{k} \in \mathrm{L}^1_{\rm loc}(0,T)$, and $\dot{k} \leq 0$, and any $v \in \mathrm{L}^2((0, T);\mathcal{H})$, the following inequality holds:
\begin{align*} \int_0^t \left(\frac{\,\mathrm{d}}{\,\mathrm{d} s}(k \ast_s v)(s), v(s)\right)_{\mathcal{H}} \,\mathrm{d} s
& \geq \frac{1}{2} (k \ast_t \|v(\cdot)\|^2_{\mathcal{H}})(t)
+ \frac{1}{2}\int_0^t k(s)\|v(s)\|^2_{\mathcal{H}} \,\mathrm{d} s
\qquad \mbox{for all $t \in (0,T]$}, \end{align*} each of the two terms on the right-hand side of the inequality being nonnegative. \end{lemma}
Taking $k(t)=-\dot{e}_{\alpha,1}(t) (>0)$, $t \in (0,T]$, $\mathcal{H}=\mathrm{L}^2_\varrho(\Omega)$, equipped with the inner product and norm (and analogous notations for norms of weighted Lebesgue spaces, used in what follows, with weight functions $1/\varrho$, $\mu$, $1/\mu$, and $\lambda$ instead of $\varrho$) defined by
\[(\mathbf{v}, \mathbf{w})_{\mathrm{L}^2_\varrho(\Omega)}:=\int_\Omega \varrho(\mathbf{x})\, \mathbf{v}(\mathbf{x}) \cdot \mathbf{w}(\mathbf{x}) \,\mathrm{d} \mathbf{x}, \qquad \|\mathbf{v}\|_{L^2_\varrho(\Omega)}:=(\mathbf{v}, \mathbf{v})^{\frac{1}{2}}_{\mathrm{L}^2_\varrho(\Omega)},\]
\noindent and $v = \dot{\mathbf{u}}$ in Lemma \ref{le:1}, we deduce that the second term on the left-hand side of \eqref{eq:9} is nonnegative.
It remains to show that the function $\dot{\mathbf{u}}$, appearing in the integrand of the first integral on the right-hand side, can be absorbed into the left-hand side. To this end, we recall that
\[ \mathbf{b}:= (\tau^\alpha-1)\, \dot{e}_{\alpha,1}\, \varrho \mathbf{h} \,+\, e_{\alpha,1}\, \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}) + \tau^\alpha \mathbf{f} + (\tau^\alpha-1) \dot{e}_{\alpha,1} \ast_t \mathbf{f},\]
and we denote by $\mathbf{T}_1$, $\mathbf{T}_2$, $\mathbf{T}_3$, and $\mathbf{T}_4$, respectively, the four terms whose sum is $\mathbf{b}$.
Clearly, because the function $t \in [0,\infty) \mapsto e_{\alpha,1}(t)$ is positive, strictly monotonic decreasing, and $e_{\alpha,1}(0)=1$, we have by the Cauchy--Schwarz inequality that
\begin{align*} \int_0^t \int_\Omega \mathbf{T}_1(s,\mathbf{x}) &\cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s \leq (1-\tau^\alpha)\, \int_0^t (-\dot{e}_{\alpha,1}(s))
\int_\Omega \varrho |\mathbf{h}(\mathbf{x})| |\dot{\mathbf{u}}(s,\mathbf{x})| \,\mathrm{d} \mathbf{x} \,\mathrm{d} s\\ &\leq (1-\tau^\alpha)\, \int_0^t (-\dot{e}_{\alpha,1}(s))\,
\|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)}\, \|\dot{\mathbf{u}}(s,\cdot)\|_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s\\
&= (1-\tau^\alpha)\, \|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)} \int_0^t (-\dot{e}_{\alpha,1}(s))\,
\, \|\dot{\mathbf{u}}(s,\cdot)\|_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s \\ &
\leq (1-\tau^\alpha)\, \|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)} \left(\int_0^t (-\dot{e}_{\alpha,1}(s)) \,\mathrm{d} s\right)^{\frac{1}{2}}
\left(\int_0^t (-\dot{e}_{\alpha,1}(s)) \|\dot{\mathbf{u}}(s,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s\right)^{\frac{1}{2}}\\ &
\leq (1-\tau^\alpha)\, \|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)} \left(e_{\alpha,1}(0) - e_{\alpha,1}(t)\right)^{\frac{1}{2}}
\left(\int_0^t (-\dot{e}_{\alpha,1}(s)) \|\dot{\mathbf{u}}(s,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s\right)^{\frac{1}{2}}. \end{align*} By bounding the nonnegative factor $\left(e_{\alpha,1}(0) - e_{\alpha,1}(t)\right)^{\frac{1}{2}}$ above by $1$ and applying Young's inequality, we have, for any $\delta_1>0$, to be fixed below, that \begin{align}\label{eq:11} \begin{aligned} \int_0^t \int_\Omega \mathbf{T}_1(s,\mathbf{x}) &\cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s \leq
(1-\tau^\alpha)\, \|\mathbf{h}\|_{\mathrm{L}^2_\varrho (\Omega)}
\left(\int_0^t (-\dot{e}_{\alpha,1}(s)) \|\dot{\mathbf{u}}(s,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s\right)^{\frac{1}{2}}\\ &\leq
\frac{(1-\tau^\alpha)^2}{4\delta_1\tau^\alpha } \|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)}^2 +
\tau^\alpha \delta_1 \int_0^t (-\dot{e}_{\alpha,1}(s)) \|\dot{\mathbf{u}}(s,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s. \end{aligned} \end{align}
Next, by partial integration with respect to the temporal variable followed by partial integration with respect to the spatial variable, we have, upon defining
\[ \boldsymbol{\kappa}_0:= \tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I},\]
that \begin{align*} &\int_0^t \int_\Omega \mathbf{T}_2(s,\mathbf{x}) \cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s = \int_0^t e_{\alpha,1}(s)\frac{\,\mathrm{d}}{\,\mathrm{d} s}\left[\int_\Omega \Div \boldsymbol{\kappa}_0(\mathbf{x})\cdot \mathbf{u}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x}\right] \,\mathrm{d} s\\ &\qquad = \left[e_{\alpha,1}(s) \int_\Omega \Div \boldsymbol{\kappa}_0(\mathbf{x}) \cdot \mathbf{u}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x}\right]_{s=0}^{s=t} - \int_0^t \dot{e}_{\alpha,1}(s) \left[\int_\Omega \Div \boldsymbol{\kappa}_0(\mathbf{x})\cdot \mathbf{u}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x}\right] \,\mathrm{d} s\\ &\qquad = \left[-e_{\alpha,1}(s) \int_\Omega \boldsymbol{\kappa}_0(\mathbf{x}) : \nabla \mathbf{u}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x}\right]_{s=0}^{s=t} + \int_0^t \dot{e}_{\alpha,1}(s) \left[\int_\Omega \boldsymbol{\kappa}_0(\mathbf{x}) : \nabla \mathbf{u}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x}\right] \,\mathrm{d} s. \end{align*}
Now, letting $\mathbb{R}^{3 \times 3}_{\mathrm{sym}}$ denote the set of all symmetric $3 \times 3$ matrices with real entries, and noting that for any $A \in \mathbb{R}^{3 \times 3}_{\mathrm{sym}}$ and any $B \in \mathbb{R}^{3 \times 3}$ one has that $A:B = A: \frac{1}{2}(B + B^{\rm T})$, we deduce that \begin{align*} &\int_0^t \int_\Omega \mathbf{T}_2(s,\mathbf{x}) \cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s = \int_0^t \dot{e}_{\alpha,1}(s) \left[\int_\Omega \boldsymbol{\kappa}_0(\mathbf{x}) : \boldsymbol{\varepsilon}(\mathbf{u}(s,\mathbf{x})) \,\mathrm{d} \mathbf{x}\right] \,\mathrm{d} s\\ & \qquad + \left[e_{\alpha,1}(0) \int_\Omega \boldsymbol{\kappa}_0(\mathbf{x}) : \boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x})) \,\mathrm{d} \mathbf{x}\right] - \left[e_{\alpha,1}(t) \int_\Omega \boldsymbol{\kappa}_0(\mathbf{x}) : \boldsymbol{\varepsilon}(\mathbf{u}(t,\mathbf{x})) \,\mathrm{d} \mathbf{x}\right] \\
&\leq \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \int_0^t (-\dot{e}_{\alpha,1}(s)) \|\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))\|_{\mathrm{L}^2_\mu(\Omega)} \,\mathrm{d} s
+ \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \|\boldsymbol{\varepsilon}(\mathbf{g})\|_{\mathrm{L}^2_\mu(\Omega)}\\
&\qquad + \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|_{\mathrm{L}^2_\mu(\Omega)}, \end{align*}
where in the transition to the right-hand side of the last inequality we have used that $e_{\alpha,1}(0)=1$ and that $t \in [0,\infty) \mapsto e_{\alpha,1}(t)$ is positive and monotonic decreasing. Hence, by the Cauchy--Schwarz inequality, and with a suitable real number $\delta_2>0$, to be fixed below,
\begin{align}\label{eq:12} \begin{aligned} &\int_0^t \int_\Omega \mathbf{T}_2(s,\mathbf{x}) \cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s\\
&\quad\leq \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \left(\int_0^t (-\dot{e}_{\alpha,1}(s))\,\mathrm{d} s\right)^{\frac{1}{2}}
\left(\int_0^t (-\dot{e}_{\alpha,1}(s)) \|\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)}\,\mathrm{d} s\right)^{\frac{1}{2}} \\ & \qquad
+ \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \|\boldsymbol{\varepsilon}(\mathbf{g})\|_{\mathrm{L}^2_\mu(\Omega)}
+ \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|_{\mathrm{L}^2_\mu(\Omega)}\\
&\quad\leq \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \left(\int_0^t (-\dot{e}_{\alpha,1}(s)) \|\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)}\,\mathrm{d} s\right)^{\frac{1}{2}} \\ & \qquad
+ \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \|\boldsymbol{\varepsilon}(\mathbf{g})\|_{\mathrm{L}^2_{\mu}(\Omega)}
+ \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)} \|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|_{\mathrm{L}^2_\mu(\Omega)}\\
&\quad\leq \delta_2 \int_0^t (-\dot{e}_{\alpha,1}(s)) \|\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)}\,\mathrm{d} s
+ \frac{1}{4\delta_2}\|\boldsymbol{\kappa}_0\|^2_{\mathrm{L}^2_{1/\mu}(\Omega)}\\
&\qquad + \delta_2\|\boldsymbol{\varepsilon}(\mathbf{g})\|_{\mathrm{L}^2_{\mu} (\Omega)}^2 + \frac{1}{4\delta_2}\|\boldsymbol{\kappa}_0\|^2_{\mathrm{L}^2_{1/\mu}(\Omega)} + \delta_2\|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{4\delta_2}\|\boldsymbol{\kappa}_0\|^2_{\mathrm{L}^2_{1/\mu}(\Omega)}. \end{aligned} \end{align}
Next, for a positive real number $\delta_{3}$, to be fixed below,
\begin{align}\label{eq:13} \begin{aligned}
\int_0^t \int_\Omega \mathbf{T}_3(s,\mathbf{x}) \cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s &= \tau^\alpha \int_0^t \int_\Omega \mathbf{f}(s,\mathbf{x}) \cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s
\\& \leq \tau^\alpha \int_0^t \|\mathbf{f}(s)\|_{\mathrm{L}^2_{1/\varrho}(\Omega)} \|\dot{\mathbf{u}}(s)\|_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s
\\& \leq \delta_3 \int_0^t \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s
+ \frac{\tau^{2\alpha}}{4\delta_3}\int_0^t \|\mathbf{f}(s)\|^2_{\mathrm{L}^2_{1/\varrho}(\Omega)} \,\mathrm{d} s. \end{aligned} \end{align}
Finally, by the Cauchy--Schwarz inequality with respect to $\mathbf{x}$, Minkowski's integral inequality, the negativity of
$\dot{e}_{\alpha,1}$, the bound $\|-\dot{e}_{\alpha,1}\|_{\mathrm{L}^1(0,t)} = 1 - e_{\alpha,1}(t) \leq 1$, Young's inequality for the (Laplace) convolution $\ast_t$ (whose proof we have included at the end of this section for the sake of completeness; cf. Lemma \ref{le:2}), and with $\delta_4>0$ to be fixed below, we have that
\begin{align}\label{eq:14} \begin{aligned} \int_0^t \int_\Omega \mathbf{T}_4(s,\mathbf{x}) &\cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s =(1-\tau^\alpha) \int_0^t \int_\Omega (-\dot{e}_{\alpha,1} \ast_s \mathbf{f})(s,\mathbf{x}) \cdot \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s\\
& \leq (1- \tau^\alpha) \int_0^t \|-\dot{e}_{\alpha,1} \ast_s \mathbf{f}(s)\|_{\mathrm{L}^2_{1/\varrho}(\Omega)} \|\dot{\mathbf{u}}(s)\|_{\mathrm{L}^2_{\varrho}(\Omega)} \,\mathrm{d} s\\
&\leq (1- \tau^\alpha) \int_0^t (-\dot{e}_{\alpha,1} \ast_s \|\mathbf{f}\|_{\mathrm{L}^2_{1/\varrho}(\Omega)})(s) \|\dot{\mathbf{u}}(s)\|_{\mathrm{L}^2_{\varrho}(\Omega)} \,\mathrm{d} s\\
& \leq \delta_4 \int_0^t \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s + \frac{(1-\tau^\alpha)^2}{4\delta_4}\int_0^t
|(-\dot{e}_{\alpha,1} \ast_s \|\mathbf{f}\|_{\mathrm{L}^2_{1/\varrho}(\Omega)})(s)|^2 \,\mathrm{d} s\\
& = \delta_4 \int_0^t \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s + \frac{(1-\tau^\alpha)^2}{4\delta_4}
\left[\|(-\dot{e}_{\alpha,1}) \ast_s \|\mathbf{f}\|_{\mathrm{L}^2_{1/\varrho}(\Omega)}\|_{\mathrm{L}^2(0,t)}\right]^2\\
& \leq \delta_4 \int_0^t \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s + \frac{(1-\tau^\alpha)^2}{4\delta_4}
\left[\|(-\dot{e}_{\alpha,1})\|_{\mathrm{L}^1(0,t)} \|\|\mathbf{f}\|_{\mathrm{L}^2_{1/\varrho}(\Omega)}\|_{\mathrm{L}^2(0,t)}\right]^2\\
& \leq \delta_4 \int_0^t \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s + \frac{(1-\tau^\alpha)^2}{4\delta_4}
\int_0^t \|\mathbf{f}(s)\|^2_{\mathrm{L}^2_{1/\varrho}(\Omega)}\,\mathrm{d} s. \end{aligned} \end{align} By substituting \eqref{eq:11}--\eqref{eq:14} into \eqref{eq:9} we deduce that
\begin{align}\label{eq:15} \begin{aligned}
&\frac{\tau^\alpha}{2} \|\dot \mathbf{u}(t)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + (1-\tau^\alpha) \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho}\dot{\mathbf{u}})(s,\mathbf{x})\cdot \sqrt{\varrho} \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} s \,\mathrm{d} \mathbf{x}\\
&\qquad + \|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\\
&\leq \frac{\tau^\alpha }{2} \|\mathbf{h}\|^2_{\mathrm{L}^2_\varrho(\Omega)}
+ \|\boldsymbol{\varepsilon}(\mathbf{g})\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{g}))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\\
&\qquad + \tau^\alpha \delta_1 \int_0^t (-\dot{e}_{\alpha,1}(s)) \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s
+ \frac{(1-\tau^\alpha)^2}{4\delta_1\tau^\alpha } \|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)}^2\\
&\qquad + \delta_2 \int_0^t (-\dot{e}_{\alpha,1}(s)) \|\boldsymbol{\varepsilon}(\mathbf{u}(s))\|^2_{\mathrm{L}^2_\mu(\Omega)}\,\mathrm{d} s
+ \frac{1}{4\delta_2}\|\boldsymbol{\kappa}_0\|^2_{\mathrm{L}^2_{1/\mu}(\Omega)}\\
&\qquad + \delta_2\|\boldsymbol{\varepsilon}(\mathbf{g})\|_{\mathrm{L}^2_\mu(\Omega)}^2 + \frac{1}{4\delta_2}\|\boldsymbol{\kappa}_0\|^2_{\mathrm{L}^2_{1/\mu}(\Omega)}\\
& \qquad + \delta_2\|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{4\delta_2}\|\boldsymbol{\kappa}_0\|^2_{\mathrm{L}^2_{1/\mu}(\Omega)}\\
&\qquad + \delta_3 \int_0^t \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s
+ \frac{\tau^{2\alpha}}{4\delta_3}\int_0^t \|\mathbf{f}(s)\|^2_{\mathrm{L}^2_{1/\varrho}(\Omega)} \,\mathrm{d} s\\
&\qquad + \delta_4 \int_0^t \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s + \frac{(1-\tau^\alpha)^2}{4\delta_4}
\int_0^t \|\mathbf{f}(s)\|^2_{\mathrm{L}^2_{1/\varrho}(\Omega)}\,\mathrm{d} s. \end{aligned} \end{align} We now fix
\[ \delta_1 = \delta_2 = \frac{1}{2},\quad
\delta_3 = \delta_4 = \frac{\tau^\alpha}{4}. \] The inequality \eqref{eq:15} then takes the following form, for $t \in (0,T]$:
\begin{align}\label{eq:16} \begin{aligned}
&\frac{\tau^\alpha}{2} \|\dot \mathbf{u}(t)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + (1-\tau^\alpha) \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho}\dot{\mathbf{u}})(s,\mathbf{x})\cdot \sqrt{\varrho}\dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} s \,\mathrm{d} \mathbf{x}\\
&\qquad + \frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\\
&\leq \frac{\tau^{2\alpha} + (1-\tau^\alpha)^2}{2\tau^\alpha} \|\mathbf{h}\|^2_{\mathrm{L}^2_\varrho(\Omega)}
+ \frac{3}{2} \|\boldsymbol{\varepsilon}(\mathbf{g})\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{g}))\|^2_{\mathrm{L}^2_\lambda(\Omega)}
+ \frac{3}{2}\|\boldsymbol{\kappa}_0\|^2_{\mathrm{L}^2_{1/\mu}(\Omega)}\\
&\qquad + \frac{\tau^{2\alpha}+(1-\tau^\alpha)^2}{\tau^\alpha}\int_0^t \|\mathbf{f}(s)\|^2_{\mathrm{L}^2_{1/\varrho}(\Omega)} \,\mathrm{d} s\\
&\qquad + \frac{\tau^\alpha}{2} \int_0^t (1-\dot{e}_{\alpha,1}(s)) \|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s
+ \frac{1}{2} \int_0^t (-\dot{e}_{\alpha,1}(s)) \|\boldsymbol{\varepsilon}(\mathbf{u}(s))\|^2_{\mathrm{L}^2_\mu(\Omega)}\,\mathrm{d} s. \end{aligned} \end{align} Now, consider the following two nonnegative functions defined on $[0,T]$:
\begin{align*}
y(t)&:= \frac{\tau^\alpha}{2} \|\dot \mathbf{u}(t)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t)))\|^2_{\mathrm{L}^2_\lambda(\Omega)},\\ z(t)&:= (1-\tau^\alpha) \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho}\dot{\mathbf{u}})(s,\mathbf{x})\cdot \sqrt{\varrho}\dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} s \,\mathrm{d} \mathbf{x}, \end{align*} and let \begin{align}\label{eq:17} \begin{aligned}
A(t) &:= \frac{\tau^{2\alpha} + (1-\tau^\alpha)^2}{2\tau^\alpha} \|\mathbf{h}\|^2_{\mathrm{L}^2_\varrho(\Omega)}
+ \frac{3}{2} \|\boldsymbol{\varepsilon}(\mathbf{g})\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{g}))\|^2_{\mathrm{L}^2_\lambda(\Omega)} \\
&\qquad + \frac{3}{2}\|\boldsymbol{\kappa}_0\|^2_{\mathrm{L}^2_{1/\mu}(\Omega)} + \frac{\tau^{2\alpha}+(1-\tau^\alpha)^2}{\tau^\alpha}\int_0^t \|\mathbf{f}(s)\|^2_{\mathrm{L}^2_{1/\varrho}(\Omega)} \,\mathrm{d} s. \end{aligned} \end{align}
Clearly, $0 \leq A(t) \leq A(T)=:A$. The inequality \eqref{eq:16} then implies that
\[ y(t) + z(t) \leq A(t) + \int_0^t (1-\dot{e}_{\alpha,1}(s)) y(s) \,\mathrm{d} s.\]
Since $t \in [0,T] \mapsto A(t)$ is a nonnegative and nondecreasing function, and since $z(t) \geq 0$ for all $t \in [0,T]$ thanks to Lemma \ref{le:1}, by Gronwall's lemma we have that
\[ y(t) + z(t) \leq A(t)\, \mathrm{exp}\left(\int_0^t (1-\dot{e}_{\alpha,1}(s))\,\mathrm{d} s\right) = A(t)\, \mathrm{exp}(t + 1 - e_{\alpha,1}(t)), \quad t \in (0,T].\] In other words, with
\[A(t)=A(\tau^\alpha,\|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)},\|\boldsymbol{\varepsilon}(\mathbf{g})\|_{\mathrm{L}^2_\mu(\Omega)},
\|\tr(\boldsymbol{\varepsilon}(\mathbf{g}))\|_{\mathrm{L}^2_\lambda(\Omega)}, \|\boldsymbol{\kappa}_0\|_{\mathrm{L}^2_{1/\mu}(\Omega)}, \|\mathbf{f}\|_{\mathrm{L}^2(0,t;\mathrm{L}^2_{1/\varrho}(\Omega))})\geq 0\]
defined by the expression \eqref{eq:17} for $t \in [0,T]$, the following energy inequality holds for all $t \in [0,T]$:
\begin{align}\label{eq:18} \begin{aligned}
&\frac{\tau^\alpha}{2} \|\dot{\mathbf{u}}(t)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}(t))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\\ &\quad + (1-\tau^\alpha) \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho} \dot{\mathbf{u}})(s,\mathbf{x})\cdot \sqrt{\varrho} \dot{\mathbf{u}}(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s
\leq A(t)\, \mathrm{exp}(t+1 - e_{\alpha,1}(t)). \end{aligned} \end{align} Thus, assuming the existence of a (sufficiently smooth) solution $\mathbf{u}$ to \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8}, with \begin{align}\label{eq:19} \mathbf{g} \in [\mathrm{H}^1_0(\Omega)]^3, \quad \mathbf{h} \in [\mathrm{L}^2(\Omega)]^3, \quad \mathbf{S} = \mathbf{S}^{\rm T} \in [\mathrm{L}^2(\Omega)]^{3 \times 3}, \quad \mathbf{f} \in \mathrm{L}^2(0,T;[\mathrm{L}^2(\Omega)]^3), \end{align} recalling that, by hypothesis \textcolor{black}{\eqref{coeff-ass}}, $\varrho, \mu, \lambda \in \mathrm{L}^\infty(\Omega)$, $\varrho$ and $\mu$ are bounded below by positive constants $\varrho_0$ and $\mu_0$, respectively, and $\lambda \geq 0$ a.e. on $\Omega$, the energy inequality \eqref{eq:18} holds, with $A(t)<\infty$ for all $t \in [0,T]$.
We emphasize here the significance of our assumption that $\tau \in (0,1]$: the positivity of $\tau$ is necessary in order to ensure that the factor $A(t)$ (cf. \eqref{eq:17}) appearing on the right-hand side of the energy inequality \eqref{eq:18} is finite, while $\tau \leq 1 ~(=\rho)$ ensures that the prefactor of the last term on the left-hand side of \eqref{eq:18}, which can be viewed as a nonnegative energy dissipation term thanks to Lemma \ref{le:1}, is nonnegative, whereby the entire left-hand side of \eqref{eq:18} is nonnegative.
\begin{remark}\label{re:1} We remark that if $\mathbf{S}$ is chosen so that $\tau^\alpha \mathbf{S} = 2 \mu \boldsymbol{\varepsilon}(\mathbf{g})+ \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g}))\,\mathbf{I}$, then $\boldsymbol{\kappa}_0 = \mathbf{0}$, and therefore also $\mathbf{T}_2=\mathbf{0}$. The energy inequality \eqref{eq:18} is then simpler and sharper, which can be seen by erasing all terms containing $\delta_2$ from the right-hand side of \eqref{eq:15}, and making the same choices of $\delta_1$, $\delta_3$ and $\delta_4$ as above. {\color{black} In the special case of $\lambda=0$ this particular choice of the initial stress $\mathbf{S}$, namely $\mathbf{S} = 2\mu (1/\tau)^\alpha \boldsymbol{\varepsilon}(\mathbf{g})$, in our initial condition \eqref{eq:2}$_3$ results in the same initial condition as the one stated in equation (13) in the work of Freed and Diethelm \cite{FS} (recall that we scaled $\rho$ to $1$, so $(\rho/\tau)^\alpha = (1/\tau)^\alpha$).} We shall proceed without making this restrictive assumption on $\mathbf{S}$, and continue to study the general case when $\tau^\alpha \mathbf{S}$ is not required to be equal to $2 \mu \boldsymbol{\varepsilon}(\mathbf{g})+ \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g}))\,\mathbf{I}$. \end{remark}
In the next section we shall use a compactness argument, based on a sequence of spatial Galerkin approximations to the problem, to show the existence of a (unique) weak solution.
We close this section with the proof of Young's inequality for Laplace-type convolution, which we used in the derivation of the energy inequality. The proof of this result in the case of Fourier-type convolution is standard; in the case of Laplace-type convolution the argument proceeds along similar lines, with minor modifications; {\color{black} we have included its statement and proof for the convenience of the reader.}
\begin{lemma}\label{le:2} Let {\color{black} $p, q, r \in [1,\infty]$} be such that $\frac{1}{p} + \frac{1}{q} - 1 = \frac{1}{r}$, and let $f \in \mathrm{L}^p(0,t)$ and $g \in \mathrm{L}^q(0,t)$ for some $t>0$; then $s \in [0,t] \mapsto (f\ast_s g)(s):=\int_0^s f(s-u) g(u) \,\mathrm{d} u \in \mathrm{L}^r(0,t)$, and
\[ \|f \ast_s g \|_{\mathrm{L}^r(0,t)} \leq \|f\|_{\mathrm{L}^p(0,t)} \|g\|_{\mathrm{L}^q(0,t)}.\] \end{lemma}
\noindent\textit{Proof.} {\color{black} If $p=\infty$, then necessarily $q=1$ and $r=\infty$, and if $q=\infty$, then necessarily $p=1$ and $r=\infty$. Since for $r=\infty$ the result is a direct consequence of H\"older's inequality,} we shall concentrate here on the nontrivial case when $p,q,r \in [1,\infty)$. We begin by noting that because \[ \frac{1}{r} + \frac{r-p}{pr} + \frac{r-q}{qr} = 1,\] we have by H\"older's inequality that, for any $s \in (0,t]$,
\begin{align*}
|(f \ast_s g)(s)| &= \left|\int_0^s f(s-u) g(u) \,\mathrm{d} u\right| \leq \int_0^s |f(s-u)|\, |g(u)| \,\mathrm{d} u \\
& = \int_0^s |f(s-u)|^{\frac{p}{r}} |g(u)|^{\frac{q}{r}}
|f(s-u)|^{1-\frac{p}{r}} |g(u)|^{1-\frac{q}{r}} \,\mathrm{d} u\\
& \leq \||f(s-\cdot)|^{\frac{p}{r}} |g(\cdot)|^{\frac{q}{r}}\|_{\mathrm{L}^r(0,s)}
\||f(s-\cdot)|^{1-\frac{p}{r}}\|_{\mathrm{L}^{\frac{pr}{r-p}}(0,s)} \||g(\cdot)|^{1-\frac{q}{r}}\|_{\mathrm{L}^{\frac{qr}{r-q}}(0,s)} \\
& = \left(\int_0^s |f(s-u)|^{p} |g(u)|^{q} \,\mathrm{d} u\right)^{\frac{1}{r}}
\|f\|^{\frac{r-p}{r}}_{\mathrm{L}^p(0,s)} \|g\|^{\frac{r-q}{r}}_{\mathrm{L}^q(0,s)}\\
& \leq \left(\int_0^s |f(s-u)|^{p} |g(u)|^{q} \,\mathrm{d} u\right)^{\frac{1}{r}}
\|f\|^{\frac{r-p}{r}}_{\mathrm{L}^p(0,t)} \|g\|^{\frac{r-q}{r}}_{\mathrm{L}^q(0,t)}. \end{align*} Hence, by integration over $s \in (0,t)$, applying Fubini's theorem, and performing the change of variable $\sigma := s-u$, we deduce that \begin{align*}
\int_0^t |(f \ast_s g)(s)|^r \,\mathrm{d} s
& \leq \left(\int_0^t \int_0^s |f(s-u)|^{p} |g(u)|^{q} \,\mathrm{d} u \,\mathrm{d} s\right)
\|f\|^{r-p}_{\mathrm{L}^p(0,t)} \|g\|^{r-q}_{\mathrm{L}^q(0,t)}\\
& = \left(\int_0^t |g(u)|^{q} \left(\int_u^t |f(s-u)|^{p} \,\mathrm{d} s\right) \,\mathrm{d} u\right)
\|f\|^{r-p}_{\mathrm{L}^p(0,t)} \|g\|^{r-q}_{\mathrm{L}^q(0,t)}\\
& = \left(\int_0^t |g(u)|^{q} \left(\int_0^{t-u} |f(\sigma)|^{p} \,\mathrm{d} \sigma\right) \,\mathrm{d} u\right)
\|f\|^{r-p}_{\mathrm{L}^p(0,t)} \|g\|^{r-q}_{\mathrm{L}^q(0,t)}\\
& \leq \left(\int_0^t |g(u)|^{q} \left(\int_0^{t} |f(\sigma)|^{p} \,\mathrm{d} \sigma\right) \,\mathrm{d} u\right)
\|f\|^{r-p}_{\mathrm{L}^p(0,t)} \|g\|^{r-q}_{\mathrm{L}^q(0,t)}\\
& = \left(\|g\|^{q}_{\mathrm{L}^q(0,t)} \|f\|^{p}_{\mathrm{L}^p(0,t)}\right)
\|f\|^{r-p}_{\mathrm{L}^p(0,t)} \|g\|^{r-q}_{\mathrm{L}^q(0,t)} = \|f\|^{r}_{\mathrm{L}^p(0,t)} \|g\|^{r}_{\mathrm{L}^q(0,t)}. \end{align*} By raising this to the power $\frac{1}{r}$, we arrive at the desired inequality. $\quad\Box$
\section{Existence of weak solutions}
Hereafter $\mathrm{W}^{s,p}(D)$ will denote the Sobolev space of real-valued functions defined on a bounded open set $D \subset \mathbb{R}^d$, $d \geq 1$, with differentiability index $s>0$ and integrability index $p \in [1,\infty]$ (cf. \cite{AF}). When $p=2$, we shall write $\mathrm{H}^s(D)$ instead of $\mathrm{W}^{s,2}(D)$ and $\mathrm{H}^s_0(D)$ will denote the closure of $\mathrm{C}^\infty_0(D)$ in $\mathrm{H}^s(D)$. When $D$ is a bounded open Lipschitz domain and $s \in (\frac{1}{2},\frac{3}{2})$, elements of $\mathrm{H}^s_0(D)$ have zero trace on $\partial D$; for such $s$, $\mathrm{H}^{-s}(D)$ will denote the dual space of $\mathrm{H}^s_0(D)$.
For a Banach space $\mathcal{B}$, we shall denote by $\mathrm{L}^p(0,T;\mathcal{B})$ and $\mathrm{W}^{s,p}(0,T;\mathcal{B})$, respectively, the associated Lebesgue and Sobolev space of $\mathcal{B}$-valued mappings defined on the open interval $(0,T)$, and $\mathrm{C}([0,T];\mathcal{B})$ will signify the set of all uniformly continuous $\mathcal{B}$-valued functions defined on $[0,T]$. Furthermore, $\mathrm{C}^{0,1}([0,T];\mathcal{B})$ will denote the space of Lipschitz-continuous $\mathcal{B}$-valued functions defined on $[0,T]$. Suppose that $\mathcal{H}$ is a Hilbert space over the field of real numbers with inner product $(\cdot,\cdot)_{\mathcal{H}}$. We shall denote by $\mathrm{C}_w([0,T];\mathcal{H})$ the linear space of all weakly continuous functions from $[0,T]$ into $\mathcal{H}$, i.e., the set of all functions $v \in \mathrm{L}^\infty(0,T;\mathcal{H})$ such that $t \in [0,T] \mapsto (v(t),w) \in \mathbb{R}$ is a continuous function on $[0,T]$ for each $w \in \mathcal{H}$.
Our objective in this section is to show the existence and the uniqueness of a \textit{weak solution} to the problem \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8}, defined as follows.
\begin{definition}[Weak solution]\label{weak-sol} Suppose that the initial data $\mathbf{g}$, $\mathbf{h}$, $\mathbf{S}$ and the source term $\mathbf{f}$ satisfy \eqref{eq:19}, and assume that $\tau \in (0,1]$, $\alpha \in (0,1)$, and $\varrho$, $\mu$, and $\lambda$ are as in \eqref{coeff-ass}. A function
\begin{alignat}{2}\label{eq:20} \begin{aligned} \mathbf{u} &\in \mathrm{C}_w([0,T];[\mathrm{H}^1_0(\Omega)]^3),\qquad \mbox{with}\\ \dot{\mathbf{u}} \in \mathrm{C}_w([0,T]; &~ [\mathrm{L}^2(\Omega)]^3), \quad \mbox{and} \quad (-\dot e_{\alpha,1})^{\frac{1}{2}}~\! \dot{\mathbf{u}} \in \mathrm{L}^2(0,T;[\mathrm{L}^2(\Omega)]^3), \end{aligned} \end{alignat}
satisfying the equality \begin{align}\label{eq:21} \begin{aligned}
\tau^\alpha \int_0^T &(\varrho \mathbf{u} (s,\cdot), \ddot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s - (1-\tau^\alpha) \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho \dot{\mathbf{u}})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s \\ &\quad + \int_0^T \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s \\ & = - \tau^\alpha (\varrho \mathbf{g}, \dot{\mathbf{v}}(0,\cdot)) + \tau^\alpha (\varrho \mathbf{h}, \mathbf{v}(0,\cdot)) + \int_0^T \langle \mathbf{b}(s,\cdot), \mathbf{v}(s,\cdot) \rangle \,\mathrm{d} s \end{aligned} \end{align} for all $\mathbf{v} \in \mathrm{W}^{2,1}(0,T;[\mathrm{L}^2(\Omega)]^3) \cap \mathrm{L}^1(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ with $\mathbf{v}(T,\cdot)=0$ and $\dot{\mathbf{v}}(T,\cdot)=0$, and
\begin{align}\label{eq:22} \mathbf{b}:= (\tau^\alpha-1)\, \dot{e}_{\alpha,1}\,\varrho \mathbf{h} \,+\, e_{\alpha,1}\, \Div (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}) + \tau^\alpha \mathbf{f} + (\tau^\alpha-1) \dot{e}_{\alpha,1} \ast_t \mathbf{f}, \end{align}
is called a weak solution to the problem \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8}. \end{definition}
In \eqref{eq:21} and throughout the rest of the paper $\langle \cdot , \cdot \rangle$ denotes the duality pairing between $[\mathrm{H}^{-1}(\Omega)]^3$ and $[\mathrm{H}^1_0(\Omega)]^3$, and $(\cdot, \cdot)$ is the inner product of $[\mathrm{L}^2(\Omega)]^3$. We note that, for $\alpha \in (0,1)$,
\begin{align}\label{eq:23}
- \dot{e}_{\alpha,1}(t) \thicksim \frac{\alpha\, t^{\alpha-1}}{\Gamma(\alpha +1)}\qquad \mbox{as
$t \rightarrow 0_+$}, \end{align}
and hence, by noting from \eqref{eq:22} the additive structure of $\mathbf{b}$, we have that
\[\mathbf{b} \in \mathrm{L}^p(0,T;[\mathrm{L}^2(\Omega)]^3) + \mathrm{W}^{1,p}(0,T;[\mathrm{H}^{-1}(\Omega)]^3) + \mathrm{L}^2(0,T;[\mathrm{L}^2(\Omega)]^3)\qquad \forall\, p \in \big[1, \textstyle{\frac{1}{1-\alpha}}\big).\]
{\color{black} The function $\boldsymbol{\sigma}$ has been eliminated in the transition from \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8} to the weak formulation \eqref{eq:21}, \eqref{eq:22}, and the initial condition $\boldsymbol{\sigma}(0,\cdot) = \mathbf{S}(\cdot)$ has been encoded into \eqref{eq:21}, \eqref{eq:22}. Motivated by \eqref{eq:sig-laplace}, for a weak solution $\mathbf{u}$, whose existence and uniqueness we will show in Theorem \ref{th:1} below, we therefore \emph{define} the associated stress tensor $\boldsymbol{\sigma}$ by \begin{align}\label{eq:stress-defin} \begin{aligned} \boldsymbol{\sigma}(t,\cdot) &:= \mathcal{L}^{-1}\left(\frac{1+ p^\alpha}{1 + \tau^\alpha p^\alpha}\right) \ast_t (2\mu \boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot))) \mathbf{I}) \\ &\quad + \mathcal{L}^{-1}\left(\frac{p^{\alpha-1}}{1 + \tau^\alpha p^\alpha}\right) \,(\tau^\alpha\mathbf{S}(\cdot)- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}(\cdot)) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g}(\cdot))) \mathbf{I}). \end{aligned} \end{align} }
Consider the bilinear form $a(\cdot,\cdot)$ on $[\mathrm{H}^1_0(\Omega)]^3 \times [\mathrm{H}^1_0(\Omega)]^3$, defined by
\[ a(\mathbf{w}, \mathbf{v}):= (2\mu \boldsymbol{\varepsilon}(\mathbf{w}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{w})) \mathbf{I}, \boldsymbol{\varepsilon}(\mathbf{v})) \qquad \forall\, \mathbf{w}, \mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3,\]
and observe that
\[ a(\mathbf{w}, \mathbf{v})= (2\mu \boldsymbol{\varepsilon}(\mathbf{w}), \boldsymbol{\varepsilon}(\mathbf{v})) + (\lambda \tr(\boldsymbol{\varepsilon}(\mathbf{w})) , \tr(\boldsymbol{\varepsilon}(\mathbf{v}))) \qquad \forall\, \mathbf{w}, \mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3.\]
Clearly, $a(\mathbf{w},\mathbf{v}) = a(\mathbf{v},\mathbf{w})$, and there exist positive real numbers $c_1$ and $c_0$ such that $a(\mathbf{w},\mathbf{v})\leq c_1
\|\mathbf{w}\|_{\mathrm{H}^1(\Omega)} \|\mathbf{v}\|_{\mathrm{H}^1(\Omega)}$ for all $\mathbf{w}, \mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3$ (by the Cauchy--Schwarz inequality), and $a(\mathbf{v},\mathbf{v}) \geq c_0 \|\mathbf{v}\|^2_{\mathrm{H}^1(\Omega)}$ for all $\mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3$ (by Korn's inequality). Hence, $a(\cdot,\cdot)$ is a symmetric, bounded, and coercive bilinear form on $[\mathrm{H}^1_0(\Omega)]^3 \times [\mathrm{H}^1_0(\Omega)]^3$. Furthermore, by Rellich's theorem, the infinite-dimensional separable Hilbert space $[\mathrm{H}^1_0(\Omega)]^3$ is compactly and densely embedded into the infinite-dimensional separable Hilbert space $[\mathrm{L}^2(\Omega)]^3$.
To proceed, we require the following version of the Hilbert--Schmidt theorem \cite{FS}.
\begin{lemma}\label{le:3} Let $\mathcal{H}$ and $\mathcal{V}$ be separable Hilbert spaces, with $\mathcal{V}$ compactly embedded into $\mathcal{H}$ and $\overline{\mathcal{V}} = \mathcal{H}$ in the norm of $\mathcal{H}$. Let $a\colon \mathcal{V} \times \mathcal{V} \to \mathbb{R}$ be a nonzero, symmetric, bounded and coercive bilinear form. Then, there exist sequences of real numbers $(\lambda_n)_{n \in \mathbb{N}}$ and unit $\mathcal{H}$-norm members $(e_n)_{n \in \mathbb{N}}$ of $\mathcal{V}$, which solve the following problem: \emph{Find $\lambda \in \mathbb{R}$ and $e \in \mathcal{H} \setminus \{ 0 \}$ such that}
\begin{equation}\label{eq:24} a(e,v) = \lambda ( e, v )_\mathcal{H} \quad \forall\,v \in \mathcal{V}. \end{equation}
The $\lambda_n$, which can be assumed to be in increasing order with respect to $n$, are positive, bounded from below away from $0$, and $\lim_{n\to\infty}\lambda_n = \infty$.
Additionally, the $e_n$ form an $\mathcal{H}$-orthonormal system whose $\mathcal{H}$-closed span is $\mathcal{H}$ and the rescaling $e_n/\sqrt{\lambda_n}$ gives rise to an $a$-orthonormal system whose $a$-closed span is $\mathcal{V}$. \end{lemma}
We are now ready to formulate the main result of this section. \begin{theorem}\label{th:1} Suppose that the initial data $\mathbf{g}$, $\mathbf{h}$, $\mathbf{S}$ and the source term $\mathbf{f}$ satisfy \eqref{eq:19}, and assume that $\tau \in (0,1]$, $\alpha \in (0,1)$, and $\varrho$, $\mu$, and $\lambda$ are as in \eqref{coeff-ass}. Then, {\color{black} the weak formulation \eqref{eq:21}, \eqref{eq:22} of the problem \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8} has a (weak) solution} in the sense of Definition \ref{weak-sol}
such that \[\mathbf{u} \in \mathrm{C}([0,T];[\mathrm{H}^s_0(\Omega)]^3)\qquad \mbox{for all $s \in (\frac{1}{2},1)$},\] and \[\tau^\alpha\varrho \dot \mathbf{u} + (1-\tau^\alpha)(-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}) \in \mathrm{W}^{1,p}(0,T;[\mathrm{H}^{-1}(\Omega)]^3),\qquad \alpha \in (0,1),\]
for all $p \in [1,2]$ satisfying $p<\frac{1}{1-\alpha}$. Furthermore, $\mathbf{u}$ satisfies the energy inequality
\begin{align}\label{EE} \begin{aligned}
&\frac{\tau^\alpha}{2} \|\dot \mathbf{u}(t')\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}(t'))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}(t')))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\\
&\quad + \frac{1-\tau^\alpha}{2} \int_0^{t'} -\dot{e}_{\alpha,1}(s)\|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s \leq 3A(t) \, \mathrm{exp}(t+1 - e_{\alpha,1}(t)), \end{aligned} \end{align} for all $t \in (0,T]$ and a.e. $t' \in (0,t]$, where $A(t)$ is defined by \eqref{eq:17} for $t \in [0,T]$.
{\color{black} The initial condition $\mathbf{u}(0,\cdot) = \mathbf{g}(\cdot)$ is satisfied in the sense of continuous functions from $[0,T]$ into $[\mathrm{L}^2(\Omega)]^3$ and the initial condition $\dot\mathbf{u} (0,\cdot) = \mathbf{h}(\cdot)$ is satisfied as an equality in $\mathrm{C}_w([0,T],[\mathrm{L}^2(\Omega)]^3)$. Furthermore, the weak solution $\mathbf{u}$ is unique and depends continuously on the data $\mathbf{g}$, $\mathbf{h}$, $\mathbf{S}$, and $\mathbf{f}$.
The stress tensor $\boldsymbol{\sigma}$, defined by \eqref{eq:stress-defin} in terms of the unique weak solution $\mathbf{u}$ of \eqref{eq:21}, \eqref{eq:22}, satisfies the initial condition $\boldsymbol{\sigma}(0,\cdot) = \mathbf{S}(\cdot)$ as an equality in $\mathrm{C}_w([0,T],[\mathrm{L}^2(\Omega)]^{3 \times 3})$.} \end{theorem}
\textit{Proof.} STEP 1: \textit{Existence of solutions.} We begin by showing the existence of a weak solution. We shall use Lemma \ref{le:3} with $\mathcal{H}= [\mathrm{L}^2_\varrho(\Omega)]^3 \simeq [\mathrm{L}^2(\Omega)]^3$ equipped with the inner product defined by $(\mathbf{w},\mathbf{v})_{\mathcal{H}} := (\varrho \mathbf{w} , \mathbf{v})$, $\mathcal{V}= [\mathrm{H}^1_0(\Omega)]^3$, to generate an $\mathcal{H}$-orthonormal Galerkin basis $(\mathbf{\boldsymbol{\varphi}}_n)_{n \in \mathbb{N}} \subset [\mathrm{H}^1_0(\Omega)]^3$, whose $[\mathrm{L}^2(\Omega)]^3$-closed span is $[\mathrm{L}^2(\Omega)]^3$ and the rescaling $\boldsymbol{\varphi}_n/\sqrt{\lambda_n}$ gives rise to an $a$-orthonormal system whose $a$-closed span is $[\mathrm{H}^1_0(\Omega)]^3$; $(\lambda_n)_{n \in \mathbb{N}}$ is a countably infinite sequence of positive eigenvalues, bounded away from $0$, and $\lim_{n\to\infty}\lambda_n = \infty$, defined by $a(\boldsymbol{\varphi}_n, \mathbf{v}) = \lambda_n (\varrho \boldsymbol{\varphi}_n , \mathbf{v})$ for all $\mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3$.
Let $\mathcal{V}_n := \mbox{span}\{\boldsymbol{\varphi}_1,\dots, \boldsymbol{\varphi}_n\}$, and let $P_n \mathbf{v} \in \mathcal{V}_n$ denote the orthogonal projection of $\mathbf{v} \in [\mathrm{L}^2_\varrho(\Omega)]^3$, in the inner product of $[\mathrm{L}^2_\varrho(\Omega)]^3$, onto $\mathcal{V}_n$. We seek a Galerkin approximation $\mathbf{u}_n \colon t \in [0,T] \mapsto \mathbf{u}_n(t) \in \mathcal{V}_n$ of the form
\begin{align}\label{eq:25} \mathbf{u}_n(t,\mathbf{x}) := \sum_{k=1}^n \beta_k(t) \boldsymbol{\varphi}_k(\mathbf{x}) \end{align}
satisfying
\begin{align}\label{eq:26} \hspace{-3mm}\tau^\alpha (\varrho \ddot \mathbf{u}_n, \mathbf{v} ) + (1-\tau^\alpha) \left ( \frac{\partial}{\partial t}(-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}_n), \mathbf{v} \right ) + \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}_n) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}_n)) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}) \big) = \langle \mathbf{b}, \mathbf{v} \rangle \end{align} for all $\mathbf{v} \in \mathcal{V}_n$, together with the initial conditions
\[ \mathbf{u}_n(0,\cdot) = P_n\mathbf{g} \qquad \mbox{and} \qquad \dot{\mathbf{u}}_n(0,\cdot) = P_n\mathbf{h}.\]
Equivalently,
\[\beta_k(0)=(\varrho \mathbf{g},\boldsymbol{\varphi}_k)\qquad \mbox{and}\qquad \dot{\beta}_k(0)=(\varrho \mathbf{h},\boldsymbol{\varphi}_k),\qquad \mbox{for $k=1,\dots,n$}.\]
Hence,
\[ \|{\mathbf{u}}_n(0,\cdot)\|_{\mathrm{L}^2_\varrho(\Omega)} \leq \|\mathbf{g}\|_{\mathrm{L}^2_\varrho(\Omega)}\qquad \mbox{and}\qquad \|\dot{\mathbf{u}}_n(0,\cdot)\|_{\mathrm{L}^2_\varrho(\Omega)} \leq \|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)},\] and
\begin{align*} a(\mathbf{u}_n(0,\cdot),\mathbf{u}_n(0,\cdot)) &= \sum_{k,\ell=1}^n \beta_k(0) \beta_\ell(0) a(\boldsymbol{\varphi}_k,\boldsymbol{\varphi}_\ell) = \sum_{k,\ell=1}^n \beta_k(0) \beta_\ell(0) \lambda_k (\varrho \boldsymbol{\varphi}_k,\boldsymbol{\varphi}_\ell)\\
&= \sum_{k=1}^n [\beta_k(0)]^2 \lambda_k \|\boldsymbol{\varphi}_k\|^2_{\mathrm{L}^2_\varrho(\Omega)}= \sum_{k=1}^n [(\varrho\mathbf{g},\boldsymbol{\varphi}_k)]^2 \lambda_k \|\boldsymbol{\varphi}_k\|^2_{\mathrm{L}^2_\varrho(\Omega)}\\
&\leq \sum_{k=1}^\infty [(\varrho \mathbf{g},\boldsymbol{\varphi}_k)]^2 \lambda_k \|\boldsymbol{\varphi}_k\|^2_{\mathrm{L}^2_\varrho(\Omega)} = \sum_{k,\ell=1}^\infty (\varrho \mathbf{g},\boldsymbol{\varphi}_k) (\varrho \mathbf{g}, \boldsymbol{\varphi}_\ell) \lambda_k (\varrho \boldsymbol{\varphi}_k,\boldsymbol{\varphi}_\ell)\\ & = a\left(\sum_{k=1}^\infty (\varrho \mathbf{g},\boldsymbol{\varphi}_k) \boldsymbol{\varphi}_k , \sum_{\ell=1}^\infty (\varrho \mathbf{g},\boldsymbol{\varphi}_\ell) \boldsymbol{\varphi}_\ell\right) = a(\mathbf{g},\mathbf{g}). \end{align*} Thus, by the coercivity and the boundedness of the bilinear form $a(\cdot,\cdot)$ on $[\mathrm{H}^1_0(\Omega)]^3 \times [\mathrm{H}^1_0(\Omega)]^3$, also
\[ c_0 \|\mathbf{u}_n(0,\cdot)\|^2_{\mathrm{H}^1(\Omega)} = c_0 \|P_n \mathbf{g}\|^2_{\mathrm{H}^1(\Omega)}\leq c_1 \|\mathbf{g}\|^2_{\mathrm{H}^1(\Omega)}.\]
Therefore, the orthogonal projector $P_n$ has operator norm $\|P_n\|_{\mathcal{L}([\mathrm{L}^2_\varrho(\Omega)]^3, [\mathrm{L}^2_\varrho(\Omega)]^3)}$
bounded by $1$, uniformly in $n$, and it is, simultaneously, a bounded linear operator from $[\mathrm{H}^1_0(\Omega)]^3$ into $\mathcal{V}_n \subset [\mathrm{H}^1_0(\Omega)]^3$, with operator norm $\|P_n\|_{\mathcal{L}([\mathrm{H}^1(\Omega)]^3, [\mathrm{H}^1(\Omega)]^3)}$ bounded by $(c_1/c_0)^{1/2}$, uniformly in $n$.
We begin by showing the existence of a unique Galerkin approximation $t \in [0,T] \mapsto \mathbf{u}_n(t) \in \mathcal{V}_n$. By substituting \eqref{eq:25} into \eqref{eq:26} and taking $\mathbf{v} = \boldsymbol{\varphi}_m \in \mathcal{V}_n$ for $m=1,\dots, n$ and noting the orthonormality $(\varrho \boldsymbol{\varphi}_k, \boldsymbol{\varphi}_m) = \delta_{k,m}$ for $k, m = 1, \dots, n$, we have that
\begin{align}\label{eq:27} \tau^\alpha \ddot \beta_m + (1-\tau^\alpha) \frac{\,\mathrm{d}}{\,\mathrm{d} t}(-\dot{e}_{\alpha,1}\ast_t \dot{\beta}_m) + \lambda_m \beta_m = \langle \mathbf{b}, \boldsymbol{\varphi}_m \rangle,\qquad \mbox{$m=1,\dots,n$,} \end{align} with $\langle \mathbf{b}, \boldsymbol{\varphi}_m \rangle \in \mathrm{L}^p(0,T)$ for all $p \in \big[1, \frac{1}{1-\alpha}\big)$, in conjunction with the initial conditions
\[ \beta_m(0) = (\varrho \mathbf{g},\boldsymbol{\varphi}_m),\qquad \dot{\beta}_m(0) = (\varrho \mathbf{h},\boldsymbol{\varphi}_m),\qquad \mbox{$m=1,\dots,n$}.\]
The existence of a unique solution $\beta_m$ to this problem, with $\dot{\beta}_m \in \mathrm{AC}([0,T])$ for each $m \in \{1,\dots,n\}$ is easily shown: by letting $\gamma_m:= \dot{\beta}_m$, \eqref{eq:27} can be rewritten as a first-order system for the two-component function $t \in [0,T] \mapsto (\beta_m(t),\gamma_m(t))^{\rm T} \in \mathbb{R}^2$, and then, because $\langle \mathbf{b}, \boldsymbol{\varphi}_m \rangle
\in \mathrm{L}^1(0,T)$, integration of this system over $[0,t]$, with $t \in (0,T]$ yields an integral equation to which one can apply Banach's fixed point theorem in the complete metric space $\mathrm{C}([0,T]) \times \mathrm{C}([0,T])$ to deduce the existence of a unique absolutely continuous solution $(\beta_m, \gamma_m)^{\rm T}$, defined on a ``maximal'' interval $[0,t_*] \subset [0,T]$. If $t_*$ were strictly less than $T$, then it would follow that $|\beta_m(t)| + |\gamma_m(t)| \rightarrow + \infty$ as $t \rightarrow t_*$; the \textit{a priori} bound \eqref{eq:29}, which we shall prove below, however rules out this possibility; therefore $t_* = T$. Thus we deduce the existence of a unique Galerkin approximation $t \in [0,T] \mapsto \mathbf{u}_n(t) \in \mathcal{V}_n$, with $\dot{\mathbf{u}}_n \in \mathrm{AC}([0,T]; \mathcal{V}_n)$.
By taking $\mathbf{v}=\boldsymbol{\varphi}_m$ in \eqref{eq:26}, multiplying the resulting equality with $\dot{\beta}_m(t)$ and summing over $m=1,\dots,n$, we deduce that
\begin{align*}
&\frac{\tau^\alpha}{2} \frac{\,\mathrm{d}}{\,\mathrm{d} t}\int_\Omega \varrho |\dot \mathbf{u}_n(t,\mathbf{x})|^2 \,\mathrm{d} \mathbf{x} + (1-\tau^\alpha)\, \int_\Omega \frac{\partial}{\partial t}(-\dot{e}_{\alpha,1}\ast_t \sqrt{\varrho} \dot{\mathbf{u}}_n)(t,\mathbf{x})\cdot \sqrt{\varrho}\dot{\mathbf{u}}_n(t,\mathbf{x}) \,\mathrm{d} \mathbf{x} \\
&\qquad + \frac{1}{2} \frac{\,\mathrm{d}}{\,\mathrm{d} t} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{u}_n(t,\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{u}_n(t,\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x} \, = \, \langle \mathbf{b}(t,\cdot), \dot{\mathbf{u}}_n(t,\cdot)\rangle. \end{align*} Hence, by integrating over the time interval $(0,t)$, for $t \in (0,T]$, and noting the initial conditions satisfied by $\mathbf{u}_n$, we deduce that \begin{align*}
&\frac{\tau^\alpha }{2} \int_\Omega \varrho |\dot \mathbf{u}_n(t,\mathbf{x})|^2 \,\mathrm{d} \mathbf{x} + (1-\tau^\alpha)\, \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho} \dot{\mathbf{u}}_n)(s,\mathbf{x})\cdot \sqrt{\varrho} \dot{\mathbf{u}}_n(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s \\
&\quad \quad + \frac{1}{2} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{u}_n(t,\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{u}_n(t,\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x} \\ &= \, \int_0^t \langle \mathbf{b}(s,\cdot), \dot{\mathbf{u}}_n(s,\cdot)\rangle \,\mathrm{d} s \\
&\quad \quad + \frac{\tau^\alpha }{2} \int_\Omega \varrho |\dot{\mathbf{u}}_n(0,\mathbf{x})|^2 \,\mathrm{d} \mathbf{x} + \frac{1}{2} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{u}_n(0,\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{u}_n(0,\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x} \\
&= \, \int_0^t \langle \mathbf{b}(s,\cdot), \dot{\mathbf{u}}_n(s,\cdot)\rangle \,\mathrm{d} s + \frac{\tau^\alpha }{2} \|\dot{\mathbf{u}}_n(0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} a(\mathbf{u}_n(0,\cdot), \mathbf{u}_n(0,\cdot))\\
&\leq \, \int_0^t \langle \mathbf{b}(s,\cdot), \dot{\mathbf{u}}_n(s,\cdot)\rangle \,\mathrm{d} s + \frac{\tau^\alpha }{2} \|\mathbf{h}\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} a(\mathbf{g}, \mathbf{g})\\
&= \int_0^t \langle \mathbf{b}(s,\cdot), \dot{\mathbf{u}}_n(s,\cdot)\rangle \,\mathrm{d} s + \frac{\tau^\alpha }{2} \int_\Omega \varrho |\mathbf{h}|^2 \,\mathrm{d} \mathbf{x} + \frac{1}{2} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x}. \end{align*}
Therefore, \begin{align}\label{eq:28} \begin{aligned}
&\frac{\tau^\alpha }{2} \int_\Omega \varrho |\dot \mathbf{u}_n(t,\mathbf{x})|^2 \,\mathrm{d} \mathbf{x} + (1-\tau^\alpha)\, \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho}\dot{\mathbf{u}}_n)(s,\mathbf{x})\cdot \sqrt{\varrho}\dot{\mathbf{u}}_n(s,\mathbf{x}) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s \\
&\quad \quad + \frac{1}{2} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{u}_n(t,\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{u}_n(t,\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x} \\
& \leq \int_0^t \langle \mathbf{b}(s,\cdot), \dot{\mathbf{u}}_n(s,\cdot)\rangle \,\mathrm{d} s + \frac{\tau^\alpha }{2} \int_\Omega \varrho |\mathbf{h}|^2 \,\mathrm{d} \mathbf{x} + \frac{1}{2} \int_\Omega 2\mu |\boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x}))|^2 + \lambda |\tr(\boldsymbol{\varepsilon}(\mathbf{g}(\mathbf{x})))|^2 \,\mathrm{d} \mathbf{x}. \end{aligned} \end{align}
We can now repeat the procedure (this time rigorously, as $\mathbf{u}_n$ possesses the necessary regularity properties) leading from \eqref{eq:9} to the energy inequality \eqref{eq:18}, with $\mathbf{u}$ replaced by $\mathbf{u}_n$ throughout, resulting in the uniform bound
\begin{align}\label{eq:29} \begin{aligned}
&\frac{\tau^\alpha}{2} \|\dot \mathbf{u}_n(t)\|^2_{\mathrm{L}^2_\varrho (\Omega)} + \frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}_n(t))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}_n(t)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\\ &\quad + (1-\tau^\alpha)\, \int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho}\dot{\mathbf{u}}_n)(s)\cdot \sqrt{\varrho}\dot{\mathbf{u}}_n(s) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s \leq A(t) \, \mathrm{exp}(t+1 - e_{\alpha,1}(t)), \end{aligned} \end{align} for all $t \in (0,T]$, with $A(t)$ again defined by the expression \eqref{eq:17}.
We are now ready to pass to the limit $n \rightarrow \infty$. To this end, we fix an integer $N$ and choose a function $\mathbf{v} \in \mathrm{C}^2_0([0,T);[\mathrm{H}^1_0(\Omega)]^3)$ of the form
\begin{align}\label{eq:30} \mathbf{v}(t,\mathbf{x}) := \sum_{k=1}^N \alpha_k(t) \boldsymbol{\varphi}_k(\mathbf{x}), \end{align}
where $\alpha_k \in \mathrm{C}^2_0([0,T))$ for $k=1,\dots,N$, i.e., $\alpha_k \in \mathrm{C}^2([0,T])$ and has compact support in the half-open interval $[0,T)$. We then choose $n \geq N$ in \eqref{eq:26}, take $\mathbf{v} = \boldsymbol{\varphi}_k$ as test function in \eqref{eq:26} for $k \in \{1,\dots,N\}$, multiply the resulting equality with $\alpha_k$, sum through $k=1,\dots,N$, and perform partial integrations in the first and the second term on the left-hand side to deduce that
\begin{align*} &\tau^\alpha (\varrho \mathbf{u}_n (0,\cdot), \dot \mathbf{v}(0,\cdot)) - \tau^\alpha (\varrho \dot \mathbf{u}_n (0,\cdot), \mathbf{v}(0,\cdot)) + \tau^\alpha \int_0^T (\varrho \mathbf{u}_n (s,\cdot), \ddot{\mathbf{v}}(s,\cdot) ) \,\mathrm{d} s \\ &- (1-\tau^\alpha) ((-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}_n)(0,\cdot), \mathbf{v}(0,\cdot)) - (1-\tau^\alpha) \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho \dot{\mathbf{u}}_n)(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s \\ &+ \int_0^T \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}_n(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}_n(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s = \int_0^T \langle \mathbf{b}(s,\cdot), \mathbf{v}(s,\cdot) \rangle \,\mathrm{d} s \end{align*} for all $\mathbf{v}$ as in \eqref{eq:30} with $N$ fixed, and with any $n \geq N$.
Thus, because $(\varrho \mathbf{u}_n (0,\cdot), \dot \mathbf{v}(0,\cdot))=(\varrho \mathbf{g}, \dot \mathbf{v}(0,\cdot))$ and $(\varrho \dot \mathbf{u}_n (0,\cdot), \mathbf{v}(0,\cdot)) = (\varrho \mathbf{h}, \mathbf{v}(0,\cdot))$ for all $\mathbf{v} \in \mathcal{V}_n$, and therefore (since $n \geq N$) also for all $\mathbf{v}$ of the form \eqref{eq:30}, and as $(-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}_n)(0,\cdot) =0$, we have that
\begin{align}\label{eq:31} \begin{aligned}
\tau^\alpha &\int_0^T (\varrho \mathbf{u}_n (s,\cdot), \ddot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s - (1-\tau^\alpha) \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho \dot{\mathbf{u}}_n)(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s \\ &\quad + \int_0^T \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}_n(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}_n(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s \\ & = - \tau^\alpha (\varrho \mathbf{g}, \dot{\mathbf{v}}(0,\cdot)) + \tau^\alpha (\varrho \mathbf{h}, \mathbf{v}(0,\cdot)) + \int_0^T \langle \mathbf{b}(s,\cdot), \mathbf{v}(s,\cdot) \rangle \,\mathrm{d} s. \end{aligned} \end{align}
As $0 \leq A(t) \leq A(T)=:A$ and $\mathrm{exp}(t+1 - e_{\alpha,1}(t)) \leq \mathrm{exp}(T+1)$, it follows from the energy estimate \eqref{eq:29} and Lemma \ref{le:1} that \begin{itemize} \item $(\mathbf{u}_n)_{n \in \mathbb{N}}$ is a bounded sequence in $\mathrm{L}^\infty(0,T;[\mathrm{H}^1_0(\Omega)]^3)$; \item $(\dot \mathbf{u}_n)_{n \in \mathbb{N}}$ is a bounded sequence in $\mathrm{L}^\infty(0,T;[\mathrm{L}^2_\varrho(\Omega)]^3)\simeq \mathrm{L}^\infty(0,T;[\mathrm{L}^2(\Omega)]^3)$; \item $((-\dot e_{\alpha,1})^{\frac{1}{2}}\,\dot \mathbf{u}_n)_{n \in \mathbb{N}}$ is a bounded sequence in $\mathrm{L}^2(0,T;[\mathrm{L}^2_\varrho (\Omega)]^3) \simeq \mathrm{L}^2(0,T;[\mathrm{L}^2(\Omega)]^3)$. \end{itemize}
Thus, by the Banach--Alaoglu theorem there exists a subsequence $(\mathbf{u}_{n_\ell})_{\ell=1}^\infty$ such that
\begin{align}\label{eq:32}
\left\{ \begin{array}{rll} \mathbf{u}_{n_\ell} &\rightharpoonup \;\;\mathbf{u} & \qquad \mbox{weakly$^\ast$ in $\mathrm{L}^\infty(0,T;[\mathrm{H}^1_0(\Omega)]^3)$}, \\
\dot{\mathbf{u}}_{n_\ell} &\rightharpoonup \;\;\dot{\mathbf{u}} & \qquad \mbox{weakly$^\ast$ in $\mathrm{L}^\infty(0,T;[\mathrm{L}^2(\Omega)]^3)$},\\
(-\dot e_{\alpha,1})^{\frac{1}{2}}\,\dot{\mathbf{u}}_{n_\ell} &\rightharpoonup \;\;(-\dot e_{\alpha,1})^{\frac{1}{2}}\,\dot{\mathbf{u}} & \qquad \mbox{weakly in $\mathrm{L}^2(0,T;[\mathrm{L}^2(\Omega)]^3)$}.
\end{array}
\right. \end{align} Furthermore, because for any $s \in (\frac{1}{2},1)$ the Sobolev space $[\mathrm{H}^1_0(\Omega)]^3$ is compactly embedded into the fractional-order Sobolev space $[\mathrm{H}^s_0(\Omega)]^3$, which is, in turn, continuously embedded into $[\mathrm{L}^2(\Omega)]^3$, it follows from the Aubin--Lions--Simon lemma (cf. \cite{BS}) and the first two bullet points above that \begin{align}\label{eq:33}
\begin{array}{rll}
\mathbf{u}_{n_\ell} &\rightarrow \;\;\mathbf{u} & \qquad \mbox{strongly in $\mathrm{C}([0,T];[\mathrm{H}^s_0(\Omega)]^3)$},\qquad s \in (\frac{1}{2},1),
\end{array} \end{align} and therefore also \begin{align}\label{eq:34}
\begin{array}{rll}
\mathbf{u}_{n_\ell} &\rightarrow \;\;\mathbf{u} & \qquad \mbox{strongly in $\mathrm{C}([0,T];[\mathrm{L}^2(\Omega)]^3)$}.
\end{array} \end{align}
We take $n=n_\ell$ in \eqref{eq:31} and pass to the limit $\ell \rightarrow \infty$ with $\mathbf{v}$ fixed. It then follows that \begin{align}\label{eq:35} \begin{aligned} \tau^\alpha &\int_0^T (\varrho\mathbf{u} (s,\cdot), \ddot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s - (1-\tau^\alpha) \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho \dot{\mathbf{u}})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s \\ &\quad + \int_0^T \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s \\ & = -\tau^\alpha (\varrho\mathbf{g}, \dot{\mathbf{v}}(0,\cdot)) + \tau^\alpha (\varrho\mathbf{h}, \mathbf{v}(0,\cdot)) + \int_0^T \langle \mathbf{b}(s,\cdot), \mathbf{v}(s,\cdot) \rangle \,\mathrm{d} s \end{aligned} \end{align} for all $\mathbf{v}$ as in \eqref{eq:30} above, with $N$ fixed. This equality, however, holds for all functions $\mathbf{v} \in \mathrm{W}^{2,1}(0,T;[\mathrm{L}^2(\Omega)]^3) \cap \mathrm{L}^1(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ such that $\mathbf{v}(T,\cdot)=0$ and $\dot{\mathbf{v}}(T,\cdot)=0$, as the set of all functions of the form \eqref{eq:30} is dense in this function space.
We note here that the passage to the limit in the second term on the left-hand side of \eqref{eq:31}, resulting in the second term on the left-hand side of \eqref{eq:35}, proceeds as follows: first, by using Fubini's theorem to interchange the spatial integral with the integral with respect to $s$, and then by interchanging the order of integration in $s$ and $t$, we have that
\begin{align*}
\int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho\dot{\mathbf{u}}_{n_\ell})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s &= - \int_0^T \left(\int_0^t \dot{e}_{\alpha,1}(t-s) \varrho\dot{\mathbf{u}}_{n_\ell}(s,\cdot) \,\mathrm{d} s , \dot{\mathbf{v}}(t,\cdot)\right)\! \,\mathrm{d} t\\ & = - \int_0^T \int_0^t \dot{e}_{\alpha,1}(t-s) \left(\varrho\dot{\mathbf{u}}_{n_\ell}(s,\cdot), \dot{\mathbf{v}}(t,\cdot)\right)\! \,\mathrm{d} s \,\mathrm{d} t \\ & = - \int_0^T \int_s^T \dot{e}_{\alpha,1}(t-s) \left(\varrho\dot{\mathbf{u}}_{n_\ell}(s,\cdot), \dot{\mathbf{v}}(t,\cdot)\right)\! \,\mathrm{d} t \,\mathrm{d} s \\ & = - \int_0^T \left(\varrho\dot{\mathbf{u}}_{n_\ell}(s,\cdot), \int_s^T \dot{e}_{\alpha,1}(t-s) \dot{\mathbf{v}}(t,\cdot)\,\mathrm{d} t \right) \! \,\mathrm{d} s. \end{align*} Then, because
$s \in [0,T] \mapsto \int_s^T \dot{e}_{\alpha,1}(t-s) \dot{\mathbf{v}}(t,\cdot)\,\mathrm{d} t \in
\mathrm{L}^1(0,T;[\mathrm{L}^2(\Omega)]^3), $ noting \eqref{eq:32}$_2$ yields \begin{align*} \lim_{\ell \rightarrow \infty} \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho\dot{\mathbf{u}}_{n_\ell})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s &= - \lim_{\ell \rightarrow \infty} \int_0^T \left(\varrho\dot{\mathbf{u}}_{n_\ell}(s,\cdot), \int_s^T \dot{e}_{\alpha,1}(t-s) \dot{\mathbf{v}}(t,\cdot)\,\mathrm{d} t \right) \! \,\mathrm{d} s\\ &= -\int_0^T \left(\varrho\dot{\mathbf{u}}(s,\cdot), \int_s^T \dot{e}_{\alpha,1}(t-s) \dot{\mathbf{v}}(t,\cdot)\,\mathrm{d} t \right)\! \,\mathrm{d} s\\ & = \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho \dot{\mathbf{u}})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s, \end{align*} as has been asserted above. The passages to the limits in the first and third term on the left-hand side of \eqref{eq:31} are immediate, by using \eqref{eq:32}$_2$ and \eqref{eq:32}$_1$, respectively.
We have thereby shown the existence of a function $\mathbf{u} \in \mathrm{L}^\infty(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ such that $\dot{\mathbf{u}} \in \mathrm{L}^\infty(0,T;[\mathrm{L}^2(\Omega)]^3)$, satisfying \eqref{eq:35} for all $\mathbf{v} \in \mathrm{W}^{2,1}(0,T;[\mathrm{L}^2(\Omega)]^3) \cap \mathrm{L}^1(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ such that $\mathbf{v}(T,\cdot)=0$ and $\dot{\mathbf{v}}(T,\cdot)=0$; the proof of the existence of a weak solution is therefore almost complete. It remains to show that $\mathbf{u} \in \mathrm{C}_w([0,T];[\mathrm{H}^1_0(\Omega)]^3)$ and $\dot{\mathbf{u}} \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^3)$.
We begin by recalling that, for any pair of Hilbert spaces $\mathcal{H}$ and $\mathcal{V}$ such that $\mathcal{V}$ is continuously and densely embedded into $\mathcal{H}$, if $v \in \mathrm{L}^\infty(0,T;\mathcal{V})$ and $\dot{v} \in \mathrm{L}^1(0,T;\mathcal{H})$ (whereby $v \in \mathrm{W}^{1,1}(0,T;\mathcal{H}) \subset \mathrm{C}([0,T];\mathcal{H}) \subset \mathrm{C}_w([0,T];\mathcal{H})$), then $v \in \mathrm{C}_w([0,T];\mathcal{V})$ (cf. eq. (8.49) in Lemma 8.1, Ch. 3 of \cite{LM}). Therefore, because
\[\mathbf{u} \in \mathrm{L}^\infty(0,T;[\mathrm{H}^1_0(\Omega)]^3)\quad\mbox{and}\quad
\dot{\mathbf{u}} \in \mathrm{L}^\infty(0,T;[\mathrm{L}^2(\Omega)]^3) \subset \mathrm{L}^1(0,T;[\mathrm{L}^2(\Omega)]^3), \]
it follows, with $\mathcal{V}=[\mathrm{H}^1_0(\Omega)]^3$ and $\mathcal{H} = [\mathrm{L}^2(\Omega)]^3$, that $\mathbf{u} \in \mathrm{C}_w([0,T];[\mathrm{H}^1_0(\Omega)]^3)$.
Next, we will show that $\dot{\mathbf{u}} \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^3)$. It follows from \eqref{eq:26} that $\mathbf{u}_n(t) \in \mathcal{V}_n$, for $t \in [0,T]$, satisfies: \begin{align}\label{eq:36} \hspace{-3mm} \left( \frac{\partial}{\partial t}\big(\tau^\alpha \varrho \dot \mathbf{u}_n + (1-\tau^\alpha) (-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}_n)\big), \mathbf{v} \right ) = - \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}_n) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}_n)) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}) \big) + \langle \mathbf{b}, \mathbf{v} \rangle \end{align} for all $\mathbf{v} \in \mathcal{V}_n$. We thus have from \eqref{eq:36} that, for any $\mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3$,
\begin{align*} \hspace{-3mm} \left( \frac{\partial}{\partial t}\big(\tau^\alpha \varrho \dot \mathbf{u}_n + (1-\tau^\alpha) (-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}_n)\big), \mathbf{v} \right ) &= \left( \frac{\partial}{\partial t}\big(\varrho \tau^\alpha\dot \mathbf{u}_n + \varrho (1-\tau^\alpha) (-\dot{e}_{\alpha,1}\ast_t \dot{\mathbf{u}}_n)\big), P_n \mathbf{v} \right )\\ & = - \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}_n) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}_n)) \mathbf{I} \,, \boldsymbol{\varepsilon}(P_n \mathbf{v}) \big) + \langle \mathbf{b}, P_n \mathbf{v} \rangle. \end{align*}
We note that by the energy estimate \eqref{eq:29} and because $\|P_n\|_{\mathcal{L}([\mathrm{H}^1(\Omega)]^3, [\mathrm{H}^1(\Omega)]^3)}$ is bounded by $(c_1/c_0)^{1/2}$, uniformly in $n$, there exists a positive constant $C$, independent of $n$, such that \begin{alignat*}{2} &\big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}_n) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}_n)) \mathbf{I} \,, \boldsymbol{\varepsilon}(P_n \mathbf{v}) \big) = \big (2\mu \boldsymbol{\varepsilon}(\mathbf{u}_n)\,, \boldsymbol{\varepsilon}(P_n \mathbf{v}) \big) + \big(\lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}_n)) \mathbf{I} \,, \boldsymbol{\varepsilon}(P_n \mathbf{v}) \big)\\ &\qquad = \big (2\mu \boldsymbol{\varepsilon}(\mathbf{u}_n)\,, \boldsymbol{\varepsilon}(P_n \mathbf{v}) \big) + \big(\lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}_n)) \,, \tr(\boldsymbol{\varepsilon}(P_n \mathbf{v})) \big)\\
&\qquad \leq \left(2\|\boldsymbol{\varepsilon}(\mathbf{u}_n)\|^2_{\mathrm{L}^2_\mu(\Omega)} + \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}_n))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\right)^{\frac{1}{2}}
\left(2\|\boldsymbol{\varepsilon}(P_n \mathbf{v})\|^2_{\mathrm{L}^2_\mu(\Omega)} + \|\tr(\boldsymbol{\varepsilon}(P_n \mathbf{v}))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\right)^{\frac{1}{2}}\\
&\qquad \leq C \left(2\|\boldsymbol{\varepsilon}(P_n \mathbf{v})\|^2_{\mathrm{L}^2_\mu(\Omega)} + \|\tr(\boldsymbol{\varepsilon}(P_n \mathbf{v}))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\right)^{\frac{1}{2}} \leq C \|\mathbf{v} \|_{\mathrm{H}^1(\Omega)} \qquad \forall\, \mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3. \end{alignat*}
Also,
\begin{align*} \langle \mathbf{b} , P_n \mathbf{v} \rangle & = (\tau^\alpha-1) \dot{e}_{\alpha,1} \left(\varrho \mathbf{h}\,, P_n \mathbf{v} \right) \,-\, e_{\alpha,1} \left(\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}\,, \boldsymbol{\varepsilon}(P_n \mathbf{v}) \right)\\ & \qquad + \tau^\alpha \left(\mathbf{f} , P_n \mathbf{v}\right) + (\tau^\alpha-1) \dot{e}_{\alpha,1} \ast_t \left(\mathbf{f} , P_n \mathbf{v}\right) \qquad \forall\, \mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3, \end{align*}
and therefore, because $\|P_n\|_{\mathcal{L}([\mathrm{L}^2(\Omega)]^3, [\mathrm{L}^2(\Omega)]^3)}\leq 1$ and $\|P_n\|_{\mathcal{L}([\mathrm{H}^1(\Omega)]^3, [\mathrm{H}^1(\Omega)]^3)}$ is bounded by $(c_1/c_0)^{1/2}$, uniformly in $n$, there exists a positive constant $C$, independent of $n$, such that
\begin{align*}
|\langle \mathbf{b} , P_n \mathbf{v} \rangle| & \leq (1-\tau^\alpha) (-\dot{e}_{\alpha,1}) \|\mathbf{h}\|_{\mathrm{L}^2_\varrho(\Omega)} \|\mathbf{v}\|_{\mathrm{L}^2_\varrho(\Omega)}\\
&\qquad + \, C e_{\alpha,1} \|\tau^\alpha\mathbf{S}-
2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}\|_{\mathrm{L}^2(\Omega)} \|\mathbf{v}\|_{\mathrm{H}^1(\Omega)}\\
& \qquad + \tau^\alpha \|\mathbf{f}\|_{\mathrm{L}^2_{1/\varrho}(\Omega)} \|\mathbf{v}\|_{\mathrm{L}^2_{\varrho}(\Omega)}\\
& \qquad + (1-\tau^\alpha) \big((-\dot{e}_{\alpha,1}) \ast_t \|\mathbf{f}\|_{\mathrm{L}^2_{1/\varrho}(\Omega)}\big) \|\mathbf{v}\|_{\mathrm{L}^2_{\varrho}(\Omega)} \qquad \forall\, \mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3. \end{align*}
Thus we deduce, with $\varrho_1:= \|\varrho\|_{\mathrm{L}^\infty(\Omega)}$,
\begin{align*}
&\left\|\frac{\partial}{\partial t}\big(\tau^\alpha \varrho \dot \mathbf{u}_n + (1-\tau^\alpha)
(-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}_n)\big)\right\|_{\mathrm{H}^{-1}(\Omega)} \\ &:= \sup_{\mathbf{v} \in [\mathrm{H}^1_0(\Omega)]^3} \frac{\left\langle \frac{\partial}{\partial t}\big(\varrho \tau^\alpha\dot \mathbf{u}_n + \varrho (1-\tau^\alpha)
(-\dot{e}_{\alpha,1}\ast_t \dot{\mathbf{u}}_n)\big) , \mathbf{v} \right\rangle}{\|\mathbf{v}\|_{\mathrm{H}^1_0(\Omega)}}\\
& \leq C + \varrho_1 (1-\tau^\alpha) (-\dot{e}_{\alpha,1}) \|\mathbf{h}\|_{\mathrm{L}^2(\Omega)} \\
&\qquad + C e_{\alpha,1}\|\tau^\alpha\mathbf{S}-
2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}\|_{\mathrm{L}^2(\Omega)} \\
&\qquad + \tau^\alpha \sqrt{\frac{\varrho_1}{\varrho_0}} \|\mathbf{f}\|_{\mathrm{L}^2(\Omega)} + (1-\tau^\alpha) \sqrt{\frac{\varrho_1}{\varrho_0}} \big((-\dot{e}_{\alpha,1}) \ast_t \|\mathbf{f}\|_{\mathrm{L}^2(\Omega)}\big), \end{align*}
which then implies, because of \eqref{eq:23}, for any $p \in [1,2]$ satisfying $p<\frac{1}{1-\alpha}$, that
\begin{align*}
& \left\|\frac{\partial}{\partial t}\big(\tau^\alpha\varrho \dot \mathbf{u}_n + (1-\tau^\alpha)
(-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}_n)\big)\right\|_{\mathrm{L}^p(0,T;\mathrm{H}^{-1}(\Omega))} \\
&\leq C T^{\frac{1}{p}} + \varrho_1 (1-\tau^\alpha) \|-\dot{e}_{\alpha,1}\|_{\mathrm{L}^p(0,T)} \|\mathbf{h}\|_{\mathrm{L}^2(\Omega)}
\end{align*} \begin{align*}
&\qquad + C \|e_{\alpha,1}\|_{\mathrm{L}^p(0,T)} \|\tau^\alpha\mathbf{S}-
2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}\|_{\mathrm{L}^2(\Omega)} \\
&\qquad + \tau^\alpha \sqrt{\frac{\varrho_1}{\varrho_0}} \|\mathbf{f}\|_{\mathrm{L}^p(0,T;\mathrm{L}^2(\Omega))} + (1-\tau^\alpha) \sqrt{\frac{\varrho_1}{\varrho_0}} \|-\dot{e}_{\alpha,1}\|_{\mathrm{L}^1(0,T)} \|\mathbf{f}\|_{\mathrm{L}^p(0,T;\mathrm{L}^2(\Omega))}. \end{align*} Hence, for any $p \in [1,2]$ such that $p<\frac{1}{1-\alpha}$, we have that \begin{align*}
\left\|\frac{\partial}{\partial t}\big(\tau^\alpha \varrho \dot \mathbf{u}_n + (1-\tau^\alpha)
(-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}_n)\big) \right\|_{\mathrm{L}^p(0,T;\mathrm{H}^{-1}(\Omega))} \leq C, \end{align*} where $C$ is a positive constant, independent of $n$. Consequently, by the Banach--Alaoglu theorem, there exists a subsequence $(\mathbf{u}_{n_\ell})_{\ell=1}^\infty$ such that
\[\frac{\partial}{\partial t}\big(\tau^\alpha \varrho \dot \mathbf{u}_{n_\ell} + (1-\tau^\alpha) (-\dot{e}_{\alpha,1}\ast_t \varrho\dot{\mathbf{u}}_{n_\ell})\big) \rightharpoonup \frac{\partial}{\partial t}\big(\tau^\alpha\varrho \dot \mathbf{u} + (1-\tau^\alpha) (-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}})\big), \]
weakly in $\mathrm{L}^p(0,T;[\mathrm{H}^{-1}(\Omega)]^3)$ for any $p \in [1,2]$ such that $p<\frac{1}{1-\alpha}$. As
\[ \tau^\alpha \varrho\dot \mathbf{u} + (1-\tau^\alpha) (-\dot{e}_{\alpha,1}\ast_t \varrho\dot{\mathbf{u}}) \in \mathrm{L}^\infty(0,T;[\mathrm{L}^2(\Omega)]^3)\]
and
\[ \frac{\partial}{\partial t}\big(\tau^\alpha\varrho \dot \mathbf{u} + (1-\tau^\alpha) (-\dot{e}_{\alpha,1}\ast_t \varrho\dot{\mathbf{u}})\big) \in \mathrm{L}^1(0,T;[\mathrm{H}^{-1}(\Omega)]^3),\]
it once again follows, thanks to the continuous embedding of $[\mathrm{L}^2(\Omega)]^3$ into $[\mathrm{H}^{-1}(\Omega)]^3$, that
\begin{align}\label{eq:37} \tau^\alpha \varrho \dot \mathbf{u} + (1-\tau^\alpha) (-\dot{e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}}) \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^3). \end{align}
However, as $\varrho \dot{\mathbf{u}} \in \mathrm{L}^\infty(0,T;[\mathrm{L}^2(\Omega)]^3)$, we have that $t \in [0,T] \mapsto (\varrho\dot{\mathbf{u}}(t),\mathbf{w})$ belongs to $\mathrm{L}^\infty(0,T)$ for each $\mathbf{w} \in [\mathrm{L}^2(\Omega)]^3$, and therefore, thanks to the smoothing property of the convolution, the function $t \in [0,T] \mapsto -\dot{e}_{\alpha,1}(t) \ast_t (\varrho \dot{\mathbf{u}}(t),\mathbf{w})$ belongs to $\mathrm{C}([0,T])$. Consequently,
\[ t \in [0,T] \mapsto (-\dot{e}_{\alpha,1}(t) \ast_t \varrho \dot{\mathbf{u}}(t),\mathbf{w}) \in \mathrm{C}([0,T])\qquad \forall\, \mathbf{w} \in [\mathrm{L}^2(\Omega)]^3,\]
meaning that $(1-\tau^\alpha) (-\dot{e}_{\alpha,1}) \ast_t \varrho \dot{\mathbf{u}} \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^3)$, and therefore by \eqref{eq:37}, also $\varrho\dot{\mathbf{u}} \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^3)$. Because $\varrho_0 \leq \varrho(\mathbf{x}) \leq \varrho_1$ a.e. on $\Omega$, it then follows that
\[\dot{\mathbf{u}} \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^3).\]
That completes the proof of the existence of a weak solution.
STEP 2: \textit{Proof of the energy inequality.} Next we prove that weak solutions whose existence we have thus proved satisfy the energy inequality in the statement of the theorem. Our starting point is \eqref{eq:29}.
By Lemma \ref{le:1}, we have that
\begin{align*} &\int_0^t \int_\Omega \frac{\partial}{\partial s}(-\dot{e}_{\alpha,1}\ast_s \sqrt{\varrho}\dot{\mathbf{u}}_n)(s)\cdot \sqrt{\varrho} \dot{\mathbf{u}}_n(s) \,\mathrm{d} \mathbf{x} \,\mathrm{d} s \\
&\qquad \geq \frac{1}{2} (-\dot{e}_{\alpha,1}(\cdot) \ast_t \|\sqrt{\varrho}\dot{\mathbf{u}}_n(\cdot)\|^2_{\mathrm{L}^2(\Omega)})(t)
+ \frac{1}{2}\int_0^t -\dot{e}_{\alpha,1}(s)\|\sqrt{\varrho}\dot{\mathbf{u}}_n(s)\|^2_{\mathrm{L}^2(\Omega)} \,\mathrm{d} s
\qquad \mbox{for all $t \in (0,T]$}, \end{align*} and each of the two terms on the right-hand side is nonnegative. By omitting the first term from the right-hand side of this inequality, and substituting the resulting inequality into \eqref{eq:29} we have that \begin{align}\label{eq:39} \begin{aligned}
&\frac{\tau^\alpha}{2} \|\dot \mathbf{u}_n(t)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}_n(t))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}_n(t)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\\
&\quad + \frac{1-\tau^\alpha}{2} \int_0^t -\dot{e}_{\alpha,1}(s)\|\dot{\mathbf{u}}_n(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s \leq A(t) \, \mathrm{exp}(t+1 - e_{\alpha,1}(t))\quad \mbox{for all $t \in (0,T]$}. \end{aligned} \end{align}
As $\dot{\mathbf{u}}_{n_\ell} \rightharpoonup \dot{\mathbf{u}}$ weakly$^\ast$ in $\mathrm{L}^\infty(0,T;[\mathrm{L}^2_\varrho(\Omega)]^3)$, the weak lower-semicontinuity of the norm function and \eqref{eq:39} imply that
\begin{align}\label{eq:40}
\|\dot \mathbf{u}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \leq
\|\dot \mathbf{u}\|^2_{\mathrm{L}^\infty(0,t;\mathrm{L}^2_\varrho(\Omega))} \leq \liminf_{\ell \rightarrow \infty} \|\dot \mathbf{u}_{n_\ell}\|^2_{\mathrm{L}^\infty(0,t;\mathrm{L}^2_\varrho(\Omega))} \leq A(t) \, \mathrm{exp}(t+1 - e_{\alpha,1}(t)) \end{align}
for all $t \in (0,T]$ and a.e. $s \in (0,t]$. Similarly, because $\mathbf{u}_{n_\ell} \rightharpoonup \mathbf{u}$ weakly$^\ast$ in $\mathrm{L}^\infty(0,T;[\mathrm{H}^1_0(\Omega)]^3)$, \begin{align}\label{eq:41} \begin{aligned}
&\bigg[\frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}(s))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}(s)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\bigg]\\
&\qquad \leq \mbox{ess.sup}_{s \in (0,t]}\bigg[\frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}(s))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}(s)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\bigg] \\ &\qquad \leq \liminf_{\ell \rightarrow \infty} \left\{
\mbox{ess.sup}_{s \in (0,t]}\left[\frac{1}{2} \|\boldsymbol{\varepsilon}(\mathbf{u}_{n_\ell}(s))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{u}_{n_\ell}(s)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\right]\right\}\\ &\qquad \leq A(t) \, \mathrm{exp}(t+1 - e_{\alpha,1}(t)) \end{aligned} \end{align} for all $t \in (0,T]$ and a.e. $s \in (0,t]$. Finally, because $(-\dot{e}_{\alpha,1})^{\frac{1}{2}} \dot{\mathbf{u}}_{n_\ell} \rightharpoonup (-\dot{e}_{\alpha,1})^{\frac{1}{2}} \dot{\mathbf{u}}$ weakly in {\color{black} the function space} $\mathrm{L}^2(0,T;[\mathrm{L}^2_\varrho(\Omega)]^3)$, we have that \begin{align}\label{eq:42}
\int_0^t -\dot{e}_{\alpha,1}(s)\|\dot{\mathbf{u}}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s \leq \liminf_{\ell \rightarrow \infty} \int_0^t -\dot{e}_{\alpha,1}(s)\|\dot{\mathbf{u}}_{n_\ell}(s)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s \leq A(t) \, \mathrm{exp}(t+1 - e_{\alpha,1}(t)) \end{align}
for all $t \in (0,T]$. Summing \eqref{eq:40}--\eqref{eq:42} we deduce the asserted energy inequality \textcolor{black}{\eqref{EE}}.
{\color{black} STEP 3: \textit{Attainment of the initial conditions for $\mathbf{u}$ and $\dot{\mathbf{u}}$.} Next, we shall prove that the initial condition $\mathbf{u}(0,\cdot) = \mathbf{g}(\cdot)$ is satisfied in the sense of continuous functions from $[0,T]$ into $[\mathrm{L}^2(\Omega)]^3$. To this end, we note that \begin{align*}
\|\mathbf{u}(0,\cdot) - \mathbf{u}_{n_\ell}(0,\cdot)\|_{\mathrm{L}^2(\Omega)} \leq \|\mathbf{u} - \mathbf{u}_{n_\ell}\|_{\mathrm{C}([0,T];\mathrm{L}^2(\Omega))} \rightarrow 0 \qquad \mbox{as $\ell \rightarrow \infty$}, \end{align*}
thanks to \eqref{eq:34}. Since $\mathbf{u}_{n_\ell}(0,\cdot) = P_{n_\ell}\mathbf{g}(\cdot) \rightarrow \mathbf{g}(\cdot)$ strongly in $[\mathrm{L}^2_\varrho(\Omega)]^3\simeq [\mathrm{L}^2(\Omega)]^3 $ as $\ell \rightarrow \infty$, we finally deduce by the triangle inequality that $\mathbf{u}(0,\cdot) - \mathbf{g}(\cdot) = 0$. Therefore, $\mathbf{u}(0,\cdot) = \mathbf{g}(\cdot)$, with $\mathbf{u} \in \mathrm{C}([0,T];[\mathrm{L}^2(\Omega)]^3)$.
To show that the initial condition, $\dot{\mathbf{u}}(0,\cdot) = \mathbf{h}(\cdot)$ is satisfied we note that, thanks to \eqref{eq:20}$_1$ and \eqref{eq:20}$_2$, we have $\mathbf{u} \in \mathrm{W}^{1,\infty}(0,T;[\mathrm{L}^2(\Omega)]^3) = \mathrm{C}^{0,1}([0,T];[\mathrm{L}^2(\Omega)]^3)$, so we can perform partial integration with respect to $t$ in the first term on the left-hand side of \eqref{eq:21}, resulting in \begin{align*}
-\tau^\alpha (\varrho\mathbf{u}(0,\cdot), \dot{\mathbf{v}}(0,\cdot))&- \tau^\alpha \int_0^T (\varrho\dot\mathbf{u} (s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s - (1-\tau^\alpha) \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho\dot{\mathbf{u}})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s \\ & + \int_0^T \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s \\ &\hspace{-5mm} = - \tau^\alpha (\varrho\mathbf{g}, \dot{\mathbf{v}}(0,\cdot)) + \tau^\alpha (\varrho\mathbf{h}, \mathbf{v}(0,\cdot)) + \int_0^T \langle \mathbf{b}(s,\cdot), \mathbf{v}(s,\cdot) \rangle \,\mathrm{d} s \end{align*} for all $\mathbf{v} \in \mathrm{W}^{2,1}(0,T;[\mathrm{L}^2(\Omega)]^3) \cap \mathrm{L}^1(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ with $\mathbf{v}(T,\cdot)=0$ and $\dot{\mathbf{v}}(T,\cdot)=0$. As $\mathbf{u}(0,\cdot) = \mathbf{g}(\cdot)$, the first term on the left-hand side and the first term on the right-hand side cancel, whereby \begin{align*} &- \tau^\alpha \int_0^T (\varrho\dot\mathbf{u} (s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s - (1-\tau^\alpha) \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho\dot{\mathbf{u}})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s \\ & + \int_0^T \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s
= \tau^\alpha (\varrho\mathbf{h}, \mathbf{v}(0,\cdot)) + \int_0^T \langle \mathbf{b}(s,\cdot), \mathbf{v}(s,\cdot) \rangle \,\mathrm{d} s \end{align*} for all $\mathbf{v} \in \mathrm{W}^{2,1}(0,T;[\mathrm{L}^2(\Omega)]^3) \cap \mathrm{L}^1(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ with $\mathbf{v}(T,\cdot)=0$ and $\dot{\mathbf{v}}(T,\cdot)=0$. As the set of all such $\mathbf{v}$ is dense in the set of all $\mathbf{v} \in \mathrm{W}^{1,1}(0,T;[\mathrm{L}^2(\Omega)]^3) \cap \mathrm{L}^1(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ with $\mathbf{v}(T,\cdot)=0$, it follows that \begin{align}\label{eq:4.23aa} \begin{aligned} &- \tau^\alpha \int_0^T (\varrho\dot\mathbf{u} (s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s - (1-\tau^\alpha) \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho\dot{\mathbf{u}})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s\\ & + \int_0^T \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s
= \tau^\alpha (\varrho\mathbf{h}, \mathbf{v}(0,\cdot)) + \int_0^T \langle \mathbf{b}(s,\cdot), \mathbf{v}(s,\cdot) \rangle \,\mathrm{d} s \end{aligned} \end{align} holds for all $\mathbf{v} \in \mathrm{W}^{1,1}(0,T;[\mathrm{L}^2(\Omega)]^3) \cap \mathrm{L}^1(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ with $\mathbf{v}(T,\cdot)=0$.
We fix a $t_0 \in (0,T)$ and for $\varepsilon \in (0,T-t_0)$ we define
\[ \varphi_\varepsilon(t):= \left\{\begin{array}{cl} 1 & \mbox{for $0 \leq t \leq t_0$},\\ 1 - \frac{1}{\varepsilon}(t - t_0) & \mbox{for $t_0 < t < t_0 + \varepsilon$},\\ 0 & \mbox{for $t_0 + \varepsilon \leq t \leq T$.} \end{array} \right. \]
Clearly, $\varphi_\varepsilon \in \mathrm{C}^{0,1}([0,T])$, the weak derivative of $\varphi_\varepsilon$ is $\varphi_\varepsilon' = - \frac{1}{\varepsilon} \chi_{(t_0,t_0 + \varepsilon)}$, and $\varphi_\varepsilon(T)=0$. Hence, for any $\mathbf{w} \in [\mathrm{H}^1_0(\Omega)]^3$, and taking $\mathbf{v} = \varphi_\varepsilon \mathbf{w}$ in \eqref{eq:4.23aa}, we have that \begin{align}\label{eq:4.23bb} &\tau^\alpha \,\frac{1}{\varepsilon}\int_{t_0}^{t_0+\varepsilon} (\varrho\dot\mathbf{u} (s,\cdot), {\mathbf{w}}(\cdot)) \,\mathrm{d} s + (1-\tau^\alpha)\, \frac{1}{\varepsilon}\int_{t_0}^{t_0+\varepsilon} ((-\dot{e}_{\alpha,1}\ast_s \varrho\dot{\mathbf{u}})(s,\cdot),{\mathbf{w}}(\cdot)) \,\mathrm{d} s \nonumber\\ & + \int_0^{t_0+\varepsilon} \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))) \mathbf{I} \,, \varphi_\varepsilon(s)\, \boldsymbol{\varepsilon}(\mathbf{w}(\cdot)) \big) \,\mathrm{d} s\\ & \hspace{2in} = \tau^\alpha (\varrho\mathbf{h}, \mathbf{w}) + \int_0^{t_0+\varepsilon} \langle \mathbf{b}(s,\cdot), \varphi_\varepsilon(s) \mathbf{w}(\cdot) \rangle \,\mathrm{d} s \nonumber \end{align} for all $\mathbf{w} \in [\mathrm{H}^1_0(\Omega)]^3$. As $\varrho\dot{\mathbf{u}} \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^3)$ and $(-\dot{e}_{\alpha,1}) \ast_t \varrho \dot{\mathbf{u}} \in \mathrm{C}([0,T];[\mathrm{L}^2(\Omega)]^3)$ (cf. the end of STEP 1), we can pass to the limit $\varepsilon \rightarrow 0_+$ in \eqref{eq:4.23bb}, with $t_0 \in (0,T)$ fixed, to deduce by applying Lebesgue's differentiation theorem to the first and the second integral on the left-hand side of \eqref{eq:4.23bb}, recalling the continuity of the integrands in those integrals as functions of the integration variable $s$, for $\mathbf{w} \in [\mathrm{H}^1_0(\Omega)]^3$ fixed, and using the continuity of the integral with respect to its (upper) limit in the third integral on the left-hand side of
\eqref{eq:4.23bb} and the second term on the right-hand side of \eqref{eq:4.23bb}, that
\begin{align}\label{eq:4.23dd} \begin{aligned} &\tau^\alpha \,(\varrho\dot\mathbf{u} (t_0,\cdot), {\mathbf{w}}(\cdot)) + (1-\tau^\alpha)\, ((-\dot{e}_{\alpha,1}\ast_t \varrho\dot{\mathbf{u}})(t_0,\cdot),{\mathbf{w}}(\cdot))\\ & + \int_0^{t_0} \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{w}(\cdot)) \big) \,\mathrm{d} s = \tau^\alpha (\varrho\mathbf{h}, \mathbf{w}) + \int_0^{t_0} \langle \mathbf{b}(s,\cdot), \mathbf{w}(\cdot) \rangle \,\mathrm{d} s \end{aligned} \end{align} for all $\mathbf{w} \in [\mathrm{H}^1_0(\Omega)]^3$ and all $t_0 \in (0,T)$. Next, with $\mathbf{w} \in [\mathrm{H}^1_0(\Omega)]^3$ fixed, we pass to the limit $t_0 \rightarrow 0_+$ in \eqref{eq:4.23dd}, noting that the third term on the left-hand side and the second term on the right-hand side both vanish in this limit thanks to the continuity of these integrals as functions of $t_0$, and that, for the same reason and by Fubini's theorem, also
\[\lim_{t_0 \rightarrow 0_+} ((-\dot{e}_{\alpha,1}\ast_t \varrho\dot{\mathbf{u}})(t_0,\cdot),{\mathbf{w}}(\cdot)) = -\lim_{t_0 \rightarrow 0_+} \int_0^{t_0} \dot{e}_{\alpha,1}(t)\, (( \varrho\dot{\mathbf{u}})(t_0 - t,\cdot),{\mathbf{w}}(\cdot)) \,\mathrm{d} t = 0.\]
Consequently, upon passage to the limit $t_0 \rightarrow 0_+$, the equality \eqref{eq:4.23dd} collapses to
\[ \tau^\alpha \,(\varrho\dot\mathbf{u} (0,\cdot), {\mathbf{w}}(\cdot)) = \tau^\alpha (\varrho\mathbf{h}, \mathbf{w}) \qquad \forall\, \mathbf{w} \in [\mathrm{H}^1_0(\Omega)]^3.\]
As $\tau \in (0,1]$ and $[\mathrm{H}^1_0(\Omega)]^3$ is dense in $[\mathrm{L}^2(\Omega)]^3$ it then follows that
\[ (\varrho\dot\mathbf{u} (0,\cdot), {\mathbf{w}}(\cdot)) = (\varrho\mathbf{h}, \mathbf{w}) \qquad \forall\, \mathbf{w} \in [\mathrm{L}^2(\Omega)]^3.\]
Because $\varrho \in \mathrm{L}^\infty(\Omega)$ and $\varrho(\mathbf{x}) \geq \varrho_0>0$ a.e. in $\Omega$ (cf. \eqref{coeff-ass}), we finally have that
\[ (\dot\mathbf{u} (0,\cdot), {\mathbf{w}}(\cdot)) = (\mathbf{h}, \mathbf{w}) \qquad \forall\, \mathbf{w} \in [\mathrm{L}^2(\Omega)]^3,\]
and therefore $\dot\mathbf{u} (0,\cdot) = \mathbf{h}(\cdot)$, as an equality in $\mathrm{C}_w([0,T],[\mathrm{L}^2(\Omega)]^3)$. }
STEP 4. \textit{Uniqueness of the solution.} Having shown, for initial data $\mathbf{g}$, $\mathbf{h}$, $\mathbf{S}$ and the source term $\mathbf{f}$ satisfying \eqref{eq:19}, and for any $\tau \in (0,1]$, $\varrho, \mu, \lambda \in \mathrm{L}^\infty(\Omega)$, with $\varrho(\mathbf{x}) \geq \varrho_0>0$, $\mu(\mathbf{x}) \geq \mu_0>0$ and $\lambda(\mathbf{x}) \geq 0$ for a.e. $\mathbf{x} \in \Omega$, and $\alpha \in (0,1)$, the existence of a weak solution \eqref{eq:20} to the problem \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8} satisfying the equality \eqref{eq:21} with \eqref{eq:22} we now turn to the proof of uniqueness of weak solutions. {\color{black} Suppose that $\mathbf{u}_1$ and $\mathbf{u}_2$ are two weak solutions to \eqref{eq:2}, \eqref{eq:3}, \eqref{eq:8} subject to the same initial data and source term. Then, they both satisfy \eqref{eq:4.23aa}, and therefore, thanks to the linearity of the problem their difference $\mathbf{u}:= \mathbf{u}_1 - \mathbf{u}_2 $ satisfies
\begin{align}\label{eq:46} \begin{aligned} - \tau^\alpha \int_0^T (\varrho\dot\mathbf{u} (s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s &- (1-\tau^\alpha) \int_0^T ((-\dot{e}_{\alpha,1}\ast_s \varrho\dot{\mathbf{u}})(s,\cdot), \dot{\mathbf{v}}(s,\cdot)) \,\mathrm{d} s \\ & + \int_0^T \big ( 2\mu \boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s = 0 \end{aligned} \end{align}
for all $\mathbf{v} \in \mathrm{W}^{1,1}(0,T;[\mathrm{L}^2(\Omega)]^3) \cap \mathrm{L}^1(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ with $\mathbf{v}(T,\cdot)=0$. We fix a $t_0 \in (0,T)$,} and let
\[ \mathbf{v}(t,\mathbf{x}):= \left\{ \begin{array}{cl} -\int_t^{t_0} \mathbf{u}(s,\mathbf{x}) \,\mathrm{d} s & \mbox{for $0 < t \leq t_0$},\\ \mathbf{0} & \mbox{for $t_0 < t <T$.} \end{array} \right.\]
Clearly, $\mathbf{v} \in \mathrm{W}^{1,\infty}(0,T;[\mathrm{H}^1_0(\Omega)]^3)$ with $\mathbf{v}(T,\cdot)=\mathbf{0}$, and hence the function $\mathbf{v}$, thus defined, is an admissible test function. We therefore have from \eqref{eq:46} that \begin{align}\label{eq:47} \begin{aligned}
- \tau^\alpha \int_0^{t_0} (\varrho\dot\mathbf{u} (s,\cdot), \mathbf{u}(s,\cdot)) \,\mathrm{d} s &- (1-\tau^\alpha) \int_0^{t_0} ((-\dot{e}_{\alpha,1}\ast_s \varrho \dot{\mathbf{u}})(s,\cdot), \mathbf{u}(s,\cdot)) \,\mathrm{d} s \\ &+ \int_0^{t_0} \big ( 2\mu \boldsymbol{\varepsilon}(\dot\mathbf{v}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\dot\mathbf{v}(s,\cdot))) \mathbf{I} \,, \boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)) \big) \,\mathrm{d} s = 0. \end{aligned} \end{align} Focusing in particular on the first and the third term on the left-hand side of \eqref{eq:47} we then have that \begin{align}\label{eq:48} \begin{aligned}
- \frac{1}{2} \tau^\alpha \int_0^{t_0} &\frac{\,\mathrm{d}}{\,\mathrm{d} s}\|\mathbf{u} (s,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s - (1-\tau^\alpha) \int_0^{t_0} ((-\dot{e}_{\alpha,1}\ast_s \varrho \dot{\mathbf{u}})(s,\cdot), \mathbf{u}(s,\cdot)) \,\mathrm{d} s \\
& + \int_0^{t_0} \left( \frac{\,\mathrm{d}}{\,\mathrm{d} s}\|\boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \frac{\,\mathrm{d}}{\,\mathrm{d} s}\|\tr(\boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\right) \,\mathrm{d} s = 0. \end{aligned} \end{align} As
\begin{align*} (-\dot{e}_{\alpha,1}\ast_s \dot{\mathbf{u}})(s,\cdot) &= \frac{\,\mathrm{d}}{\,\mathrm{d} s} (-{e}_{\alpha,1}\ast_s \dot{\mathbf{u}})(s,\cdot) - (-{e}_{\alpha,1}(0)) \dot{\mathbf{u}}(s,\cdot)
= \frac{\,\mathrm{d}}{\,\mathrm{d} s} (-{e}_{\alpha,1}\ast_s \dot{\mathbf{u}})(s,\cdot) + \dot{\mathbf{u}}(s,\cdot), \end{align*} inserting this into the second term on the left-hand side of \eqref{eq:48} yields \begin{align*}
- \frac{1}{2}\tau^\alpha \int_0^{t_0} \frac{\,\mathrm{d}}{\,\mathrm{d} s}\|\mathbf{u} (s,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s &- (1-\tau^\alpha) \int_0^{t_0} \left(\frac{\,\mathrm{d}}{\,\mathrm{d} s} (-{e}_{\alpha,1}\ast_s \varrho\dot{\mathbf{u}})(s,\cdot)+ \varrho \dot{\mathbf{u}}(s,\cdot), \mathbf{u}(s,\cdot)\right) \,\mathrm{d} s \\
& + \int_0^{t_0} \left(\frac{\,\mathrm{d}}{\,\mathrm{d} s}\|\boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \frac{\,\mathrm{d}}{\,\mathrm{d} s}\|\tr(\boldsymbol{\varepsilon}(\mathbf{v}(s,\cdot)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\right) \,\mathrm{d} s = 0. \end{align*} Hence, by performing partial integration in the second integral on the left-hand side, and because $\mathbf{v}(t_0,\cdot)=0$, it follows that \begin{align*} &-\frac{1}{2}\tau^\alpha
\left(\|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)}- \|\mathbf{u}(0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)}\right)
- \frac{1}{2}(1-\tau^\alpha) \left(\|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} - \|\mathbf{u}(0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)}\right)\\ & - (1-\tau^\alpha)\int_0^{t_0} \left(({e}_{\alpha,1}\ast_s \sqrt{\varrho}\dot{\mathbf{u}})(s,\cdot), \sqrt{\varrho}\dot{\mathbf{u}}(s,\cdot)\right) \,\mathrm{d} s
+ (1-\tau^\alpha) \left(({e}_{\alpha,1}\ast_s \varrho \dot{\mathbf{u}})(s,\cdot), {\mathbf{u}}(s,\cdot)\right)|_{s=0}^{s=t_0}\\
&- \|\boldsymbol{\varepsilon}(\mathbf{v}(0,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)} - \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{v}(0,\cdot)))\|^2_{\mathrm{L}^2_\lambda(\Omega)} = 0. \end{align*} Again, because $\mathbf{u} \in \mathrm{C}([0,T];[\mathrm{L}^2(\Omega)]^3)$ satisfies $\mathbf{u}(0,\mathbf{x}) = \mathbf{0}$ for a.e. $\mathbf{x} \in \Omega$, rearrangement yields \begin{align}\label{eq:49} \begin{aligned}
&\frac{1}{2} \|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + (1-\tau^\alpha)\int_0^{t_0} \left(({e}_{\alpha,1}\ast_s \sqrt{\varrho} \dot{\mathbf{u}})(s,\cdot), \sqrt{\varrho} \dot{\mathbf{u}}(s,\cdot)\right) \,\mathrm{d} s \\
& \qquad + \|\boldsymbol{\varepsilon}(\mathbf{v}(0,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{v}(0,\cdot)))\|^2_{\mathrm{L}^2_\lambda(\Omega)}\\ & \qquad \qquad = (1-\tau^\alpha) \left(({e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}})(t_0,\cdot), {\mathbf{u}}(t_0,\cdot)\right). \end{aligned} \end{align}
Thus, thanks to Lemma \ref{le:1} the second term on the left-hand side of \eqref{eq:49} can be bounded below, yielding \begin{align}\label{eq:50} \begin{aligned}
&\frac{1}{2} \|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} (1-\tau^\alpha)\left[ (e_{\alpha,1} \ast_t \|\dot\mathbf{u}(\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)})(t_0)
+ \int_0^{t_0} e_{\alpha,1}(s)\|\dot\mathbf{u}(s,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s \right]\\
& ~ ~ + \|\boldsymbol{\varepsilon}(\mathbf{v}(0,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{v}(0,\cdot)))\|^2_{\mathrm{L}^2_\lambda(\Omega)} \leq (1-\tau^\alpha) \left(({e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}})(t_0,\cdot), {\mathbf{u}}(t_0,\cdot)\right). \end{aligned} \end{align}
Next, we will show that for any $t_0>0$ such that $t_0 \leq \min\left(T,1\right)$ the term on the right-hand side of \eqref{eq:50} can be completely absorbed into the left-hand side of the inequality. Indeed, by Young's inequality, Minkowski's integral inequality, and the Cauchy--Schwarz inequality, \begin{align*}
(1-\tau^\alpha) \left(({e}_{\alpha,1} \right. &\ast_t \left.\varrho\dot{\mathbf{u}})(t_0,\cdot), {\mathbf{u}}(t_0,\cdot)\right) \\
& \leq \frac{1}{2} (1-\tau^\alpha) \|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} (1-\tau^\alpha) \left\| ({e}_{\alpha,1}\ast_t \dot{\mathbf{u}})(t_0,\cdot)\right\|^2_{\mathrm{L}^2_\varrho(\Omega)}\\
& \leq \frac{1}{2} (1-\tau^\alpha) \|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} (1-\tau^\alpha) \left[({e}_{\alpha,1}\ast_t \|\dot{\mathbf{u}}\|_{\mathrm{L}^2_\varrho(\Omega)})(t_0)\right]^2\\
& \leq \frac{1}{2} (1-\tau^\alpha) \|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)}
+ \frac{1}{2} (1-\tau^\alpha) \left[\int_0^{t_0} {e}_{\alpha,1}(t_0 -s) \,\mathrm{d} s\right] \left[({e}_{\alpha,1}\ast_t \|\dot{\mathbf{u}}\|^2_{\mathrm{L}^2_\varrho(\Omega)})(t_0)\right]\!. \end{align*} As $e_{\alpha,1}(0)=1$ and $t \in [0,\infty) \mapsto e_{\alpha,1}(t)$ is positive and monotonic decreasing, it follows that \begin{align*} (1-\tau^\alpha) \left(({e}_{\alpha,1}\ast_t \varrho \dot{\mathbf{u}})(t_0,\cdot), {\mathbf{u}}(t_0,\cdot)\right)
& \leq \frac{1}{2} (1-\tau^\alpha) \|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)}
+ \frac{1}{2} (1-\tau^\alpha) t_0 \left[({e}_{\alpha,1}\ast_t \|\dot{\mathbf{u}}\|^2_{\mathrm{L}^2_\varrho(\Omega)})(t_0)\right]. \end{align*} Substituting this into the right-hand side of \eqref{eq:50} and, because $\tau \in (0,1]$ and $t_0\leq 1$, yields \begin{align}\label{eq:51} \begin{aligned}
\frac{1}{2} \tau^\alpha \|\mathbf{u}(t_0,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} + \frac{1}{2} (1-\tau^\alpha)\left[
\int_0^{t_0} e_{\alpha,1}(s)\|\dot\mathbf{u}(s,\cdot)\|^2_{\mathrm{L}^2_\varrho(\Omega)} \,\mathrm{d} s \right]\\
\quad + \|\boldsymbol{\varepsilon}(\mathbf{v}(0,\cdot))\|^2_{\mathrm{L}^2_\mu(\Omega)} + \frac{1}{2} \|\tr(\boldsymbol{\varepsilon}(\mathbf{v}(0,\cdot)))\|^2_{\mathrm{L}^2_\lambda(\Omega)} \leq 0. \end{aligned} \end{align}
Thus we deduce that $\mathbf{u}(t,\cdot) = \mathbf{0}$ for all $t \in [0,t_0]$, for any $t_0>0$ such that $t_0 \leq \min(T,1)$. If $t_0<T$, then having shown that $\mathbf{u}(t,\cdot) = \mathbf{0}$ for all $t \in [0,t_0]$ we repeat the argument on successive time intervals $[kt_0, \min(T, (k+1)t_0)]$, $k=1,2,\dots, K$, with initial data $\mathbf{u}(kt_0, \cdot)=\mathbf{0}$, $\dot{\mathbf{u}}(kt_0,\cdot) = \mathbf{0}$, where $K$ is the (unique) positive integer such that $Kt_0<T$ and $(K+1)t_0 \geq T$. Hence, $\mathbf{u}(t,\cdot)= \mathbf{0}$ for all $t \in [0,T]$. Thus we have shown the uniqueness of the weak solution.
STEP 5. \textit{Continuous dependence of the solution on the data.} As the problem under consideration is linear, the energy inequality \eqref{EE}
implies continuous dependence of weak solutions on the initial data and the load vector.
{\color{black} STEP 6. \textit{Attainment of the initial condition for $\boldsymbol{\sigma}$.} By \eqref{eq:stress-defin} and noting that \[ \mathcal{L}^{-1}\left(\frac{1 + p^\alpha}{1 + \tau^{\alpha} p^\alpha}\right) = (\tau^{-\alpha}-1)\, \dot{e}_\alpha(t,\tau^{-\alpha}) + \tau^{-\alpha} \delta \quad \mbox{ and } \quad \mathcal{L}^{-1}\left(\frac{p^{\alpha-1}}{1 + \tau^\alpha p^\alpha}\right) = \tau^{-\alpha}\, e_\alpha(t,\tau^{-\alpha}), \] we have that
\begin{align}\label{eq:sig-equa} \begin{aligned} \tau^\alpha \boldsymbol{\sigma}(t,\cdot)& = (1 -\tau^{\alpha}) \dot{e}_\alpha(t,\tau^{-\alpha}) \ast_t (2\mu \boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot))) \mathbf{I})\\ &\quad + (2\mu \boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot))) \mathbf{I}) + e_\alpha(t,\tau^{-\alpha}) \,(\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I})\\ & =: \mathbf{A}_1 + \mathbf{A}_2 + \mathbf{A}_3. \end{aligned} \end{align} We begin by showing that $\boldsymbol{\sigma}$ belongs to $\mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$.
As $e_\alpha(\cdot,\tau^{-\alpha}) \in \mathrm{C}([0,\infty))$ and $\tau^\alpha\mathbf{S}-2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I} \in [\mathrm{L}^2(\Omega)]^{3 \times 3}$ thanks to \eqref{eq:19}, we have that $e_\alpha(\cdot,\tau^{-\alpha}) \,(\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I})$ belongs to the function space $\mathrm{C}([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$, and therefore also to $\mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$, implying that $\mathbf{A}_3 \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$.
To show that $\mathbf{A}_2 \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$, recall that $\mathbf{A}_2 \in \mathrm{L}^\infty(0,T;[\mathrm{L}^2(\Omega)]^{3 \times 3})$, because $\mathbf{u}$, as a weak solution, belongs to $\mathrm{L}^\infty(0,T;[\mathrm{H}^1_0(\Omega)]^{3})$. Together with the fact that
\begin{equation}\label{eq:A2} \dot{\mathbf{A}}_2 \in \mathrm{L}^\infty(0,T;[\mathrm{H}^{-1}(\Omega)]^{3 \times 3}), \end{equation}
which we shall now show, and the continuous and dense embedding of $[\mathrm{L}^2(\Omega)]^{3 \times 3}$ into $[\mathrm{H}^{-1}(\Omega)]^{3 \times 3}$, this will then yield that $\mathbf{A}_2 \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$ (cf., again, eq. (8.49) in Lemma 8.1, Ch. 3 of \cite{LM}), as desired. To show that \eqref{eq:A2} holds, we appeal to the following result from the theory of Sobolev spaces of Banach-space-valued functions (cf., for example, Theorem 1.4.40 on p.15 in \cite{CH}):
\textit{Suppose that} $X$ \textit{is a reflexive Banach space}, $I$ \textit{is a nonempty bounded open interval of} $\mathbb{R}$, \textit{and} $u \in \mathrm{L}^p(I;X)$ \textit{for some} $p \in [1,\infty]$. \textit{Then,} $u \in \mathrm{W}^{1,p}(I;X)$ \textit{if, and only if, there exists a function} $g \in \mathrm{L}^p(I;\mathbb{R})$ \textit{such that}
\[ \|u(t) - u(s)\|_X \leq \left|\int_s^t g(\tau) \,\mathrm{d} \tau\right|\]
\textit{for almost all $s, t \in I$, i.e., for all $s, t$ outside a common null set.}
We shall apply this result with $p=\infty$, $X = [\mathrm{H}^{-1}(\Omega)]^{3 \times 3}$, and $g(\tau)=\|\dot{\mathbf{u}}(\tau,\cdot)\|_{\mathrm{L}^2(\Omega)}$. Clearly,
\begin{align*} \|\mathbf{A}_2(t) - \mathbf{A}_2(s)\|_X &= \|2\mu \boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot)- \mathbf{u}(s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot)-\mathbf{u}(s,\cdot))) \mathbf{I}
\|_X \\
&= \left\|2\mu \boldsymbol{\varepsilon}\left(\int_s^t\dot{\mathbf{u}}(\tau,\cdot)\,\mathrm{d} \tau \right) + \lambda \tr\left(\boldsymbol{\varepsilon}\left(\int_s^t\dot{\mathbf{u}}(\tau,\cdot)\,\mathrm{d} \tau \right)\right) \mathbf{I}\right\|_X\\
& \leq 2\mu \left\|\boldsymbol{\varepsilon}\left(\int_s^t\dot{\mathbf{u}}(\tau,\cdot)\,\mathrm{d} \tau \right)\right\|_X + \lambda \left\| \tr\left(\boldsymbol{\varepsilon}\left(\int_s^t\dot{\mathbf{u}}(\tau,\cdot)\,\mathrm{d} \tau \right)\right) \mathbf{I}\right\|_X\\
& \leq 2\mu \left\|\int_s^t\dot{\mathbf{u}}(\tau,\cdot)\,\mathrm{d} \tau\right\|_{\mathrm{L}^2(\Omega)} + 3\lambda
\left\|\int_s^t\dot{\mathbf{u}}(\tau,\cdot)\,\mathrm{d} \tau\right\|_{\mathrm{L}^2(\Omega)}\\
& \leq (2\mu + 3 \lambda) \left| \int_s^t \|\dot{\mathbf{u}}(\tau,\cdot)\|_{\mathrm{L}^2(\Omega)}\,\mathrm{d} \tau \right| < \infty\qquad \forall\, s, t \in [0,T], \end{align*}
because $\dot{\mathbf{u}} \in \mathrm{L}^\infty(0,T;[\mathrm{L}^2(\Omega)]^3)$, where we have used the bound $\|\boldsymbol{\varepsilon}(\mathbf{w})\|_X \leq \|\mathbf{w}\|_{\mathrm{L}^2(\Omega)}$ with $X = [\mathrm{H}^{-1}(\Omega)]^{3\times 3}$. Therefore, $\mathbf{A}_2 \in \mathrm{W}^{1,\infty}(0,T;[\mathrm{H}^{-1}(\Omega)]^{3\times 3})$, whereby also $\dot{\mathbf{A}}_2 \in \mathrm{L}^{\infty}(0,T;[\mathrm{H}^{-1}(\Omega)]^{3\times 3})$. Thus we have shown that $\mathbf{A}_2 \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3\times 3})$.
Concerning the term $\mathbf{A}_1$, as $\mathbf{A}_1 = (1 -\tau^{\alpha}) \dot{e}_\alpha(t,\tau^{-\alpha}) \ast_t \mathbf{A}_2$, and $\mathbf{A}_2 \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3\times 3})$, also $\mathbf{A}_1 \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3\times 3})$.
By summing $\mathbf{A}_1$, $\mathbf{A}_2$ and $\mathbf{A}_3$ we thus deduce that $\boldsymbol{\sigma} = \mathbf{A}_1 + \mathbf{A}_2 + \mathbf{A}_3 \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$. It remains to prove the attainment of the initial condition $\boldsymbol{\sigma}(0,\cdot) = \mathbf{S}(\cdot)$.
Thanks to Fubini's theorem and the continuity of the integral with respect to its (upper) limit, \begin{align*} &\lim_{t \rightarrow 0_+} (\dot{e}_\alpha(t,\tau^{-\alpha}) \ast_t (2\mu \boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot))) \mathbf{I}),\boldsymbol{W})\\ &\quad= \lim_{t \rightarrow 0_+} \int_0^t \dot{e}_\alpha(s,\tau^{-\alpha})\, ((2\mu \boldsymbol{\varepsilon}(\mathbf{u}(t-s,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(t-s,\cdot))) \mathbf{I}),\boldsymbol{W}) \,\mathrm{d} s = 0 \qquad \forall\, \boldsymbol{W} \in [\mathrm{L}^2(\Omega)]^{3\times 3}. \end{align*} Hence, and noting that (recall that $\mathbf{A}_2 \in \mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$)
\[ \lim_{t \rightarrow 0_+} (2\mu \boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u}(t,\cdot))) \mathbf{I}, \boldsymbol{W}) =
(2\mu \boldsymbol{\varepsilon}(\mathbf{g}(\cdot)) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g}(\cdot))) \mathbf{I}, \boldsymbol{W})\qquad \forall \, \boldsymbol{W} \in [\mathrm{L}^2(\Omega)]^{3 \times 3},\]
and because $e_\alpha(0,\tau^{-\alpha})=1$, we have from \eqref{eq:sig-equa} that
\[ \lim _{t \rightarrow 0_+} (\boldsymbol{\sigma}(t,\cdot), \boldsymbol{W}(\cdot)) = (\mathbf{S}(\cdot), \boldsymbol{W}(\cdot)) \qquad \forall \, \boldsymbol{W} \in [\mathrm{L}^2(\Omega)]^{3 \times 3}.\] Therefore, $\boldsymbol{\sigma}(0,\cdot) = \mathbf{S}(\cdot)$ as an equality in $\mathrm{C}_w([0,T];[\mathrm{L}^2(\Omega)]^{3 \times 3})$, as required. \quad $\Box$ }
{\color{black} The results of the paper can be straightforwardly extended to initial-boundary-value problems for the fractional Zener wave equation with mixed homogeneous Dirichlet/nonhomogeneous Neumann boundary conditions, i.e., to problems where the domain boundary $\partial\Omega$ is the disjoint union of $\Gamma_{\rm D}$ and $\Gamma_{\rm N}$, with $\Gamma_{\rm D}$ having positive two-dimensional surface measure, \begin{alignat*}{2} \mathbf{u} & = \boldsymbol{0} &&\qquad \mbox{on $\Gamma_{\rm D}$},\\
{}[(2\mu \boldsymbol{\varepsilon}(\mathbf{u}) + \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{u})) \mathbf{I}) \,+\, e_{\alpha,1}\, (\tau^\alpha\mathbf{S}- 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) - \lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I})]\cdot \boldsymbol{n} & = \boldsymbol{s}&&\qquad \mbox{on $\Gamma_{\rm N}$}, \end{alignat*} where $\boldsymbol{n}$ is the unit outward normal vector to $\partial\Omega$, and $\boldsymbol{s} \in \mathrm{L}^\infty(0,T; [\mathrm{L}^2(\Gamma_N)]^3)$ is given, at the expense of adding a term of the form
\[ \int_0^T \int_{\Gamma_{\rm N}} \boldsymbol{s}(t,\boldsymbol{\xi})\cdot \mathbf{v}(t,\boldsymbol{\xi}) \,\mathrm{d} \boldsymbol{\xi} \,\mathrm{d} t \] to the right-hand side of \eqref{eq:21}, replacing the function space $[\mathrm{H}^1_0(\Omega)]^3$ throughout by the function space $[\mathrm{H}^1_{\Gamma_{\rm D},0}(\Omega)]^3$ consisting of all functions in $[\mathrm{H}^1(\Omega)]^3$ with zero trace on $\Gamma_{\rm D}$, and $[\mathrm{H}^{-1}(\Omega)]^3$ signifying the dual space of $[\mathrm{H}^1_{\Gamma_{\rm D},0}(\Omega)]^3$. In the special case when the initial stress $\mathbf{S}$ is such that $\tau^\alpha\mathbf{S}= 2\mu \boldsymbol{\varepsilon}(\mathbf{g}) +\lambda \tr(\boldsymbol{\varepsilon}(\mathbf{g})) \mathbf{I}$, the Neumann boundary condition on $\Gamma_{\rm N}$ and the source term $\mathbf{b}$ in \eqref{eq:21}, defined by \eqref{eq:22}, are both simplified. }
As a possible further, but now nontrivial, extension of the model \eqref{eq:4}, we note that Freed and Diethelm \cite{FD} have extended Fung's nonlinear constitutive law for soft biological tissues into a constitutive law involving fractional time-derivatives in the sense of Caputo, first in one space dimension and then in three space-dimensions. The model is derived in a configuration that differs from the current configuration by a rigid-body rotation; it being the polar configuration. Freed and Diethelm introduce mappings for the fractional-order operators of integration and differentiation between the polar and spatial configurations. They then use these mappings in the construction of their proposed viscoelastic model. The mathematical analysis of the associated set of partial differential equations, {\color{black} and the study of wave propagation governed by the associated nonlinear system of nonlocal evolution equations} are beyond the scope of the present paper.
\textbf{Acknowledgements:} We are grateful to Professor Du\v{s}an Zorica (Mathematical Institute of the Serbian Academy of Sciences and Arts) for stimulating discussions, and to Sr{\dj}an Lazendi\'c (University of Ghent) for drawing our attention to reference \cite{Siskova}. Ljubica Oparnica is supported by the Serbian Ministry of Education, Science, and Technological Development, under the grants 174005 and 174024, and by the FWO Odysseus project of Michael Ruzhansky.
\noindent Ljubica Oparnica, \\ Faculty of Education, University of Novi Sad, Serbia
\noindent \textit{\&}
\noindent Department of Mathematics: Analysis, Logic and Discrete Mathematics,\\ University of Gent, Krijgslaan 281, S8, 9000 Gent, Belgium\\ \texttt{\footnotesize Oparnica.Ljubica@UGent.be}
\noindent Endre S\"uli,\\ Mathematical Institute, University of Oxford, Woodstock Road, Oxford OX2 6GG, UK\\ \texttt{\footnotesize endre.suli@maths.ox.ac.uk}
\end{document} | arXiv | {
"id": "1909.05337.tex",
"language_detection_score": 0.48371249437332153,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Partially congested propagation fronts in one-dimensional Navier-Stokes equations}
\begin{small}
\begin{abstract} These notes are dedicated to the analysis of the one-dimensional free-congested Navier-Stokes equations. After a brief synthesis of the results obtained in~\cite{DP} related to the existence and the asymptotic stability of partially congested profiles associated to the \emph{soft congestion} Navier-Stokes system, we present a first local well-posedness result for the one-dimensional free-congested Navier-Stokes equations.
\end{abstract}
\noindent{\bf Keywords:} Navier-Stokes equations, free boundary problem, traveling waves, nonlinear stability.
\noindent{\bf MSC:} 35Q35, 35L67. \end{small}
\section{Introduction}{\label{sec:intro}} In these notes, we are interested in the following one-dimensional Navier-Stokes system written in Lagrangian coordinates \begin{subnumcases}{\label{eq:NS-lim}} \partial_t v - \partial_x u = 0, \label{eq:NS-lim-v}\\ \partial_t u + \partial_x p - \mu \partial_x \left(\dfrac{1}{v}\partial_x u\right) = 0, \end{subnumcases} complemented with the \emph{unilateral constraint} \begin{equation}\label{eq:constraint} v \geq 1, ~ (v-1) p= 0, ~ p \geq 0. \end{equation} The variable $v$ denotes the specific volume of the fluid and is forced to be greater than $1$, $u$ represents the velocity and $p$ is the pressure associated to the specific volume constraint. Eventually, the constant $\mu > 0$ represents the viscosity of the fluid.
The analysis of system~\eqref{eq:NS-lim}-\eqref{eq:constraint} is motivated by the modeling of partially \emph{congested} (or saturated) flows like crowd motions or traffic flows \cites{berthelin2017,degond2011,maury2011} or mixtures \cite{bouchut2000} where the constraint $v \geq 1$ can be assimilated to a maximal packing or volume fraction constraint. A similar formulation to~\eqref{eq:NS-lim}-\eqref{eq:constraint} was also recently derived for the modeling of partially pressurized free surface flows \cites{lannes2017,godlewski2018}.\\ As developed in previous studies (see for instance~\cite{perrin2015},~\cite{bianchini2020}), equations~\eqref{eq:NS-lim}-\eqref{eq:constraint} can be approximated by the compressible Navier-Stokes equations \begin{subnumcases}{\label{eq:NS-ep}} \partial_t v_\varepsilon - \partial_x u_\varepsilon = 0, \\ \partial_t u_\varepsilon + \partial_x p_\varepsilon(v_\varepsilon) - \mu \partial_x \left(\dfrac{1}{v_\varepsilon}\partial_x u_\varepsilon\right) = 0, \\ p_\varepsilon(v) \underset{v \to 1^+}{\to} + \infty. \end{subnumcases} with a singular pressure law $p_\varepsilon$ representing repulsive forces that prevent the development of congested phases. In the previously cited papers, the rigorous justification of the limit $\varepsilon \to 0$ yields the existence of global weak finite energy solutions to \eqref{eq:NS-lim}-\eqref{eq:constraint} (we do not detail the precise setting of these results and refer to~\cite{perrin2015}-\cite{bianchini2020}). The analysis reveals the multi-scale nature of problem~\eqref{eq:NS-ep}: given a pressure law, {\it e.g.} $p_\varepsilon(v) = \varepsilon (v-1)^{-\gamma}$, one observes that small variations in the specific volume variable (of order $\varepsilon^{1/\gamma}$ here) lead to large variations of the pressure in the highly dense regions where $v$ close to $1$. 
Besides, one shows that $\partial_x u = 0$ (or $\mathrm{div}\ u= 0$ in the multi-dimensional case) where $\{v=1\}$ on the limit system~\eqref{eq:NS-lim}-\eqref{eq:constraint}. Therefore system~\eqref{eq:NS-lim}-\eqref{eq:constraint} can be interpreted as a compressible-incompressible free boundary problem with an interface depending on the solution itself. This interface is moreover not closed, {\it i.e.} matter passes through the boundary. Up to our knowledge, few results are known in the literature regarding the existence of more regular solutions to~\eqref{eq:NS-lim}-\eqref{eq:constraint} and the dynamics of the congested domain as time evolves.
In these notes we shall focus on particular partially congested solutions that have a stationary profile for both systems~\eqref{eq:NS-lim}-\eqref{eq:constraint} and~\eqref{eq:NS-ep}: $(v,u)(t,x) = (v,u)(\xi)$ with $\xi = x- st$ and $s$ is the constant speed at which the profile travels. We show below that these profiles, denoted in the following $(\bar v, \bar u)$ and $(\bar v_\varepsilon,\bar u_\varepsilon)$ respectively,
give us precious information about the transition from a congested state to a free state. Next, we are interested in the existence of regular solutions to~\eqref{eq:NS-ep} and~\eqref{eq:NS-lim}-\eqref{eq:constraint} for perturbations of the profiles $(\bar v_\varepsilon, \bar u_\varepsilon)$ and $(\bar v,\bar u)$ respectively. In the approximate case, with $\varepsilon > 0$ fixed, we present a global existence and stability result for small (quantified in terms of $\varepsilon$) regular perturbations of $(\bar v_\varepsilon, \bar u_\varepsilon)$. This result was initially proved in~\cite{DP}. Concerning the limit system~\eqref{eq:NS-lim}-\eqref{eq:constraint}, we announce a first local well-posedness result for ``compatible'' initial perturbations (not necessarily small) of the limit profile $(\bar v, \bar u)$, whose complete proof will be given in a forthcoming paper.\\ These two results both rely on high order energy estimates satisfied by regular solutions of the two systems. In both cases, the derivation of such estimates is facilitated by a change of velocity variable. Introducing the \emph{effective velocity} $w := u - \mu \partial_x \ln v$, the mass equation rewrites as \[ \partial_t v - \partial_x w - \mu \partial^2_x \ln v = 0, \] with an additional (nonlinear) diffusion term compared to~\eqref{eq:NS-lim-v} which let us expect regularization effects on the specific volume variable $v$ (see also~\cite{vasseur2016}). Nonetheless, the geometries of the two settings strongly differ from one another. In the approximate case ($\varepsilon > 0$) the system~\eqref{eq:NS-ep} is set on $\mathbb{R}$. On the limit system \eqref{eq:NS-lim}-\eqref{eq:constraint}, we shall restrict ourselves to initial perturbations localized on $\mathbb{R}_+$, {\it i.e.} in the free domain of $(\bar v, \bar u)$. 
Looking for solutions that remain partially congested, the system~\eqref{eq:NS-lim}-\eqref{eq:constraint} is then studied only on the half-line $[\tilde{x}(t),+\infty[$ where $\tilde{x}(t)$ denotes the position of the interface between the free and the congested domain at time $t$. Therefore additional difficulties in that case are expected due to the free boundary $x=\tilde{x}(t)$ which is an unknown of the system.
The notes are organized as follows. First, in Section~\ref{sec:profiles}, we prove the existence and give qualitative properties of partially congested propagation fronts for both systems~\eqref{eq:NS-lim}-\eqref{eq:constraint} and~\eqref{eq:NS-ep}. Then, we present in Section~\ref{sec:stab-ep} a result concerning the asymptotic stability of the approximate profiles $(\bar v_\varepsilon,\bar u_\varepsilon)$. Finally, in Section~\ref{sec:LWP}, we announce a local well-posedness result on the limit system~\eqref{eq:NS-lim}.
\section{Partially congested propagation fronts and their soft congestion approximation}{\label{sec:profiles}}
In this section we construct traveling wave profiles for both limit and approximated problems. For that purpose, we complement system~\eqref{eq:NS-lim}-\eqref{eq:constraint} with the conditions \begin{equation}\label{eq:endstates} (v,u)(t,x) \to (v_\pm,u_\pm) \quad \text{as}~ x \to \pm \infty, \end{equation} and we assume that \begin{equation}\label{eq:endstates-0} v_- = 1 < v_+, \qquad u_- > u_+, \end{equation} in other words we assume that the left end state is congested while the right end state is free. The condition $u_+ < u_-$ is an entropy condition on the shock (we shall only consider positive speed shocks).
\subsection{Explicit free-congested front for the limit system}
\begin{lem}\label{lem:profile-ref} Assume that $u_->u_+$, $v_+>1$, and let \begin{equation}\label{df:speed} s:=\frac{u_--u_+}{v_+-1}. \end{equation} Then there exists a unique (up to a shift) travelling wave solution of \eqref{eq:NS-lim}. This travelling wave propagates at speed $s$ and is of the form $(\bar u, \bar v)(x-st)$. Furthermore, \[
\bar v(x)= \begin{cases} 1&\text{ if }x\leq 0,\\ \displaystyle\frac{v_+}{1 + (v_+-1) \exp(-sv_+x/\mu)}&\text{ if }x> 0, \end{cases}. \] \[ \bar u=u_+ + s v_+ - s\bar v = u_- + s v_- - s\bar v. \]
In the zone $x<0$, the pressure is constant and equal to $p_-= s^2 (v_+ - 1)$.
\label{lem:TW} \end{lem}
The profile is represented in Figure~\ref{fig:profile}. \begin{figure}
\caption{Let $v_+ = 2,\ u_- = 1, \ u_+ = 0$. On the left: the profiles $\bar v$ and $\bar p$, on the right: the profiles $\bar u$ and $\bar w = \bar u - \mu \partial_x \ln \bar v$. }
\label{fig:profile}
\end{figure}
\begin{proof}[Sketch of proof] Let $s$ be the speed of propagation of the profile $(\bar v, \bar u)$. System~\eqref{eq:NS-lim} becomes \begin{equation}\label{eq:EDO-prof} \begin{cases} -s \bar v' - \bar u' = 0, \\ -s \bar u' + \bar p' - \mu \left(\dfrac{\bar u'}{\bar v}\right)' = 0. \end{cases} \end{equation} We look for a profile which is congested on $]-\infty, 0]$ and free on $]0,+\infty[$\footnote{It can be proved that the analysis can always be reduced to this case, see \cite{DP}.}. In the free zone, it is easy to show that the dynamics reduces to a logistic equation for both variables: \[ \begin{cases} \bar v' = \dfrac{s}{\mu} \bar v (v_+ -\bar v), \\ \bar u' = -s \bar v' = \dfrac{1}{\mu} (u_+-\bar u)(sv_++u_+- \bar u), \end{cases} \] while in the congested zone we have $\bar v=1$, $\bar u =u_-$. Next, the two dynamics are connected at the point $x^*=0$ by imposing the continuity of $\bar v$, $\bar u$ and the flux $\bar p - \mu \frac{\bar u'}{\bar v}$. We recover then the value of the shock speed~\eqref{df:speed} by integrating~\eqref{eq:EDO-prof} between $0$ and $+\infty$. Eventually, the value of the pressure in the congested domain $x < 0$ is given by: $\bar p(x) = - \mu \lim_{x \to 0^+} \bar u'(x) = \mu s \lim_{x \to 0^+} \bar v'(x) = s^2 (v_+-1)$. \end{proof}
\subsection{Approximation through the soft congestion approach} We are now interested in travelling wave profiles $(\bar v_\varepsilon,\bar u_\varepsilon)$ associated to the soft congestion problem. To fix the ideas and make the analysis more explicit, we specify the singular pressure law and set \begin{equation}\label{df:pep} p_\varepsilon(v) = \dfrac{\varepsilon}{(v-1)^\gamma} \quad \text{for}~ v > 1, ~\text{with}~ \gamma \geq 1. \end{equation} As the reader may check, the results presented in the rest of the notes can be generalized to other pressure laws that are strictly decreasing, convex on $]1, +\infty[$ and singular close to $1$. Let $s_\varepsilon$ be the speed of propagation of the profile, we have the following system of ODEs \[ \begin{cases} s_\varepsilon \bar v'_\varepsilon + \bar u'_\varepsilon = 0, \\ -s_\varepsilon \bar u'_\varepsilon + (p_\varepsilon(\bar v_\varepsilon))' - \left(\dfrac{\bar u'_\varepsilon}{\bar v_\varepsilon}\right)' = 0. \end{cases} \] supplemented with the far field conditions \begin{equation}\begin{aligned} \label{eq:endstates-ep} v_\ep(t,x)\to v_\pm^\varepsilon \quad u_\ep(t,x)\to u_\pm \quad \text{as }x\to \pm \infty,\\ 1<v_-^\varepsilon<v_+,\quad u_->u_+. \end{aligned} \end{equation} Note that the left limit condition on the specific volume has to be modified in view of the pressure law~\eqref{df:pep} so that $p_\varepsilon(v_-^\varepsilon)$ remains bounded as $\varepsilon \to 0$. We then set $ v_-^\varepsilon := 1 + \varepsilon^{1/\gamma}$, and we take $v^\varepsilon_+=v_+ $ independent of $\varepsilon$.
\begin{lem}\label{lem:profile-ep} Assume that $u_- > u_+$, $v_+ > v_-^\varepsilon$ and let \[ s_\varepsilon := \sqrt{-\dfrac{p_\varepsilon(v_+) -1}{v_+-1}}. \] Then there exists a unique (up to a shift) traveling wave solution $(\bar v_\varepsilon,\bar u_\varepsilon)$ solution of~\eqref{eq:NS-ep} with end states $(v_-^\varepsilon,u_-)$ (resp. $(v_+,u_+)$) at $-\infty$ (resp. $+\infty$).\\ Moreover, fixing the shift by setting $\bar v_\varepsilon(0) = 1 + \varepsilon^{1/(\gamma+1)}$ and taking $ v_-^\varepsilon := 1 + \varepsilon^{1/\gamma}$, we have
that, up to a subsequence, \[ \bar v_\varepsilon \to \bar v \quad \text{in}~\mathcal{C}(-R,R) ~\forall R > 0~ \text{and weakly-* in}~ W^{1,\infty}(\mathbb{R}). \] \end{lem}
\begin{proof}[Sketch of proof] It can be shown that \begin{equation}\label{EDO-vep} \bar v'_\varepsilon = \dfrac{\bar v_\varepsilon}{\mu s_\varepsilon} \left(s_\varepsilon^2(v_+ -\bar v_\varepsilon) + p_\varepsilon(v_+) - p_\varepsilon(\bar v_\varepsilon)\right), \end{equation} so that existence and uniqueness (up to a shift) of a monotone (increasing) profile $\bar v_\varepsilon$ follows easily from ODEs arguments and the convexity of the pressure law $p_\varepsilon$. Observing that $p_\varepsilon(\bar v_\varepsilon) \leq p_\varepsilon(v_-^\varepsilon) = 1$, we control $\bar v'_\varepsilon$ uniformly with respect to $\varepsilon$ and infer the uniform convergence of $\bar v_\varepsilon$ towards $\bar v$. \end{proof}
We can actually be more precise on the behavior of $\bar v_\varepsilon$. We distinguish between three zones: \begin{itemize} \item {\it Congested zone:} this corresponds to a zone $]-\infty, x_{min}]$ in which $\bar v_\varepsilon=1 + O(\varepsilon^{1/\gamma})$, so that the pressure remains bounded. In this zone, the analysis of the linearized version of \eqref{EDO-vep} around $v^\varepsilon_-$ shows that the profile $\bar v_\varepsilon$ converges exponentially towards $v_-^\varepsilon$.\\ Computations show that $x_{min} = O(\varepsilon^{1/(\gamma+1)})$.
\item {\it Free zone:} this corresponds to the region where $p_\varepsilon (\bar v_\varepsilon)\ll 1$, $p_\varepsilon' (\bar v_\varepsilon)=O (1)$. With the choice of the shift above, this corresponds to the interval $[0, +\infty[$. In this zone, one proves that $\|\bar v_\varepsilon - \bar v\|_{L^\infty} \lesssim \varepsilon^{\frac{1}{\gamma+1}}$.
\item {\it Transition zone $[-x_{min},0]$:} in this small region, we have the following error estimate.
\begin{lem} Assume the conditions of the previous lemma. Let $\tilde{v}$ be the solution of the ODE \[ \tilde{v}' = \dfrac{1}{\mu s}\left(1-\dfrac{1}{\tilde{v}^{\gamma}} \right), \qquad \tilde{v}(0) = 2, \] and let $x^* < 0$ be a suitable parameter such that $x^* = O(\varepsilon^{1/(\gamma+1)})$. Then \[
\Big| \bar v_\varepsilon(x) - \bar v(x) - \varepsilon^{\frac{1}{\gamma}} \tilde v \left(\dfrac{x - x^*}{\varepsilon^{1/\gamma}}\right)
\Big|
\leq C \varepsilon^{\frac{1}{\gamma+1}} |x| \qquad \forall \ x \in [-x_{min},0], \] with $x_{min} = O(\varepsilon^{1/(\gamma+1)})$. \end{lem}
\end{itemize}
\section{Perturbations of the approximate profiles $(\bar v_\varepsilon,\bar u_\varepsilon)$}{\label{sec:stab-ep}}
As said in the introduction, it will be convenient to rewrite Eq~\eqref{eq:NS-ep} in the variables $(v,w)$, that is \[ \begin{cases} \partial_t v - \partial_x w - \mu \partial^2_x \ln v = 0 \\ \partial_t w + \partial_x p_\varepsilon(v) = 0 \end{cases} \quad \text{for}~ t > 0,\ x \in \mathbb{R}, \] with \[ (v,w) \to (v^\varepsilon_\pm, u_\pm) \quad \text{as}~ x \to \pm \infty, \] and initial data \[ (v,w)_{t=0} = (v^0, u^0 - \mu \partial_x \ln v^0). \]
We recall that $v_+^\varepsilon:=v_+$ and $u_\pm$ are independent of $\varepsilon$, and that $v^\varepsilon_-=1 + \varepsilon^{1/\gamma}$. Now, assuming that $(v,w)_{|t=0} \in (\bar v_\varepsilon, \bar w_\varepsilon) + L^1_0 \cap L^\infty(\mathbb{R})$ where $L^1_0(\mathbb{R})$ is the set of $L^1$ functions of zero mass, we look at the system \begin{subnumcases}{\label{eq:NS-ep-int}} \partial_t V - \partial_x W - \mu \partial_x \ln\left(1+ \dfrac{\partial_x V}{\bar v_\varepsilon(x-s_\varepsilon t)} \right) = 0 \\ \partial_t W + p_\varepsilon(\bar v_\varepsilon(x-s_\varepsilon t) + \partial_x V) - p_\varepsilon(\bar v_\varepsilon(x-s_\varepsilon t)) = 0 \end{subnumcases} satisfied by the integrated variables \[ V(t,x) := \int_{-\infty}^x (v (t,z)- \bar v_\varepsilon(z-s_\varepsilon t)) \ dz, \qquad W(t,x) := \int_{-\infty}^x (w (t,z)- \bar w_\varepsilon(z-s_\varepsilon t)) \ dz. \]
Our strategy is the following: first we prove by a fixed point argument the existence and uniqueness of a global regular solution $(V,W)$ under smallness assumptions on $(V,W)_{|t=0}$; then we come back to the original variables and deduce the existence and uniqueness of a couple $(v,u)$ solution to~\eqref{eq:NS-ep}. The regularity of $(v-\bar v_\varepsilon (\cdot-s_\varepsilon t),u-\bar u_\varepsilon(\cdot-s_\varepsilon t))$ will eventually ensure the asymptotic stability of $(\bar v_\varepsilon,\bar u_\varepsilon)$, {\it i.e.} the convergence of $(v-\bar v_\varepsilon (\cdot-s_\varepsilon t),u-\bar u_\varepsilon(\cdot-s_\varepsilon t))$ to $0$ as $t\to +\infty$.
In the whole subsection, we fix \[ p_\varepsilon(v) := \dfrac{\varepsilon}{(v-1)^\gamma} \quad \text{for}~ v >1, \ \varepsilon > 0, \ \gamma \geq 1. \]
Here is the first result guaranteeing the existence of a global strong solution $(V,W)$. \begin{prop}\label{prop:ex-VW}
Assume that $(V_0,W_0) \in (H^2(\mathbb{R}))^2$ with
\begin{equation}\label{eq:init-1}
\sum_{k=0}^2 \varepsilon^{\frac{2k}{\gamma}}\int_{\mathbb{R}}{\left[ \dfrac{|\partial^k_x W_0|^2}{-p'_\varepsilon(\bar v_\varepsilon)} + |\partial^k_x V_0|^2 \right] dx} \leq \delta_0^2 \varepsilon^{\frac{5}{\gamma}}
\end{equation}
for some $\delta_0$ small enough, depending only on $v_+$, $\gamma$ and $\mu$.
Then there exists a unique global solution $(V,W)$ to~\eqref{eq:NS-ep-int} satisfying
\begin{align*}
& V \in \mathcal{C}([0;+\infty);H^2(\mathbb{R})) \cap L^2(\mathbb{R}_+; H^3(\mathbb{R})), \\
& W \in \mathcal{C}([0;+\infty);H^2(\mathbb{R})).
\end{align*}
\end{prop}
\begin{proof}[Idea of the proof] As announced previously, this result is achieved thanks to a fixed point argument which relies on energy estimates satisfied by $(V,W)$ and its derivatives $(\partial^k_x V, \partial^k_x W)$, $k=1,2$. Since we are working close to the reference profile $(\bar v_\varepsilon(\cdot-s_\varepsilon t), \bar{w}_\varepsilon(\cdot-s_\varepsilon t))$, it is natural to rewrite the system~\eqref{eq:NS-ep-int} as follows \begin{equation}\label{eq:linearized-VW} \partial_t \begin{pmatrix} V \\ W \end{pmatrix} + \mathcal L_\ep \begin{pmatrix} V \\ W \end{pmatrix} = \begin{pmatrix} F_\varepsilon \\ G_\varepsilon \end{pmatrix}, \end{equation} with a linear left-hand side \[ \mathcal L_\ep\begin{pmatrix} V \\ W \end{pmatrix} := \begin{pmatrix} - \partial_x W - \mu \partial_x \left(\dfrac{\partial_x V}{\bar v_\varepsilon(\cdot-s_\varepsilon t)} \right) \\ p'_\varepsilon(\bar v_\varepsilon(\cdot-s_\varepsilon t)) \partial_x V \end{pmatrix} \] that yields the main order part of the energy and dissipation terms; and a right-hand side \begin{align*}
F_\varepsilon & = F_\varepsilon(\partial_x V) := \mu \partial_x \left[ \ln \left(1 + \dfrac{\partial_x V}{\bar v_\varepsilon(\cdot-s_\varepsilon t)}\right) - \dfrac{\partial_x V}{\bar v_\varepsilon(\cdot-s_\varepsilon t)} \right],\\
G_\varepsilon & = G_\varepsilon(\partial_x V): = - \left[p_\varepsilon (\bar v_\varepsilon (\cdot-s_\varepsilon t) + \partial_x V) - p_\varepsilon (\bar v_\varepsilon(\cdot-s_\varepsilon t)) - p_\varepsilon'(\bar v_\varepsilon(\cdot-s_\varepsilon t)) \partial_x V\right], \end{align*} which is quadratic in $\partial_x V$ and will be treated as a perturbation. Hence, taking the scalar product of \eqref{eq:linearized-VW} with $(V, \frac{W}{-p'_\varepsilon(\bar v_\varepsilon(\cdot-s_\varepsilon t))})$, we get the energy estimate \begin{align}\label{eq:k=0}
\int_\mathbb{R}{\left[-\dfrac{1}{p'_\varepsilon(\bar v_\varepsilon)}{|W|^2} + {|V|^2} \right]}
+ s_\varepsilon\int_{\mathbb{R}_+}\int_\mathbb{R}{\dfrac{p''_\varepsilon(\bar v_\varepsilon)}{(p'_\varepsilon(\bar v_\varepsilon))^2} \partial_x \bar v_\varepsilon |W|^2}
+ 2\mu \int_{\mathbb{R}_+}\int_\mathbb{R}{\dfrac{(\partial_x V)^2}{\bar v_\varepsilon}} & \nonumber\\
\leq \int_\mathbb{R}{\left[\left(\dfrac{-1}{p'_\varepsilon(\bar v_\varepsilon)}{|W|^2}\right)_{|t=0} + {|V_{|t=0}|^2} \right]}
+ 2\left|\int_{\mathbb{R}_+}\int_\mathbb{R} \left[G_\varepsilon\frac{W}{-p_\varepsilon'(\bar v_\varepsilon)} + F_\varepsilon V\right]\right|, \end{align} where we have abusively written $\bar v_\varepsilon$ as a short-hand for $\bar v_\varepsilon(\cdot-s_\varepsilon t)$ and where the integrals involving $F_\varepsilon$ and $G_\varepsilon$ are controlled by assuming that the distance between $(v,w)$ and the profile $(\bar v_\varepsilon, \bar w_\varepsilon)$ remains small enough. Regarding the diffusion term on $W$, note that \[ \frac{p_\varepsilon''(\bar v_\varepsilon)}{p_\varepsilon'(\bar v_\varepsilon)^2}= \frac{(\gamma+1) (v_\ep-1)^\gamma}{\gamma\varepsilon }= \frac{\gamma+1}{\gamma p_\varepsilon(\bar v_\varepsilon)}\geq \frac{\gamma+1}{\gamma}. \] To derive higher order estimates, we differentiate the system with respect to $x$ and perform the same calculations. Nevertheless, we need to take into account additional terms coming from the commutator of $\mathcal L_\ep$ and $\partial_x^k$, $k=1,2$. For $k=1$, we have \begin{align}\label{eq:k=1}
& \int_\mathbb{R}{\left[-\dfrac{1}{p'_\varepsilon(\bar v_\varepsilon)}{|\partial_x W|^2} + {|\partial_x V|^2} \right]}
+ s_\varepsilon\int_{\mathbb{R}_+}\int_\mathbb{R}{\dfrac{p''_\varepsilon(\bar v_\varepsilon)}{(p'_\varepsilon(\bar v_\varepsilon))^2} \partial_x \bar v_\varepsilon |\partial_x W|^2} + 2\mu \int_{\mathbb{R}_+}\int_\mathbb{R}{\dfrac{(\partial^2_x V)^2}{\bar v_\varepsilon}} \nonumber\\
& \leq \int_\mathbb{R}{\left[\left(\dfrac{-1}{p'_\varepsilon(\bar v_\varepsilon)}{|\partial_x W|^2}\right)_{|t=0} + {|\partial_x V_{|t=0}|^2} \right]}
+ 2\left|\int_{\mathbb{R}_+}\int_\mathbb{R} \left[\partial_x G_\varepsilon\frac{\partial_x W}{-p_\varepsilon'(\bar v_\varepsilon)} + \partial_x F_\varepsilon \partial_x V\right]\right| \\
& \quad + \left|\int_{\mathbb{R}_+}\int_{\mathbb{R}}{[\mathcal L_\ep, \partial_x]\begin{pmatrix}V\\ W\end{pmatrix} \cdot \begin{pmatrix}\dfrac{-\partial_x W}{p'_\varepsilon(\bar v_\varepsilon)}\\ \partial_x W\end{pmatrix}} \right| \nonumber \end{align} with \begin{align*}
\left|\int_{\mathbb{R}_+}\int_{\mathbb{R}}{[\mathcal L_\ep, \partial_x]\begin{pmatrix}V\\ W\end{pmatrix} \cdot \begin{pmatrix}\dfrac{-\partial_x W}{p'_\varepsilon(\bar v_\varepsilon)}\\ \partial_x W\end{pmatrix}} \right|
\leq \eta \int_{\mathbb{R}_+} \int_{\mathbb{R}}\partial_x \bar v_\varepsilon |\partial_x W|^2
+ \frac{C_1}{\eta}\varepsilon^{-2/\gamma} \int_{\mathbb{R}_+} \int_{\mathbb{R}}{|\partial_x V|^2}. \end{align*} The first integral can be absorbed in the left-hand side for small $\eta$. For the second integral, we could apply a Gronwall inequality to close the estimate but we would then obtain a bound on the energy that exponentially grows with time. Another way to proceed is to multiply inequality~\eqref{eq:k=1} by $\varepsilon^{2/\gamma}$ (eliminating the singularity as $\varepsilon \to 0$), and to combine the result with the estimate on the integrated variables~\eqref{eq:k=0}: \begin{align}\label{eq:k=1-bis}
& \int_\mathbb{R}{\left[-\dfrac{1}{p'_\varepsilon(\bar v_\varepsilon)}{|W|^2} + {|V|^2} \right] + \varepsilon^{2/\gamma}\int_\mathbb{R} \left[-\dfrac{1}{p'_\varepsilon(\bar v_\varepsilon)}{|\partial_x W|^2} + {|\partial_x V|^2} \right] } \nonumber \\
& \quad + \int_{\mathbb{R}_+}\int_\mathbb{R}{\partial_x \bar v_\varepsilon \left[|W|^2+ \varepsilon^{2/\gamma}|\partial_x W|^2\right]} + \int_{\mathbb{R}_+}\int_\mathbb{R}{\dfrac{(\partial_x V)^2+ \varepsilon^{2/\gamma}(\partial^2_x V)^2}{\bar v_\varepsilon}} \nonumber\\
& \leq C \int_\mathbb{R}{\left[\left(\dfrac{-1}{p'_\varepsilon(\bar v_\varepsilon)}{|W|^2}\right)_{|t=0} + {|V_{|t=0}|^2} + \varepsilon^{2/\gamma}\left(\left(\dfrac{-1}{p'_\varepsilon(\bar v_\varepsilon)}{|\partial_x W|^2}\right)_{|t=0} + {|\partial_x V_{|t=0}|^2} \right)\right]} \\
& \quad + C\left| \int_{\mathbb{R}_+}\int_\mathbb{R} \left[G_\varepsilon\frac{W}{-p_\varepsilon'(\bar v_\varepsilon)} + F_\varepsilon \ V\right] \right|
+ C \varepsilon^{2/\gamma}\left|\int_{\mathbb{R}_+}\int_\mathbb{R} \left[\partial_x G_\varepsilon\frac{\partial_x W}{-p_\varepsilon'(\bar v_\varepsilon)} + \partial_x F_\varepsilon \ \partial_x V\right]\right|. \nonumber \end{align} The passage to the integrated variables $(V,W)$ is therefore essential for the derivation of global-in-time estimates.\\ In the same manner, for $k=2$, the reader can check that by multiplying by $\varepsilon^{4/\gamma}$ the energy inequality satisfied by $(\partial^2_x V,\partial^2_x W)$ and combining it with~\eqref{eq:k=1-bis}, we can close a weighted energy estimate (recall that the nonlinear terms in $F_\varepsilon$, $G_\varepsilon$ are considered as small perturbations).\\ This explains the structure of the energy in assumption \eqref{eq:init-1}. Hence, we define \[\begin{aligned}
E_k(t;V,W):= \int_\mathbb{R}{\left[\dfrac{-1}{p'_\varepsilon(\bar v_\varepsilon(\cdot -s_\varepsilon t))}{|\partial_x^k W (t)|^2}+ {|\partial_x^k V(t)|^2} \right] dx},\\
D_k(t;V,W):= \int_\mathbb{R} \partial_x\bar v_\varepsilon (\cdot -s_\varepsilon t)|\partial_x^k W|^2 dx + \int_\mathbb{R}{{(\partial_x^{k+1} V)^2} dx}. \end{aligned} \]
The goal is to prove, by a fixed point argument, existence and uniqueness of global smooth solutions $(V,W)$, under the assumption that $E_k(0)$ is small enough for $k=0,1,2$. Given the couple $(V_1,W_1)$, we introduce the following system \begin{align*}\label{eq:syst-fixed-point} &\partial_t \begin{pmatrix} V_2\\ W_2 \end{pmatrix} + \mathcal L_\ep\begin{pmatrix} V_2\\ W_2 \end{pmatrix} = \begin{pmatrix} F_\varepsilon(\partial_x V_1)\\ G_\varepsilon(\partial_x V_1) \end{pmatrix}\\
&(V_2, W_2)_{|t=0}=(V_0, W_0) \end{align*} and the application \[ \mathcal A^\varepsilon :( V_1, W_1) \in \mathcal X \mapsto ( V_2, W_2) \in \mathcal X, \] where \[ \mathcal X:= \{ (V,W) \in L^\infty(\mathbb{R}_+; H^2(\mathbb{R}))^2 ;\ D_k(t;W,V)\in L^1(\mathbb{R}_+) \text{ for }k=0,1,2 \}. \] We endow $\mathcal X$ with the norm \begin{equation}\label{eq:df-norm}
\|(V,W)\|_{\mathcal X}^2:= \sup_{t\in [0,+\infty[} \left[\sum_{k=0}^2 c^k \varepsilon^{2k/\gamma} \left[E_k(t,V(t),W(t)) + \int_0^t D_k(s,V(s),W(s))\:ds\right] \right], \end{equation} where $c$ will be taken small but independent of $\varepsilon$. For $\delta>0$, we denote by $B_\delta$ the ball \begin{equation}
B_\delta = \{(V,W)\in \mathcal X,\ \|(V,W)\|_{\mathcal X}< \delta \varepsilon^{\frac{5}{2\gamma}}\}. \end{equation} Under the assumptions of Proposition~\ref{prop:ex-VW}, there exists $\delta = \delta(\delta_0,v_+,\mu,\gamma)$ such that the ball $B_\delta$ is invariant under $\mathcal A^\varepsilon$ and such that furthermore $\mathcal A^\varepsilon$ is a contraction on $B_\delta$. See details of the proof in~\cite{DP}. \end{proof}
Let us now return to the original variables $(v=\bar v_\varepsilon(\cdot -s_\varepsilon t) + \partial_x V,u)$. In the rest of this paper, we will write $(\bar v_\varepsilon, \bar u_\varepsilon)$ for $(\bar v_\varepsilon, \bar u_\varepsilon)(\cdot -s_\varepsilon t)$ in order to lighten the notation.
\begin{lem}\label{lem:stability-u}
Assume that initially $(W_0,V_0)\in H^2(\mathbb{R})\times H^3(\mathbb{R})$ is such that~\eqref{eq:init-1} is satisfied by the couple $(W_0,V_0)$
and consider the solution $(W,V) \in B_\delta \subset \mathcal X$ of~\eqref{eq:NS-ep-int} given by the previous proposition.
Then there exists a unique regular solution $u$ to
\begin{align}\label{eq:upertub}
\partial_t(u -\bar u_\varepsilon) - \mu\partial_x\left(\frac{1}{v}\partial_x(u-\bar u_\varepsilon)\right)
& = - \partial_x(p_\varepsilon(v) -p_\varepsilon(\bar v_\varepsilon))
+ \mu \partial_x\left(\left(\frac{1}{v}-\frac{1}{\bar v_\varepsilon}\right)\partial_x \bar u_\varepsilon \right),
\end{align}
which is such that
\begin{align}
u-\bar u_\varepsilon \in \mathcal{C}([0,+\infty); H^1(\mathbb{R})) \cap L^2([0,+\infty),H^2(\mathbb{R})), \quad
\partial_t(u-\bar u_\varepsilon) \in L^2([0,+\infty)\times \mathbb{R}).
\end{align} \end{lem}
Let us note that, until now, we have not properly justified the passage to the integrated system~\eqref{eq:NS-ep-int}. The equivalence between the original system~\eqref{eq:NS-ep} and~\eqref{eq:NS-ep-int} is established by proving $L^1$ bounds on $v-\bar v_\varepsilon$, $u-\bar u_\varepsilon$, $w- \bar w_\varepsilon$.
\begin{lem} Assume that the initial data $(u_0,v_0)$ is such that \[ u_0 - \bar u_\varepsilon \in W^{1,1}_0(\mathbb{R})\cap H^1(\mathbb{R}), \quad v_0 - \bar v_\varepsilon \in W^{2,1}_0(\mathbb{R})\cap H^2(\mathbb{R}). \] Then for all times $t\geq 0$, $(v-\bar v_\varepsilon)(t,\cdot)$ and $(u-\bar u_\varepsilon)(t,\cdot)$ belong to $L^1_0(\mathbb{R})$. \end{lem}
\begin{proof}[Idea of the proof]
We first derive $L^1$ bounds on $u-\bar u_\varepsilon $ and $w-\bar w_\varepsilon$, which follow from ideas from~\cite{haspot2018}. We consider a sequence $(j_n)_{n\in \mathbb N}$ of $\mathcal C^2$, convex functions, converging as $n\to +\infty$ towards $|\cdot|$ in $W^{1,\infty}$. We multiply the equation on $u-\bar u_\varepsilon$ (resp. on $w-\bar w_\varepsilon$) by $j_n'(u-\bar u_\varepsilon)$ (resp. $j_n'(w-\bar w_\varepsilon)$) and perform integrations by parts. Using the convexity of $j_n$, we observe that the diffusion term has a positive sign. We eventually obtain \[
\frac{d}{dt}\int_{\mathbb{R}} \left(j_n(u-\bar u_\varepsilon) + j_n (w-\bar w_\varepsilon)\right) \leq C_\varepsilon\left( 1 + \|u-\bar u_\varepsilon\|_{L^1} + \|w-\bar w_\varepsilon\|_{L^1} \right). \]
The constant $C_\varepsilon$ involves bounds on $ \bar u_\varepsilon, \bar v_\varepsilon$ in various Sobolev spaces ($W^{2,1}$, $W^{1,\infty} $), on $\|w-\bar w_\varepsilon\|_{L^\infty(\mathbb{R}_+, W^{1,\infty}(\mathbb{R}))}$ and on $\|u-\bar u_\varepsilon\|_{L^\infty(\mathbb{R}_+, W^{1,\infty}(\mathbb{R}))}$. Integrating in time, letting $n\to \infty$ and using a Gronwall lemma, we obtain
\begin{align} \label{eq:L^1-uw}
\|(u-\bar u_\varepsilon)(t)\|_{L^1_x} + \|(w-\bar w_\varepsilon)(t)\|_{L^1_x}
\leq C_\varepsilon \Big[\|u_0-\bar u_\varepsilon(0)\|_{L^1_x} + \|w_0-\bar w_\varepsilon(0)\|_{L^1_x}\Big] \ e^{C_\varepsilon t}.
\end{align} We then derive similar estimates on $v-\bar v_\varepsilon$.
The obtained estimate is local in time and depends on $\varepsilon$ but we are just interested in the fact that $(v-\bar v_\varepsilon)(t,\cdot)$, $(u-\bar u_\varepsilon)(t,\cdot)$ belong to $L^1(\mathbb{R})$ to justify the study of the integrated system~\eqref{eq:NS-ep-int}. \end{proof}
Finally, we have the following result. \begin{thm}[Nonlinear asymptotic stability of partially congested profiles]\label{thm:estimates}
Assume that the initial data $(u_0,v_0)$ is such that
\[
u_0 - \bar u_\varepsilon \in W^{1,1}_0(\mathbb{R})\cap H^1(\mathbb{R}), \quad
v_0 - \bar v_\varepsilon \in W^{2,1}_0(\mathbb{R})\cap H^2(\mathbb{R}),
\]
and the associated couple $(W_0,V_0) \in H^2(\mathbb{R})\times H^3(\mathbb{R})$ satisfies~\eqref{eq:init-1}.
Then there exists a unique global solution $(u,v)$ to~\eqref{eq:NS-ep} which satisfies
\begin{align*}
& u-\bar u_\varepsilon \in \mathcal{C}([0,+\infty);H^1(\mathbb{R})\cap L^1_0(\mathbb{R})), \\
& v-\bar v_\varepsilon \in \mathcal{C}([0,+\infty);H^1(\mathbb{R})\cap L^1_0(\mathbb{R})) \cap L^2(\mathbb{R}_+; H^2(\mathbb{R})),
\end{align*}
and
\begin{equation}\label{eq:min-v}
v(t,x) > 1 \quad \text{for all}~ t,x.
\end{equation}
Finally
\begin{equation}
\sup_{x\in \mathbb{R}} \ \Big| \big((v,u)(t,x) - (\bar v_\varepsilon,\bar u_\varepsilon)(x-s_\varepsilon t)\big)\Big| \underset{t\rightarrow +\infty}{\longrightarrow} 0.
\end{equation} \end{thm}
\begin{proof}[Idea of the proof] It essentially remains to justify the minimal constraint~\eqref{eq:min-v} and the long-time behavior.\\
For the first point, we write that for $(W,V) \in B_{\delta}= \{ (W,V) \in \mathcal{X}, ~ \|(W,V)\|_{\mathcal{X}} \leq \delta \varepsilon^{\frac{5}{2\gamma}} \}$ \begin{align*}
\|v-\bar v_\varepsilon\|_{L^\infty_{t,x}}
= \|\partial_x V\|_{L^\infty_{t,x}}
& \leq C \|\partial_x V\|_{L^\infty_t L^2_x}^{1/2} \, \|\partial_x^2 V\|_{L^\infty_t L^2_x}^{1/2} \\
& \leq C \varepsilon^{-\frac{3}{2\gamma}} \|(W,V)\|_{\mathcal{X}} \\ & \leq C \delta \varepsilon^{\frac{1}{\gamma}} , \end{align*} so that for $\delta$ small enough \[
\|v-\bar v_\varepsilon\|_{L^\infty_{t,x}} < \varepsilon^{\frac{1}{\gamma}}. \] Recalling that $\bar v_\varepsilon \geq v_-^\varepsilon =1+\varepsilon^{1/\gamma}$, we deduce that $v(t,x)>1$ for all $t > 0$, $x \in \mathbb{R}$.\\ The asymptotic stability of $(\bar v_\varepsilon,\bar u_\varepsilon)$ easily derives from the regularity of $(v -\bar v_\varepsilon,u-\bar u_\varepsilon)$. Indeed, we have on the $v$ variable \[ v-v_\ep = \partial_x V \in L^2([0,+\infty);H^2(\mathbb{R})), \qquad \partial_t (v-v_\ep) = \partial_x(u-u_\ep) \quad \text{in} \quad L^2([0,+\infty);H^1(\mathbb{R})), \] and we infer that \[
\|(v-\bar v_\varepsilon)(t)\|_{H^1_x} \underset{t\rightarrow +\infty}{\longrightarrow} 0. \] As a consequence, we have \[
|(v-\bar v_\varepsilon)(t,x)| \leq C \|(v -\bar v_\varepsilon)(t)\|_{L^2_x}^{1/2} \|\partial_x(v -\bar v_\varepsilon)(t)\|_{L^2_x}^{1/2} \underset{t\rightarrow +\infty}{\longrightarrow} 0. \] Similar arguments show the uniform convergence of $u -\bar u_\varepsilon$ to $0$ as $t\to +\infty$. \end{proof}
\section{Local well-posedness result for the free-congested Navier-Stokes equations} {\label{sec:LWP}}
In this section, we consider the system \eqref{eq:NS-lim}-\eqref{eq:constraint} endowed with an initial data $(u^0, v^0)$. To the best of our knowledge, the study of the Cauchy problem for this system has not been tackled before. Our purpose is similar to the one of the previous section. We consider the travelling wave $(\bar v, \bar u)(x-st)$ constructed in Section~\ref{sec:profiles}. We start from an initial data which is a perturbation of this profile, and we construct strong local solutions of the system.
However, due to the nature of the system \eqref{eq:NS-lim}-\eqref{eq:constraint}, we will not consider arbitrary perturbations. We restrict our study to initial data which are perturbations of the profile $(\bar v, \bar u)$ in the non-congested zone only. As time evolves, this will allow us to have a simpler description of the non-congested zone, which will simply be a half-line $]\tilde x(t), +\infty[$. Let us now make explicit our assumptions on the initial data~$(u^0, v^0)$: \begin{enumerate}[(H1)] \item{\it Partially congested initial data: }$(u^0, v^0)\in (\bar u, \bar v) + L^1(\mathbb{R})$, and such that $u^0(x)=\bar u(x)=u_-$, $v^0(x)=\bar v(x)=1$ if $x<0$;
\item {\it Regularity: }$\mathbf 1_{x>0}(u^0-\bar u, v^0-\bar v)\in H^3(\mathbb{R}_+)$;
\item {\it Compatibility:} $u^0(0^+)=u_-$, $v^0(0^+)=1$, and \begin{equation}
\left[-\frac{(\partial_x u^0)^2}{\partial_x v^0} - \mu \partial_x v^0 \partial_x u^0 + \mu \partial_x^2 v^0\right]_{|x=0^+}=0; \end{equation}
\item{\label{item:no-deg}} {\it Non-degeneracy:} $\partial_x v^0(0^+)>0$, $\partial_x u^0(0^+)>0$ and $v^0(x)>1$ for $x>0$;
\end{enumerate}
Under these assumptions, the solution of \eqref{eq:NS-lim}-\eqref{eq:constraint} associated with $(u^0, v^0)$, if it exists, will not be a travelling wave. However, it is reasonable to expect such a solution to be congested in a zone $x<\widetilde{x}(t)$, and non-congested in a zone $x>\widetilde{x}(t)$, where the free boundary $x=\widetilde{x}(t)$ is an unknown of the problem. We now state the following result:
\begin{thm}[Local in time existence and uniqueness]
Let $(u^0, v^0)$ satisfy the assumptions (H1)-(H4). Then there exists $T>0$ and $\widetilde{x} \in H^2_\text{loc}([0,T[)$, with $\widetilde{x}(0) = 0$, $\widetilde{x}'(0) = -[\frac{\partial_x u^0}{\partial_x v^0}]_{x=0^+}$, such that~\eqref{eq:NS-lim} has a unique maximal solution $(u,v)$ of the form $(u,v)(t,x)= (u_s, v_s)(t, x-\widetilde{x}(t))$ on the interval $[0, T[$, where $u_s(t,x)= u_-$, $v_s(t,x)= 1$ and $p_s(t,x) = -\mu (\partial_x u_s)_{|x=0^+}$ for $x<0$. Furthermore, \begin{equation} v_s(t,x) > 1 \quad \text{for all}~ t\in [0,T[, ~x > 0, \end{equation} and the solution $(u_s,v_s)$ has the following regularity in the free domain: \begin{align} v_s - \bar v, \ u_s -\bar u \in L^\infty([0,T[; H^3(\mathbb{R}_+)), \label{reg-uv}\\ \partial_t(v_s -\bar v), ~ \partial_t(u_s-\bar u) \in L^\infty([0,T[;H^1(\mathbb{R}_+)) \cap L^2(]0,T[; H^2(\mathbb{R}_+)).\label{reg-dtuv} \end{align} Finally, the pressure in the congested domain satisfies \begin{equation} p_s \in H^1(0,T). \end{equation}
\label{thm:main-loc} \end{thm}
The strategy of proof is the following. We work in the shifted variable $x-\widetilde{x}(t)$. Since $(u,v)$ is expected to be constant in $x-\widetilde{x}(t)<0$, we only consider the system satisfied by $(u_s, v_s)$ in the positive half-line, which reads \begin{subnumcases}{\label{eq:NS-shifted-1}} \partial_t v_s - \widetilde{x}'(t) \partial_x v_s- \partial_x u_s = 0,\quad t>0,\ x>0\label{eq:NS-shifted-1-v}\\ \partial_t u_s - \widetilde{x}'(t) \partial_x u_s - \mu \partial_x\left(\dfrac{1}{v_s}\partial_x u_s \right) = 0,\quad t>0,\ x>0\label{eq:NS-shifted-1-u}\\
(v_s,u_s)_{|x=0}= ( 1 ,u_-), \quad \lim_{x\to \infty} (v_s(t,x),u_s(t,x)) = (v_+,u_+) \quad \forall \ t > 0. \end{subnumcases} Of course, the dynamics of $\widetilde{x}$ is coupled with the dynamics of $(v_s, u_s)$. In order to construct a solution of \eqref{eq:NS-shifted-1}, it will be more convenient to modify the equation on $v_s$ in order to make the regularizing effects of the diffusion more explicit. Indeed, setting $w_s= u_s - \mu \partial_x \ln v_s$, we find that equation \eqref{eq:NS-shifted-1-v} can be written as \[ \partial_t v_s - \widetilde{x}'(t) \partial_x v_s- \mu\partial^2_x \ln v_s = \partial_x w_s,\quad t>0,\ x>0. \] Moreover,
\begin{equation}\label{eq:ws} \partial_t w_s - \widetilde{x}'(t) \partial_x w_s = 0,\quad t>0,\ x>0, \end{equation} therefore $w_s(t,x)= w^0 (x + \widetilde{x}(t))$ for all $t>0, x>0$ provided $\widetilde{x}'(t)>0$ for all $t>0$. In particular, letting $x\to 0^+$, we obtain \begin{equation}\label{EDO-1}
u_- - \mu \partial_x v_{s|x=0^+}= w^0(\widetilde{x}(t)). \end{equation} Now, taking the trace of \eqref{eq:NS-shifted-1-v} on $x=0^+$, we have \begin{equation}\label{EDO-2}
\widetilde{x}'(t) \partial_x v_{s|x=0^+}= - \partial_x u_{s|x=0^+} .\end{equation} Gathering \eqref{EDO-1} and \eqref{EDO-2} leads to \begin{equation}\label{EDO-tx}
\widetilde{x}'(t) = -\frac{\partial_x u_{s|x=0^+}}{ \partial_x v_{s|x=0^+}}= -\mu \frac{ \partial_x u_{s|x=0^+}}{u_- - w^0(\widetilde{x}(t))}, \end{equation} which links the dynamics of the interface to $(v_s,u_s)$.
Since $w_s(t)=w^0(\cdot + \widetilde{x}(t))$, the equation on $v_s$ can be rewritten as \begin{equation}\label{eq:vs}\begin{aligned} \partial_t v_s - \widetilde{x}'(t) \partial_x v_s - \mu \partial_{x x} \ln v_s= \partial_x w^0(x+ \widetilde{x}(t)),\quad t>0, \ x>0,\\
v_{s|x=0}=1,\quad \lim_{x\to \infty} v_s(t,x)= v_+,\\
v_{s|t=0}=v^0. \end{aligned}\end{equation} Thus we will build a solution $(\widetilde{x}, v_s, u_s)$ of \eqref{EDO-tx}-\eqref{eq:vs}-\eqref{eq:NS-shifted-1-u} thanks to the following fixed point argument: \begin{enumerate}
\item For any given $\widetilde{y}\in H^2_\text{loc}(\mathbb{R}_+)$, such that $\widetilde{y}(0) = 0$ and $\widetilde{y}'(0)= - \frac{\partial_x u^0_{|x=0^+}}{\partial_x v^0_{|x=0^+}}$, we consider the solution $v$ of the equation
\begin{equation}\label{eq:vs-y}\begin{aligned} \partial_t v - \widetilde{y}'(t) \partial_x v - \mu \partial_{x x} \ln v = \partial_x w^0(x+ \widetilde{y}(t)),\quad t>0, \ x>0,\\
v_{|x=0}=1,\quad \lim_{x\to \infty} v(t,x)= v_+,\\
v_{|t=0}=v^0. \end{aligned}\end{equation}
We prove that under suitable conditions on the initial data, there exists a unique solution $v \in \bar v + L^\infty_\text{loc}(\mathbb{R}_+,H^1(\mathbb{R}_+))$, and we derive higher regularity estimates ({\it cf} the regularities announced in~\eqref{reg-uv}-\eqref{reg-dtuv}).
\item We then consider the unique solution $u\in \bar u + L^\infty_\text{loc}(\mathbb{R}_+, H^1(\mathbb{R}_+))$ of
\begin{equation}\label{eq:us-y}
\begin{aligned}
\partial_t u - \widetilde{y}'(t) \partial_x u - \mu \partial_x\left(\dfrac{1}{v}\partial_x u\right) = 0\quad t>0, \ x>0,\\
u_{|x=0}= u_-, \quad \lim_{x\to \infty} u(t,x)= u_+,\\
u_{|t=0}=u^0, \end{aligned}\end{equation}
where $v$ is the solution of \eqref{eq:vs-y}. Once again, we derive regularity estimates on $u$ ({\it cf}~\eqref{reg-uv}-\eqref{reg-dtuv}).
\item Eventually, we define
\[
\tilde z(t):=-\mu \int_0^t \ \frac{ \partial_x u(\tau, 0)}{u_- - w^0(\tilde y(\tau))}d\tau,
\]
and we consider the application $\mathcal A:\tilde y \in H^2_\text{loc}(\mathbb{R}_+) \mapsto \tilde z \in H^2_\text{loc}(\mathbb{R}_+)$.
We prove that for $T>0$ small enough the application $\mathcal A$ is a contraction, and therefore has a unique fixed point. \end{enumerate}
We then need to check that the solution $(\widetilde{x}, v_s, u_s)$ of \eqref{EDO-tx}-\eqref{eq:vs}-\eqref{eq:NS-shifted-1-u} provided by the fixed point of $\mathcal A$ is indeed a solution of the original problem. Since system \eqref{eq:NS-shifted-1} has been modified, this is not completely obvious. In fact, we need to check that the function $w_s=u_s-\mu \partial_x \ln v_s$ is indeed equal to $w^0 (x+\widetilde{x}(t))$. To that end, let us compute the equation satisfied by $w_s$ if $v_s$ is the solution of \eqref{eq:vs} and if $u_s$ is the solution of \eqref{eq:NS-shifted-1-u}. Combining \eqref{eq:vs} and \eqref{eq:NS-shifted-1-u}, we have \begin{equation}\label{eq:ws-para} \partial_t w_s - \tilde x'(t) \partial_x w_s -\mu \partial_x\left(\frac{1}{v_s}\partial_x w_s\right)=-\mu \partial_x\left(\frac{1}{v_s}\partial_x w^0(x+\widetilde{x}(t))\right).
\end{equation} Furthermore, the condition $u_{s|x=0^+}=u_-$ ensures that \[
w_{s|x=0^+}= u_-- \mu \partial_x v_{s |x=0^+}, \] and using the equation \eqref{eq:vs} together with~\eqref{EDO-tx} \begin{align*}
\partial_x w_{s|x=0^+}
& = \partial_x u_{s|x=0^+} - \mu \partial_{xx} \ln v_{s|x=0^+} \\
& = \frac{w^0(\widetilde{x}(t))-u_-}{\mu}\widetilde{x}'(t) + \widetilde{x}'(t) \partial_x v_{s|x=0^+} + \partial_x w^0(\widetilde{x}(t)). \end{align*} Taking a linear combination of these two equations leads to \begin{equation}\label{BC-w}
\mu \partial_x w_{s|x=0^+} + \widetilde{x}'(t) w_{s|x=0^+} = \widetilde{x}'(t)w^0(\widetilde{x}(t)) + \mu \partial_x w^0(\widetilde{x}(t)). \end{equation} It can be easily proved that the solution of \eqref{eq:ws-para}-\eqref{BC-w} endowed with the initial data $w^0$ is the function $w^0(x+\widetilde{x}(t))$. Thus the function $(v_s,u_s)$ constructed as the solution of \eqref{eq:vs}-\eqref{eq:NS-shifted-1-u}, where $\widetilde{x}$ is the solution of \eqref{EDO-tx}, is in fact a solution of \eqref{eq:NS-shifted-1}. We extend this solution in $x<0$ by setting $v_s(t,x)=1$, $u_s(t,x)=u_-$, and we set \[
p_s(t,x)=-\mu\partial_x u_{s|x=0^+}= \widetilde{x}'(t)(u_--w^0(\widetilde{x}(t)))\quad \forall x<0. \] Eventually, we come back to the original variables and set $(v,u,p)(t,x)=(v_s,u_s,p_s)(t,x-\widetilde{x}(t)).$ Then it is easily checked that $(v,u,p)$ is a solution of the original system \eqref{eq:NS-lim}.
\begin{rmk}[About the regularity of $\widetilde{x}$] In the above discussion, we have claimed that we will prove the existence of a fixed point $\widetilde{x}$ in $H^2_{loc}(\mathbb{R}_+)$. Let us discuss why this regularity is required on $\widetilde{x}$. First, we need a control of $\widetilde{x}'$ in $L^\infty(\mathbb{R}_+)$ in order to control the transport equation~\eqref{eq:ws} satisfied by $w_s$.
Next, we see in~\eqref{EDO-tx} that the control in $L^\infty$ of $\widetilde{x}$ requires a bound on $\partial_x u_s$ in $L^\infty(\mathbb{R}_+ \times \mathbb{R}_+)$, while this latter bound would a priori rely on a control of $\widetilde{x}''$ in $L^2_{loc}(\mathbb{R}_+)$ .
Therefore the regularity $\widetilde{x} \in H^2_{loc}(\mathbb{R}_+)$ is the minimal regularity which allows us to formally close the fixed point argument with the regularity~\eqref{reg-uv}-\eqref{reg-dtuv} on $u-\bar u$. \end{rmk}
\end{document} | arXiv | {
"id": "2105.01336.tex",
"language_detection_score": 0.553443431854248,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Simultaneous Inference for Pairwise Graphical Models with Generalized Score Matching}
\author{\name Ming Yu \email mingyu@chicagobooth.edu \\
\name Varun Gupta \email varun.gupta@chicagobooth.edu \\
\name Mladen Kolar \email mladen.kolar@chicagobooth.edu \\
\addr Booth School of Business \\
The University of Chicago\\
Chicago, IL 60637, USA }
\editor{Jie Peng}
\maketitle
\begin{abstract}
Probabilistic graphical models provide a flexible yet parsimonious framework for modeling dependencies among nodes in networks. There is a vast literature on parameter estimation and consistent model selection for graphical models. However, in many of the applications, scientists are also interested in quantifying the uncertainty associated with the estimated parameters and selected models, which current literature has not addressed thoroughly. In this paper, we propose a novel estimator for statistical inference on edge parameters in pairwise graphical models based on generalized Hyv\"arinen scoring rule. Hyv\"arinen scoring rule is especially useful in cases where the normalizing constant cannot be obtained efficiently in a closed form, which is a common problem for graphical models, including Ising models and truncated Gaussian graphical models. Our estimator allows us to perform statistical inference for general graphical models whereas the existing works mostly focus on statistical inference for Gaussian graphical models where finding normalizing constant is computationally tractable. Under mild conditions that are typically assumed in the literature for consistent estimation, we prove that our proposed estimator is $\sqrt{n}$-consistent and asymptotically normal, which allows us to construct confidence intervals and build hypothesis tests for edge parameters. Moreover, we show how our proposed method can be applied to test hypotheses that involve a large number of model parameters simultaneously. We illustrate validity of our estimator through extensive simulation studies on a diverse collection of data-generating processes.
\end{abstract}
\begin{keywords} generalized score matching, high-dimensional inference, probabilistic graphical models, simultaneous inference \end{keywords}
\section{Introduction} \label{sec:introduction}
Undirected probabilistic graphical models are widely used to explore and represent dependencies between random variables \citep{Lauritzen1996Graphical}. They have been used in areas ranging from computational biology to neuroscience and finance. An undirected probabilistic graphical model consists of an undirected graph $G = (V,E)$, where $V = \{1, \ldots, p\}$ is the vertex set and $E \subset V \times V$ is the edge set, and a random vector $X = (X_1, \ldots, X_p) \in \Xcal \subseteq \RR^p$. Each coordinate of the random vector $X$ is associated with a vertex in $V$ and the graph structure encodes the conditional independence assumptions underlying the distribution of $X$. In particular, $X_a$ and $X_b$ are conditionally independent given all the other variables if and only if $(a,b) \not\in E$, that is, the nodes $a$ and $b$ are not adjacent in $G$. One of the fundamental problems in statistics is that of learning the structure of $G$ from {\it i.i.d.}~samples from $X$ and quantifying the uncertainty of the estimated structure. \cite{Drton2016Structure} provides a recent review of algorithms for learning the structure, while \cite{Jankova2018Inference} provides an overview of statistical inference in Gaussian graphical models.
Gaussian graphical models are a special case of undirected probabilistic graphical models and have been widely studied in the machine learning literature. Suppose that $X \sim \Ncal(\mu, \Sigma)$. In this case, the conditional independence graph is determined by the pattern of non-zero elements of the inverse of the covariance matrix $\Omega = \Sigma^{-1} = (\omega_{ab})$. In particular, $X_a$ and $X_b$ are conditionally independent given all the other variables in $X$ if and only if $\omega_{ab}$ and $\omega_{ba}$ are both zero. This simple relationship has been fundamental for the development of rich literature on Gaussian graphical models and has facilitated the development of fast algorithms and inferential procedures \citep[see, for example,][]{Dempster1972Covariance, Drton04model, Meinshausen2006High,
Yuan2007Model, Friedman2008Sparse, rothman08spice, Yuan2010High,
Sun2012Sparse, Cai2011Constrained}.
In this paper, we consider a more general, but still tractable, class of pairwise interaction graphical models with densities belonging to an exponential family $\Pcal = \{ p_\theta(x) \mid \theta \in \Theta \}$ with natural parameter space $\Theta$: \begin{multline}
\label{eq:logdensity}
\log p_\theta(x) =
\sum_{a \in V} \sum_{k \in [K]}\theta_{a}^{(k)} t_{a}^{(k)}(x_a) \\
+
\sum_{(a,b)\in E} \sum_{l \in [L]}\theta_{ab}^{(l)} t_{ab}^{(l)}(x_a,x_b)
- \Psi(\theta) + \sum_{a\in V}h_a(x_a),
\quad x \in \Xcal \subseteq \RR^p. \end{multline} The functions $t_{a}^{(k)}$, $t_{ab}^{(l)}$ are the sufficient statistics and $\Psi(\theta)$ is the log-partition function. We assume throughout the paper that the support of the densities is either $\Xcal = \RR^p$ or $\Xcal = \RR^p_+$ and $\Pcal$ is dominated by Lebesgue measure on $\RR^p$. To simplify the notation, for a log-density of the form given in \eqref{eq:logdensity} we will write $$
\log p_\theta(x) = \theta^\top t(x) - \Psi(\theta) + h(x), $$ where $\theta \in \RR^s$ and $t : \RR^{p} \to \RR^s$ with $s = L \cdot \binom{p}{2} + p\cdot K$. The natural parameter space has the form $\Theta = \{ \theta \in \RR^s \mid \Psi(\theta) = \log\int_{\Xcal}\exp(\theta^\top t(x))\,dx<\infty\}$. Under the model in \eqref{eq:logdensity}, there is no edge between $a$ and $b$ in the corresponding conditional independence graph if and only if $\theta_{ab}^{(1)}=\cdots=\theta_{ab}^{(L)}=0$. The model in \eqref{eq:logdensity} encompasses a large number of graphical models studied in the literature as we discuss in Section~\ref{sec:related}. \citet{Lin2015High} studied estimation of parameters in model \eqref{eq:logdensity}, however, the focus of this paper, as we discuss next, is on performing statistical inference---constructing honest confidence intervals and statistical tests---for parameters in \eqref{eq:logdensity}.
The focus of the paper is on the inferential analysis about parameters in the model given in \eqref{eq:logdensity}, as well as the Markov dependencies between observed variables. Our inference procedure does not rely on the oracle support recovery properties of the estimator and is therefore uniformly valid in a high-dimensional regime and robust to model selection mistakes, which commonly occur in ultra-high dimensional setting. Our approach is based on Hyv\"arinen generalized scoring rule estimate of $\theta$ in \eqref{eq:logdensity}. The same procedure was used in \citet{Lin2015High}, however, rather than focusing on consistent model selection, we use the initial estimator to construct a regular linear estimator \citep{Vaart1998Asymptotic}. We establish Bahadur type representation for our final regular estimator that is robust to model selection mistakes and valid for a big class of data generating distributions. The purpose of establishing a Bahadur representation is to approximate an estimate by a sum of independent random variables, and hence prove the asymptotic normality of the estimator for \eqref{eq:logdensity}, allowing us to conduct statistical inference on the model parameters \citep[see][]{Bahadur1966note}. In particular, we show how to construct confidence intervals for a parameter in the model that have nominal coverage and also propose a statistical test for existence of edges in the graphical model with nominal size. These results complement existing literature, which is focused on consistent model selection and parameter recovery, as we review in the next section. Furthermore, we develop a methodology for constructing simultaneous confidence intervals for all the parameters in the model \eqref{eq:logdensity} and apply this methodology for testing the parameters in the differential network\footnote{We adopt the notion
used in \citet{Li2007Finding} and \citet{Danaher2011Joint} and
define the differential network as a difference between parameters
of two graphical models.}. The main idea here is to use the Gaussian multiplier bootstrap to approximate the distribution of the maximum coordinate of the linear part in the Bahadur representation. Appropriate quantile obtained from the bootstrap distribution is used to approximate the width of the simultaneous confidence intervals and the cutoff values for the tests for the parameters of the differential network.
\subsection{Main Contribution}
This paper makes two major contributions to the literature on statistical inference for graphical models. First, compared to previous work on high-dimensional inference in graphical models \citep{Ren2013Asymptotic, Barber2015ROCKET, Wang2016Inference,
Jankova2014Confidence}, this is the first work on statistical inference in models where computing the log-partition function is intractable. Existing works mostly focus on Gaussian graphical models with a tractable normalizing constant, whereas our method can be applied to more general models, as we discuss in Section~\ref{sec:ExpoGM}. Second, we apply our proposed method to simultaneous inference on all edges connected to a specific node. Our simultaneous inference procedure can be used to \begin{enumerate}[topsep=0pt,itemsep=-1ex,partopsep=1ex,parsep=1ex] \item test whether a node is isolated in a graph; that is, whether
it is conditionally independent with all the other nodes; \item estimate the support of the graph by setting an appropriate
threshold on the proposed estimators; and \item test for the difference between graphical models where we have
observations of two graphical models with the same nodes and we
would like to test whether the local connectivity pattern for a specific
node is the same in the two graphs. \end{enumerate}
Once again, the existing approaches cannot deal with simultaneous testing with an intractable normalizing constant. Moreover, most of the existing work imposes a sparsity condition on the inverse of the Hessian and focuses on $L = 1$ only. Here we relax the sparsity condition on the inverse Hessian and show how to perform inference for a general $L$.
\subsection{Related Work} \label{sec:related}
Our work straddles two areas of statistical learning which have attracted significant research of late: model selection and estimation in high-dimensional graphical models, and high-dimensional inference. We briefly review the literature most relevant to our work, and refer the reader to two recent review articles for a comprehensive overview \citep{Drton2016Structure,Jankova2018Inference}. \citet{Drton2016Structure} focuses on structure learning in graphical models, while \citet{Jankova2018Inference} reviews inference in Gaussian graphical models.
We start by reviewing the literature on learning structure of probabilistic graphical models. Much of the research effort has focused on learning structure of Gaussian graphical models where the edge set $E$ of the graph $G$ is encoded by the non-zero elements of the precision matrix $\Omega = \Sigma^{-1}$. The literature here roughly splits into two categories: global and local methods. Global methods typically estimate the precision matrix by maximizing regularized Gaussian log-likelihood \citep{Yuan2007Model,
rothman08spice, Friedman2008Sparse, dAspremont2008First,
Ravikumar2011High, fan09network, Lam2009Sparsistency}, while local methods estimate the graph structure by learning the neighborhood or Markov blanket of each node separately
\citep{Meinshausen2006High, Yuan2010High,
Cai2011Constrained, Liu2012TIGER, Zhao2014Calibrated}. Extensions to more general distributions in Gaussian and elliptical families are possible using copulas, as the graph structure within these families is again determined by the inverse of the latent correlation matrix \citep{Liu2009Nonparanormal:, Liu2012High, Xue2012Regularized,
Liu2012Transelliptical, Fan2014High}.
Once we depart from the Gaussian distribution and related families, learning the conditional independence structure becomes more difficult, primarily owing to computational intractability of evaluating the log-partition function. A computationally tractable alternative to regularized maximum likelihood estimation is regularized pseudo-likelihood which was studied in the context of learning structure of Ising models in \citet{Hoefling2009Estimation}, \citet{ravikumar09high}, and \citet{Xue2012Nonconcave}. Similar methods were developed in the study of mixed exponential family graphical models, where a node's conditional distribution is a member of an exponential family distribution, such as Bernoulli, Gaussian, Poisson or exponential. See \cite{Guo2011Joint}, \cite{Guo2011Asymptotic}, \cite{Lee2012Learning}, \cite{Cheng2013High}, \cite{Yang2012Graphical}, and \cite{Yang2014Mixed} for more details.
More recently, score matching estimators have been investigated for learning the structure of graphical models in high-dimensions when the normalizing constant is not available in a closed-form \citep{Lin2015High,Yu2018Graphical}. Score matching was first proposed in \citet{hyvarinen2005estimation} and subsequently extended for binary models and models with non-negative data in \citet{Hyvaerinen2007Some}. It offers a computational advantage when the normalization constant is not available in a closed-form, making likelihood based approaches intractable, and is particularly appealing for estimation in exponential families as the objective function is quadratic in the parameters of interest. \citet{Sun2015Learning} develop a method based on score matching for learning conditional independence graphs underlying structured infinite-dimensional exponential families. \citet{Forbes2013Linear} investigated the use of score matching for the inference of Gaussian linear models in low-dimensional settings. However, despite its power, there have not been results on inference in high-dimensional models using score matching. As one of our contributions in this paper, we build on the prior work on estimation using generalized score matching and develop an approach to statistical inference for high-dimensional graphical models. In particular, we construct a novel $\sqrt{n}$-consistent estimator of parameters in~\eqref{eq:logdensity}. This is the first procedure that can obtain a parametric $\sqrt{n}$ rate of convergence for an edge parameter in a graphical model where computing the normalizing constant is intractable.
Next, we review the literature on high-dimensional inference, focusing on work related to high-dimensional undirected graphical models. \citet{Liu2013Gaussian} developed a procedure that estimates conditional independence graph from Gaussian observations and controls false discovery rates asymptotically. \citet{Wasserman2014Berry} develop confidence guarantees for undirected graphs under minimal assumptions by developing Berry-Esseen bounds on the accuracy of Normal approximation. \citet{Ren2013Asymptotic}, \citet{Jankova2014Confidence}, and \citet{Jankova2017Honest} develop methods for constructing confidence intervals for edge parameters in Gaussian graphical models, based on the idea of debiasing the $\ell_1$ regularized estimator developed in \citep{Zhang2011Confidence,
Geer2013asymptotically, Javanmard2013Confidence}. A related approach was developed for edge parameters in mixed graphical models whose node conditional distributions belong to an exponential family in \citet{Wang2016Inference}. \citet{Wang2014Inference} develop methodology for performing statistical inference in time-varying and conditional Gaussian graphical models, while \citet{Barber2015ROCKET} and \citet{Lu2015Posta} develop methods for semi-parametric copula models. We contribute to the literature on high dimensional inference by demonstrating how to construct regular estimators for probabilistic graphical models whose normalizing constant is intractable. Our estimators are robust to model selection mistakes and allow us to perform valid statistical inference for edge parameters in a large family of data generating distributions.
Finally, we contribute to the literature on simultaneous inference in high-dimensional models. \citet{Zhang2014Simultaneous} and \citet{Dezeure2017High} develop methods for performing simultaneous inference on all the coefficients in a high-dimensional linear regression. In the same setting, \citet{Zhao2014General} use a multiplier bootstrap approach to construct robust simultaneous confidence intervals. \citet{chang2018confidence} applies it to the simultaneous inference of Gaussian graphical models. These procedures allow for the dimensionality of the vector to be exponential in the sample size and rely on bootstrap to approximate the quantile of the test statistic. We extend these ideas to the high dimensional graphical model setting and show how we can build simultaneous hypothesis tests on the neighbors of a specific node.
A conference version of this paper was presented in the Annual Conference on Neural Information Processing Systems 2016 \citep{Yu2016Statistical}. Compared to the conference version, in this paper we extend the results in the following ways. First, we extend the results to include the generalized score matching method \citep{Yu2018Graphical, Yu2018Generalized} in place of the original score matching method. This generalized form of the score matching method allows us to improve the estimation accuracy and obtain better inference results for non-negative data. In the conference
version, we made an assumption that the inverse of the population
Hessian matrix, see Section~\ref{sec:asympt-norm-estim}, is
(approximately) sparse. We relax this sparsity condition and develop
an inference procedure that is valid even if the sparsity condition
is violated, but the inverse of the Hessian matrix has bounded
columns in the $\ell_1$ norm. Moreover, instead of focusing on a single edge as in the conference version, in this work we propose a procedure for simultaneous inference for all edges connected to a specific node. This allows us to build hypothesis tests for a broad class of applications, including testing of isolated nodes, support recovery, and testing the difference between two graphical models. Furthermore, while the conference version focused on the case
where $L=1$ in~\eqref{eq:logdensity}, here we extend the results to
a general choice of $L$. Lastly, we run additional experiments to demonstrate the effectiveness of our proposed method.
\subsection{Notation}
We use $[n]$ to denote the set $\{1,\ldots,n\}$. For a vector $a \in \RR^n$, we let ${\rm supp}(a) = \{j\ :\ a_j \neq 0\}$ be the support set (with an analogous definition for matrices
$A \in \mathbb{R}^{n_1\times n_2}$), $\|a\|_q$, $q \in [1,\infty)$, the $\ell_q$-norm defined as
$\|a\|_q = (\sum_{i\in[n]} |a_i|^q)^{1/q}$ with the usual extensions for $q \in \{0,\infty\}$, that is, $\|a\|_0 = |{\rm supp}(a)|$ and
$\|a\|_\infty = \max_{i\in[n]}|a_i|$.
For a vector $x$, $x_{M}$ is the sub-vector of $x$ with components corresponding to the set $M$, and $x_{-ab}$ is the sub-vector of $x$ with the component corresponding to the edge $\{a,b\}$ omitted.
For a matrix $A \in \RR^{m \times n}$, denote $\|A\|_q = \sup\{ \|Ax\|_q: x \in \RR^n, \|x\|_q=1\}$ as the induced $\ell_q$ norm.
In particular, $\|A\|_{\infty }=\max _{1\leq i\leq m}\sum _{j=1}^{n}|a_{ij}|$.
We also use $\|A\|_{\max} = \max_{jk} |a_{jk}|$ to denote the maximum component of $A$. We define $\mathbb{E}_n$ as the empirical mean of $n$ samples: $\mathbb{E}_n[f(x_i,\theta)] = \frac{1}{n} \sum_{i=1}^n f(x_i,\theta)$. For two sequences of numbers $\{a_n\}_{n=1}^\infty$ and $\{b_n\}_{n=1}^\infty$, we use $a_n = \Ocal(b_n)$, or $a_n \lesssim b_n$ to denote that $a_n \leq Cb_n$ for some finite positive constant $C$, and for all $n$ large enough. We use $a_n \lesssim_P b_n$ to denote that $a_n \lesssim b_n$ happens with high probability. The notation $a_n = o(b_n)$ is used to denote that $a_nb_n^{-1}\xrightarrow{n\rightarrow\infty} 0$. We write $a_n \longrightarrow_D \Acal$ to denote convergence in distribution to a fixed distribution $\Acal$, and $a_n \longrightarrow_P a$ to denote convergence in probability to a constant $a$.
We denote $a \circ b = (a_1b_1, ..., a_pb_p)$ for $a,b \in \RR^p$. For any function $f:\mathbb{R}^p \to \mathbb{R}$, we use $\nabla f(x) = \cbr{ \partial/(\partial x_j) f(x) }_{j \in [p]}$ to denote the gradient, and $\Delta f(x) = \sum_{j\in[p]} \partial^2/(\partial x_j^2) f(x)$ to denote the Laplacian operator on $\RR^p$. Note that both the gradient and the Laplacian are \emph{with respect to $x$}.
\subsection{Organization of the Paper}
The remainder of this paper is structured as follows. We begin in Section~\ref{sec:background} with background on exponential family pairwise graphical models, the score matching method, and a brief review of statistical inference in high dimensional models. In Section~\ref{sec:methodology} we describe the construction of our novel estimator for a single edge parameter based on a three-step procedure, for $L = 1$. Section~\ref{sec:asympt-norm-estim} provides theoretical results and Section~\ref{sec:relaxation_L1} discusses the relaxation of the sparsity condition on the inverse of the population Hessian matrix. Section~\ref{sec:simultaneous} extends the procedure to simultaneous inference for all edges connected to a specific node. In Section~\ref{sec:general_L} we extend our results to general $L$. We provide experimental results for synthetic datasets and a real dataset in Sections~\ref{sec:experiments_synthetic} and~\ref{sec:experiments_real}, respectively. Section~\ref{sec:conclusion} concludes with a discussion.
\section{Background} \label{sec:background}
We begin by reviewing exponential family pairwise graphical models in Section~\ref{sec:ExpoGM}, and then introduce the score matching and generalized score matching methods in Section~\ref{sec:score-matching}. Finally, we provide a brief overview of statistical inference for high dimensional models in Section~\ref{sec:inference}.
\subsection{Exponential Family Pairwise Graphical Models} \label{sec:ExpoGM}
Throughout the paper we focus on the case where \[ \Pcal = \{ p_\theta(x) \mid \theta \in \Theta \} \] is an exponential family with log-densities given in \eqref{eq:logdensity}, which frequently appear in graphical modeling. There are $K$ sets of sufficient statistics $\{t_a^{(k)}\}_{k \in [K]}$ for each $a \in V$ that depend on the individual nodes and $L$ sets of sufficient statistics for each $(a,b) \in \binom{V}{2}$ that allow for pairwise interactions of different types. The conditional independence graph underlying a distribution $p_\theta \in \Pcal$ has no edge between vertices $a$ and $b$ if and only if $\theta_{ab}^{(1)} = \ldots = \theta_{ab}^{(L)} = 0$. A special case of the model given in \eqref{eq:logdensity} is the class of pairwise interaction models with log-densities \begin{equation}
\label{eq:pairwise}
\log p_\theta(x) =
\sum_{(a,b)\in E} \theta_{ab} t_{ab}(x_a,x_b) - \Psi(\theta) + h(x),
\quad x \in \Xcal \subseteq \RR^p, \end{equation} where $t_{ab}(x_a, x_b)$ are sufficient statistics that depend only on $x_a$ and $x_b$. In what follows, we will consider models that have either the form given in~\eqref{eq:pairwise} or the more general form given in~\eqref{eq:logdensity}.
A number of well-studied distributions have the above discussed form. We provide some examples below, including examples where the normalizing constant $\Psi(\theta)$ cannot be obtained in closed-form.
\paragraph{\emph{Gaussian graphical models.}} The most studied example of a probabilistic graphical model is the case of the Gaussian graphical model. Suppose that the random variable $X$ follows the centered multivariate Gaussian distribution with covariance $\Sigma$ and precision matrix $\Omega = \Sigma^{-1} = (\omega_{ab})$. The log-density is given as \begin{equation}
\label{eq:gaussian_density} p(x ; \Omega) \propto \exp \cbr{ -\frac 12 x^\top \Omega x }, \end{equation}
the support of the density is $\Xcal = \RR^{p}$ and the sufficient statistics take the form $t_{ab}(x_a,x_b) = x_ax_b$.
\paragraph{\emph{Non-negative Gaussian.}} Our second example of a distribution with the log-density of the form in \eqref{eq:pairwise} is that of a non-negative Gaussian random vector. The probability density function of a non-negative Gaussian random vector $X$ is proportional to that of the corresponding Gaussian vector given in \eqref{eq:gaussian_density}, but restricted to the non-negative orthant. Here the support of the density is $\Xcal = \RR_+^{p}$. The conditional independence graph is determined the same way as in the Gaussian graphical model case through the non-zero pattern of the elements in the precision matrix $\Omega$. The normalizing constant in this family has no closed-form and hence maximum likelihood estimation of $\Omega$ is intractable.
\paragraph{\emph{Normal conditionals.}} Our third example is taken from \citet{Lin2015High}. See also \citet{AndrewGelman1991Note} and \citet{Arnold1999Conditional}. Consider the family of distributions with densities of the form \begin{equation*}
p(x ; \Theta^{(1)}, \Theta^{(2)}, \eta, \beta) \propto
\exp \cbr{
\sum_{a \neq b} \Theta_{ab}^{(2)}x_a^2x_b^2 +
\sum_{a \neq b} \Theta_{ab}^{(1)}x_ax_b +
\sum_{a \in V} \eta_a x_a^2 +
\sum_{a \in V} \beta_ax_a },\, x \in \RR^{p}, \end{equation*} where the matrices $\Theta^{(1)},\Theta^{(2)} \in \RR^{p \times p}$ are symmetric interaction matrices with a zero diagonal. Members of this family have Normal conditionals, but the densities themselves need not be unimodal. The conditional independence graph does not contain an edge between vertices $a$ and $b$ if and only if both $\Theta_{ab}^{(1)}$ and $\Theta_{ab}^{(2)}$ are equal to zero. In contrast to the Gaussian graphical models, the conditional dependence may also express itself in the variances.
\paragraph{\emph{Conditionally specified mixed graphical models.}} In general, specifying multivariate distributions is difficult, since in a given problem it might not be clear what class of graphical models to use. On the other hand, specifying univariate distributions is an easier task. \citet{Chen2013Selection} and \citet{Yang2013Graphical} explored ways of specifying multivariate joint distributions via univariate exponential families. Consider a conditional density of the form \begin{equation}
\label{eq:conditional_model}
p(x_a \mid (x_b, b\neq a) ; \theta_a) =
\exp\cbr{f_a(x_a) + \sum_{b \neq a} \theta_{ab}B_a(x_a)B_b(x_b) - \Psi_a(\eta_a)},
\quad x_a \in \Xcal_a, \end{equation} where $\eta_a = \eta_a(\theta_a, f_a, (x_b)_{b\neq a})$ and $B_a(\cdot)$ are known functions for each $a \in V$. Suppose that for a random vector $X$, each coordinate $X_a$ follows the conditional density of the form in \eqref{eq:conditional_model} with $\theta_{ab} = \theta_{ba}$ for all $a, b \in V$. Then \citet{Chen2013Selection} and \citet{Yang2013Graphical} showed that there exists a joint distribution of $X$ compatible with the conditional densities and that it is of the form \begin{equation*}
p(x ; \Theta) \propto \exp \cbr{
\sum_{a \in V} f_a(x_a) + \frac12 \sum_{a\in V}\sum_{b \neq a} \theta_{ab} B_a(x_a)B_b(x_b)
}, \quad x \in \Xcal. \end{equation*} In particular, the joint density above is of the form given in \eqref{eq:logdensity}, with pairwise interaction sufficient statistics given as $t_{ab}(x_a, x_b) = B_a(x_a)B_b(x_b)$. When the support of the distribution is $\Xcal = \RR^p$ or $\Xcal = \RR_+^p$, the parameters of the distribution can be efficiently estimated using score matching. In the case of unknown function $B_a(\cdot)$, \citet{Suggala2017Expxorcist} explored nonparametric estimation via basis expansion and fitted parameters using pseudo-likelihood. Developing a valid statistical inference procedure for this nonparametric setting is beyond the scope of the current work.
As an example of a conditionally specified model, that we will return to later in the paper, consider exponential graphical models where the node-conditional distributions follow an exponential distribution. For a random vector $X$ described by an exponential graphical model, the density function is given by \begin{equation*}
p(x ; \Theta) \propto \exp \cbr{ -\sum_{a \in V} \theta_ax_a - \sum_{a \neq b} \theta_{ab}x_ax_b },
\quad x \in \RR^p_+. \end{equation*} Note that the variable takes only non-negative values. To ensure that the distribution is valid and normalizable, the natural parameter space $\Theta$ consists of matrices whose elements are positive. Therefore, one can only model negative dependencies via the exponential graphical model.
\paragraph{\emph{Exponential square-root graphical model. }} As our last example, consider the exponential square-root graphical model \citep{Inouye2016Square} with density function given by \begin{equation*}
p(x ; \eta, K) \propto \exp \cbr{ -\sqrt{x}^\top K \sqrt{x} + 2\eta^\top\sqrt{x} },
\quad x \in \RR^p_+. \end{equation*} This square-root graphical model is a multivariate generalization of univariate exponential family distributions that can capture the positive dependency among nodes. Specifically, it assumes only a mild condition on the parameter matrix, but allows for almost arbitrary negative and positive dependencies. We refer to \citet{Inouye2016Square} for details on parameter estimation with nodewise regressions and likelihood approximation methods.
\subsection{Score Matching} \label{sec:score-matching}
In this section we briefly review the score matching method proposed in \cite{hyvarinen2005estimation,Hyvaerinen2007Some} and the generalized score matching for non-negative data proposed in \cite{Yu2018Graphical}.
\subsubsection{Score Matching} \label{sec:score_matching}
A scoring rule $S(x,Q)$ is a real-valued function that quantifies the accuracy of $Q \in \Pcal$ being the distribution from which an observed realization $x \in \Xcal$ may have been sampled. There are a large number of scoring rules that correspond to different decision problems \citep{Parry2012Proper}. Given $n$ independent realizations of $X$, $\{x_i\}_{i\in[n]}$, one finds the optimal score estimator $\hat Q \in \Pcal$ that minimizes the empirical score \begin{equation}
\label{eq:score_minimization}
\hat Q = \arg\min_{Q \in \Pcal} \EE_n\sbr{ S(x_i, Q) }. \end{equation}
When $\Xcal = \RR^p$ and $\Pcal$ consists of twice differentiable densities with respect to Lebesgue measure, the Hyv\"arinen scoring rule \citep{hyvarinen2005estimation} is given as \begin{equation}
\label{eq:score_matching}
S(x, Q) = \frac 12 \big\| \nabla \log q(x) \big\|_2^2 + \Delta \log q(x), \end{equation} where $q$ is the density of $Q$ with respect to Lebesgue measure on $\Xcal$. We would like to emphasize that this gradient and Laplacian are \emph{with respect to $x$}. In this way we get rid of the normalizing constant which does not depend on $x$. This scoring rule is convenient for learning models that are specified in an unnormalized fashion or whose normalizing constant is difficult to compute. The score matching rule is proper \citep{Dawid2007geometry}, that is, $\EE_{X \sim P} S(X, Q)$ is minimized over $\Pcal$ at $Q = P$. Suppose the density $q$ of $Q \in \Pcal$ is twice continuously differentiable and satisfies \[
\EE_{X \sim P} \norm{\nabla \log q(X)}_2^2 < \infty, \qquad
\text{for all $P, Q \in \Pcal$} \] and \[
q(x) \text{ and } \norm{\nabla q(x)}_2 \text{ tend to zero as $x$
approaches the boundary of $\Xcal$ }. \] Then the Fisher divergence between $P, Q \in \Pcal$, \[
D(P, Q) = \int p(x) \|\nabla \log q(x) - \nabla \log p(x)\|_2^2 d x, \] where $p$ is the density of $P$, is induced by the score matching rule \citep{hyvarinen2005estimation}. The gradients in the equation above can be thought of as gradients with respect to a hypothetical location parameter, evaluated at the origin \citep{hyvarinen2005estimation}.
For a parametric exponential family $\Pcal = \{p_\theta \mid \theta \in \Theta\}$ with densities given in \eqref{eq:logdensity}, minimizing \eqref{eq:score_minimization} with the scoring rule in \eqref{eq:score_matching} can be done in a closed form \citep{hyvarinen2005estimation,Forbes2013Linear}. An estimator $\hat \theta$ obtained in this way can be shown to be asymptotically consistent \citep{hyvarinen2005estimation}, however, in general it will not be efficient \citep{Forbes2013Linear}.
\subsubsection{Generalized Score Matching for Non-Negative Data}
The score matching method in Section~\ref{sec:score_matching} does not work for non-negative data, since the assumption that $q(x)$ and $\norm{\nabla q(x)}_2$ tend to zero at the boundary breaks down. To solve this problem, \citet{Hyvaerinen2007Some} proposed a generalization of the score matching approach to the case of non-negative data.
When $\Xcal = \RR^p_+$ the non-negative score matching loss (analogous to the Fisher divergence $D(P,Q)$) is defined as \begin{equation*} \label{eq:score_matching_nonnegative}
J_+(P,Q) = \int_{\RR^p_+} p(x) \cdot \big\| \nabla\log p(x) \circ x - \nabla\log q(x) \circ x \big\|_2^2 dx. \end{equation*} The scoring rule for non-negative data that induces $J_+(P,Q)$ is given as \begin{equation}
\label{eq:score_matching_nonnegative:sample} S_+(x, Q) = {\sum_{a \in V} \left[2x_a \frac{\partial \log q(x)}{\partial x_a} + x_a^2\frac{\partial^2 \log q(x)}{\partial x_a^2} + \frac{1}{2}x_a^2 \left(\frac{\partial \log q(x)}{\partial x_a}\right)^2 \right]}. \end{equation} For exponential families, the non-negative score matching loss again can be obtained in a closed form and the estimator is consistent and asymptotically normal under suitable conditions \citep{Hyvaerinen2007Some}.
\citet{Yu2018Graphical} proposed the generalized score matching for non-negative data to improve the estimation efficiency of the procedure based on the scoring rule in \eqref{eq:score_matching_nonnegative:sample}. Let $\ell_1, ..., \ell_p: \RR_+ \to \RR_+$ be positive and differentiable functions and set \[
\ell(x) = \big( \ell_1(x_1), \ldots, \ell_p(x_p) \big). \] The generalized $\ell$-score matching loss is defined as \begin{equation*}
J_\ell(P,Q) = \int_{\RR^p_+} p(x) \cdot \big\| \nabla\log p(x) \circ \ell^{1/2}(x) - \nabla\log q(x) \circ \ell^{1/2}(x) \big\|_2^2 dx, \end{equation*} where $\ell^{1/2}(x) = \big( \ell_1^{1/2}(x_1), \ldots, \ell_p^{1/2}(x_p) \big)$. Suppose the following regularity conditions are satisfied \begin{equation}
\label{eq:condition:partial_integration} \begin{aligned} \lim_{x_j \to \infty} p(x) \ell_j(x_j) \nabla_j \log q(x) = 0 ~~~~ \forall x_{-j} \in \RR^{p-1}_{+}, ~\forall p \in \Pcal_{+}, \\ \lim_{x_j \to 0} p(x) \ell_j(x_j) \nabla_j \log q(x) = 0 ~~~~ \forall x_{-j} \in \RR^{p-1}_{+}, ~\forall p \in \Pcal_{+}, \\
\EE_{X \sim \Pcal_{+}} \Big[ \| \nabla\log q(X) \circ \ell^{1/2}(X) \|_2^2 \Big] < +\infty, \\
\EE_{X \sim \Pcal_{+}} \Big[ \| (\nabla\log q(X) \circ \ell(X))' \|_1 \Big] < +\infty. \end{aligned} \end{equation} Under the condition~\eqref{eq:condition:partial_integration}, the scoring rule corresponding to the generalized $\ell$-score matching loss is given as \begin{equation*} \label{eq:score_matching_nonnegative:sample_l}
S_\ell(x, Q) = {\sum_{a \in V} \left[ \ell'_a(x_a) \frac{\partial \log q(x)}{\partial x_a} + \ell_a(x_a)\frac{\partial^2 \log q(x)}{\partial x_a^2} + \frac{1}{2}\ell_a(x_a) \left(\frac{\partial \log q(x)}{\partial x_a}\right)^2 \right]}. \end{equation*} The regularity condition~\eqref{eq:condition:partial_integration} is required for applying integration by parts and Fubini-Tonelli theorem in order to show consistency of the score-matching estimator.
Note that by choosing $\ell_j(x) = x^2$, for all $j$, one recovers the original score matching formulas for non-negative data in~\eqref{eq:score_matching_nonnegative:sample}. The advantage of this generalized score matching rule is that by choosing an increasing, but slowly growing $\ell(x)$ (for example, $\ell(x) = \log(x+1)$), one does not need to estimate high moments of the underlying distribution, which leads to better practical performance and improved theoretical guarantees. See \citet{Yu2018Graphical} for details.
\subsubsection{Score Matching for Probabilistic Graphical Models}
Score matching has been successfully applied in the context of probabilistic graphical models. \citet{Forbes2013Linear} studied score matching to learn Gaussian graphical models with symmetry constraints. \citet{Lin2015High} proposed a regularized score matching procedure to learn conditional independence graph in a high-dimensional setting by minimizing \[
\EE_n\sbr{\overline S(x_i, \theta)} + \lambda \|\theta\|_1, \] where the loss function $\overline S(x_i, \theta)$ is either $S(x_i, Q_\theta)$ defined in \eqref{eq:score_matching} or $S_+(x_i, Q_\theta)$ defined in \eqref{eq:score_matching_nonnegative:sample}. For Gaussian models, $\ell_1$-norm regularized score matching is a simple, yet efficient method, which coincides with the method in \cite{Liu2015Fast}. \citet{Yu2018Graphical} improved on the approach of \citet{Lin2015High} and studied regularized generalized $\ell$-score matching of the form \[
\EE_n\sbr{S_\ell(x_i, Q_\theta)} + \lambda \|\theta\|_1. \] Applied to data generated from a multivariate truncated normal distribution, the conditional independence graph can be recovered with the same number of samples that are needed for recovery of the structure of a Gaussian graphical model. \citet{Sun2015Learning} develop a score matching estimator for learning the structure of nonparametric probabilistic graphical models, extending the work on estimation of infinite-dimensional exponential families \citep{Sriperumbudur2013Density}. In Section \ref{sec:methodology}, we present a new estimator for components of $\theta$ in~\eqref{eq:logdensity} that is consistent and asymptotically normal, building on \citet{Lin2015High} and \citet{Yu2018Graphical}.
\subsection{Statistical Inference} \label{sec:inference}
We briefly review how to perform statistical inference for low dimensional parameters in a high-dimensional model. In many statistical problems, the unknown parameter $\beta \in \RR^p$ can be partitioned as $\beta = (\alpha, \eta)$, where $\alpha$ is a scalar of interest and $\eta$ is a $(p - 1)$ dimensional nuisance parameter. Let $\beta^* = (\alpha^*, \eta^*)$ denote the true unknown parameter. In a high-dimensional setting, where the sample size $n$ is much smaller than the dimensionality $p$ of the parameter $\beta$, it is common to impose structural assumptions on $\beta^*$. For example in several applications, it is common to assume that the true parameter $\beta^*$ is sparse. Indeed, we will work under this assumption as well.
Let us denote the empirical negative log-likelihood by $$ \Lcal(\beta) = \frac 1n \sum_{i=1}^n \Lcal_i(\beta), $$ where $\Lcal_i(\beta)$ is the negative log-likelihood for the $i^{th}$ observation. Let $I = \EE\sbr{\nabla^2 \Lcal(\beta)}$ denote the information matrix and denote the partition of $I$ corresponding to $\beta = (\alpha, \eta)$ as \begin{equation} I =
\begin{pmatrix}
I_{\alpha\alpha} & I_{\alpha\eta} \\
I_{\eta\alpha} & I_{\eta\eta}
\end{pmatrix}. \end{equation} The partial information matrix of $\alpha$ is denoted as
$I_{\alpha|\eta} = I_{\alpha\alpha} - I_{\alpha\eta} I_{\eta\eta}^{-1} I_{\eta\alpha}$.
Consider for the moment a low-dimensional setting. In order to perform statistical inference about $\alpha^*$, one can use the {\it profile partial score function} defined as \[ U(\alpha) = \nabla_\alpha \Lcal\big( \alpha, \hat\eta(\alpha) \big), \] where $\hat\eta(\alpha) = \arg\min_\eta \Lcal(\alpha, \eta)$ is the maximum partial likelihood estimator for $\eta$ with a fixed parameter $\alpha$. Under the null hypothesis that $\alpha^* = \alpha^0$, we have that \citep{Vaart1998Asymptotic} \[
\sqrt n U\rbr{\alpha^0} \longrightarrow_D N(0, I_{\alpha|\eta}^*). \] Therefore, one can reject the null hypothesis for large values of $U\rbr{\alpha^0}$. However, in a high-dimensional setting, the estimator $\hat\eta(\alpha)$ is no longer $\sqrt{n}$-consistent and we have to modify the approach above. In particular, we will show how to modify the profile partial score function to allow for valid inference in a high-dimensional setting based on a sparse estimator of $\hat\eta(\alpha)$.
Without loss of generality, assume that $\alpha^0 = 0$. For any estimator $\tilde \eta$, Taylor's expansion theorem gives \begin{equation} \label{eq:Taylor} \sqrt n \nabla_\alpha \Lcal(0, \tilde \eta) = \sqrt n \nabla_\alpha \Lcal(0, \eta^*) + \sqrt n \nabla_{\alpha\eta} \Lcal(0, \eta^*) \cdot (\tilde \eta - \eta^*) + \textsf{rem}, \end{equation} where \textsf{rem} is the remainder $o(\tilde \eta-\eta^*)$ term. The first term $\sqrt n \nabla_\alpha \Lcal(0, \eta^*)$ in \eqref{eq:Taylor} converges to a normal distribution under suitable assumptions using the central limit theorem (CLT). The distribution of the second term, however, is in general intractable to obtain. This is due to the fact that the distribution of $\tilde \eta$ depends on the selected model. Unless we are willing to assume stringent and untestable conditions under which it is possible to show that the true model can be selected, the limiting distribution of $\tilde \eta$ cannot be estimated even asymptotically \citep{Leeb2007Can}. To overcome this issue, one needs to modify the profile partial score function, so that its limiting distribution does not depend on the way the nuisance parameter is estimated.
\citet{Ning2014General} introduced the following decorrelated score function \begin{equation*}
U(\alpha, \eta) =
\nabla_\alpha \Lcal(\alpha, \eta)
- w^\top \nabla_\eta \Lcal(\alpha, \eta), \end{equation*} where $w = I_{\alpha\eta}I_{\eta\eta}^{-1}$. The decorrelated score function $U(\alpha, \eta)$ is uncorrelated with the nuisance score functions $\nabla_\eta \Lcal(\alpha, \eta)$ and, therefore, its limiting distribution will not depend on the model selection mistakes incurred while estimating $\eta^*$. In particular, $U(\alpha^0, \tilde \eta)$ is indeed asymptotically normally distributed under the null hypothesis, as long as $\tilde \eta$ is a good enough estimator of $\eta^*$, but not necessarily a $\sqrt{n}$-consistent one. Based on the asymptotic normality of the decorrelated score function, we can then build confidence intervals for $\alpha^*$ and perform hypothesis testing.
In practice, the vector $w$ is unknown and needs to be estimated. A number of methods have been proposed for its estimation in the literature. For example, \citet{Ning2014General} use a Dantzig selector-like method, \citet{Belloni2012Inference} proposed the double selection method, while \citet{Zhang2011Confidence}, \citet{Geer2013asymptotically}, and \citet{Javanmard2013Confidence} use a lasso based estimator. See also \citet{Dezeure2017High}, \citet{Zhang2014Simultaneous} for simultaneous inference, \citet{Taylor2014Exact}, \citet{yang2016selective} for post selective inference, \citet{li2019statistical}, \citet{cao2019estimation}, and \citet{cao2019synthetic} for synthetic control, etc. In this paper, we adopt the double selection procedure of \citet{Belloni2012Inference}. Details will be given in Section~\ref{sec:methodology}.
\section{Methodology} \label{sec:methodology}
In this section, we present a new procedure that constructs a $\sqrt{n}$-consistent estimator of an element $\theta_{ab}$ of $\theta$. Our procedure involves three steps that we detail below. We start by introducing some additional notation and then describe the procedure for the case where $\Xcal = \RR^p$. Extension to non-negative data is given at the end of the section. Throughout this section we consider $L = 1$ only, so that the
parameter of interest $\theta_{ab}$ is a scalar. The extension to
general $L$ is discussed later in Section~\ref{sec:general_L}.
For fixed indices $a, b \in [p]$, let \[
q^{ab}_\theta(x) := q^{ab}_\theta(x_a, x_b \mid x_{-ab}) \] be the conditional density of $(X_a, X_b)$ given $X_{-ab} = x_{-ab}$. In particular, \begin{equation} \label{eq:conditional_log_density}
\log q^{ab}_\theta(x) = \dotp{\theta^{ab}}{\varphi(x)}
- \Psi^{ab}(\theta, x_{-ab})
+ h^{ab}(x), \end{equation} where $\theta^{ab} \in \RR^{s'}$, with $s' = 2K+2p-3$, is the part of the vector $\theta$ corresponding to $\left\{ \theta_{a}^{(k)}, \theta_b^{(k)} \right\}_{k\in[K]}$, $\cbr{\theta_{ac},\theta_{bc}}_{c\in-ab}$, and $\theta_{ab}$; and $\varphi(x) = \varphi^{ab}(x) \in \RR^{s'}$ is the corresponding vector of sufficient statistics $\left\{t_a^{(k)}(x_a), t_b^{(k)}(x_b)\right\}_{k\in[K]}$, $\left\{ t_{ac}(x_a,x_c), t_{bc}(x_b,x_c) \right\}_{c\in-ab}$, and $t_{ab}(x_a,x_b)$. Here $\Psi^{ab}(\theta, x_{-ab})$ is the log-partition function of the conditional distribution and $h^{ab}(x) = h_a(x_a) + h_b(x_b)$. Let $\nabla_{ab}$ and $\Delta_{ab}$ be the gradient and Laplacian operators, respectively, with respect to $x_a$ and $x_b$ defined as: \begin{align*} \nabla_{ab} f(x) & = \Big( (\partial/\partial x_a) f(x),
(\partial/\partial x_b) f(x) \Big)^\top \in \RR^2, \\
\Delta_{ab} f(x) & = \Big( (\partial^2/ \partial x_a^2) +
(\partial^2/ \partial x_b^2) \Big) f(x). \end{align*}
With this notation, we introduce the following scoring rule \begin{equation} \begin{aligned}
\label{eq:conditional_score}
S^{ab}(x, \theta)
=
\frac 12 \big\|\nabla_{ab} \log q^{ab}_{\theta}(x) \big\|_2^2
+ \Delta_{ab} \log q^{ab}_{\theta}(x)
= \frac 12 \theta^\top \Gamma(x) \theta + \theta^\top g(x) + c(x), \end{aligned} \end{equation}
where the constant term $c(x) = \frac{1}{2} \|\nabla_{ab} h^{ab}(x) \|^2 + \Delta_{ab} h^{ab}(x) $, and \begin{align*}
\Gamma(x) = \varphi_1(x)\varphi_1(x)^\top + \varphi_2(x)\varphi_2(x)^\top \quad\text{ and } \quad
g(x) = \varphi_1(x) h_1^{ab}(x) + \varphi_2(x) h_2^{ab}(x) + \Delta_{ab}\varphi(x) \end{align*} with $\varphi_1 = (\partial/\partial x_a) \varphi$, $\varphi_2 = (\partial/\partial x_b) \varphi$, $h_1^{ab} = (\partial/\partial x_a) h^{ab}$, and $h_2^{ab} = (\partial/\partial x_b) h^{ab}$.
This scoring rule is related to the one in \eqref{eq:score_matching}, however, rather than using the density $q_\theta$ in evaluating the parameter vector, we only consider the conditional density $q_\theta^{ab}$. We will use this conditional scoring rule to create an asymptotically normal estimator of an element $\theta_{ab}$. Our motivation for using this estimator comes from the fact that the parameter $\theta_{ab}$ can be identified from the conditional distribution of $(X_a, X_b) \mid X_{M_{ab}}$ where \[
M_{ab} := \{ c \mid (a,c) \in E \text{ or } (b,c) \in E \} \] is the Markov blanket of $(X_a, X_b)$. Furthermore, the optimization problems arising in steps 1-3 below can be solved much more efficiently, as the scoring rule in~\eqref{eq:conditional_score} involves fewer parameters.
We are now ready to describe our procedure for estimating $\theta_{ab}$, which proceeds in three steps.
\paragraph{\emph{Step 1:}} We find a pilot estimator of $\theta^{ab}$ by solving the following program \begin{equation}
\label{eq:estimation} \begin{aligned}
\hat \theta^{ab}
& = \arg\min_{\theta \in \RR^{s'}} \ \EE_n\sbr{S^{ab}(x_i, \theta)} + \lambda_{1} \norm{\theta}_1, \end{aligned} \end{equation} where $\lambda_1$ is a tuning parameter. Let $\hat M_1 = {\rm supp}(\hat \theta^{ab}) := \{ (c,d) \mid \hat \theta^{ab}_{cd} \neq 0 \} $.
Since we are after an asymptotically normal estimator of $\theta_{ab}$, one may think that it is sufficient to find $\tilde \theta^{ab} = \arg\min\{\EE_n\sbr{S^{ab}(x_i, \theta)} \mid {\rm supp}(\theta) \subseteq \hat M_1 \}$ and appeal to results of \citet{Portnoy1988Asymptotic}, who has established asymptotic normality for $M$-estimators with increasing number of parameters. Unfortunately, this is not the case. Since $\tilde \theta$ is obtained via a model selection procedure, it is irregular and its asymptotic distribution cannot be estimated \citep{Leeb2007Can,Poetscher2009Confidence}. Therefore, we proceed to create a regular estimator of $\theta_{ab}$ in steps 2 and 3. The idea is to create an estimator $\tilde \theta_{ab}$ that is insensitive to first-order perturbations of other components of $\tilde \theta^{ab}$, which we consider as nuisance components. The idea of creating an estimator that is robust to perturbations of nuisance has been recently used in \citet{Belloni2012Inference}, however, the approach goes back to the work of \citet{Neym1959Optimal}.
\paragraph{\emph{Step 2:}} Let $\hat \gamma^{ab}$ be a minimizer of \begin{equation}
\label{eq:estimation:step2}
\begin{aligned}
\frac 12 \EE_n[
(\vpaxi[,ab]-\vpaxi[,-ab]^\top \gamma)^2 +
(\vpbxi[,ab]-\vpbxi[,-ab]^\top \gamma)^2
]
+ \lambda_{2} \norm{\gamma}_1,
\end{aligned} \end{equation} where $\lambda_2$ is a tuning parameter. Let $\hat M_2 = {\rm supp}(\hat \gamma^{ab}) := \{ (c,d) \mid \hat \gamma^{ab}_{cd} \neq 0 \} $. The intuition here is that the vector $(1, -\hat \gamma^{ab,\top})^\top $ approximately computes a row, up to a constant, of the inverse of the Hessian in \eqref{eq:estimation}.
\paragraph{\emph{Step 3:}} Let $\tilde M = \{(a,b)\} \cup \hat M_1 \cup \hat M_2$. We obtain our estimator as a solution to the following program \begin{equation}
\label{eq:estimation:step3} \begin{aligned}
\tilde \theta^{ab}
& = \arg\min_\theta \ \EE_n\sbr{ S^{ab}(x_i, \theta) }
\qquad \text{s.t.}\quad {\rm supp}(\theta) \subseteq \tilde M. \end{aligned} \end{equation} Our estimator of $\theta_{ab}$ is the coordinate $ab$ of $\tilde \theta^{ab}$---which we denote as $\tilde \theta_{ab}$. Motivation for this procedure will be clear from the proof of Theorem~\ref{thm:main} given in the next section.
\paragraph{\emph{Extension to non-negative data.}}
For non-negative data, the procedure is similar. In place of the score rule in \eqref{eq:conditional_score}, we will use a conditional score rule based on the generalized $\ell$-score rule. We define the following scoring rule \begin{equation} \label{eq:S_l_nonnegative} S^{ab}_\ell(x,\theta) = \frac{1}{2} \theta^\top \Gamma_\ell(x) \theta + \theta^\top g_\ell(x) \end{equation} with $$ \Gamma_\ell(x) = \ell_a(x_a) \cdot \varphi_1(x)\varphi_1(x)^\top + \ell_b(x_b) \cdot \varphi_2(x)\varphi_2(x)^\top $$ and \begin{equation*} \begin{aligned}
g_\ell(x) =
\ell_a(x_a) \varphi_1(x) h_1^{ab}(x) + \ell_b(x_b) \varphi_2(x) h_2^{ab}(x) &+ \ell_a(x_a) \varphi_{11}(x) + \ell_b(x_b) \varphi_{22}(x) \\
&\qquad + \ell'_a(x_a) \varphi_1(x) + \ell'_b(x_b) \varphi_2(x). \end{aligned} \end{equation*} Here $\varphi_{11} = (\partial^2/\partial x_a^2) \varphi$, and $\varphi_{22} = (\partial^2/\partial x_b^2) \varphi$. Now we can define \begin{equation} \label{eq:tilde_phi_nonnegative} \tilde \varphi_1 = \ell^{1/2}_a(x_a) \varphi_1 \quad \text{and}\quad \tilde \varphi_2 = \ell^{1/2}_b(x_b) \varphi_2. \end{equation} Then $\Gamma_\ell(x) = \tilde \varphi_1(x)\tilde \varphi_1(x)^\top + \tilde \varphi_2(x)\tilde \varphi_2(x)^\top $, which is of the same form as \eqref{eq:conditional_score} with $\tilde \varphi_1$ and $\tilde \varphi_2$ replacing $\varphi_1$ and $\varphi_2$, respectively. Thus our three-step procedure for non-negative data can be written as follows. For notation consistency, we omit the subscript $\ell$ on the estimator $\theta$ and support $M$.
\paragraph{\emph{Step 1:}} We find a pilot estimator of $\theta^{ab}$ by solving \begin{equation}
\label{eq:estimation_nonnegative} \begin{aligned}
\hat \theta^{ab}
& = \arg\min_{\theta \in \RR^{s'}} \ \EE_n\sbr{S_\ell^{ab}(x_i, \theta)} + \lambda_{1} \norm{\theta}_1, \end{aligned} \end{equation} where $\lambda_1$ is a tuning parameter and $S_\ell^{ab}$ is defined in \eqref{eq:S_l_nonnegative}. Let $\hat M_1 = {\rm supp}(\hat \theta^{ab})$.
\paragraph{\emph{Step 2:}} Let $\hat \gamma^{ab}$ be a minimizer of \begin{equation}
\label{eq:estimation:step2_nonnegative}
\begin{aligned}
\frac 12 \EE_n \big[
(\tilde \varphi_{1,ab}(x_i) - \tilde \varphi_{1,-ab}(x_i)^\top \gamma)^2 +
(\tilde \varphi_{2,ab}(x_i) - \tilde \varphi_{2,-ab}(x_i)^\top \gamma)^2
\big]
+ \lambda_{2} \norm{\gamma}_1,
\end{aligned} \end{equation} where $\lambda_2$ is a tuning parameter and $\tilde\varphi_1, \tilde\varphi_2$ are defined in \eqref{eq:tilde_phi_nonnegative}. Let $\hat M_2 = {\rm supp}(\hat \gamma^{ab})$.
\paragraph{\emph{Step 3:}} Let $\tilde M = \{(a,b)\} \cup \hat M_1 \cup \hat M_2$. We obtain our estimator as a solution to the following program \begin{equation}
\label{eq:estimation:step3_nonnegative} \begin{aligned}
\tilde \theta^{ab}
& = \arg\min_\theta \ \EE_n\sbr{ S_\ell^{ab}(x_i, \theta) }
\qquad \text{s.t.}\quad {\rm supp}(\theta) \subseteq \tilde M. \end{aligned} \end{equation} Our estimator of $\theta_{ab}$ is the coordinate $ab$ of $\tilde \theta^{ab}$---which we denote as $\tilde \theta_{ab}$.
\section{Asymptotic Normality of the Estimator} \label{sec:asympt-norm-estim}
In this section, we outline the main theoretical properties of our estimator. We start by providing high-level conditions that allow us to establish properties of each step in the procedure.
\paragraph{\emph{Assumption {\bf M}.}} We are given $n$ i.i.d. samples $\{x_i\}_{i\in[n]}$ from $p_{\theta^*}$ of the form in \eqref{eq:logdensity}. Let \begin{equation}
\label{eq:estimation:step2:true}
\begin{aligned}
\gamma^{ab,*} &= \arg\min_{\gamma} \
\EE[
(\vpaxi[,ab]-\vpaxi[,-ab]^\top \gamma)^2 +
(\vpbxi[,ab]-\vpbxi[,-ab]^\top \gamma)^2
]
\end{aligned} \end{equation} and $$ \eta_{1i} = \vpaxi[,ab]-\vpaxi[,-ab]^\top \gamma^{ab,*} ~~~\text{and}~~~ \eta_{2i} = \vpbxi[,ab]-\vpbxi[,-ab]^\top \gamma^{ab,*} ~~~\text{for}~ i \in [n]. $$
We assume that the parameter vector $\theta^*$ is sparse with $|{\rm supp}(\theta^{ab,*})| \ll n$; and the vector $\gamma^{ab,*}$ is sparse with
$|{\rm supp}(\gamma^{ab,*})| \ll n$.
Let $m = |{\rm supp}(\theta^{ab,*})| \vee |{\rm supp}(\gamma^{ab,*})|$. The assumption {\bf M} supposes that the parameter to be estimated is sparse, which makes estimation in the high-dimensional setting feasible. An extension to the approximately sparse parameter is possible but technically cumbersome, and does not provide additional insights into the problem. One of the benefits of using the conditional score to learn parameters of the model is that the sample size will only depend on the size of ${\rm supp}(\theta^{ab,*})$ and not on the sparsity of the whole vector $\theta^*$ as in \cite{Lin2015High}. The second part of the assumption states that the inverse of the population Hessian is approximately sparse, which is a reasonable assumption for a number of models, since the Markov blanket of $(X_a, X_b)$ is small under the sparsity assumption on $\theta^{ab,*}$. We relax the sparsity assumption in Section~\ref{sec:relaxation_L1}.
The vector $\gamma^{ab,*}$ is determined by the model
\eqref{eq:conditional_log_density} and parameter $\theta^*$, and is
therefore not a free parameter. For the Gaussian graphical model,
it can be shown that the sparsity of $\theta^{ab,*}$ implies the
sparsity of $\gamma^{ab,*}$. That is, assumption {\bf M} holds when
the columns of the precision matrix are sparse. For a general
model, it may not be easy to explicitly verify the exact sparsity of
$\gamma^{ab,*}$, since the calculation of $\gamma^{ab,*}$ involves
calculation of possibly intractable moments, especially when using
generalized score matching with $\ell(x) = \log(x+1)$ for
non-negative data. For normal conditionals and exponential
graphical model, we verify numerically (in Section
\ref{sec:experiments_synthetic}) that the sample version of
$\gamma^{ab,*}$ behaves approximately like a sparse vector when $n$
is large enough. These results indicate that assumption {\bf M} is
reasonable, at least in an approximately sparse version. For
general models, the sparsity condition on $\gamma^{ab,*}$ could be
violated and, therefore, we discuss how to relax it in Section
\ref{sec:relaxation_L1}.
Our next condition assumes that the Hessian in \eqref{eq:estimation} and \eqref{eq:estimation:step2} is well conditioned.
\paragraph{\emph{Assumption {\bf SE}.}} Let \[ \phi_{-}(s, A) = \inf\cbr{ \delta^\top A \delta / \norm{\delta}_2^2
\mid 1 \leq \norm{\delta}_0 \leq s} \] and \[ \phi_{+}(s, A) = \sup\cbr{ \delta^\top A \delta / \norm{\delta}_2^2
\mid 1 \leq \norm{\delta}_0 \leq s} \] denote the minimal and maximal $s$-sparse eigenvalues of a semi-definite matrix $A$, respectively. We assume \[ \phi_{\min}
\leq \phi_{-}(m \cdot \log n, \EE\sbr{\Gamma(x_i)})
\leq \phi_{+}(m \cdot \log n, \EE\sbr{\Gamma(x_i)})
\leq \phi_{\max}, \] where $0 < \phi_{\min} \leq \phi_{\max} < \infty$.
Assumption {\bf SE} imposes the sparse eigenvalue condition on
the population quantity. A lower bound on the population Hessian is
required even in a low dimensional setting in order to prove
asymptotic normality of an estimator. See, for example,
\citet[][]{Forbes2013Linear} where the population Hessian is assumed
to be invertible. An upper bound on the Hessian matrix is also
commonly assumed in the literature on graphical models and
high-dimensional inference \citep[see, for
example,][]{Yang2013Graphical,Belloni2013Least}. We use the upper
bound on the Hessian to control the size of the estimated support in
steps 1 and 2 of the procedure.
For the Gaussian graphical model, assumption {\bf SE} is satisfied
whenever the covariance matrix is non-degenerate. For general models,
assumption {\bf SE} puts restrictions on the model parameter in a
way that is hard to handle explicitly. Note that related work
imposes stronger assumption on the sample Fisher information matrix
directly. See, for example, conditions (C1) and (C2) in \citet{Yang2013Graphical}.
For the upper bound of the sparse eigenvalue, we remark that
the mean of $\varphi(x)$ could be non-zero. For the Gaussian
graphical model, if there is a non-zero mean $\mu$, then the
components of $\varphi_1(x)$ and $\varphi_2(x)$ would instead be
$x - \mu$. Therefore the sparse eigenvalue would not explode. In
practice, we subtract the empirical mean and only need to consider the
centered case. For other models, existing works assume boundedness
of the first and second order moments of all the components of $x$.
See Condition (C3) in \citet{Yang2013Graphical}.
With assumption {\bf SE} on the population quantity, the
following lemma, adopted from Corollary 4 in
\cite{Belloni2013Least}, quantifies the sparse eigenvalues of the
sample quantity $\EE_n\sbr{\Gamma(x_i)}$.
\begin{lemma} \label{lemma:sample_sparse_eigenvalue}
Suppose assumption {\bf SE} is satisfied. Suppose there exists $K_n$ such that $\varphi_1(x_i)$ and $\varphi_2(x_i)$ are bounded: $\sup_i \| \varphi_1(x_i) \|_\infty \leq K_n$ and $\sup_i \| \varphi_2(x_i) \|_\infty \leq K_n$ a.s. If the sample size satisfies \begin{equation*} K_n^2 \cdot m\log{p} \cdot \log^2(m\log{p}) \cdot \log{n} \cdot \log{(p \vee n)} = o(n \phi_{\min}^2 / \phi_{\max}), \end{equation*} then the event \begin{equation*} \Ecal_{\rm SE} = \cbr{ \frac{\phi_{\min}}{2}
\leq \phi_{-}\big(m \cdot \log n, \EE_n\sbr{\Gamma(x_i)}\big)
\leq \phi_{+}\big(m \cdot \log n, \EE_n\sbr{\Gamma(x_i)}\big)
\leq 2 \phi_{\max} } \end{equation*} holds with probability at least $1-o(1)$. \end{lemma}
Lemma \ref{lemma:sample_sparse_eigenvalue} ensures that the
sparse eigenvalues of the sample quantity $\EE_n\sbr{\Gamma(x_i)}$
are well-behaved provided that $\varphi_1(x_i)$ and $\varphi_2(x_i)$
can be upper bounded, and the sample size is reasonably large. The
scale of the upper bound $K_n$ depends on the sufficient statistics
$\varphi(x)$, and can be verified for concrete models. For example,
for the Gaussian graphical model, a standard result on the Gaussian
tail bound gives $K_n = C\cdot(\log n + \log p)^{1/2}$ with high
probability. As another example, Proposition 4 in
\cite{Yang2013Graphical} shows that, under mild conditions,
$K_n = C\cdot(\log n + \log p)$ with high probability when the
sufficient statistics of the conditional density are given by
$x_a, x_b$ and $x_{a}x_{b}$, which includes a wide range of
applications, such as exponential graphical model, and Poisson
graphical model. For models with more general sufficient
statistics, we can modify the proof of Proposition 4 in
\cite{Yang2013Graphical} to obtain the corresponding rate on $K_n$,
under suitable assumptions.
Let $r_{j\theta} = \norm{\hat \theta^{ab} - \theta^{ab,*}}_j$ and $r_{j\gamma} = \norm{\hat \gamma^{ab} - \gamma^{ab,*}}_j$, for $j\in\{1,2\}$, be the rates of estimation in steps 1 and 2, respectively. Under the assumption {\bf SE}, on the event $$
\Ecal_{\theta} = \cbr{ \norm{\EE_n\sbr{\Gamma(x_i) \theta^{ab,*} + g(x_i)}
}_\infty \leq \frac{\lambda_1}{2} }, $$ we have that $r_{1\theta} \lesssim m\lambda_1/\phi_{\min}$ and $r_{2\theta} \lesssim c_2\sqrt{m}\lambda_1/\phi_{\min}$. Similarly, on the event $$
\Ecal_{\gamma} = \cbr{ \norm{\EE_n\sbr{
\eta_{1i}\vpaxi[,-ab]+\eta_{2i}\vpbxi[,-ab] }}_\infty \leq
\frac{\lambda_2}{2} }, $$ we have that $r_{1\gamma} \lesssim m\lambda_2/\phi_{\min}$ and $r_{2\gamma} \lesssim \sqrt{m}\lambda_2/\phi_{\min}$, using results of \cite{negahban2010unified}. In order to ensure that
$\Ecal_{\theta}$ and $\Ecal_{\gamma}$ hold with high-probability,
one needs to choose appropriate $\lambda_1$ and $\lambda_2$. This
calculation is specific to the model at hand. For example, if the
vectors
\begin{equation}
\label{eq:sub_gaussian:example}
\Gamma(x_i) \theta^{ab,*} + g(x_i)
\quad\text{and}\quad
\eta_{1i}\vpaxi[,-ab]+\eta_{2i}\vpbxi[,-ab]
\end{equation}
have sub-Gaussian components, then by taking
$\lambda_1, \lambda_2 \propto \sqrt{\log p/n}$, the events
$\Ecal_{\theta}$ and $\Ecal_{\gamma}$ hold with probability at least
$1 - c_1p^{-c_2}$ \citep{Yang2013Graphical, negahban2010unified}.
For other distributions, we may need to choose larger $\lambda_1$
and $\lambda_2$. See also Lemma 9 in \citet{Yang2013Graphical}.
The following result establishes a Bahadur representation for $\tilde \theta_{ab}$.
\begin{theorem}
\label{thm:main}
Suppose that assumptions {\bf M} and {\bf SE} hold. Define $w^*$ with $w^{*}_{ab} = 1$ and $w^{*}_{-ab} = -\gamma^{ab,*}$, where $\gamma^{ab,*}$ is given in the assumption {\bf M}. On the event
$\Ecal_\gamma \cap \Ecal_\theta$, we have that
\begin{equation}
\label{eq:bahadur}
\begin{aligned} \sqrt{n}\cdot \rbr{\tilde \theta_{ab} - \theta_{ab}^{*}} & = - \sigma_n^{-1} \cdot\sqrt{n} \EE_n \sbr{w^{* \top}\rbr{\Gamma(x_i)\theta^{ab,*} + g(x_i)} }
+ \Ocal\rbr{\phi_{\max}^2\phi_{\min}^{-4} \cdot \sqrt{n}\lambda_1\lambda_2 m }, \end{aligned} \end{equation}
where $\sigma_n = \EE_n\sbr{\eta_{1i}\vpaxi[,ab] + \eta_{2i}\vpbxi[,ab]}$. \end{theorem}
Theorem~\ref{thm:main} is deterministic in nature. It establishes a representation that holds on the event $\Ecal_\gamma\cap\Ecal_\theta\cap\Ecal_{\rm SE}$, which in many cases holds with overwhelming probability. We will show that under suitable conditions the first term converges to a normal distribution. The following assumption is a regularity condition needed even in a low dimensional setting for asymptotic normality of the score matching estimator \citep{Forbes2013Linear}.
\paragraph{Assumption {\bf R}.} $\EE_{q^{ab}}\sbr{\norm{\Gamma(X_a,X_b,x_{-ab})\theta^{ab,*}}^2}$ and $\EE_{q^{ab}}\sbr{\norm{g(X_a,X_b,x_{-ab})}^2}$ are finite for all values of $x_{-ab}$ in the domain.
Theorem~\ref{thm:main} and Lemma~\ref{lem:L4} (Appendix~\ref{sec:technical_proofs}) together give the following corollary:
\begin{corollary} \label{corollary:normality}
Suppose that the conditions of Theorem~\ref{thm:main} hold. In
addition, suppose the assumption {\bf R} holds,
$\sqrt{n} \lambda_1\lambda_2 m = o(1)$ and
$\PP\rbr{\Ecal_\gamma\cap\Ecal_\theta\cap\Ecal_{\rm SE}} \rightarrow 1$.
Then we have
$$
\sqrt{n} (\tilde \theta_{ab} - \theta_{ab}^*)
\longrightarrow_D N(0, V_{ab}) ,
$$
where
$V_{ab} = \rbr{\EE\sbr{\sigma_n}}^{-2}\cdot\Var\rbr{w^{* \top}\rbr{\Gamma(x_i)\theta^{ab, *} + g(x_i)}}$ and
$\sigma_n$ is as in Theorem~\ref{thm:main}. \end{corollary}
When the vectors in \eqref{eq:sub_gaussian:example} are
sub-Gaussian, we choose
$\lambda_1, \lambda_2 \propto \sqrt{\log p/n}$, so that the sample
complexity is given by $(m\log{p})^2/n = o(1)$. For other
distributions, we may need a larger sample size to bound the error
term in \eqref{eq:bahadur}. We see that the variance $V_{ab}$ depends on the true $\theta^{ab,*}$ and $\gamma^{ab,*}$, which are unknown. In practice, we estimate $V_{ab}$ using the following consistent estimator $\hat V_{ab}$, \begin{equation} \label{eq:variance_est} \hat V_{ab} = e_{ab}^\top \rbr{\EE_n\sbr{\Gamma(x_i)}_{\tilde M}}^{-1} \cdot Z \cdot \rbr{\EE_n\sbr{\Gamma(x_i)}_{\tilde M}}^{-1} e_{ab}, \end{equation} with \begin{equation*} Z={\EE_n\sbr{\rbr{\Gamma(x_i)\tilde \theta^{ab} + g(x_i)}_{\tilde M}\rbr{\Gamma(x_i)\tilde \theta^{ab} + g(x_i)}_{\tilde M}^\top}}, \end{equation*} and $e_{ab}$ being a canonical vector with $1$ in the position of element $ab$ and $0$ elsewhere. The consistency of this variance estimator is provided in the appendix. Using this estimate, we can construct a confidence interval with asymptotically nominal coverage. In particular, \[ \lim_{n \rightarrow \infty} \sup_{\theta^*\in\Theta} \PP_{\theta^*}\rbr{ \theta_{ab}^* \in \tilde \theta_{ab} \pm z_{\kappa/2} \cdot \sqrt{\hat V_{ab} / n} } = 1 - \kappa. \] In the next section, we outline the proof of Theorem~\ref{thm:main}. Proofs of other technical results are relegated to the appendix.
\subsection{Proof of Theorem~\ref{thm:main}} \label{sec:proof-sketch-theorem}
We first introduce some auxiliary estimates. Let $\tilde \gamma^{ab}$ be a minimizer of the following constrained problem \begin{equation}
\label{eq:estimation:step2:S}
\begin{aligned}
&\min_{\gamma} \ \,
\EE_n\sbr{
\rbr{\vpaxi[,ab]-\vpaxi[,-ab]^\top \gamma}^2 +
\rbr{\vpbxi[,ab]-\vpbxi[,-ab]^\top \gamma}^2
} \ \\
& \text{ s.t. }\ \, {\rm supp}(\gamma) \subseteq \tilde M \backslash (a,b),
\end{aligned} \end{equation} where $\tilde M$ is defined in the step 3 of the procedure. Essentially, $\tilde \gamma^{ab}$ is the refitted estimator from step 2 constrained to have the support on $\tilde M\backslash (a,b)$. Let $\tilde w \in \RR^{s'}$ with $\tilde w_{ab} = 1$, $\tilde w_{\tilde M \backslash (a,b) } = - \tilde{\gamma}_{\tilde{M} \backslash (a,b)}$ and zero elsewhere. The solution $\tilde \theta^{ab}$ satisfies the first order optimality condition $
\rbr{ \EE_n\sbr{\Gamma(x_i)} \tilde \theta^{ab} + \EE_n[g(x_i)] }_{\tilde M} = 0$. Multiplying by $\tilde w$, it follows that \begin{equation}
\label{eq:kkt3:2} \begin{aligned}
& \tilde w^{\top}\rbr{ \EE_n\sbr{\Gamma(x_i)} \tilde \theta^{ab} + \EE_n[g(x_i)] } \\
= &
\rbr{\tilde w - w^*}^\top \EE_n\sbr{\Gamma(x_i)} \rbr{\tilde \theta^{ab} - \theta^{ab,*}} + \rbr{\tilde w - w^*}^\top \rbr{\EE_n\sbr{\Gamma(x_i)\theta^{ab,*} + g(x_i) }} \\
& \qquad +
w^{* \top} \EE_n\sbr{\Gamma(x_i)} \rbr{\tilde \theta^{ab} - \theta^{ab,*}} + w^{* \top} \rbr{\EE_n\sbr{\Gamma(x_i)\theta^{ab,*} + g(x_i)} } \\
\triangleq & L_1 + L_2+L_3+L_4 = 0. \end{aligned} \end{equation} From Lemma~\ref{lem:L1} and Lemma~\ref{lem:L2} (Appendix~\ref{sec:technical_proofs}), we have that $$
\abr{L_1 + L_2} \lesssim \phi_{\max}^2\phi_{\min}^{-4} \cdot \lambda_1\lambda_2 m. $$ Using Lemma~\ref{lem:L3}, the term $L_3$ can be written as $$
L_3 = \EE_n\sbr{\eta_{1i}\vpaxi[,ab] + \eta_{2i}\vpbxi[,ab]} \rbr{\tilde
\theta_{ab} - \theta_{ab}^{*}} + \Ocal\rbr{\phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1\lambda_2 m}. $$ Putting all the pieces together, we can rewrite \eqref{eq:kkt3:2} as $$ \sigma_n \rbr{\tilde \theta_{ab} - \theta_{ab}^{*}} = - w^{* \top} \rbr{\EE_n\sbr{\Gamma(x_i)\theta^{ab,*} + g(x_i)} } + \Ocal\rbr{\lambda_1\lambda_2 m}, $$ with $\sigma_n = \EE_n\sbr{\eta_{1i}\vpaxi[,ab] + \eta_{2i}\vpbxi[,ab]}$. This completes the proof.
\subsection{Theoretical Results for Non-negative Data} \label{sec:theoretical_nonnegative}
In this section we provide the theoretical results for non-negative data obtained by modifying the assumptions according to the scoring rule for non-negative data.
\paragraph{\emph{Assumption {\bf M}'.}} The parameter vector $\theta^*$ is sparse, with
$|{\rm supp}(\theta^{ab,*})| \ll n$. Let
\begin{equation}
\label{eq:estimation:step2:true_nonnegative}
\begin{aligned}
\gamma^{ab,*} &= \arg\min_\gamma \
\EE\sbr{
(\tilde \varphi_{1,ab}(x_i) - \tilde \varphi_{1,-ab}(x_i)^\top \gamma)^2 +
(\tilde \varphi_{2,ab}(x_i) - \tilde \varphi_{2,-ab}(x_i)^\top \gamma)^2
},
\end{aligned} \end{equation} with $\tilde\varphi_1, \tilde\varphi_2$ defined in \eqref{eq:tilde_phi_nonnegative}. Let $\eta_{1i} = \tilde \varphi_{1,ab}(x_i) - \tilde \varphi_{1,-ab}(x_i)^\top \gamma^{ab,*}$ and $\eta_{2i} = \tilde \varphi_{2,ab}(x_i) - \tilde \varphi_{2,-ab}(x_i)^\top \gamma^{ab,*}$, for $i \in [n]$. The vector $\gamma^{ab,*}$ is sparse with
$|{\rm supp}(\gamma^{ab,*})| \ll n$. Let
$m = |{\rm supp}(\theta^{ab,*})| \vee |{\rm supp}(\gamma^{ab,*})|$.
\paragraph{\emph{Assumption {\bf SE}'.}} We have \[ \phi_{\min}
\leq \phi_{-}(m \cdot \log n, {\EE\sbr{\Gamma_\ell(x_i)}})
\leq \phi_{+}(m \cdot \log n, {\EE\sbr{\Gamma_\ell(x_i)}})
\leq \phi_{\max}, \] where $0 < \phi_{\min} \leq \phi_{\max} < \infty$.
\paragraph{\emph{Assumption {\bf R}'.}} $\EE_{q^{ab}}\sbr{\norm{\Gamma_\ell(X_a,X_b,x_{-ab})\theta^{ab,*}}^2}$ and $\EE_{q^{ab}}\sbr{\norm{g_\ell(X_a,X_b,x_{-ab})}^2}$ are finite for all values of $x_{-ab}$ in the domain.
Denote the modified events as $$
\Ecal_{\theta} = \cbr{ \norm{\EE_n\sbr{\Gamma_\ell(x_i) \theta^{ab,*} + g_\ell(x_i)}
}_\infty \leq \frac{\lambda_1}{2} } $$ and $$
\Ecal_{\gamma} = \cbr{ \norm{\EE_n\sbr{
\eta_{1i}\tilde \varphi_{1,-ab}(x_i)+\eta_{2i}\tilde \varphi_{2,-ab}(x_i) }}_\infty \leq
\frac{\lambda_2}{2} }. $$ We have the asymptotic normality for the estimator on non-negative data.
\nocite{hahn2018regularization} \nocite{hahn2019efficient} \nocite{he2018xbart} \nocite{he2020stochastic}
\begin{corollary}
Suppose that assumptions {\bf M'}, {\bf SE}', and {\bf R'} hold. Define $w^*$ with $w^{*}_{ab} = 1$ and $w^{*}_{-ab} = -\gamma^{ab,*}$, where $\gamma^{ab,*}$ is given in the assumption {\bf M}'.
In
addition, suppose
$\sqrt{n} \lambda_1\lambda_2 m = o(1)$ and $\PP\rbr{\Ecal_\gamma\cap\Ecal_\theta\cap\Ecal_{\rm SE}} \rightarrow 1$ where
\begin{equation*} \Ecal_{\rm SE} = \cbr{ \frac{\phi_{\min}}{2}
\leq \phi_{-}\big(m \cdot \log n, \EE_n\sbr{\Gamma_\ell(x_i)}\big)
\leq \phi_{+}\big(m \cdot \log n, \EE_n\sbr{\Gamma_\ell(x_i)}\big)
\leq 2 \phi_{\max} }. \end{equation*}
Then we have
$$
\sqrt{n} (\tilde \theta_{ab} - \theta_{ab}^*)
\longrightarrow_D N(0, V_{ab}) ,
$$
with the variance term
$$
V_{ab} = \rbr{\EE\sbr{\sigma_n}}^{-2}\cdot\Var\rbr{w^{* \top}\rbr{\Gamma_\ell(x_i)\theta^{ab,*} + g_\ell(x_i)}}
$$
where
$\sigma_n = \EE_n\sbr{ \eta_{1i}\tilde \varphi_{1,ab}(x_i)+\eta_{2i}\tilde \varphi_{2,ab}(x_i) }$. \end{corollary}
\section{Relaxing the Sparsity Assumption on the Inverse of Hessian} \label{sec:relaxation_L1}
For general models, the sparsity condition on $\gamma^{ab,*}$
could be violated. For example, for the non-negative Gaussian
graphical model with $\Sigma = \Omega = I_p$, by direct calculation
we obtain that almost all the components of $\gamma^{ab,*}$ take the
same value, which is approximately $1/p$. Therefore $\gamma^{ab,*}$
is neither sparse, nor approximately sparse (see Section
\ref{sec:experiments_synthetic} for details). Instead, it only
satisfies a weaker condition $\|\gamma^{ab,*}\|_1 \leq 2$. This
constant $L_1$ norm condition is studied in \cite{ma2017inter}.
Since $\gamma^{ab,*}$ is dense, we cannot select sparse support in
Step 2; and therefore Step 3 is no longer valid when $p > n$.
We relax the sparsity condition on $\gamma^{ab,*}$ to a
constant $L_1$ condition, and modify our procedure. We apply the
debias method in \cite{ma2017inter}. Specifically, recall that the
scoring rule is
\begin{equation}
S^{ab}(x, \theta) = \frac 12 \theta^\top \Gamma(x) \theta + \theta^\top g(x) + c(x),
\end{equation}
and the gradient with respect to $\theta$ is
\begin{equation}
\nabla S^{ab}(x, \theta) = \Gamma(x) \theta + g(x).
\end{equation}
We obtain an estimator $\hat\theta^{ab}$ using Step 1, which satisfies
\begin{equation}
\nabla S^{ab} \big( x, \hat\theta^{ab} \big) - \nabla S^{ab} \big( x, \theta^{ab, *} \big) = \Gamma(x) \big( \hat\theta^{ab} - \theta^{ab, *} \big).
\end{equation}
Multiplying by some matrix $M$ on both sides and rearranging terms, we obtain
\begin{equation}
\label{eq:split_general_debias}
\hat\theta^{ab} - M \cdot \nabla S^{ab} \big( x, \hat\theta^{ab} \big) = \theta^{ab, *} - M \cdot \nabla S^{ab} \big( x, \theta^{ab, *} \big) + \big( I - M \cdot \Gamma(x) \big) \big( \hat\theta^{ab} - \theta^{ab, *} \big).
\end{equation}
The empirical version of \eqref{eq:split_general_debias} is
\begin{multline}
\label{eq:split_general_debias_empirical}
\hat\theta^{ab} - M \cdot \EE_n[ \nabla S^{ab} \big( x_i, \hat\theta^{ab} \big)] \\
= \theta^{ab, *} - M \cdot \EE_n[ \nabla S^{ab} \big( x_i, \theta^{ab, *} \big) ] + \big( I - M \cdot \EE_n[\Gamma(x_i)] \big) \big( \hat\theta^{ab} - \theta^{ab, *} \big).
\end{multline}
Rather than using Step 3 in the procedure described in Section~\ref{sec:methodology},
we define the left hand side as the proposed estimator: \begin{equation} \label{eq:estimator_L1_relax} \tilde \theta^{ab} = \hat\theta^{ab} - M \cdot \EE_n[ \nabla S^{ab} \big( x_i, \hat\theta^{ab} \big) ] = \hat\theta^{ab} - M \cdot \frac{1}{n} \sum_{i=1}^n \Gamma(x_i) \hat\theta^{ab} + g(x_i). \end{equation} Notice that the first term in the right hand side of \eqref{eq:split_general_debias_empirical} is the true value. Suppose $M$ is an approximate inverse of $\EE_n[ \Gamma(x) ]$, then the third term in the right hand side of \eqref{eq:split_general_debias_empirical} would be negligible. For the second term, we see that $\EE_n[ \nabla S^{ab} \big( x_i, \theta^{ab, *} \big) ]$ is an average of $n$ i.i.d. samples. If it is independent of $M$, then this second term is asymptotically normal, and the coordinate $ab$ of $\tilde \theta^{ab}$ is the desired estimator, similar to the three-step procedure described in Section~\ref{sec:methodology}. We construct $M$ following the procedure in \cite{ma2017inter}. We first split the data into two parts and estimate $\hat\theta^{ab}$ on the first part, while $M$ is estimated on the second part. For notation simplicity, let $\{x_i\}_{i=1}^n$ denote observations on the first part and $\{x_i'\}_{i=1}^n$ on the second part. We estimate $M$ by solving the following convex program: \begin{equation} \begin{aligned}
& \text{minimize} \quad \|M\|_\infty \\
& \text{subject to} \,\,\, \left\| \, I - M \cdot \EE_n[ \Gamma(x_i') ] \, \right\|_{\max} \leq \lambda_2. \end{aligned} \end{equation} By selecting appropriate $\lambda_2$, the solution $M$ will be an approximate inverse of $\EE_n[ \Gamma(x_i') ]$ and, hence, an approximate inverse of $\EE_n[ \Gamma(x_i) ]$. On the other hand, since we estimate $M$ based on second part of the data, $\{x_i'\}_{i=1}^n$, it is independent of $\EE_n[ \nabla S^{ab} \big( x_i, \theta^{ab, *} \big) ]$. Let $M^*$ be the population version of $M$. We see that the column $ab$ of $M^*$ (denoted as $M^*_{ab}$) corresponds to $w^*$ up to a constant, where $w^*$ is defined in Theorem \ref{thm:main} with $w^{*}_{ab} = 1$ and $w^{*}_{-ab} = -\gamma^{ab,*}$. For non-negative Gaussian graphical model with $\Sigma = \Omega = I_p$, a simple calculation shows that for large
$p$, we have $\| M^*_{ab} \|_1 \leq 1.5/(1-\frac{2}{\pi}) < 5$. We then see that the bounded $L_1$ norm condition on $M^*_{ab}$ is satisfied.
To establish asymptotic normality of the modified procedure, we define the
following event
$$
\Ecal_{\gamma}' = \cbr{
\left\|\, I - M^* \cdot \EE_n[ \Gamma(x_i) ] \, \right\|_{\max}
\leq {\lambda_2} }.
$$
For example, when $\varphi_1(x)$ and $\varphi_2(x)$ are sub-Gaussian
vectors, a modification of Lemma D.1 in \cite{ma2017inter} gives us
that if $\lambda_2 \asymp \sqrt{\frac{\log p}{n}}$, then
$\PP\rbr{ \Ecal_{\gamma}' } \rightarrow 1$. By the proof of Lemma
\ref{lem:refit}, we have that
$\| \hat\theta^{ab} - \theta^{ab, *} \|_1 \lesssim \lambda_1 m$.
This shows that the third term of
\eqref{eq:split_general_debias_empirical} is of order
$m \cdot \log p / n$. If $(m\log{p})^2/n = o(1)$, we then
obtain a result similar to that in Corollary \ref{corollary:normality}.
It is also straightforward to see that the variance given by
\eqref{eq:split_general_debias_empirical} is asymptotically the same
as $V_{ab}$ in Corollary \ref{corollary:normality}. We conclude
with the following corollary for sub-Gaussian distributions.
\begin{corollary} \label{corollary:L_1_relaxation}
Suppose that assumptions {\bf SE} and {\bf R} hold. Furthermore, suppose $\| M^*_{ab} \|_1 \leq C$. If $(m\log{p})^2/n = o(1)$ and $\PP\rbr{\Ecal_\gamma'\cap\Ecal_\theta\cap\Ecal_{\rm SE}} \rightarrow 1$, then the estimator $\tilde \theta^{ab}$ in \eqref{eq:estimator_L1_relax} satisfies
$$
\sqrt{n} (\tilde \theta_{ab} - \theta_{ab}^*)
\longrightarrow_D N(0, V_{ab}) ,
$$
where
$V_{ab} = \Var\big( {M_{ab}^*}^\top \big(\Gamma(x_i) \theta^{ab, *} + g(x_i) \big) \big) $. \end{corollary}
\section{Simultaneous Inference} \label{sec:simultaneous}
In the last two sections, we have developed a procedure for constructing a consistent and asymptotically normal estimate of a single edge parameter. In this section, we develop a procedure for simultaneous hypothesis testing of all edges connected to a specific node. We adapt the Gaussian multiplier bootstrap \citep{Chernozhukov2013Gaussian} to our setting. Here we focus on the case where $\Xcal = \RR^p$. The analysis can be straightforwardly extended to non-negative data.
For a fixed node $a \in V$, we would like to test the null hypothesis \begin{equation} \label{eq:H0_simultaneous} H_0: \theta_{ab}^* = \breve\theta_{ab} \quad \text{for all } b \in V_a = \{1, \ldots, p\} \backslash \{a\}, \end{equation} for some values $\breve\theta_{ab}$ versus the alternative \begin{equation} \label{eq:H1_simultaneous} H_1: \theta_{ab}^* \neq \breve\theta_{ab} \quad \text{for some } b \in V_a = \{1, \ldots, p\} \backslash \{a\}. \end{equation} We propose the following test statistic \begin{equation} \label{eq:def_test_statistic_simultaneous} \max_{b \in V_a} \sqrt{n} \abr{ \tilde \theta_{ab} - \breve \theta_{ab} }, \end{equation} where $\tilde \theta_{ab}$ is obtained by the three step procedure described in Section \ref{sec:methodology}. The null hypothesis will be rejected for large values of the test statistic. Using the $\ell_\infty$ statistics will allow us to have power against alternatives that change few of the coordinates of $\breve\theta_{ab}$. In order to use the test statistic in practice, we need to be able to accurately compute the critical value of the test statistic in a high-dimensional setting. To that end, we describe a multiplier bootstrap method that will allow us to obtain an accurate critical value to the test statistic in~\eqref{eq:def_test_statistic_simultaneous}.
For each $b \in V_a$ and $i \in \{1, \ldots, n\}$, denote \begin{equation} \label{eq:def_xib_tilde} \tilde z_{iab} = -\sigma_{n,ab}^{-1} \cdot \tilde w_{ab}^{\top} \Big(\Gamma_{ab}(x_i) \breve \theta^{ab} + g_{ab}(x_i) \Big), \end{equation} where $\sigma_{n,ab} = \EE_n\sbr{\eta_{1iab}\vpaxi[,ab] + \eta_{2iab}\vpbxi[,ab]}$ as defined in Theorem~\ref{thm:main}. We use the subscript $ab$ to highlight that all of these terms depend on the node $a$ and $b$. Let $e_i$, $i = 1,\ldots, n$, be a sequence of independent standard Gaussian random variables and independent of data. We define the multiplier bootstrap statistic as \begin{equation} \label{eq:def_W_tilde} \tilde W = \max_{b \in V_a} \frac{1}{\sqrt n} \sum_{i=1}^n \tilde z_{iab} e_{i} \end{equation} and compute the bootstrap critical value as the $(1-\alpha)$ quantile of $\tilde W$ \[
c_{\tilde W}(\alpha) = \inf\{t \in \RR: \PP(\tilde W \leq t) \geq
1-\alpha\}. \] Importantly, note that the quantile of the multiplier bootstrap statistic can be estimated using a Monte-Carlo method. We will show that the quantiles of $\tilde W$ approximate the quantiles of our test statistic.
Define \begin{equation} \label{eq:def_xib} z_{iab} = -\sigma_{ab}^{-1} \cdot w_{ab}^{*\top}\Big(\Gamma(x_i)\theta^{ab,*} + g(x_i) \Big), \end{equation} as the counterpart to $\tilde z_{iab}$, where $\sigma_{ab} = \EE[\sigma_{n,ab}]$. In order to establish our main theoretical result on simultaneous inference, we need the following regularity condition.
\paragraph{\emph{Assumption RR.}} Define $\gamma_{abc}(x_i) = z_{iab}z_{iac} - \EE(z_{iab}z_{iac})$. There exist $\eta_n$ and $\tau_n^2$, such that for any $b,c \in V_a$, we have
$\|\gamma_{abc}(x_i)\|_{\infty} \leq \eta_n$ and $\frac 1n \sum_{i=1}^n \EE \gamma^2_{abc}(x_i) \leq \tau_n^2$ with probability at least $1-n^{-c_1}$. Moreover, uniformly for $b \in V_a$, we have $c_0 \leq \frac 1n \sum_{i=1}^n \EE z_{iab}^2 \leq C_0$ for some $0 < c_0 < C_0$.
Assumption {\bf RR} imposes very mild technical conditions and is standard for a large number of models when the sample size is large enough. Some of the conditions are adopted from \cite{Chernozhukov2013Gaussian} in order to apply the theoretical results on the Gaussian multiplier bootstrap.
\begin{theorem}
\label{thm:simultaneous}
Suppose the assumptions {\bf M}, {\bf SE}, {\bf R} and {\bf RR} are satisfied,
and the events $\Ecal_\gamma\cap\Ecal_\theta\cap\Ecal_{\rm SE}$ hold for each $b \in V_a$.
Furthermore, suppose there exists a constant $\epsilon > 0$,
such that \begin{equation} \label{eq:asmp_regime_simultaneous} \frac 1n \Big[ (\tau_n^2 + \eta_n) \log p + (m\log{p})^2 + \log^7(pn) \Big] = o(n^{-\epsilon}). \end{equation} Then, under the null hypothesis, we have \begin{equation}
\sup_{\alpha \in (0,1)} \bigg| \PP\Big(\max_{b \in V_a} \sqrt{n} ( \tilde \theta_{ab} - \breve \theta_{ab} ) \geq c_{\tilde W}(\alpha) \Big) - \alpha \bigg| = o(1). \end{equation} \end{theorem}
The proof of Theorem \ref{thm:simultaneous} is provided in the appendix. Since \[
| \tilde \theta_{ab} - \breve \theta_{ab} | = \max\{\tilde \theta_{ab} - \breve \theta_{ab} , \breve \theta_{ab} - \tilde \theta_{ab} \}, \] it is straightforward to obtain the following corollary for the test statistic in~\eqref{eq:def_test_statistic_simultaneous}.
\begin{corollary} \label{corollary_simultaneous} Suppose the conditions in Theorem \ref{thm:simultaneous} are satisfied. Then, under the null hypothesis, we have \begin{equation}
\sup_{\alpha \in (0,1)} \bigg| \PP\Big(\max_{b \in V_a} \sqrt{n} | \tilde \theta_{ab} - \breve \theta_{ab} | \geq c_{\overline W}(\alpha) \Big) - \alpha \bigg| = o(1), \end{equation} where \begin{equation} \label{eq:def_W_overline}
\overline W = \max_{b \in V_a} \frac{1}{\sqrt n} \bigg| \sum_{i=1}^n \tilde z_{iab} e_{i} \bigg| \end{equation} and the bootstrap critical value is defined as \[
c_{\overline W}(\alpha) = \inf\{t \in \RR: \PP(\overline W \leq t) \geq 1-\alpha\}. \] \end{corollary}
We remark that we are not aiming for a tight bound on the
sample complexity. For commonly used models, we always have that
$\gamma_{abc}(x_i)$ in Assumption {\bf RR} converges to 0 at a
model-specific rate. Theorem \ref{thm:simultaneous} is valid as long as
the sample size is large enough, so that the sample complexity
condition in \eqref{eq:asmp_regime_simultaneous} is satisfied.
Based on Corollary~\ref{corollary_simultaneous}, we reject the null hypothesis if the test statistic~\eqref{eq:def_test_statistic_simultaneous} is greater than $c_{\overline W}(\alpha)$. This gives us a valid simultaneous test for all the edges connected to some node $a \in V$ with asymptotic Type I error equal to $\alpha$.
\subsection{Applications of Simultaneous Testing} \label{sec:application}
In this section, we show three concrete applications of our proposed procedure. Specifically, we consider \begin{enumerate}[topsep=0pt,itemsep=-1ex,partopsep=1ex,parsep=1ex] \item testing for isolated node; \item support recovery; \item testing for difference between graphical models. \end{enumerate}
\paragraph{\emph{Testing for isolated node.}} For a specific node $a \in V$, we would like to test whether it is isolated in the graph. This specific structural question translates into whether the variable $X_a$ is conditionally independent of all the other variables. In this case, we would like to test the null hypothesis \begin{equation} \label{eq:H0_isolated} H_0: \theta_{ab}^* = 0 \quad \text{for all } b \in V_a = \{1, \ldots, p\} \backslash \{a\}, \end{equation} versus the alternative \begin{equation} \label{eq:H1_isolated} H_1: \theta_{ab}^* \neq 0 \quad \text{for some } b \in V_a = \{1, \ldots, p\} \backslash \{a\}. \end{equation} We can directly apply our simultaneous inference procedure with $\breve\theta_{ab} = 0$.
\paragraph{\emph{Support recovery.}} For a specific node $a \in V$, we would like to estimate the support of $a$ defined as $\text{supp}(a) = \{b \in V_a, \theta_{ab}^* \neq 0\}$. Let $S^*$ be the true support and we focus on distributions with sub-Gaussian components. For each node $b \in V_a$, let $\tau_{ab}$ be a threshold that we set as \[
\tau_{ab} = \sqrt{2\hat V_{ab} \log p/n}, \] where $\hat V_{ab}$ is the variance estimator defined in \eqref{eq:variance_est}. We can estimate the support $S^*$ by thresholding the values $\tilde \theta_{ab}$ that are smaller than $\tau_{ab}$. In particular, the support recovery procedure returns the following support set \begin{equation} \label{eq:support_set}
\hat S(\tau_{ab}) = \{ b \in V_a, | \tilde \theta_{ab} | > \tau_{ab} \}. \end{equation}
We have the following result on the support recovery. \begin{corollary} Suppose that the values $\theta_{ab}^{*}$ on the true support are bounded from below as \begin{equation*}
|\theta_{ab}^*| > \sqrt{\frac{8\hat V_{ab} \log p}{n}}, \qquad \text{for all } b \in S^*. \end{equation*} Then \begin{equation} \inf \PP \big( \hat S(\tau_{ab}) = S^* \big) \xrightarrow{ n \to \infty } 1, \end{equation} where the infimum is taken over all data generating procedures that satisfy the minimum signal strength condition. \end{corollary} The proof follows in a similar way to the proof of Proposition 3.1 in \citet{Zhang2014Simultaneous} and is omitted here. The result shows that we are able to consistently recover the support of any node with overwhelming probability.
\paragraph{\emph{Testing the difference between graphical models.}} We consider a two-sample problem in which we wish to test whether the parameters of two graphical models, with the same set of nodes and belonging to the same exponential family of the form in~\eqref{eq:pairwise}, are the same. For example, we may have the data for the same set of nodes collected in different time periods, and we want to test whether the graph structure changes over time. As another example, consider functional brain connectivity. It is of interest to test whether brain connectivity is the same for the healthy subjects and people with a certain disorder.
Formally, suppose there are two densities $p_{\theta_{ab,1}^*}$ and $p_{\theta_{ab,2}^*}$ of the form in~\eqref{eq:pairwise}, indexed by parameter vectors $\theta_{ab,1}^*$ and $\theta_{ab,2}^*$. Given $n_1$ i.i.d.~samples $\{x_{i,1}\}_{i\in[n_1]}$ from $p_{\theta_{ab,1}^*}$ and $n_2$ i.i.d.~samples $\{x_{i,2}\}_{i\in[n_2]}$ from $p_{\theta_{ab,2}^*}$, we would like to test the null hypothesis \begin{equation} \label{eq:H0_difference} H_0: \theta_{ab,1}^* = \theta_{ab,2}^* \quad \text{for all } a, b \in V \times V, \end{equation} versus the alternative \begin{equation} \label{eq:H1_difference} H_1: \theta_{ab,1}^* \neq \theta_{ab,2}^* \quad \text{for some } a, b \in V \times V. \end{equation}
In order to create a test statistic for the difference, we first apply the three step procedure on each group of observations. That is, we obtain the estimators $\tilde \theta_{ab,1}$, $\tilde \theta_{ab,2}$ and estimates of their variances $\hat V_{ab,1}, \hat V_{ab,2}$. According to the Bahadur representation \eqref{eq:bahadur} in Theorem~\ref{thm:main}, we have \begin{equation} \label{eq:bahadur1} \sqrt{n_1}\cdot \rbr{\tilde \theta_{ab,1} - \theta_{ab,1}^{*}} = - \hat \sigma_{n,ab,1}^{-1} \cdot\sqrt{n_1} \EE_{n_1} \sbr{w_{ab,1}^{*\top}\rbr{\Gamma_{ab}(x_{i,1})\theta_1^{ab,*} + g_{ab}(x_{i,1})} } + o_\PP(1), \end{equation} and \begin{equation} \label{eq:bahadur2} \sqrt{n_2}\cdot \rbr{\tilde \theta_{ab,2} - \theta_{ab,2}^{*}} = - \hat \sigma_{n,ab,2}^{-1} \cdot\sqrt{n_2} \EE_{n_2} \sbr{w_{ab,2}^{*\top}\rbr{\Gamma_{ab}(x_{i,2})\theta_2^{ab,*} + g_{ab}(x_{i,2})} } + o_\PP(1). \end{equation} We propose to use the following test statistic \begin{equation}
\sqrt{n_1 + n_2} \cdot \max_{a, b \in V \times V} |\tilde \theta_{ab,1} - \tilde \theta_{ab,2}|, \end{equation} which will allow us to identify sparse changes in parameter values. We reject the null hypothesis for large values of the test statistic above. Next, we describe how to estimate the quantiles of the test statistic using the multiplier bootstrap.
Denote \begin{equation} \label{eq:def_xib_tilde1} \tilde z_{iab,1} = -\sigma_{n,ab,1}^{-1} \cdot \tilde w_{ab,1}^{\top} \Big(\Gamma_{ab}(x_{i,1}) \tilde \theta^{ab}_1 + g_{ab}(x_{i,1}) \Big), \end{equation} and \begin{equation} \label{eq:def_xib_tilde2} \tilde z_{iab,2} = -\sigma_{n,ab,2}^{-1} \cdot \tilde w_{ab,2}^{\top} \Big(\Gamma_{ab}(x_{i,2}) \tilde \theta^{ab}_2 + g_{ab}(x_{i,2}) \Big). \end{equation} We generate two sequences of independent standard Gaussian random variables \[
e_{i,j} \sim N(0,1) \qquad \text{for } i = 1,\ldots, n_j, \text{ and } j=1,2, \] that are independent of data as well. The multiplier bootstrap statistic is defined as \begin{equation} \label{eq:def_W_overline_diff} \overline W = \frac{1}{\sqrt{n_1 + n_2}} \cdot \max_{a,b \in V \times V} \abr{ \rbr{1+\frac{n_2}{n_1}}\sum_{i=1}^{n_1} \tilde z_{iab,1} e_{i,1} - \rbr{1+\frac{n_1}{n_2}}\sum_{i=1}^{n_2} \tilde z_{iab,2} e_{i,2} } \end{equation} and \[
c_{\overline W}(\alpha) = \inf\{t \in \RR: \PP(\overline W \leq t) \geq 1-\alpha\} \] is the bootstrap critical value.
Similar to Corollary \ref{corollary_simultaneous}, under the null hypothesis, we have \begin{equation} \sup_{\alpha \in (0,1)} \abr{ \PP\rbr{ \sqrt{n_1 + n_2} \cdot \max_{a,b \in V \times V} \abr{\tilde \theta_{ab,1} - \tilde \theta_{ab,2}}
\geq c_{\overline W}(\alpha) } - \alpha } = o(1). \end{equation} This gives us a valid procedure for testing whether the parameters of two graphical models are the same or not.
A recent paper \citep{kim2019two} proposed a different inference procedure that directly estimates the parameters of the differential network. \citet{Xia2015Testing} studied the two-sample problem in the context of Gaussian graphical models and proposed the following test statistic \begin{equation} T = \max_{a, b \in V \times V} \, \frac{\rbr{\tilde \theta_{ab,1} - \tilde \theta_{ab,2}}^2}{\hat V_{ab,1} + \hat V_{ab,2}} \end{equation} and showed that under the null hypothesis the limiting distribution of the test statistic satisfies \begin{equation} \label{eq:test_diff_limit} \PP \big(T - 2\log p + \log\log p \leq t \big) \to \exp \big\{ -(2 \pi)^{-\frac 12} \exp(-t/2) \big\}, \qquad \text{as }n \rightarrow \infty. \end{equation} Unfortunately, the convergence to the extreme value distribution is rather slow and, as a result, the critical values based on the limiting approximation are not accurate for finite samples. In comparison, our multiplier bootstrap procedure provides a non-asymptotic approximation to the quantiles of the test statistic. Furthermore, the approximation quality improves polynomially with the sample size and, as a result, provides a good performance for small and moderate sample sizes.
Extending the above described inferential procedure to differential networks with latent variables \citep{Na2019Estimating} and differential functional graphical models \citep{Zhao2019Direct, Zhao2020FuDGE} is left for future work.
\section{Extension to General $L$} \label{sec:general_L}
So far we have assumed that the number of parameters
corresponding to an edge is $L = 1$. In this section we extend our
results to general $L$. Throughout the section, we treat $L$ as a
fixed quantity. Recall that $t_{ab}^{(l)}$, $l\in[L]$, represent
sufficient statistics.
\paragraph{\emph{Inference for a fixed edge.}} For a fixed index $(a, b)$, the parameter of interest is the $L$ dimensional vector, $\theta_{ab}^{[L]} = \big[\theta_{ab}^{(1)}, \ldots, \theta_{ab}^{(L)}\big]$. There is no edge between $a$ and $b$ in the corresponding conditional independence graph if and only if $\theta_{ab}^{(1)}=\cdots=\theta_{ab}^{(L)}=0$. Following the same procedure as in Section \ref{sec:methodology}, we have the logarithm of conditional density as \begin{align*}
\log q^{ab}_\theta(x) = \dotp{\theta^{ab}}{\varphi(x)}
- \Psi^{ab}(\theta, x_{-ab})
+ h^{ab}(x), \end{align*} where $\theta^{ab} \in \RR^{s'}$, with $s' = 2K+2(p-2)L+L$, is the part of the vector $\theta$ corresponding to $\left\{ \theta_{a}^{(k)}, \theta_b^{(k)} \right\}_{k\in[K]}$, $\left\{\theta_{ac}^{(l)},\theta_{bc}^{(l)}\right\}_{l\in[L],c\in-ab}$, and $\left\{\theta_{ab}^{(l)}\right\}_{l\in[L]}$;
and $\varphi(x) = \varphi^{ab}(x) \in \RR^{s'}$ is the corresponding vector of sufficient statistics \[
\left\{t_a^{(k)}(x_a), t_b^{(k)}(x_b)\right\}_{k\in[K]},\
\left\{ t_{ac}^{(l)}(x_a,x_c), t_{bc}^{(l)}(x_b,x_c) \right\}_{l\in[L], c\in-ab},
\text{ and }
\left\{ t_{ab}^{(l)}(x_a,x_b) \right\}_{l\in[L]}. \]
For notational simplicity, for a given node $c \in -ab$, denote
$\theta^{ac} \in \RR^{L}$ as the stack of
$\left\{\theta_{ac}^{(l)}\right\}$ for $l \in [L]$; similarly,
denote $\theta^{bc} \in \RR^{L}$ as the stack of
$\left\{\theta_{bc}^{(l)}\right\}$. Let
$\theta^{ab, -{\rm group}}$ denote the stack of
$\left\{ \theta_{a}^{(k)}, \theta_b^{(k)} \right\}_{k\in[K]}$ and
$\left\{\theta_{ab}^{(l)}\right\}_{l\in[L]}$, which are the parameters
in $\theta^{ab}$ without group structure. We define $\gamma^{ac}$,
$\gamma^{bc}$, and $\gamma^{ab, -{\rm group}}$ similarly. Let
$E(a,b)$ denote the index set of the parameters
corresponding to the edge $(a,b)$.
Figure \ref{illustration} presents an illustrative
example with $L=K=2$, $p=6$, and $(a,b) = (1,2)$.
\begin{figure}\label{illustration}
\end{figure}
We modify the
three step procedure in Section \ref{sec:methodology} as follows.
\paragraph{\emph{Step 1:}} We find a pilot estimator of $\theta^{ab}$ by solving the following program \begin{equation}
\label{eq:estimation_general_L} \begin{aligned}
\hat \theta^{ab}
& = \arg\min_{\theta \in \RR^{s'}} \ \EE_n\sbr{S^{ab}(x_i, \theta)} +
\lambda_{1} \bigg( \| \theta^{ab, -{\rm group}} \|_1 +
\sum_{c \in -ab} \Big( \| \theta^{ac} \|_2 + \| \theta^{bc} \|_2 \Big) \bigg), \end{aligned} \end{equation} where \begin{equation}
\| \theta^{ab, -{\rm group}} \|_1 = \sum_{l=1}^L | \theta_{ab}^{(l)} | + \sum_{k=1}^K |\theta_{a}^{(k)}| + |\theta_{b}^{(k)}| \end{equation} and $\lambda_1$ is a tuning parameter. Since $L > 1$, we use the group Lasso penalty to estimate $\hat \theta^{ab}$. Let $\hat M_1$ be the support of $\hat \theta^{ab}$: \begin{equation}
\hat M_1 = {\rm supp} ( \hat\theta^{ab, -{\rm group}} ) \bigcup \{ E(a,c) \mid \| \hat\theta^{ac} \|_2 \neq 0 \} \bigcup \{ E(b,c) \mid \| \hat\theta^{bc} \|_2 \neq 0 \}. \end{equation}
\paragraph{\emph{Step 2:}} Let $\{\hat \gamma^{abl}\}_{l \in [L]}$, with each $\hat \gamma^{abl} \in \RR^{s'-1}$, be a joint minimizer of \begin{multline}
\label{eq:estimation:step2_general_L}
\sum_{l \in [L]} \frac 12 \EE_n\Big[
(\vpaxi[,abl]-\vpaxi[,-abl]^\top \gamma^{abl})^2 +
(\vpbxi[,abl]-\vpbxi[,-abl]^\top \gamma^{abl})^2
\Big] \\
+ \lambda_{2} \bigg( \sum_{l \in [L]} \| \gamma^{abl, -\rm{group}} \|_1 + \sum_{c \in -ab} \Big( \| \gamma^{ac} \|_2 + \| \gamma^{bc} \|_2 \Big) \bigg),
\end{multline}
where $\lambda_2$ is a tuning parameter. Let $\hat M_2$ be the union of the support of $\hat \gamma^{abl}$: \begin{equation}
\hat M_2 = \bigcup_{l \in [L]}{\rm supp} ( \hat\gamma^{abl, -{\rm group}} ) \bigcup \{ E(a,c) \mid \| \hat\gamma^{ac} \|_2 \neq 0 \} \bigcup \{ E(b,c) \mid \| \hat\gamma^{bc} \|_2 \neq 0 \}. \end{equation}
\paragraph{\emph{Step 3:}} Let $\tilde M = E(a,b) \cup \hat M_1 \cup \hat M_2$. We obtain our estimator as a solution to the following program \begin{equation}
\label{eq:estimation:step3_general_L} \begin{aligned}
\tilde \theta^{ab}
& = \arg\min_\theta \ \EE_n\sbr{ S^{ab}(x_i, \theta) }
\qquad \text{s.t.}\quad {\rm supp}(\theta) \subseteq \tilde M. \end{aligned} \end{equation} Our estimator of $\theta_{ab}^{[L]}$ is $\tilde \theta_{ab}^{[L]} \in \RR^L$, a block of $\tilde \theta^{ab}$.
\paragraph{\emph{Asymptotic Normality.}} For each $l \in [L]$, define $w_l^* \in \RR^{s'}$ with $w^{*}_{abl} = 1$ and $w^{*}_{-abl} = -\gamma^{abl,*}$, where $\gamma^{abl,*}$ is the population version of $\hat \gamma^{abl}$. Define \begin{equation} \eta_{1il} = \vpaxi[,abl]-\vpaxi[,-abl]^\top \gamma^{abl,*} ~~~\text{and}~~~ \eta_{2il} = \vpbxi[,abl]-\vpbxi[,-abl]^\top \gamma^{abl,*}, \end{equation} and \begin{equation} \sigma_{n, l} = \EE_n\sbr{\eta_{1il}\vpaxi[,abl] + \eta_{2il}\vpbxi[,abl]}. \end{equation} Let $u^*_l = w_l^* / \sigma_{n, l}$ and $U^* \in \RR^{s' \times L}$ as the stack of $u^*_l$: $U^* = [u_1^*, \ldots, u_L^*]$. Similar to Theorem \ref{thm:main}, we obtain the Bahadur representation for $\tilde \theta_{ab}^{[L]} \in \RR^L$ as:
\begin{equation}
\label{eq:bahadur_general_L}
\begin{aligned} \sqrt{n}\cdot \rbr{\tilde \theta_{ab}^{[L]} - \theta_{ab}^{*[L]}} & = - \sqrt{n} \EE_n \sbr{U^{* \top}\rbr{\Gamma(x_i)\theta^{ab,*} + g(x_i)} } + \Delta, \end{aligned} \end{equation}
where $\| \Delta \|_\infty = \Ocal\rbr{\phi_{\max}^2\phi_{\min}^{-4} \cdot \sqrt{n}\lambda_1\lambda_2 m }$. Furthermore, under similar conditions as in Section \ref{sec:asympt-norm-estim}, we obtain \begin{equation} \label{eq:asymptotic_normal_general_L}
\sqrt{n} \Big( \tilde \theta_{ab}^{[L]} - \theta_{ab}^{*[L]} \Big) \longrightarrow_D N(0, V_{ab}) , \end{equation} where $V_{ab} \in \RR^{L \times L}$ is the covariance matrix defined as $V_{ab} = \Var\big( U^{* \top}(\Gamma(x_i)\theta^{ab,*} + g(x_i)) \big)$. From \eqref{eq:asymptotic_normal_general_L} we can construct a multivariate confidence interval with asymptotically nominal coverage as before.
\paragraph{\emph{Simultaneous inference.}} For simultaneous inference, with a fixed node $a \in V$, we would like to test the null hypothesis \begin{equation} \label{eq:H0_simultaneous_general_L} H_0: \theta_{ab}^{*( l)} = \breve\theta_{ab}^{\,(l)} \quad \text{for all } l \in \{1, \ldots, L\} \text{ and } b \in V_a = \{1, \ldots, p\} \backslash \{a\}, \end{equation} for some fixed $\breve\theta_{ab}$ versus the alternative \begin{equation} \label{eq:H1_simultaneous_general_L} H_1: \theta_{ab}^{*( l)} \neq \breve\theta_{ab}^{\,(l)} \quad \text{for some } l \in \{1, \ldots, L\} \text{ and } b \in V_a = \{1, \ldots, p\} \backslash \{a\}. \end{equation} Again, the test involves a large number of parameters, $(p-1)L$.
First, note that we can directly apply the procedure developed in Section~\ref{sec:simultaneous}. By ignoring the covariance structure of $\theta_{ab}^{(1)}, \ldots, \theta_{ab}^{(L)}$, we can directly use the Gaussian multiplier bootstrap. Specifically, for each $b \in V_a$, we obtain the Bahadur representation in \eqref{eq:bahadur_general_L}. Next, we stack the resulting $p-1$ vectors into a $(p-1)L$-dimensional vector and perform the Gaussian multiplier bootstrap method to calculate the test statistic and critical values. Since $L$ is an absolute constant, all the analysis in Section \ref{sec:simultaneous} remains valid. However, such a procedure disregards the group structure on parameters and ignores the off-diagonal elements of the covariance matrix $V_{ab}$ when constructing the test and computing the critical values.
An alternative approach is based on the moderate deviation result for the $\chi^2$-test developed in \cite{Liu2013Carmer}. Here, we outline the procedure and refer to \cite{Liu2013Carmer} for technical details. First, for each $b \in V_a$, we define \begin{equation} T_{nb}^2 = n \cdot \Big( \tilde \theta_{ab}^{[L]} - \breve\theta_{ab}^{[L]} \Big)^\top \cdot ( V_{ab} )^{-1} \cdot \Big( \tilde \theta_{ab}^{[L]} - \breve\theta_{ab}^{[L]} \Big). \end{equation} It follows from \eqref{eq:asymptotic_normal_general_L} that the limiting distribution of $T_{nb}^2$ is $\chi^2_L$. Under mild conditions, Theorem 2.2 of \cite{Liu2013Carmer} shows that \[
\frac{\PP\rbr{T_{nb}^2 \geq x^2}}{\PP\rbr{\chi^2_L \geq x^2}} \rightarrow 1,\quad \text{as }n \rightarrow \infty \] uniformly for $x \in [0, o(n^{1/6}))$. This motivates the following test statistic \begin{equation} \max_{b \in V_a} \, T_{nb}^2. \end{equation} We obtain the critical value $y_\alpha$ that satisfies \begin{equation*} (p-1) \cdot \PP \rbr{ \chi^2_L \geq y_\alpha } = - \log(1-\alpha). \end{equation*} The null hypothesis is rejected if $ \max_{b \in V_a} T_{nb}^2 \geq y_\alpha $. We can prove that the asymptotic Type I error is $\alpha$ under the null only when the dependency among $T_{nb}^2$ is weak. We refer to \cite{Liu2013Carmer} for technical details. The disadvantage of this approach is that the terms $T_{nb}^2$ are correlated across $b \in V_a$, which is ignored when computing the critical value. Despite ignoring the group structure, the approach based on the multiplier bootstrap can control the Type I error better with small sample sizes. See Section \ref{sec:experiments_synthetic} for experimental results.
\section{Simulations} \label{sec:experiments_synthetic}
In this section, we illustrate the finite sample properties of our inference procedure on several synthetic data sets. We generate data from four different exponential family distributions that were introduced in Section~\ref{sec:ExpoGM}. The first and third examples involve Gaussian node-conditional distributions, for which we use regularized score matching. For the second and fourth settings, where the node-conditional distributions follow Truncated Gaussian and Exponential distributions, respectively, we use the regularized non-negative score matching procedure. Following the recommendation in \cite{Yu2018Graphical}, we set $\ell_a(x) = \log(x + 1)$ for the non-negative settings. In each example, we report the mean coverage rate of 95\% confidence intervals for several coefficients averaged over 500 independent simulation runs.
\paragraph{\emph{Gaussian graphical model.}}
For the Gaussian setting, we have $X \sim N(0,\Sigma)$ with precision matrix $\Omega = \Sigma^{-1} = (\theta_{ab})$. Without loss of generality, say we are interested in $\theta_{12}$. We have $$ \theta^* = (\theta^*_{11}, \theta^*_{12},\ldots,\theta^*_{1p},\theta^*_{22},\theta^*_{23},\ldots,\theta^*_{2p})^T, $$ $$ \varphi(x) = \Big(-\frac 12 x_1^2, -x_1x_2,\ldots,-x_1x_p,-\frac 12x_2^2,-x_2x_3,\ldots,-x_2x_p\Big)^T, $$ $$ \varphi_1(x) = (-x_1,-x_2,\ldots,-x_p,0,\ldots,0)^T, $$ $$ \varphi_2(x) = (0,-x_1,0,\ldots,0,-x_2,-x_3,\ldots,-x_p)^T, $$ $$ g(x) = (-1,0,0,\ldots,0,-1,0,\ldots,0)^T, $$ where for $g$ the second `$-1$' is at location $p+1$. Now we have \begin{equation*}
\begin{aligned}
\gamma^{ab,*} &= \arg\min \
\EE[
(\vpaxi[,ab]-\vpaxi[,-ab]^T\gamma)^2 +
(\vpbxi[,ab]-\vpbxi[,-ab]^T\gamma)^2
] \\
&= \arg\min \
\EE[
(x_2-(x_1,x_3,\ldots,x_p,0,\ldots,0)^T\gamma)^2 +
(x_1-(0,\ldots,0,x_2,x_3,\ldots,x_p)^T\gamma)^2
] .
\end{aligned} \end{equation*}
We can see that $\gamma^{ab,*}$ can be partitioned into its first $p-1$ elements and its last $p-1$ elements: $\gamma^{ab,*} = [\gamma^{ab,*}_1;\gamma^{ab,*}_2]$. The two parts can be optimized separately. Moreover, the population quantities $\EE[\varphi_1(x)\varphi_1(x)^\top]$ and $\EE[\varphi_2(x)\varphi_2(x)^\top]$ both equal the covariance matrix $\Sigma$ after rearranging terms and ignoring zero components. Assumption {\bf SE} is satisfied with most of the commonly used covariance matrices with full rank. Moreover, we can verify that $\gamma^{ab,*}_1$ and $\gamma^{ab,*}_2$ are proportional to the second and first columns of the precision matrix $\Omega$, respectively. Therefore, assumption {\bf M} is satisfied when the columns of the precision matrix $\Omega$ are sparse.
For the experiment, we set the diagonal entries of $\Omega$ as $\theta_{jj} = 1$. The sparsity pattern of the precision matrix corresponds to the 4-nearest neighbor graph and the non-zero coefficients are set as $\theta_{j, j-1} = \theta_{j-1, j} = 0.5$ and $\theta_{j, j-2} = \theta_{j-2, j} = 0.3$.
We set the sample size $n = 300$ and vary the number of nodes $p$. Table~\ref{simu1} shows the empirical coverage rate for different values of $p$ for four chosen coefficients. As is evident from the table, the coverage probabilities for the unknown coefficients are remarkably close to the nominal level.
\begin{table}[!h]
\center
\begin{tabular}{ccccc} \hline & $\theta_{1,2}$ & $\theta_{1,3}$ & $\theta_{1,4}$ & $\theta_{1,10}$ \\\hline $p = 50$ & 95.4\% & 92.4\% & 93.8\% & 93.2\% \\ $p = 200$ & 94.6\% & 92.4\% & 92.6\% & 94.0\%\\ $p = 400$ & 94.6\% & 94.8\% & 92.6\% & 93.8\% \\\hline
\end{tabular}
\caption{Empirical Coverage for Gaussian Graphical Model}
\label{simu1} \end{table}
\paragraph{\emph{Non-negative Gaussian.}}
For simplicity we first consider score matching for non-negative Gaussian model with $\ell(x) = x^2$. Following the setting and notation in the previous paragraph, we have \begin{equation*} \begin{aligned} \tilde\varphi_1(x) &= x_1 \cdot \varphi_1(x) = x_1 \cdot (-x_1,-x_2,\ldots,-x_p,0,\ldots,0)^T, \\ \tilde\varphi_2(x) &= x_2 \cdot \varphi_2(x) = x_2 \cdot (0,-x_1,0,\ldots,0,-x_2,-x_3,\ldots,-x_p)^T. \end{aligned} \end{equation*} As before, $\gamma^{ab, *}$ is separable into two parts; we focus on one to obtain \begin{equation*} \gamma^{ab, *}_2 = \sbr{ \EE \,\, x_1^2 \cdot
\begin{pmatrix}
x_1^2 & x_1x_3 & \cdots & x_1x_p \\
x_1x_3 & x_3^2 & \cdots & x_3x_p \\
\vdots & \vdots & \ddots & \vdots \\
x_1x_p & x_3x_p & \cdots & x_p^2
\end{pmatrix}
}^{-1}
\cdot
\sbr{ \EE \,\, x_1^2 x_2 \cdot
\begin{pmatrix}
x_1 \\
x_3 \\
\vdots \\
x_p
\end{pmatrix}
}. \end{equation*} We can see that it contains expectations, such as $x_1^2x_3x_4$, which are hard to calculate explicitly, in addition to the matrix inversion. To the best of our knowledge, this calculation is intractable. If we instead use generalized score matching with $\ell(x) = \log(x+1)$, the calculation would be more complicated.
One exception is when the precision matrix $\Omega = I_p$, which means that the components of $x$ are i.i.d. non-negative standard normal. Using the moments $\EE[x] = \sqrt{2/\pi}$, $\EE[x^2] = 1$, $\EE[x^3] = \sqrt{8/\pi}$, $\EE[x^4] = 3$, we can calculate $\gamma^{ab, *}$ explicitly. It turns out that the two parts in $\gamma^{ab, *}$ are the same. All their components take approximately the same value $1/p$, except for one component that takes a value of approximately $1.6/p$. Therefore, we can see that the sparsity assumption on $\gamma^{ab, *}$ is violated.
It instead only satisfies a weaker condition that $\| \gamma^{ab, *} \|_1 \leq 2$ for large $p$.
Similarly, we can calculate that $\| M^*_{ab} \|_1 \leq 5$ for large $p$. We then follow the debias method in Section \ref{sec:relaxation_L1} to construct confidence intervals.
For the simulation, we use the same setting as for the Gaussian graphical model with $\theta_{j, j-1} = \theta_{j-1, j} = 0.3$ and $\theta_{j, j-2} = \theta_{j-2, j} = 0.1$. We set $\ell_a(x) = \log(x + 1)$, and use the minimax tilting method to generate the data \citep{Botev2017normal}. We first provide empirical support for the bounded $L_1$ norm condition on $M^*$ through experiments with small $p = 20, 50$ and large $n$. Here we focus on the edge $(a,b)=(1,2)$; results for other edges are similar, and are therefore omitted. Since we have enough samples, we estimate $M$ as the exact inverse of the empirical quantity $\EE_n[ \Gamma(x_i') ]$. Table \ref{simu_verify_M_NNG} shows the average mean and maximum of the $L_1$ norm of the column $ab$ of $M$, based on 500 independent simulation runs with different sample sizes. This suggests that the $L_1$ norm of the column $ab$ of $M^*$ is bounded from above. These experimental results indicate that the bounded $L_1$ norm condition on $M^*$ is reasonable.
Table~\ref{simu_TN} shows the empirical coverage rate for various choices of $p$ and $n$. Note that since we are doing sample splitting, the real sample size is $2n$. We observe that by using the debias method, we can obtain nominal coverage rate even for relatively large $p$ with small~$n$.
\begin{table}[!h]
\center \begin{tabular}{ccccc} \hline & $n = 500$ & $n = 2000$ & $n = 10000$ & $n = 50000$ \\\hline averaged mean, $p = 20$ & 13.01 & 11.42 & 11.16 & 11.10 \\ averaged max, $p = 20$ & 17.84 & 13.46 & 11.95 & 11.52 \\\hline averaged mean, $p = 50$ & 24.71 & 15.19 & 12.90 & 12.72 \\ averaged max, $p = 50$ & 32.65 & 17.86 & 14.13 & 13.17 \\\hline \end{tabular} \caption{Averaged mean and max of the $L_1$ norm of $M$, for Non-negative Gaussian}
\label{simu_verify_M_NNG} \end{table}
\begin{table}[!h]
\center \begin{tabular}{ccccc} \hline & $\theta_{1,2}$ & $\theta_{1,3}$ & $\theta_{1,4}$ & $\theta_{1,10}$ \\\hline $p = 100, n = 150$ & 94.2\% & 93.8\% & 95.0\% & 92.4\%\\ $p = 200, n = 300$ & 95.2\% & 96.6\% & 94.8\% & 94.6\%\\ $p = 300, n = 500$ & 94.8\% & 95.8\% & 95.0\% & 94.4\% \\\hline \end{tabular} \caption{Empirical Coverage for Non-negative Gaussian, using debias method}
\label{simu_TN} \end{table}
\paragraph{\emph{Normal conditionals.}} For the experiment, we consider a special case of normal conditionals with $L = 1$ parameter matrix, whose density is \begin{equation} p(x ; B, \beta, \beta^{(2)}) \propto
\exp \cbr{
\sum_{a \neq b} \beta_{ab} x_a^2x_b^2 +
\sum_{a \in V} \beta_a^{(2)} x_a^2 +
\sum_{a \in V} \beta_ax_a },\quad x \in \RR^{p}. \end{equation}
This distribution is also considered in \citet{Lin2015High}. We set $\beta_j = 0.4$, $\beta_j^{(2)} = -2$, and we use a 4 nearest neighbor lattice dependence graph with interaction matrix: $\beta_{j, j-1} = \beta_{j-1, j} = -0.2$ and $\beta_{j, j-2} = \beta_{j-2, j} = -0.2$. Since the univariate marginal distributions are all Gaussian, we generate the data using a Gibbs sampler. The first 500 samples were discarded as `burn in' step, and of the remaining samples, we keep one in three.
We first provide empirical support for assumption {\bf M} through experiments with a small $p = 20$ and large $n$. Here we focus on the edge $(a,b)=(1,2)$; results for other edges are similar, and are therefore omitted. We estimate $\hat\gamma^{ab}$ as in Step 2, but without the $L_1$ regularization term since we have enough samples. For normal conditionals, we have $\hat\gamma^{ab} \in \RR^{2p} = \RR^{40}$. There are five components in $\hat\gamma^{ab}$ with relatively large non-zero values (not decreasing with $n$), and we calculate the mean and maximum absolute value of the remaining 35 components. Table \ref{simu_verify_M_NC} shows the average mean and maximum absolute values of these 35 components, based on 500 independent simulation runs with different sample sizes. This suggests that the population quantity $\gamma^{ab, *}$ would be close to a sparse vector in the limit of an infinite number of samples. These experimental results indicate that assumption {\bf M} is reasonable, at least in an approximately sparse version.
We then set the number of samples $n = 500$, and follow the proposed three-step procedure to calculate the coverage rate. Table~\ref{simu2} shows the empirical coverage rate for $p=100$ and $p=300$ nodes. Again, we see that our inference algorithm behaves well on the above Normal Conditionals Model.
\begin{table}[!h]
\center \begin{tabular}{ccccc} \hline & $n = 500$ & $n = 2000$ & $n = 10000$ & $n = 50000$ \\\hline average mean & $4.3 \times 10^{-3}$ & $2.7 \times 10^{-3}$ & $1.4 \times 10^{-3}$ & $0.7 \times 10^{-3}$ \\ average max & $9.7 \times 10^{-3}$ & $8.4 \times 10^{-3}$ & $6.9 \times 10^{-3}$ & $5.5 \times 10^{-3}$ \\\hline \end{tabular} \caption{Average mean and max on the 35 components, for Normal Conditionals}
\label{simu_verify_M_NC} \end{table}
\begin{table}[!h] \center \begin{tabular}{ccccc} \hline & $\beta_{1,2}$ & $\beta_{1,3}$ & $\beta_{1,4}$ & $\beta_{1,10}$ \\\hline $p = 100$ & 93.2\% & 93.4\% & 94.6\% & 95.0\%\\
$p = 300$ & 93.2\% & 93.0\% & 92.6\% &93.0\%\\\hline \end{tabular}
\caption{Empirical Coverage for Normal Conditionals} \label{simu2} \end{table}
\paragraph{\emph{Exponential graphical model.}} We choose $\theta_j = 2$, and a 2 nearest neighbor dependence graph with $\theta_{j, j-1} = \theta_{j-1, j} = 0.3$. We again first provide empirical support for assumption {\bf M} through experiments with a small $p = 20$ and large $n$, where we focus on the edge $(a,b)=(1,2)$ and use a Gibbs sampler to generate data. For the exponential graphical model, we have $\hat\gamma^{ab} \in \RR^{2p-2} = \RR^{38}$. There are four components in $\hat\gamma^{ab}$ with relatively large non-zero values (not decreasing with $n$), and we calculate the mean and maximum absolute value of the remaining 34 components. Table \ref{simu_verify_M_EGM} shows the average mean and maximum absolute values of these 34 components, based on 500 independent simulation runs with different sample sizes. This suggests that the population quantity $\gamma^{ab, *}$ would be close to a sparse vector in the limit of an infinite number of samples. Once again, these experimental results indicate that assumption {\bf M} is reasonable, at least in an approximately sparse version.
We then set $n = 1000$, and the empirical coverage rates and histograms of estimates of four selected coefficients are presented in Table~\ref{simu3} and Figure~\ref{fig:histogram} for $p=100$ and $p=300$, respectively.
\begin{table}[!h]
\center \begin{tabular}{ccccc} \hline & $n = 500$ & $n = 2000$ & $n = 10000$ & $n = 50000$\\\hline average mean & $3.6 \times 10^{-3}$ & $2.2 \times 10^{-3}$ & $0.9 \times 10^{-3}$ & $0.4 \times 10^{-3}$ \\ average max & $9.4 \times 10^{-2}$ & $6.8 \times 10^{-3}$ & $3.8 \times 10^{-3}$ & $1.2 \times 10^{-3}$ \\\hline \end{tabular} \caption{Average mean and max on the 34 components, for Exponential Graphical Model}
\label{simu_verify_M_EGM} \end{table}
\begin{table}[!h] \center \begin{tabular}{ccccc} \hline & $\theta_{1,2}$ & $\theta_{1,3}$ & $\theta_{1,4}$ & $\theta_{1,10}$ \\\hline $p = 100$ & 94.2\% & 91.6\% & 92.6\% & 92.4\%\\ $p = 300$ & 92.6\% & 92.0\% & 92.2\% & 92.4\%\\\hline \end{tabular} \caption{Empirical Coverage for Exponential Graphical Model} \label{simu3} \end{table}
\begin{figure*}
\caption{Histograms for $\theta$ for exponential graphical
model. The first row corresponds to $p = 100$ and the second row to $p = 300$.}
\label{fig:histogram}
\end{figure*}
We can see from the simulations here that we need more samples for inference based on non-negative score matching to be valid, compared to regular score matching.
The results are still impressive, as the sample size is small relative to the total number of parameters in the model. Moreover, by using generalized score matching with $\ell_a(x) = \log(x + 1)$, we obtain more accurate empirical coverage than with the original score matching, which uses $\ell_a(x) = x^2$. The histograms in Figure~\ref{fig:histogram} show that the fit is quite good, but to obtain better estimates, and hence better coverage, we would need more samples.
\paragraph{\emph{Simultaneous inference.}} We then apply the simultaneous inference procedure to test for all the edges connected to some node $a \in V$. Since the sample complexity \eqref{eq:asmp_regime_simultaneous} for simultaneous inference is large, we set $p = 50$. For hypothesis testing, we focus on the first node and we would like to test the null hypothesis \begin{equation} H_0: \theta_{1b}^* = \breve\theta_{1b} \quad \text{for all } b \in V_1 = \{2, \ldots, p\} , \end{equation} versus the alternative \begin{equation} H_1: \theta_{1b}^* \neq \breve\theta_{1b} \quad \text{for some } b \in V_1 = \{2, \ldots, p\} . \end{equation} We set the designed Type I error as $\alpha = 0.05$ and we consider Gaussian and Non-negative Gaussian settings as before. Table \ref{simultaneous_rerun_L1} shows the empirical Type I error under the null $\breve\theta_{1b} = \theta_{1b}^*$ with different choices of sample size. We see that our procedure works well as long as we have enough data.
\begin{table}[!h] \begin{center} \begin{tabular}{cccccc} \hline & $n = 500$ & $n = 800$ & $n = 1000$ & $n=2000$ & $n = 5000$ \\\hline Gaussian & 0.082 & 0.074 & 0.042 & 0.052 & 0.048\\ Non-negative Gaussian & 0.072 & 0.062 & 0.054 & 0.040 & 0.046 \\\hline \end{tabular} \end{center} \caption{Empirical Type I error of simultaneous test} \label{simultaneous_rerun_L1} \end{table}
\paragraph{\emph{Simultaneous inference with general $L$.}} We finally consider the simultaneous inference with general $L$. We consider the normal conditionals model with density \begin{equation*}
p(x ; \Theta^{(1)}, \Theta^{(2)}, \eta, \beta) \propto
\exp \cbr{
\sum_{a \neq b} \Theta_{ab}^{(2)}x_a^2x_b^2 +
\sum_{a \neq b} \Theta_{ab}^{(1)}x_ax_b +
\sum_{a \in V} \eta_a x_a^2 +
\sum_{a \in V} \beta_ax_a },\, x \in \RR^{p}. \end{equation*}
This corresponds to $L = K = 2$. We apply the two methods in Section \ref{sec:general_L} to test for all the edges connected to some node $a \in V$. We set $p = 50$ and the designed Type I error $\alpha = 0.05$. For hypothesis testing, we focus on the first node (i.e., $a = 1$). Table \ref{simultaneous_rerun_L1_general_L} shows the empirical Type I error under the null with different choices of sample sizes. We see that both methods work well as long as we have enough data.
\begin{table}[!h] \begin{center} \begin{tabular}{ccccc} \hline & $n = 1000$ & $n = 2000$ & $n = 4000$ & $n=6000$ \\\hline Gaussian multiplier bootstrap & 0.076 & 0.058 & 0.054 & 0.048 \\
Moderate deviation method & 0.182 & 0.092 & 0.068 & 0.056 \\\hline \end{tabular} \end{center} \caption{Empirical Type I error of simultaneous test with general $L$} \label{simultaneous_rerun_L1_general_L} \end{table}
\section{Protein Signaling Dataset} \label{sec:experiments_real}
In this section we apply our algorithm to a protein signaling flow cytometry data set, which contains the presence of $p = 11$ proteins in $n = 7466$ cells \citep{Sachs2005Causal}.
\citet{Yang2013Graphical} fit exponential and Gaussian graphical models to the data set.
Figure~\ref{Real} shows the network structure after applying our method to the data using an Exponential Graphical Model. We learn the structure directly from the data as well as provide confidence intervals using the Exponential Graphical Model, rather than log-transforming the data and fitting Gaussian graphical model as was done in \citet{Yang2013Graphical}. To infer the network structure, we calculate the $p$-value for each pair of nodes, and keep the edges with $p$-values smaller than 0.01. Estimated negative conditional dependencies are shown via red edges. Recall that the exponential graphical model restricts the edge weights to be non-negative, hence only negative dependencies can be estimated. From the figure we see that PKA is a major protein inhibitor in cell signaling networks. This result is consistent with the estimated graph structure in \cite{Yang2013Graphical}, as well as in the Bayesian network of \cite{Sachs2005Causal}. In addition, we find significant dependency between PKC and PIP3.
\begin{figure}
\caption{Estimated Structure of Protein Signaling Dataset}
\label{Real}
\end{figure}
\section{Conclusion} \label{sec:conclusion}
Motivated by applications in biology and social networks, much progress has been made in statistical learning models and methods for networks with a large number of nodes. Graphical models provide a powerful and flexible modeling framework for such networks, uncovering the dependencies among nodes. As a result, there is a vast literature on estimation and inference algorithms for high-dimensional Gaussian graphical models, as well as for more general graphical models in the exponential family. A disadvantage of most of these works is that the normalizing constant (partition function) of the conditional densities is usually computationally intractable and lacks a closed-form formula. Score matching estimators provide a way to address this issue, but so far all existing works on score matching focus on the estimation problem for high-dimensional graphical models, without statistical inference. In this paper, we fill this gap by proposing a novel estimator based on the score matching method that is asymptotically normal, which allows us to perform statistical inference for a single edge of the graph. Moreover, we propose a procedure for simultaneous testing of all the edges connected to a specific node in the graph, using the Gaussian multiplier bootstrap method. This procedure can be used to test whether certain nodes are isolated, to recover the support of the graph, and to test for differences between two graphical models. There are a number of interesting and important directions that will be explored in the future. 
For example, developing inferential techniques based on score matching for multi-attribute graphical models \citep{kolar13multiatticml, Kolar2014Graph}, graphical models with confounders \citep{Geng2019Partially,Geng2018Joint}, time-varying graphical models \citep{Zhou08time, Kolar2010Estimating, kolar2011time}, networks with jumps \citep{kolar10estimating} and conditional graphical models \citep{kolar10nonparametric}, as well as data with missing values \citep{kolar10nonparametric}. It is also of interest to incorporate constraints in the model and perform constrained inference \citep{Yu2019Constrained}. Finally, our method is developed for continuous data and developing results for discrete valued data is also of interest.
\acks{We are extremely grateful to the associate editor, Jie Peng, and two anonymous reviewers for their insightful comments that helped improve this paper. This work is partially supported by an IBM Corporation Faculty
Research Fund and the William S. Fishman Faculty Research Fund at
the University of Chicago Booth School of Business. This work was
completed in part with resources provided by the University of
Chicago Research Computing Center.}
\appendix
\section{Technical proofs} \label{sec:technical_proofs}
We first establish a bound on the size of $\hat m_1 = \abr{\hat M_1}$ and $\hat m_2 = \abr{\hat M_2}$ in the following lemma.
\begin{lemma}
\label{lem:size_m1}
Assume the conditions of Theorem~\ref{thm:main} are satisfied. Then
\[\hat m_1 + \hat m_2 \lesssim \phi_{\max}\phi_{\min}^{-2} m.\] \end{lemma} \begin{proof} From the KKT conditions we have that $\hat \theta^{ab}$ satisfies \[ \EE_n\sbr{ \Gamma(x_i) \hat \theta^{ab} + g(x_i) } + \lambda_1 \cdot \hat\tau = 0, \]
where $\hat \tau \in \partial\|\hat\theta^{ab}\|_1$. Restricted to $\hat M_1$, we have (elementwise) \[ \abr{\rbr{\EE_n\sbr{ \Gamma(x_i) \hat \theta^{ab} + g(x_i) }}_{\hat M_1}} = \lambda_1. \] Computing the $\ell_2$ norm on both sides, \begin{equation*} \begin{aligned} \sqrt{\hat m_1} \cdot \lambda_1 &= \bignorm{\rbr{\EE_n\sbr{ \Gamma(x_i) \hat \theta^{ab} + g(x_i) }}_{\hat M_1}}_2 \\ & \leq \bignorm{\rbr{\EE_n\sbr{ \Gamma(x_i) \rbr{\hat \theta^{ab} - \theta^{ab,*}}}}_{\hat M_1}}_2 + \bignorm{\rbr{\EE_n\sbr{ \Gamma(x_i) \theta^{ab,*} + g(x_i) }}_{\hat M_1}}_2 \\ & \triangleq L_1 + L_2. \end{aligned} \end{equation*} For the first term we have that \[ \begin{aligned} L_1 & \leq \phi_{+}(\hat m_1 + m, \EE_n\sbr{ \Gamma(x_i) }) \cdot r_{2\theta}\\ & \lesssim \phi_{+}(\hat m_1 + m, \EE_n\sbr{ \Gamma(x_i) }) \cdot \phi_{\min}^{-1}\cdot \lambda_1\sqrt{m}, \end{aligned} \] using \cite{negahban2010unified}. For the second term, we have that \[ L_2 \leq \sqrt{\hat m_1}\cdot \lambda_1/2. \] Combining the two bounds, we obtain \[ \sqrt{\hat m_1} \lesssim \phi_{+}(\hat m_1 + m, \EE_n\sbr{ \Gamma(x_i) }) \cdot \phi_{\min}^{-1}\sqrt{m}. \] Now, proceeding as in the proof of Theorem~3 in \cite{Belloni2013Least}, we establish that \[ \hat m_1 \lesssim \phi_{\max}\phi_{\min}^{-2} m. \] The proof for $\hat m_2$ is similar. \end{proof}
Our next result establishes bounds on $\tilde \theta^{ab} - \theta^{ab,*}$.
\begin{lemma}
\label{lem:refit}
Assume the conditions of Theorem~\ref{thm:main} are satisfied. Then \[ \begin{aligned} \norm{\tilde \theta^{ab} - \theta^{ab,*}}_2 & \lesssim \phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1\sqrt{ m }, \\ \norm{\tilde \theta^{ab} - \theta^{ab,*}}_1 & \lesssim \phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1 m . \end{aligned} \]
\end{lemma}
\begin{proof} From the KKT conditions we have that $\hat \theta^{ab}$ satisfies \[ \EE_n\sbr{ \Gamma(x_i)_{\hat M_1} } \hat \theta^{ab}_{\hat M_1} + \EE_n\sbr{ g(x_i)_{\hat M_1} } + \lambda_1 \cdot {\rm sign}(\hat\theta^{ab}_{\hat M_1}) = 0, \] while $\tilde \theta^{ab}$ satisfies \[ \EE_n\sbr{ \Gamma(x_i)_{\tilde M} } \tilde \theta^{ab}_{\tilde M} + \EE_n\sbr{ g(x_i)_{\tilde M} } = 0. \] Combining these two equations we have \[ \EE_n\sbr{ \Gamma(x_i)_{\tilde M} } \rbr{\tilde \theta^{ab}_{\tilde M} - \hat \theta^{ab}_{\hat M_1}}
= \lambda_1 \cdot {\rm sign}(\hat\theta^{ab}_{\hat M_1}) \] and \[ \phi_{\min}\cdot \norm{\tilde \theta^{ab}_{\tilde M} - \hat \theta^{ab}_{\hat M_1}}_2 \leq \bignorm{ \EE_n\sbr{ \Gamma(x_i)_{\tilde M} } \rbr{\tilde \theta^{ab}_{\tilde M} - \hat \theta^{ab}_{\hat M_1}} }_2
= \lambda_1 \sqrt{\hat m_1} . \] Therefore, using \cite{negahban2010unified}, \[ \norm{\tilde \theta^{ab} - \theta^{ab,*}}_2 \leq \norm{\tilde \theta^{ab} - \hat \theta^{ab}}_2 + \norm{\hat \theta^{ab} - \theta^{ab,*}}_2 \lesssim \phi_{\min}^{-1}\cdot\lambda_1\sqrt{\hat m_1}. \] Combining with Lemma~\ref{lem:size_m1}, we obtain \[ \begin{aligned} \norm{\tilde \theta^{ab} - \theta^{ab,*}}_2 \lesssim \phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1\sqrt{ m } \quad\text{and}\quad \norm{\tilde \theta^{ab} - \theta^{ab,*}}_1 \lesssim \phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1 m . \end{aligned} \] \end{proof}
A similar result can be established for $\tilde \gamma^{ab} - \gamma^{ab,*}$, which we state without proof, as it is analogous to the proof of Lemma~\ref{lem:refit}. \begin{lemma}
\label{lem:refit:gamma}
Assume the conditions of Theorem~\ref{thm:main} are satisfied. Then \[ \begin{aligned} \norm{\tilde \gamma^{ab} - \gamma^{ab,*}}_2 & \lesssim \phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_2\sqrt{ m }, \\ \norm{\tilde \gamma^{ab} - \gamma^{ab,*}}_1 & \lesssim \phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_2 m . \end{aligned} \]
\end{lemma}
To simplify notation later, let $\tilde{r}_{j\theta} = \norm{\tilde \theta^{ab} - \theta^{ab,*}}_j$ and $\tilde r_{j\gamma} = \norm{\tilde \gamma^{ab} - \gamma^{ab,*}}_j$, for $j\in\{1,2\}$. \begin{lemma} \label{lem:L1} Under the conditions of Theorem~\ref{thm:main}, we have \[
\abr{ \rbr{\tilde w - w^*}^\top \EE_n\sbr{\Gamma(x_i)} \rbr{\tilde \theta^{ab} - \theta^{ab,*}} }
\lesssim \phi_{\max}^2 \phi_{\min}^{-4} \cdot \lambda_1\lambda_2 {m}. \] \end{lemma} \begin{proof}
Let $\Scal_k$ be the set of $k$-sparse vectors in the unit ball, \[ \Scal_k = \cbr{u \in \RR^p : \norm{u}_2\leq 1, \norm{u}_0 \leq k}. \] Abusing the notation, let $\norm{\cdot}_{\Scal_k}$ denote the sparse spectral norm for matrices, that is, \[ \norm{M}_{\Scal_k} = \max_{u,v\in\Scal_k}u^\top Mv. \] Using Lemma 4.9 of \cite{Barber2015ROCKET},
\[|u^\top M v| \leq \left(\norm{u}_2 + \norm{u}_1/\sqrt{k}\right)\cdot \left(\norm{v}_2 + \norm{v}_1/\sqrt{k}\right)\cdot
\sup_{u',v'\in\Scal_k}|u'^\top M v'| \] for any fixed matrix $M\in\RR^{p\times p}$ and vectors $u,v\in\RR^p$, and any $k\geq 1$. With this, we have \begin{equation*} \begin{aligned}
\rbr{\tilde w - w^*}^\top \EE_n\sbr{\Gamma(x_i)} \rbr{\tilde \theta^{ab} - \theta^{ab,*}}
&\leq \norm{\EE_n\sbr{\Gamma(x_i)}}_{\Scal_{\tilde m}} \cdot
\rbr{\tilde r_{2\gamma} + \tilde r_{1\gamma}/\sqrt{\tilde m}}
\cdot
\rbr{\tilde r_{2\theta} + \tilde r_{1\theta}/\sqrt{\tilde m}} \\
&\lesssim \phi_{\max}^2\phi_{\min}^{-4} \cdot \lambda_1\lambda_2 m ,
\end{aligned} \end{equation*} where the second line follows from the assumption {\bf SE}, and Lemma~\ref{lem:refit} and Lemma~\ref{lem:refit:gamma}. \end{proof}
\begin{lemma} \label{lem:L2} Under the conditions of Theorem~\ref{thm:main}, we have \[
\abr{
\rbr{\tilde w - w^*}^\top \rbr{ \EE_n\sbr{\Gamma(x_i)}\theta^{ab,*} + \EE_n[g(x_i)] }
} \lesssim \phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1\lambda_2 m. \] \end{lemma} \begin{proof} Using H\"older's inequality, we have \[ \abr{
\rbr{\tilde w - w^*}^\top
\rbr{ \EE_n\sbr{\Gamma(x_i)}\theta^{ab,*} + \EE_n[g(x_i)] } } \leq \tilde r_{1\gamma} \cdot \norm{\EE_n\sbr{\Gamma(x_i)}\theta^{ab,*} + \EE_n[g(x_i)]}_\infty . \] On the event $\Ecal_\theta$, we have $\norm{\EE_n\sbr{\Gamma(x_i)}\theta^{ab,*} + \EE_n[g(x_i)]}_\infty \leq \lambda_1/2$. Finally, using Lemma~\ref{lem:refit:gamma}, we conclude that \[ \abr{ \rbr{\tilde w - w^*}^\top \rbr{ \EE_n\sbr{\Gamma(x_i)}\theta^{ab,*}
+ \EE_n[g(x_i)] } } \lesssim
\phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1\lambda_2 m. \] \end{proof}
\begin{lemma} \label{lem:L3} Under the conditions of Theorem~\ref{thm:main}, we have \begin{equation*} \begin{aligned} w^{* \top} \EE_n\sbr{\Gamma(x_i)} \rbr{\tilde \theta^{ab} - \theta^{ab,*}} = \EE_n\sbr{\eta_{1i}\vpaxi[,ab] + \eta_{2i}\vpbxi[,ab]} & \rbr{\tilde \theta_{ab}^{ab} - \theta_{ab}^{ab,*}} \\ & + \Ocal\rbr{\phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1\lambda_2 m}. \end{aligned} \end{equation*}
\end{lemma} \begin{proof}
We have that \begin{equation*} \begin{aligned} w^{* \top} \EE_n\sbr{\Gamma(x_i)} \rbr{\tilde \theta^{ab} - \theta^{ab,*}} & = \EE_n\sbr{\rbr{\eta_{1i}\vpaxi+\eta_{2i}\vpbxi}^\top } \rbr{\tilde \theta^{ab} - \theta^{ab,*}} \\ & = \EE_n\sbr{\eta_{1i}\vpaxi[,ab]+\eta_{2i}\vpbxi[,ab]} \rbr{\tilde \theta_{ab}^{ab} - \theta_{ab}^{ab,*}} \\ &\quad + \EE_n\sbr{\rbr{\eta_{1i}\vpaxi[,-ab]+\eta_{2i}\vpbxi[,-ab]}^\top } \rbr{\tilde \theta_{-ab}^{ab} - \theta_{-ab}^{ab,*}}. \end{aligned} \end{equation*} For the second term, we have \begin{equation*} \begin{aligned} &\abr{\EE_n\sbr{\rbr{\eta_{1i}\vpaxi[,-ab]+\eta_{2i}\vpbxi[,-ab]}^\top } \rbr{\tilde \theta_{-ab}^{ab} - \theta_{-ab}^{ab,*}}} \\ & \qquad \leq \tilde r_{1\theta} \cdot \norm{\EE_n\sbr{\eta_{1i}\vpaxi[,-ab]+\eta_{2i}\vpbxi[,-ab]}}_{\infty} \\ &\qquad \leq \tilde r_{1\theta} \cdot \lambda_2/2, \end{aligned} \end{equation*} since we are working on the event $\Ecal_\gamma$. Since $\tilde{r}_{1\theta} \leq \phi_{\max}^{1/2}\phi_{\min}^{-2}\cdot\lambda_1 m$, combining with the display above, the proof is complete. \end{proof}
\begin{lemma} \label{lem:L4} Under the assumptions {\bf M} and {\bf R}, we have that \[ \sqrt{n} \cdot w^{* \top}\rbr{ \EE_n\sbr{\Gamma(x_i)\theta^{ab,*} +
g(x_i)} } \longrightarrow_D N\rbr{0, H(\theta^*)}, \] where $H(\theta^*) = \Var\rbr{w^{* \top}\rbr{ {\Gamma(x_i)\theta^{ab,*} + g(x_i)} }}$. \end{lemma} \begin{proof} Let $Z_i = w^{* \top}\rbr{\Gamma(x_i)\theta^{ab,*} + g(x_i)}$. Then \begin{align}
\sqrt{n} \cdot w^{* \top}\rbr{ \EE_n\sbr{\Gamma(x_i)\theta^{ab,*} + g(x_i)} }
= \frac{1}{\sqrt{n}} \sum_i Z_i. \end{align} From \cite{Forbes2013Linear}, we have that $\EE[Z_i] = 0$ and $\Var(Z_i)$ is finite. An application of the central limit theorem completes the proof. \end{proof}
\begin{lemma}
\label{lem:variance_consistent}
The variance estimator $\hat V_{ab}$ is consistent, $\hat V_{ab} \rightarrow_P V_{ab}$. \end{lemma}
\begin{proof}
The variance estimator is obtained by using the second sample
moment, and replacing true $\theta^{ab, *}, \gamma^{ab,*}$ with
$\tilde \theta^{ab}, \tilde \gamma^{ab}$. We show the consistency of
$\hat V_{ab}$ by showing the consistency of the estimator for
$\sigma_n$ and
$\text{Var}\big( w^{*,T}(\Gamma(x_i)\theta^{ab,*} + g(x_i)) \big)$,
respectively.
\paragraph{Step 1.} We can write \begin{equation*} \begin{aligned} \sigma_n &= \mathbb{E}_n \big[\eta_{1i}\varphi_{1,ab}(x_i) + \eta_{2i}\varphi_{2,ab}(x_i)\big] \\ &= \mathbb{E}_n \big[w^{*,\top}\varphi_1(x_i) \cdot \varphi_{1,ab}(x_i) + w^{*,\top}\varphi_2(x_i) \cdot \varphi_{2,ab}(x_i)] \\ &= w^{*\top} \cdot \mathbb{E}_n [\Gamma(x_i)] \cdot e_{ab}. \end{aligned} \end{equation*} Let $\sigma = \EE[\sigma_n] = w^{*\top} \cdot \mathbb{E} [\Gamma(x_i)] \cdot e_{ab}$ denote the population version of $\sigma_n$ and $\tilde \sigma_n = \tilde w^{\top} \cdot \mathbb{E}_n [\Gamma(x_i)] \cdot e_{ab}$ the sample version. With high probability we have that \begin{equation*} \begin{aligned}
| \tilde \sigma_n - \sigma | &\leq | \tilde \sigma_n - \sigma_n | + | \sigma_n - \sigma | \\
&\leq \Big|(\tilde w - w^*)^{\top} \cdot \mathbb{E}_n [\Gamma(x_i)] \cdot e_{ab} \Big|
+ \Big| {w^*}^{\top} \cdot \big[ \mathbb{E}_n [\Gamma(x_i)] - \mathbb{E} [\Gamma(x_i)] \big] \cdot e_{ab} \Big| \\
& \leq \|\tilde w - w^*\|_1 \cdot \big\|\mathbb{E}_n [\Gamma(x_i)] \cdot e_{ab} \big\|_\infty
+ \| w^* \|_1 \cdot \big\| \big[ \mathbb{E}_n [\Gamma(x_i)] - \mathbb{E} [\Gamma(x_i)] \big] \cdot e_{ab} \big\|_\infty\\ & \lesssim \lambda_2 m \cdot (C + \sqrt{\log p/n}) + m \cdot \sqrt{\log p/n} = o_P(1). \end{aligned} \end{equation*}
\paragraph{Step 2.} We estimate the variance of $w^{*\top}\big(\Gamma(x_i)\theta^{ab,*} + g(x_i)\big)$. Since \[
\EE\sbr{w^{*\top}\big(\Gamma(x_i)\theta^{ab,*} + g(x_i)\big)} = 0, \] we can use the second sample moment to estimate the variance. As above, we plug in $\tilde \theta^{ab}$ and $\tilde \gamma^{ab}$, to obtain that \begin{equation*} \begin{aligned}
& \Bigg| \mathbb{E}_n\bigg\{ \tilde w^\top\Big(\Gamma(x_i)\tilde\theta^{ab} + g(x_i)\Big) \bigg\}^2 - \mathbb{E}_n\bigg\{ w^{*\top}\Big(\Gamma(x_i)\theta^{ab,*} + g(x_i)\Big) \bigg\}^2\Bigg| \\
& \qquad = \Bigg| \EE_n \bigg\{ \tilde w^\top\big(\Gamma(x_i)\tilde\theta^{ab} + g(x_i)\big) - w^{*\top}\big(\Gamma(x_i)\theta^{ab,*} + g(x_i)\big) \bigg\} \\
& \qquad \quad \quad \quad \quad \quad \quad \cdot \bigg\{\tilde w^\top\big(\Gamma(x_i)\tilde\theta^{ab} + g(x_i)\big) + w^{*\top}\big(\Gamma(x_i)\theta^{ab,*} + g(x_i)\big) \bigg\} \Bigg| \\
& \qquad \lesssim \EE_n \bigg| \tilde w^{\top} \rbr{\Gamma(x_i) \tilde \theta^{ab} + g(x_i) } - w^{*\top} \rbr{\Gamma(x_i)\theta^{ab,*} + g(x_i) } \bigg| \\
& \qquad \lesssim \EE_n \bigg| (\tilde w - w^*)^\top \rbr{\Gamma(x_i)\theta^{ab,*} + g(x_i) } + \tilde w^\top \Gamma(x_i)(\tilde \theta^{ab} - \theta^{ab,*}) \bigg| \\
& \qquad \lesssim { \|\tilde w - w^*\|_1 \cdot \EE_n\Big\|\Gamma(x_i)\theta^{ab,*} + g(x_i)\Big\|_\infty +
\|\tilde \theta^{ab} - \theta^{ab,*}\|_1 \cdot \EE_n\Big\| \tilde w^\top \Gamma(x_i) \Big\|_\infty }\\ & \qquad = o_P(1). \end{aligned} \end{equation*} Combining the results of the two steps, completes the proof. \end{proof}
\paragraph{Proof of Theorem \ref{thm:simultaneous}} Denote \begin{equation} \label{eq:def_W0} W_0 = \max_{b \in V_a} \frac{1}{\sqrt n} \sum_{i=1}^n z_{iab} e_{i} \end{equation} as the counterpart to $\tilde W$. Let \begin{equation} \label{eq:def_T0} T_0 = \max_{b \in V_a} \frac{1}{\sqrt n} \sum_{i=1}^n z_{iab} \quad \text{and} \quad \tilde T = \max_{b \in V_a} \frac{1}{\sqrt n} \sum_{i=1}^n \tilde z_{iab}. \end{equation} Denote \begin{equation}
\Delta = \max_{b,c \in V_a} \bigg| \frac 1n \sum_{i=1}^n \gamma_{abc}(x_i) \bigg|, \end{equation} where $\gamma_{abc}(x_i)$ is defined in assumption {\bf RR}. In order to apply Theorem 3.2 in \cite{Chernozhukov2013Gaussian}, we check the following conditions: \begin{enumerate} \item $\PP(\Delta \geq n^{-c}) \leq n^{-c}$.
\item $\PP( |T_0 - \tilde T| \geq n^{-c} ) \leq p^{-c}$.
\item With probability at least $1-p^{-c}$, $\PP_e( |W_0 - \tilde W| \geq n^{-c} ) \leq n^{-c}$. Here $\PP_e$ denotes the probability with respect to $\{e_i\}_{i=1}^n$, conditionally on the observed data. \end{enumerate}
We verify the first condition by applying Lemma A.1 in \citet{Geer2008High}. By the definition of $\gamma_{abc}(x_i)$, clearly we have $\EE\sbr{\gamma_{abc}(x_i)} = 0$. Together with assumption {\bf RR}, we apply Lemma A.1 in \cite{Geer2008High} and obtain \begin{equation*} \EE[\Delta] \leq \sqrt{\frac{4\tau_n^2\log{(2p)}}{n}} + \frac{2\eta_n\log{(2p)}}{n}. \end{equation*} According to \eqref{eq:asmp_regime_simultaneous}, for sufficiently large $n$, we have $\EE[\Delta] \leq n^{-2c}$, for some $c > 0$. By Markov inequality, \[
\PP(\Delta \geq n^{-c}) \leq n^c \cdot \EE[\Delta] \leq n^{-c}, \] which verifies the first condition.
Next, we verify the second condition. For a fixed $b \in V_a$, under the null, we have \begin{equation*} \begin{aligned}
\bigg| \frac{1}{\sqrt n} \sum_{i=1}^n z_{iab} - \frac{1}{\sqrt n} \sum_{i=1}^n \tilde z_{iab} \bigg| &\leq \sqrt{n} \bigg| (\sigma_{ab}^{-1} - \sigma_{n,ab}^{-1}) \cdot w_{ab}^{*\top} \Big( \EE_n\big[\Gamma_{ab}(x_i) \theta^{ab,*} + g_{ab}(x_i)\big] \Big) \bigg| \\
& \qquad + \sqrt{n} \bigg| \sigma_{n,ab}^{-1} \cdot (w_{ab}^* - \tilde w_{ab})^\top \Big( \EE_n\big[\Gamma_{ab}(x_i) \theta^{ab,*} + g_{ab}(x_i)\big] \Big) \bigg| \\ &\leq \sqrt{n} C\cdot\lambda_1\lambda_2 m \\ &\leq n^{-c}, \end{aligned} \end{equation*} with probability at least $1-p^{-c-1}$, where the second inequality comes from the consistency of $\sigma_n$, Lemma \ref{lem:L2}, and Lemma \ref{lem:L4}. We then have \begin{equation*} \begin{aligned}
\PP( |T_0 - \tilde T| \geq n^{-c} ) & \leq \PP\Big(\bigcup_{b \in V_a} \Big\{\frac{1}{\sqrt n} \big|\sum_{i=1}^n z_{iab} - \sum_{i=1}^n \tilde z_{iab} \big| \geq n^{-c} \Big\} \Big) \\
&\leq \sum_{b \in V_a} \PP\Big( \frac{1}{\sqrt n} \big | \sum_{i=1}^n z_{iab} - \sum_{i=1}^n \tilde z_{iab} \big| \geq n^{-c} \Big) \\ &\leq p \cdot p^{-c-1} = p^{-c}, \end{aligned} \end{equation*} which verifies the second condition.
Finally, we verify the third condition. We have \begin{equation} \begin{aligned} \label{eq:diff_W}
\PP_e( |W_0 - \tilde W| \geq n^{-c} ) & \leq \PP_e\Big(\max_{b \in V_a} \Big\{\frac{1}{\sqrt n} \big|\sum_{i=1}^n (z_{iab} - \tilde z_{iab}) e_i \big| \Big\} \geq n^{-c} \Big). \\
\end{aligned} \end{equation} Denote $Z_b = \frac{1}{\sqrt n} \sum_{i=1}^n (z_{iab} - \tilde z_{iab})e_i$. Under the null we have \begin{equation*} \begin{aligned} z_{iab} - \tilde z_{iab} &= \Big[ (\sigma_{ab}^{-1} - \sigma_{n,ab}^{-1}) \cdot w_{ab}^{* \top} \big( \Gamma_{ab}(x_i) \theta^{ab,*} + g_{ab}(x_i)\big) \Big] \\ & \qquad\qquad\qquad + \Big[ \sigma_{n,ab}^{-1} \cdot (w_{ab}^* - \tilde w_{ab})^\top \big( \Gamma_{ab}(x_i) \theta^{ab,*} + g_{ab}(x_i) \big) \Big]. \end{aligned} \end{equation*} According to Lemma A.1 in \cite{Chernozhukov2013Gaussian}, we have \begin{equation*}
\EE\bigg[ \frac 1n \Big\|\sum_{i=1}^n \Big(\Gamma_{ab}(x_i) \theta^{ab,*} + g_{ab}(x_i)\Big)e_i \Big\|_\infty \bigg] \lesssim \sigma_0\sqrt{\frac{\log p}{n}} + \frac{M\log p}{n}, \end{equation*} uniformly for each $b \in V_a$, where \begin{equation} \sigma_0^2 = \max_{j} \frac 1n \sum_{i=1}^n \Big[ \Big(\Gamma_{ab}(x_i) \theta^{ab,*} + g_{ab}(x_i)\Big)e_i \Big]_j^2, \end{equation} and \begin{equation}
M^2 = \EE \bigg[ \max_{i} \Big\| \Big(\Gamma_{ab}(x_i) \theta^{ab,*} + g_{ab}(x_i)\Big)e_i \Big\|_\infty \bigg]^2. \end{equation} We then have \begin{equation*} \begin{aligned}
\EE |Z_b| &\leq \frac{1}{\sqrt n} \Big((\sigma_{ab}^{-1} - \sigma_{n,ab}^{-1}) \cdot \|w_{ab}^*\|_1 + \sigma_{n,ab}^{-1} \cdot \|w_{ab}^* - \tilde w_{ab}\|_1\Big) \\
& \qquad\qquad \times \EE \bigg[ \Big\|\sum_{i=1}^n \Big(\Gamma_{ab}(x_i) \theta^{ab,*} + g_{ab}(x_i)\Big)e_i \Big\|_\infty \bigg] \\ &\leq \frac{C}{\sqrt n} \cdot \lambda m \cdot \bigg(\sigma_0\sqrt{\frac{\log p}{n}} + \frac{M\log p}{n}\bigg) \cdot n \\ &\leq n^{-2c}, \end{aligned} \end{equation*} uniformly for each $b \in V_a$ with probability at least $1-p^{-c}$, where the second inequality comes from the consistency of $\sigma_n$ and Lemma \ref{lem:refit:gamma}. Applying Markov inequality again, we obtain \[
\PP_e(|Z_b| \geq n^{-c}) \leq n^c \cdot \EE|Z_b| \leq n^{-c}, \] uniformly for each $b \in V_a$, with probability at least $1-p^{-c}$. Plugging back into \eqref{eq:diff_W}, we obtain \begin{equation} \begin{aligned}
\PP_e( |W_0 - \tilde W| \geq n^{-c} ) &\leq \PP_e\Big( \max_{b \in V_a} |Z_b| \geq n^{-c} \Big) \leq n^{-c} \end{aligned} \end{equation} with probability at least $1-p^{-c}$, which verifies the third condition.
With the three conditions verified and assumption {\bf RR}, we apply Theorem 3.2 in \cite{Chernozhukov2013Gaussian} to obtain \begin{equation*}
\sup_{\alpha \in (0,1)} \bigg| \PP\Big(\max_{b \in V_a} \sqrt{n} ( \tilde \theta_{ab} - \breve \theta_{ab} ) \geq c_{\tilde W}(\alpha) \Big) - \alpha \bigg| = o(1), \end{equation*} which completes the proof.
\end{document} | arXiv | {
"id": "1905.06261.tex",
"language_detection_score": 0.6870229840278625,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Social Networks and Stable Matchings in the Job Market\footnote{A preliminary version will appear at the $5^{\text{th}}$ Workshop on Internet and Network Economics (WINE 2009).}}
\begin{abstract}
For most people, social contacts play an integral part in finding a new job. As observed by Granovetter's seminal study, the proportion of jobs obtained through social contacts is usually large compared to those obtained through postings or agencies. At the same time, job markets are a natural example of {\em two-sided matching markets}. An important solution concept in such markets is that of {\em stable matchings}, and the use of the celebrated Gale-Shapley algorithm to compute them. So far, the literature has evolved separately, either focusing on the implications of information flowing through a social network, or on developing a mathematical theory of job markets through the use of two-sided matching techniques.\\
In this paper we provide a model of the job market that brings both aspects of job markets together. To model the social scientists' observations, we assume that workers learn {\em only} about positions in firms through social contacts. Given that information structure, we study both static properties of what we call {\em locally stable matchings} (i.e., stable matchings subject to informational constraints given by a social network) and dynamic properties through a reinterpretation of Gale-Shapley's algorithm as myopic best response dynamics. \\
We prove that, in general, the set of locally stable matchings strictly contains that of stable matchings and it is in fact NP-complete to determine if they are identical. We also show that the lattice structure of stable matchings is in general absent. Finally, we focus on myopic best response dynamics inspired by the Gale-Shapley algorithm. We study the efficiency loss due to the informational constraints, providing both lower and upper bounds. \end{abstract}
\section{Introduction} When looking for a new job, the most often heard advice is to ``ask your friends.'' While in the modern world almost all of the companies have online job application forms, these are usually overloaded with submissions; and it is no secret that submitting a resume through someone on the inside greatly increases the chances of the application actually being looked at by a qualified person. This is the underlying premise behind the professional social networking site LinkedIn, which now boasts more than 40 million users \cite{wikipedia}. And, as pointed out by Jackson \cite{jacksonbook}, this phenomenon has given a new meaning to the word ``networking,'' with Merriam-Webster's Dictionary defining it as ``the cultivation of productive relationships for employment or business.''
Sociologists have long studied this phenomenon, and have time and time again confirmed the role that social ties play in getting a new job. Granovetter's seminal work \cite{granovetter,granovetterbook} headlines a long history of research into the importance of social contacts in labor markets. His results are striking, for example, 65 percent of managerial workers found their job through social contacts. Interestingly, as early as 1951, a similar study about textile workers \cite{myers} found similar results: more than 60 percent found their job through social contacts. Other studies (see, e.g., \cite{rees,montgomery,datcher}) all echo the importance of social contacts in securing a new position.
While there are numerous reasons that social ties play such an important role, one may think that the employers themselves would prefer to evaluate {\em all} candidates for a position before making a hiring decision. This is in fact what happens in some segments of the job market. For example, in most Western European countries, applicants to positions within the government (public servants) pass through a centralized selection process: government officials first gather all relevant information, then proceed to match applicants to positions. In the United States, the National Resident Match Program is a significant example of a centralized selection matching mechanism. Such centralized markets have been well studied in {\em two-sided
matching theory}. Indeed, the NRMP is one of the most important practical applications of the celebrated {\em stable matching problem} in two-sided matching markets \cite{roth1984a}. For an overview of two-sided matching markets, see \cite{rothsotomayor}.
However, the task of evaluating (and ranking) all possible candidates is often simply not feasible. Especially in today's economy, it is not rare to hear of hundreds of applicants for a position, obviously the vast majority cannot be interviewed, regardless of their qualifications. The recommendation by an employee thus carries extra weight in the decision process, precisely because it separates the specific application from the masses. (It can be argued that making good recommendations is also in the employee's best interest, but we will not be addressing incentive issues in this work.)
\paragraph{Model} - In this work, we propose a new model that bridges the rigorous analysis of the two-sided matching theory with the observations made by social network analysis. Specifically, we develop a model of job markets where social contacts play a pivotal role; and then proceed to analyze it through the stable matching lens.
We integrate the usage of social contacts by allowing an applicant to apply {\em only} to jobs in firms employing her friends. Clearly this limitation depends on the underlying social graph. If it is a clique (that is everyone knows everyone else), the equilibrium behavior is exactly the same as in classical two sided matching. If it is disconnected, then some workers will never discover some firms, leading to potentially very inefficient matchings. But even in well connected social graphs this limitation leads to behaviors not observed in the traditional model. For example, a firm may lose all of its workers to the competition, and subsequently go out of business (if no one is employed at a firm, no one can recommend others to join, thereby forcing it to close its doors).
The model forces us to consider a setting where job applicants have only partial information on job opportunities. The main question we focus on in the paper is: how does the inclusion of such an informational constraint alter the model and predictions of traditional stable matching theory?
\paragraph{Our Contributions} - In traditional two-sided matching theory, a matching where no worker-firm pair can find a profitable deviation is called {\em
stable}. Analogously, we call our solution concept a {\em locally
stable matching}, where the locality is qualified by the social network graph. We study structural properties of locally stable matchings by showing that, in general, the set of locally stable matchings does not form a distributive lattice, as is the case for global stable matchings. We also show that, in general, it is NP-complete to determine whether all locally stable matchings are also globally stable. Both of these results exploit a characterization of locally stable matchings in the special case of matching one worker per firm, for particular rankings over workers and firms.
We then turn our attention to dynamic analysis. We consider how a particular interpretation of the classic Gale-Shapley algorithm \cite{galeshapley} performs under such informational constraints; we refer to our algorithm as the {\em local Gale-Shapley} algorithm. We first prove that, unlike the standard Gale-Shapley algorithm \cite{rothsotomayor}, the existence of informational constraints implies that the output of the algorithm is not independent of the order of proposals. Nevertheless, under weak stochastic conditions, we show that the local Gale-Shapley algorithm converges almost surely, assuming the same particular rankings over workers and firms as before.
Unlike the traditional Gale-Shapley algorithm, the algorithm in the limited information case is highly dependent on the initial conditions. To explore this further we define a minimal notion of efficiency, namely the number of firms still in business in the outcome matching, and quantify the efficiency loss under various initial conditions. Specifically, we show that if an adversary chooses an initial matching, he can ensure that some firms lose all of their workers; conversely there is a distribution on the preference lists used by the firms that guarantees that at least some firms remain in business, regardless of the actions of the adversary.
We finish with a model that incorporates graph constraints into a two-sided matching market, where there is no restriction on the graph. As a preliminary result, we provide a full characterization of when locally stable matchings coincide with global stable matchings of the two-sided market.
\subsection*{Related Work}
Our work touches on several threads of the literature. Most closely related is the work by Calv\'o-Armengol and Jackson \cite{calvo1,calvo2}. They consider how information dissemination through neighbors of workers on potential jobs can affect wage and employment dynamics in the job market. There are several key differences with our model. The most important one is that, in \cite{calvo1,calvo2}, there is no competition for job openings between workers. Unemployment is the result of a random sampling process and not of strategic interactions between workers and firms. Also, all workers learn {\em directly} about potential job openings with some probability, and {\em indirectly} through their social contacts, whereas in our model a worker can only learn about potential job openings through her social contacts.
Also related is the work by Lee and Schwarz \cite{interviewingschwarz}. The authors consider the stable matching problem in the job market where a costly information acquisition step (interviewing) is necessary for both workers and firms to learn their preferences. Once interviewing is over, the standard Gale-Shapley algorithm is used to calculate the matching of workers to firms. Although the authors use stable matching as their solution concept, and only partial information on jobs and candidates is available, their assumptions imply that the information available to workers and firms is {\em unchanged} throughout the matching phase. In that sense, their work is related to the equilibrium analysis performed in our model, but is dramatically different when considering the evolution of the job market during the actual matching phase.
There is a large body of literature on the impact of incomplete information in two-sided matching markets. Roth \cite{rothincomplete} studies that impact on strategic decisions. He concludes that phenomena arising in {\em all} equilibria of the complete information game need not happen in {\em any} equilibria of the corresponding incomplete information setting. Another example is the work of Chakraborty, Citanna and Ostrovsky \cite{interdependent}. They show how different assumptions on the information available to colleges when selecting aspiring students affect the existence of a stable matching mechanism.
In the study of uncoordinated two-sided matching markets, first introduced by Knuth in \cite{knuthuncoordinated}, agents on one side of the market {\em simultaneously} pick an agent on the other side of the market. Stable matchings correspond to Nash equilibria of the one-shot game. In \cite{uncoordinated}, the authors prove that the convergence time of best and better response dynamics can be exponential. Their (negative) results do not immediately apply to our model as, in the dynamics we consider, only {\em one} agent performs a best response at a time.
In \cite{repeatedmatching}, the authors consider an experimental repeated matching market for workers and firms. They study how the underlying information structure on workers and firms' preferences affect the predictive power of the Gale-Shapley algorithm. Thus their interpretation of the Gale-Shapley algorithm as a predictor of {\em real} dynamics in information constrained repeated matching markets is related to our interpretation of the algorithm as the evolution of the job market under myopic best response dynamics.
Finally, in \cite{popular,mahdianpopular}, the authors consider the problem of matching applicants to job positions. A matching is said to be {\em popular} if the number of happy applicants is as large as possible. This notion is related to the notion of efficiency used in our paper, namely that of maximizing the number of firms in business.
\section{Definitions and Notation} \label{sec:def}
Let $W$ be a set of workers, and $G = (W,E)$ be an undirected graph representing the social network among workers. Let $F$ be the set of firms, each with $k$ jobs, for some $k > 0$. We are interested in the case where there are as many workers as positions in all firms, i.e., $|W|=n=k|F|$. Following standard notation, for a worker $w\in W$, let $\Gamma(w)$ be the neighborhood of $w$ in $G$.
An assignment of workers to firms can be described by a function mapping workers to jobs, or alternatively by a function mapping firms to workers. Following the definition from the two-sided matching literature, we define both functions simultaneously.
We assume that some companies are better to work for than others, and thus each worker $w$ has a strict ranking $\succ_w$ over firms such that, for firms $f\neq f'$, $w$ prefers being employed in $f$ than in $f'$ if and only if $f\succ_w f'$. Note however, that the ranking is blind to the individual positions within a firm: all of the $k$ slots of a given firm are equivalent from the point of view of a worker.
Similarly, each firm $f$ has a strict ranking $\succ_f$ over workers. We assume that all workers strictly prefer being employed, and that all firms strictly prefer having all their positions filled. For any worker $w$, in a slight abuse of notation, we extend her ranking over firms to account for her being unemployed by setting $f\succ_w w$ for all firms $f$; in a similar way we extend the rankings of firms over workers.
\begin{definition}[Matching] \label{def:matching} \begin{enumerate} \item {\em Case 1: $k = 1$.} The function $\mu:\ W\cup F \rightarrow W\cup F$
is a {\em matching} if the following conditions
hold: (1) for all $w\in W$, $\mu(w)
\in F\cup\{w\}$; (2) for all $f\in F$, $\mu(f) \in
W\cup\{f\}$; and (3) $\mu(w) = f$ if and only if $\mu(f) = w$. \item {\em Case 2: $k > 1$.} The function $\mu:\ W\cup F \rightarrow
2^W\cup F$ is a {\em matching} if the following conditions hold: (1)
for all $w\in W$, $\mu(w) \in F\cup\{\{w\}\}$; (2) for all $f\in F$,
$\mu(f) \in 2^W\cup\{f\}$; (3) $\mu(w) = f$ if and only if $w\in
\mu(f)$; and (4) $|\mu(f)| \leq k$. \end{enumerate}
We say that a matching $\mu$ is {\em complete} if:
\[
\bigcup_{f\in F} \mu(f) = W.
\]
\end{definition}
Given a matching $\mu$ and a firm $f$, let $\min(\mu(f))$ be the {\em least preferred} worker employed by firm $f$ (w.r.t. firm $f$'s ranking) if
$|\mu(f)|=k$, and $\min(\mu(f)) = f$ otherwise.
To study the notion of stable matchings, we adapt the usual concept of a {\em blocking pair}. Given the preferences of workers and firms, a matching $\mu$, a firm $f$ and a worker $w$, we say that $(w,f)$ is a {\em blocking pair} if and only if $f\succ_w \mu(w)$ and $w\succ_f\min(\mu(f))$. In other words, worker $w$ prefers firm $f$ to her currently matched firm; and firm $f$ prefers worker $w$ to its least preferred current employee.
Instead of using the standard notion of stable matching from the matching literature, we define a generalization that accounts for the locality of information. Recall that a (global) matching is said to be {\em stable} if there are no blocking pairs. However, in our paper we assume that {\em the workers can only discover possible firms by looking at their friends' places of employment.} This informally captures a significant mechanism of information transfer: although there may exist a firm $f$ that would make $(w,f)$ a blocking pair, if none of $w$'s friends work at $f$, then it becomes much less likely that $w$ would learn of $f$ on her own. We have the following definition.
\begin{definition}[Locally Stable Matching] \label{def:localmatching} Let $G = (W,E)$ be the social network over the set of workers $W$. We say that a matching $\mu$ is a {\em locally stable matching} with respect to $G$ if, for every $w\in W$ and $f\in F$ such that $(w,f)$ is a blocking pair, we have $\Gamma(w)\cap\mu(f) = \emptyset$ (i.e., no workers in $w$'s social neighborhood are employed by firm $f$, so the blocking pair is invisible to $w$). \end{definition}
Observe that this definition severely limits the number of possible firms that can form a blocking pair with $w$. In fact, if the maximum degree in $G$ is $\Delta$, the number of such blocking pairs is at most $\Delta$, which is constant in most social network models. This may initially suggest that the number of locally stable matchings is typically quite large. However, the set of firms that are ``visible'' to a worker $w$ changes over time as other blocking pairs in the system are resolved! Also note that for a given worker $w$, the set of other workers she is competing against depends on both the social network $G$ (i.e., her neighbors), and the current matching.
\begin{example}[Indirect Competition]
Assume $k=2$ and $G$ is the path over $W = \{w_1,w_2,w_3,w_4\}$: $w_1-w_2-w_3-w_4$. Consider worker $w_4$. If $\mu(f_1) = \{w_3,w_4\}$ and $\mu(f_2) = \{w_1,w_2\}$, then $w_4$ can only see positions in $f_1$. However, since $w_2$ is adjacent to $w_3$, $w_2$ can see all positions in $f_1$. Hence, if $w_2\succ_{f_1}w_3\succ_{f_1}w_4$, $w_2$ could get $w_4$'s position in $f_1$, leading to $w_4$ being replaced by $w_2$ even though $w_2\notin \Gamma(w_4)$. \end{example}
In the remainder of the paper, we characterize static properties of locally stable matchings, and then analyze dynamics similar to the Gale-Shapley algorithm.
\section{Static Analysis} \label{sec:static}
For $k=1$, when the preferences of workers and firms are strict, it is known that the set of global stable matchings is a {\em distributive lattice}. Several results on global stable matchings rely on the existence of the lattice as it allows us to navigate through the set of global stable matchings \cite{parallel}, and thus permits optimization problems over global stable matchings to be tackled.
In general, the distributive lattice structure of the set of global stable matchings is not present in the set of locally stable matchings. We first recall the Lattice Theorem (by Conway), and then show how, in general, it does not hold for locally stable matchings. The exposition of the Lattice Theorem is that found in \cite{rothsotomayor} (Theorem 2.16).
The two operations defining the distributive lattice over global stable matchings are as follows. Let $\mu$ and $\mu'$ be two matchings. Define the operation $\vee_W$ over $(\mu,\mu')$ as follows: $\mu\vee_W \mu':\ W\cup F \rightarrow W\cup F$ such that, for all $w\in W$, $\mu\vee_W \mu' (w) = \mu(w)$ if $\mu(w) \succ_w \mu'(w)$, and $\mu\vee_W \mu' (w) = \mu'(w)$ otherwise. For all $f\in F$, $\mu\vee_W \mu' (f) = \mu'(f)$ if $\mu(f) \succ_f \mu'(f)$, and $\mu\vee_W \mu' (f) = \mu(f)$ otherwise. We can similarly define $\wedge_W$ by exchanging the roles of workers and firms.
\begin{thm}[Lattice Theorem (Conway)] \label{thm:conway}
When all preferences are strict, if $\mu$ and $\mu'$ are stable matchings, then the functions $\lambda = \mu \vee_W \mu'$ and $\nu = \mu \wedge_W \mu'$ are both matchings. Furthermore, they are both stable. \end{thm}
In general, given strict preferences of workers and firms, Theorem~\ref{thm:conway} does not hold for the set of locally stable matchings. This is the content of the following example.
\begin{example}[Absence of Distributive Lattice] \label{ex:lattice}
We assume $k=1$, $W = \{w_1,w_2,w_3\}$ and $F = \{f_1,f_2,f_3\}$. Further, let the preferences of all workers be $f_1\succ f_2\succ f_3$. Similarly, let the preferences of all firms be $w_1\succ w_2\succ w_3$. Finally, assume the graph $G$ is the path with $w_2$ and $w_3$ at its endpoints.
Let $\mu(w_i) = f_i$ (and $\mu(f_i)= w_i$). It is clear that $\mu$ is a 1-locally stable matching. Consider now $\mu'$ such that $\mu'(w_1) = f_1$, $\mu'(w_2) = f_3$ and $\mu'(w_3) = f_2$ (and $\mu'(f_1) = w_1$, $\mu'(f_2)=w_3$ and $\mu'(f_3) = w_2$). The only blocking pair here is $(w_2,f_2)$, but $f_2 = \mu'(w_3)$ and $w_3\notin \Gamma(w_2)$. Hence $\mu'$ is a 1-locally stable matching.
We now construct $\lambda = \mu\vee_W \mu'$. On the worker side, $\lambda(w_1) = f_1$ and $\lambda(w_2) = \lambda(w_3) = f_2$ (since $f_2\succ_{w_3}f_3$), while on the firm side $\lambda(f_1) = w_1$ but $\lambda(f_2) = \lambda(f_3) = w_3$. Hence $\lambda$ is not a matching. \end{example}
While very useful in resolving optimization problems over global stable matchings, it is important to note that the absence of the distributive lattice has been previously observed when the preferences are not strict. Even the introduction of ties suffices for the distributive lattice, over {\em weak} global stable matching, to be absent \cite{roth1984a}. In \cite{POstructure}, the authors prove that the distributive lattice is present when ties are allowed for {\em strong} global stable matchings, but can be absent if the indifference takes the form of an arbitrary partial order.
\paragraph{Assumption} - In the remainder of the paper we focus on a specific family of preferences over workers and firms. Under general preferences over workers and firms, the set of global stable matchings is not unique. When considering a two-sided matching market with global stable matching as its solution concept, uniqueness of global stable matching is a desirable property as it allows for sharp predictions of the outcome at equilibrium. In \cite{ClarkAR2006}, the author studies thoroughly the question and identifies a set of sufficient conditions on the preferences for the global stable matching to be unique. The set of preferences satisfying such conditions, called {\em aligned preferences}, have recently received attention in the economics literature \cite{MurielTR2009, SorensenAR2007}.
In this paper we consider a subset of aligned preferences, where all workers share the same ranking over firms, and firms share the same ranking over workers. This assumption is made for technical reasons - we believe our results extend to the case of general aligned preferences.
\begin{assumption} \label{as:preferences}
There exists a labeling of the nodes in $W = \{w_1,\dots,w_n\}$ such that all firms rank workers as follows: $w_i\succ w_j$ if and only if $i<j$. Similarly, we assume there exists a labeling of the firms $F = \{f_1,\dots,f_{n_f}\}$ such that all workers rank the firms as follows: $f_i\succ f_j$ if and only if $i<j$. \end{assumption}
We first show that, for $k=1$, the set of locally stable matchings is equivalent to the set of topological orderings over the partial order induced by $G$ and the labeling of the workers.
\begin{thm}[Characterization of Locally Stable Matchings for $k=1$] \label{thm:charac1}
Let $G(W,E)$ be the social network over the set of workers. Let $D(W,E')$ be a directed graph over $W$ such that $(w_i,w_j)\in E'$ if and only if $i<j$ and $(w_i,w_j)\in E$. Let $\mu$ be a complete matching of workers to firms. Construct the following ordering $\phi_{\mu}$ over $W$ induced by $\mu$: the $i^{\text{th}}$ node in the ordering is the node $w$ such that $\mu(w)=f_i$, i.e. $\phi_{\mu}(w)=i$.
The matching $\mu$ is a 1-locally stable matching if and only if $\phi_{\mu}$ is a topological ordering on $D$. \end{thm}
\begin{bproof}
Assume $\phi_{\mu}$ is a topological ordering on $D$. Then, for all $e=(w,w')\in E'$, $\phi_{\mu}(w)<\phi_{\mu}(w')$, which implies $\mu(w)\succ\mu(w')$. Recall that $(w,w')\in E'$ if and only if $w\succ w'$. Hence $\mu$ is locally stable.
Assume $\phi_{\mu}$ is not a topological ordering over $D$. Then there exist $(w,w')\in E'$ such that $\phi_{\mu}(w)>\phi_{\mu}(w')$. This is equivalent to $\mu(w')\succ\mu(w)$. But recall that $w\succ w'$, thus $(w,\mu(w'))$ is a blocking pair, and $(w,w')\in E$, hence $\mu$ is not locally stable. \end{bproof}
One important desirable property of the set of preferences in Assumption~\ref{as:preferences} is the uniqueness of the global stable matching---the highest $k$ workers are assigned to $f_1$, the next $k$ to $f_2$ and so on. There are several important corollaries to the characterization from Theorem~\ref{thm:charac1}. First, the set of complete locally stable matchings can be exponentially large. Thus, by introducing informational constraints, the uniqueness property of global stable matchings under aligned preferences is, in general, lost under locally stable matchings.
\begin{cor}[Number of Locally Stable Matchings] \label{cor:number}
Assume $k=1$ and $G(W,E)$ is the star centered at worker $w_1$. Then there are $(n-1)!$ distinct locally stable matchings. \end{cor}
\begin{bproof}
Note that for all $i\neq 1$, $\Gamma(w_i) = \{w_1\}$. Also $w_1\succ w_i$. Hence, in any complete locally stable matching $\mu$, $\mu(w_1) = f_1$. Once the firm $w_1$ works for is set, the rest of the matching can be completed in any way as the only firms a worker $w_i\neq w_1$ can see are $f_1$ and her own firm. There are $(n-1)!$ such matchings, which proves the result. \end{bproof}
The proof of Corollary~\ref{cor:number} assumes a specific topology for the social network $G$. It is thus interesting to ask whether there are specific properties of the social network $G$ that guarantee the existence of a labeling under which there is a unique complete locally stable matching. As shown in the next corollary, this decision problem is NP-complete.
\begin{cor} \label{cor:uniquenpc}
Let $(k,G(W,E))$ be given. It is NP-complete to test if there is a labeling $\{w_1,w_2,\dots,w_n\}$ of the workers such that, if all firms rank the workers according to that labeling, the complete locally stable matching is unique. \end{cor}
In order to prove Corollary~\ref{cor:uniquenpc}, we use Lemma~\ref{lem:topo}, a characterization of directed acyclic graphs with a unique topological ordering. Lemma~\ref{lem:topo} can be found in Appendix~\ref{ap:technicallemmas}.
The proof of Corollary~\ref{cor:uniquenpc} is then as follows.
\begin{bproof}
We prove the corollary by focusing on the case $k=1$. Since all firms use the same ranking over the set of workers, there is a unique global stable matching. Hence it suffices to show it is NP-complete to test whether there is a labeling of the workers such that there is a unique locally stable matching.
By Theorem~\ref{thm:charac1}, there is a unique locally stable matching if and only if there is a unique topological ordering of $D(W,E')$, where $D$ is a directed acyclic graph such that $(w_i,w_j)\in E'$ if and only if $i<j$ and $(w_i,w_j)\in E$. By Lemma~\ref{lem:topo}, $D$ has a unique topological ordering if and only if its longest path is of length $n-1$. When that is the case, the path is $w_1w_2\dots w_{n-1}w_n$. But a path in $D$ is also a path in $G$, hence there is a unique locally stable matching if and only if the path $w_1w_2\dots w_n$ is in $G$.
Thus, for $k=1$, a given labeling of the workers is such that all locally stable matchings are global stable matchings when firms rank workers according to that labeling if and only if there is a labeling $\{w_1,\dots,w_n\}$ of the nodes such that $w_1w_2\dots w_n$ is a path in $G$. Hence, for $k=1$, our problem is equivalent to Hamiltonian path. \end{bproof}
We note that, for general $k>1$, the proof of Corollary~\ref{cor:uniquenpc} can be adapted to show that a {\em sufficient} condition for the complete locally stable matchings to be unique is for $G$ to have a Hamiltonian path such that the top $k$ nodes are all visited first, then the next top $k$ nodes and so on.
\begin{cor} \label{cor:sufficientunique}
Let $G(W,E)$ and $k>1$ be given. Assume nodes in $W$ are labeled such that, for all firms $w_1\succ \dots \succ w_n$. For all $0\leq i< n_f$, define $W_i = \{w_{ki+1},\dots,w_{k(i+1)}\}$. If $G$ has a Hamiltonian path such that, for all $0\leq i<j<n_f$, all nodes in $W_i$ are visited before all nodes in $W_j$, then the complete locally stable matching is unique. \end{cor}
The proof is obvious if the path $w_1w_2\dots w_{n-1}w_n$ exists in $G$. To see why the result holds, it suffices to note that nodes in the same firm do not compete with each other as all positions in a given firm are equally valued by all workers.
Unfortunately, the condition from Corollary~\ref{cor:sufficientunique} is not necessary as we can see in the following example.
\begin{example} \label{ex:notnecessary}
Assume $k=2$ and that $G$ is the path $w_4-w_2-w_1-w_3$. Note that the subgraph over $W_1$ is disconnected, hence the condition from Corollary~\ref{cor:sufficientunique} does not hold. Now note that $w_1$ is adjacent to both $w_2$ and $w_3$, hence $w_1$ is employed by $f_1$ and either $w_2$ or $w_3$ is employed by $f_2$. Since $w_1$ is employed by $f_1$ and $w_1$ is adjacent to $w_2$, $w_2$ competes directly with any employee of $f_1$ other than $w_1$. Hence $w_2$ is employed by $f_1$, which proves that the complete locally stable matching is unique. \end{example}
\section{Algorithmic Questions} \label{sec:algo}
We are now interested in finding locally stable matchings. Since all global stable matchings are locally stable matchings, we could simply use the Gale-Shapley algorithm \cite{galeshapley} to find a global stable matching. However, Gale-Shapley's algorithm requires the proposing side of the matching market to traverse its preference list in descending order. Here we assume that workers propose (i.e. that workers apply for jobs). Thus, in order to run Gale-Shapley's algorithm, all workers need to {\em know} about all possible positions, even those in firms not employing any of its neighbors.
We are thus interested in decentralized algorithms that can find a locally stable matching. In this section we propose a decentralized version of Gale-Shapley's algorithm. Assumption~\ref{as:preferences} is again enforced in this section. We first prove that our algorithm converges. Unlike the case without informational constraints, our algorithm does not always select the same locally stable matching.
Recall that the Gale-Shapley algorithm is initialized by an empty matching \cite{rothsotomayor}. Since the empty matching is a locally stable matching, our algorithm requires to be initialized by a non-empty matching. We thus explore our algorithm's performance under adversarial initial complete matchings. We use the number of firms with no employees as a proxy for efficiency. We characterize the potential efficiency loss by providing upper and lower bounds on the number of firms with no employees.
\subsection{Local Gale-Shapley Algorithm}
One can interpret the Gale-Shapley algorithm from two-sided matching theory as a constrained version of {\em myopic best response dynamics} in the following way.
The dynamics proceed in rounds, which we index by $q\in\ensuremath{\mathbb{N}}$. Let $\mu^{(q)}$ be the matching at the beginning of round $q$. Let $w^{(q)}\in W$ be the {\em active} worker, where $w^{(q)}$ is sampled uniformly at random from $W$, and independently from previous rounds. We call such sampling process the {\em activation process}. Such activation process can be thought of as follows: assume all workers decide to explore employment opportunities according to a random clock with an exponential distribution with a given mean (the same mean for all workers). When the clock of $w_i$ ``sets off'', $w_i$ becomes active and looks for a better job. It is easy to see that the sequence of active nodes has the same distribution as taking independent uniform samples from $W$.
In myopic best response dynamics, $w^{(q)}$ would consider its current firm $\mu^{(q)}(w^{(q)})$ and compare it to the best firm $f$ it could be employed by given $\mu^{(q)}$ (i.e. the best firm where the worst employee was worse than $w^{(q)}$ given the matching $\mu^{(q)}$). If its current firm was better, it would pass. Else it would quit its job and get employed by $f$ (leading to a worker being fired, or an empty position being filled).
Gale-Shapley's algorithm is a constrained version of the above dynamics as it requires the active worker to consider the best firm it has not considered before (in other words it requires the active worker to remember what firms he has already failed to get a position at).
We consider a local and decentralized version of the myopic best-response dynamics proposed above. We call it ``local Gale-Shapley'' algorithm. Instead of restricting the strategy space of the active worker using ``memory'' as in Gale-Shapley's algorithm, we restrict it using the graph $G(W,E)$ in the following way: $w^{(q)}$ compares its current firm in $\mu^{(q)}$ to the best firm that employs one of its neighbors in $G$ it could be employed by given $\mu^{(q)}$. An alternative way to describe the process is that the active node $w^{(q)}$ applies for a job at all the firms employing its neighbors that she strictly prefers to her current employer, and selects the best offer she gets (that offer might eventually be to stay at her current job).
More formally, the algorithm proceeds in rounds indexed by $q\in \ensuremath{\mathbb{N}}$. During round $q\geq0$: \begin{itemize}
\item the active worker $w^{(q)}$ is sampled, independently from previous rounds, uniformly at random from $W$.
\item Next, $w^{(q)}$ applies to all firms she strictly prefers to her current employer, $\mu^{(q)}(w^{(q)})$.
\item The active worker receives some offers:
\begin{itemize}
\item if at least one offer is received, $w^{(q)}$ quits her current employer and joins the best firm that sent an offer;
\item if no offers are received, $w^{(q)}$ stays at her current job.
\end{itemize} \end{itemize}
It is important to note that, unlike the Gale-Shapley algorithm, this variant of best-response dynamics can lead to a firm losing all its employees. This is due to the assumption that positions within firms are ``visible'' to workers only through neighbors in $G$. Thus, if a firm has no employees, its job openings are not visible to any worker.
\begin{example}[Firm with no Employees] \label{ex:fob} Let $n=4$ and $k=2$. Thus there are four workers and two firms. Assume that $G=K_4$. Consider the following initial matching: \[
\mu^{(0)}(f_1) = \{w_3,w_4\}\ \text{and}\ \mu^{(0)}(f_2) = \{w_1,w_2\} \] in other words, the best company has the worst workers. Then if we activate workers $w_1$ and $w_2$ before activating $w_3$ or $w_4$, both $w_1$ and $w_2$ would quit $f_2$ and work for $f_1$, getting both $w_3$ and $w_4$ fired. In that setting, $f_2$ has no employees, and thus the process ends. \end{example}
It is also important to understand the need for the activation process. Recall that the matching found by the Gale-Shapley algorithm is independent of the order of activation of the workers \cite{rothsotomayor}. When considering locally stable matchings, this is no longer the case even if the underlying graph is the complete graph. Let us reconsider Example~\ref{ex:fob}.
\begin{example}
Now consider the resulting matching when the activation sequence is as follows: $\{w_1,w_4,w_2,w_3\}$. First, $w_1$ leaves $f_2$ and gets a position at $f_1$. This makes $w_4 = \min(\mu^{(0)}(f_1))$ unemployed. Next, since we activate $w_4$, she gets the free position from $f_2$. Next $w_2$ leaves $f_2$ and gets a position at $f_1$, which results in $w_3$ losing her job. Finally, $w_3$ gets the free position at $f_2$. Thus the resulting matching is now
\[
\mu(f_1) = \{w_1,w_2\},\ \text{and}\ \mu(f_2) = \{w_3,w_4\}
\]
which is a locally stable matching different from that obtained with the activation sequence in Example~\ref{ex:fob}. \end{example}
An important question is whether this local decentralized version of best response dynamics converges as it is not immediately clear it can't cycle. This is the content of our first result.
\begin{thm}[Convergence of Local Gale-Shapley Algorithm] \label{thm:convergence} Let $G(W,E)$ and $\mu^{(0)}$ be given. Then the local Gale-Shapley algorithm started at $\mu^{(0)}$ converges almost surely to a locally stable matching. \end{thm}
\begin{proofsketch} First note that if the algorithm converges, it does so to a locally stable matching. This is a consequence of the action taken by the active node $w^{(q)}$: it will change firms if and only if $w^{(q)}$ participates in a blocking pair of $\mu^{(q)}$. Let us now give a proof sketch of convergence.
We can assume without loss of generality that $f_1$ has at least one worker in $\mu^{(0)}$. If it doesn't, then we can relabel the firms and let $f_1$ be the best firm with at least one employee in $\mu^{(0)}$. The proof proceeds in stages. During the first stage, since all workers prefer $f_1$ to all other firms, $f_1$ will never lose any employees. Moreover, since $G$ is connected, after sufficiently many rounds, all positions in $f_1$ are filled. Note that all workers are ordered the same way in all firms. Thus the worst worker of $f_1$ is strictly improving (or, equivalently, its label is strictly decreasing), which proves, by a standard Lyapunov argument, that the set of workers employed by $f_1$ converges.
The second stage starts once the set of workers employed by $f_1$ converged. Let $f_i$ be the best firm with at least one worker after the set of workers in $f_1$ converged. Then all workers not employed by $f_1$ strictly prefer $f_i$ to all other firms that have at least one worker. Hence, by an argument similar to that establishing convergence of the set of workers employed by $f_1$, the set of workers employed by $f_i$ converges.
We can then repeat the same argument subject to the set of workers in $f_1$ and $f_i$ having converged, which proves the result. \end{proofsketch}
\subsection{Worst Case Efficiency}
An important result in stable matching theory is the characterization of the matching obtained when using the Gale-Shapley algorithm. As we showed in Example~\ref{ex:lattice}, in general, the distributive lattice over the set of locally stable matchings is absent. Hence a result similar to that of the traditional stable matching literature seems unlikely.
In this subsection we consider the following question. Given that firms can go out of business when running the local Gale-Shapley algorithm, can we measure the quality of matchings selected by the algorithm? Since we assume that workers are only aware of positions within firms employing their neighbors, we explore the previous question assuming a given initial complete matching $\mu^{(0)}$.
We consider the following setting. An adversary observes $G(W,E)$ (but not the ranking over workers used by firms) and produces a probability distribution ${\cal P}_M$ over initial matchings. The ranking of workers (possibly taken from a distribution) is then revealed, a sample from ${\cal P}_M$ is taken to produce $\mu^{(0)}$; and the local Gale-Shapley algorithm run.
To compare the efficiency of different final matchings we simply look at the total number of firms losing all of their employees and subsequently going out of business. One can easily imagine more intricate notions of efficiency; our point here is that even in this austere model, the power of the adversary is non-trivial.
\subsubsection{The power of the adversary} We first show that even without knowing the relative rankings of the individual workers, the adversary is powerful enough to force some firms to go out of business.
\begin{thm}[Lower Bound on Firms] \label{thm:lbfirms}
Let $G(W,E)$ be given. Let $\Delta$ be its maximum degree, and $M$ a maximum matching in $G$. Then there exists a probability distribution ${\cal P}_M$ over complete assignment matchings such that
\[
\ensuremath{\mathbb{E}}[N_{\text{fob}}] \geq \left\lfloor \frac{|M|}{k(2\Delta)} \right\rfloor \frac{1}{2^k k!(2\Delta-1)^k}
\]
where $N_{\text{fob}}$ is the number of firms going out of business; and the expectation is taken both over the distribution ${\cal P}_M$ and over the activation process.
Further, one can find ${\cal P}_M$ in time polynomial in $n$. \end{thm}
We provide a constructive proof of Theorem~\ref{thm:lbfirms}. First, we define a special type of matching in a graph $G$.
\begin{definition}[Independent Matching] \label{def:independentmatching}
Let $G(W,E)$ be an undirected graph. We say that $M\subseteq E$ is an {\em independent matching} if
\begin{enumerate}
\item $M$ is a matching; and
\item the vertices matched by $M$ are an independent set of the subgraph $G'(W,E\setminus M)$.
\end{enumerate} \end{definition}
Equivalently, a matching is independent if, for any two nodes $u$ and $v$ matched in $M$ (but not necessarily matched to each other), $u\in \Gamma(v)$ if and only if $(u,v)\in M$.
In order to prove Theorem~\ref{thm:lbfirms}, we next prove the following lemma.
\begin{lemma} \label{lem:independentmatching}
Let $M'$ be an independent matching of $G$. Then there is an algorithm that constructs a distribution ${\cal P}_M$ in time $O(n)$ such that
\[
\ensuremath{\mathbb{E}}[N_{\text{fob}}] \geq \left\lfloor \frac{|M'|}{k} \right\rfloor\frac{1}{2^k k!(2\Delta-1)^k}
\]
where the expectation is taken both over the distribution ${\cal P}_M$ and over the activation process. \end{lemma}
\begin{lemmaproof}
Assume $M'$ has at least $k$ edges, and let $M' = \{e_1,\dots,e_{m'}\}$.
For $\ell = 0$ to $\left\lfloor \frac{|M'|}{k} \right\rfloor-1$, consider the edges $\{e_{k\ell+1},\dots,e_{k(\ell+1)}\}$. For each edge $e_{k\ell + j}$, call $u_{k\ell + j}$ and $v_{k\ell + j}$ its endpoints. Since $M'$ is a matching, all such vertices are distinct. Consider a binary string of length $k$. Sample one such string $S = s_1\dots s_k$ uniformly at random.
For each $1\leq j \leq k$, set $\mu(u_{k\ell + j}) = f_{2\ell +1}$ and $\mu(v_{k\ell + j}) = f_{2(\ell+1)}$ if $s_j=0$. Otherwise, set $\mu(u_{k\ell +j})=f_{2(\ell+1)}$ and $\mu(v_{k\ell +j})=f_{2\ell+1}$.
For all the remaining workers, assign all of the remaining positions in the firms in any order.
Note that using the algorithm described above, all of the positions in the top $2\left\lfloor \frac{|M'|}{k} \right\rfloor$ firms are assigned to workers matched in $M'$. The following is a key fact derived from $M'$ being an independent matching:
\noindent {\bf Key fact}: Let $(u,v)\in M'$ be one of the edges within the first $k\left\lfloor \frac{|M'|}{k} \right\rfloor$ edges of $M'$. Let $f_{\max}$ be the best firm employing one of $u$ or $v$'s neighbors. Then $\mu(u)\succ f_{\max}$ and $\mu(v)\succ f_{\max}$.
The key fact follows from the nodes in $M'$ being only adjacent to nodes not in $M'$.
Without loss of generality, assume that for all $\ell$ and $j$, $u_{k\ell +j}\succ v_{k\ell +j}$. Consider now firms $f_{2\ell +1}$ and $f_{2(\ell +1)}$. Note that
\[
\ensuremath{\mathbb{P}}[\forall 1\leq j\leq k,\ \mu(v_{k\ell +j})\succ \mu(u_{k\ell +j})] = \frac{1}{2^k}
\]
Assume further without loss of generality that $v_{k\ell +j}\succ v_{k\ell +i}$ for $j>i$. Then, if we activate $u_{k\ell +1}$ before all of the other nodes in
\[
U_{k\ell +1} = \Gamma(u_{k\ell + 1})\cup \Gamma(v_{k\ell + 1})\cup\bigcup_{i=2}^k[u_{k\ell + i}\cup \Gamma(u_{k\ell + i})\cup \Gamma(v_{k\ell + i})]
\]
since $v_{k\ell +1}$ is $f_{2\ell +1}$'s worst worker, $v_{k\ell +1}$ would lose its job to $u_{k\ell +1}$. This happens with probability
\begin{align*}
\frac{1}{|U_{k\ell +1}|+1} &\geq \frac{1}{1+(2\Delta-2)+\sum_{i=2}^k|u_{k\ell + i}\cup \Gamma(u_{k\ell + i})\cup \Gamma(v_{k\ell + i})|}\\
&\geq \frac{1}{2\Delta-1+\sum_{i=2}^k (2\Delta-1)}\\
&= \frac{1}{k(2\Delta-1)}
\end{align*}
Conditioned on this event, the probability that $v_{k\ell +2}$ would lose its job to $u_{k\ell +2}$ is equal to
\[
\frac{1}{|U_{k\ell +2}|+1} \geq \frac{1}{(k-1)(2\Delta-1)}
\]
More generally, the probability that the worst $i$ workers in firm $f_{2\ell +1}$ lose their jobs to the workers they were matched to in $M'$ is at least
\[
\frac{1}{2^k}\prod_{j=1}^i\frac{1}{(k-j+1)(2\Delta-1)}
\]
and thus the probability that $f_{2(\ell+1)}$ goes out of business (because of all its workers going to work for firm $f_{2\ell+1}$) is at least
\[
\frac{1}{2^k k!(2\Delta-1)^k}.
\]
The result follows by linearity of expectation. \end{lemmaproof}
The proof of Theorem~\ref{thm:lbfirms} proceeds then as follows.
\begin{bproof}
Note that, given an independent matching, the algorithm from Lemma~\ref{lem:independentmatching} takes $O(n)$ time. However, as we will show in Lemma~\ref{lem:nphmatching}, finding the maximum independent matching is NP-hard. Here we give a simple algorithm that achieves a $2\Delta$ approximation. Let $M$ be a maximum matching in $G$. $M$ can be calculated in polynomial time. To get an independent matching, start from an edge $e$ in $M$ and remove all edges in $M$ that are adjacent to a neighbor of either endpoint of $e$. Doing so, we remove at most $2\Delta$ edges from $M$. By repeating this greedy algorithm (that takes $O(|E|)$ time) we create an independent matching $M'$ of size at least $|M|/(2\Delta)$.
The previous fact, together with Lemma~\ref{lem:independentmatching}, proves the result. \end{bproof}
Finally, note that the lower bound in Theorem~\ref{thm:lbfirms} does not use a maximum independent matching, but instead approximates one in order to achieve efficiency. This is due to the NP-hardness of finding a maximum independent matching. See Lemma~\ref{lem:nphmatching} in Appendix~\ref{ap:technicallemmas}.
An important observation is that not only does the adversary force some firms to go out of business, but he controls the identities of these firms. Thus, if we measure efficiency by the identity of the firms of the positions filled in a matching, Theorem~\ref{thm:lbfirms} provides a lower bound on the efficiency loss of the local Gale-Shapley algorithm (under adversarial initial conditions).
\subsubsection{The power of the social planner}
Given the lower bound from Theorem~\ref{thm:lbfirms} on the expected number of firms going out of business, we can ask the following question: can similar guarantees be proven if a social planner had full control over the ranking used by firms? More precisely, given $G(W,E)$, if the ranking over workers used by firms was a sample from a random variable, can the social planner guarantee, in expectation, a minimal number of firms that will not go out of business {\em regardless of the power given to the adversary}? The following theorem answers positively that question.
\begin{thm}[Upper Bound on Firms] \label{thm:ubfirms}
Let $G(W,E)$ be given. There exists a probability distribution over the ranking used by firms such that
\[
\ensuremath{\mathbb{E}}[N_{\text{fob}}] \leq n_f-\left\lceil \frac{|I|}{k} \right\rceil
\]
where $I$ is a maximum independent set of $G$ ($n_f$ is the number of firms and $k$ the number of positions at each firm) \end{thm}
\begin{bproof}
Let $I$ be a maximum independent set of $G$. Consider the following distribution over rankings used by firms. With probability one, assign the top $|I|$ workers to the nodes in $I$. Assign the rest arbitrarily.
Let $w\in I$ be given. Since $I$ is an independent set, all workers that are better ranked than $w$ are not neighbors of $w$. Thus, regardless of the initial assignment matching picked by the adversary, $w$ can lose its job to a worker $w'\succ w$ only if one of $w'$'s neighbors, say $y$, is employed by the same firm as $w$. Note that $w'\succ w$ implies $w'\in I$, and hence $y$ is not in $I$. It follows that $w\succ y$, and hence the firm employing $w$ would first fire $y$ to hire $w'$. We just proved that $w$ cannot lose its job to $w'$. \end{bproof}
Note that, just as in Theorem~\ref{thm:lbfirms} we were able to identify the firms forced out of business (the top firms) but not the unemployed workers, in Theorem~\ref{thm:ubfirms} we are able to identify the workers that are going to be employed (the top employees), but not which firms will remain in business.
\subsubsection{Discussion} We have now shown that neither the adversary, nor the social planner have all the power --- we can reinterpret the results above as a game between these two players. The game proceeds as follows, the adversary picks the initial assignment matching (possibly random), and the social planner chooses the ordering on the workers (possibly random). Once they both pick an action we run the local Gale-Shapley algorithm.
Theorem~\ref{thm:lbfirms} then states that, even if the social planner knows the probability distribution selected by the firm adversary, there is a probability distribution over initial assignments that the firm adversary can use such that, in expectation, at least some number of firms go out of business.
Theorem~\ref{thm:ubfirms} states the converse: Even if the firm adversary knows the probability distribution selected by the social planner, there is a deterministic ordering of the workers such that at least some number of workers will never lose their jobs.
We note that by looking at the number of firms going out of business, we have used a very minimal notion of efficiency. It is not hard to imagine more complex notions which may take into account the relative rankings of the firms going out of business or workers remaining unemployed. We further note that for dense graphs, where the size of the independent set, and the independent matchings are quite small, our bounds are quite loose. Our main contribution here is not the precise bound on $N_{\text{fob}}$, although that remains an interesting open question, but rather the fact that the adversary has non-trivial power, and the initial matching plays a pivotal role in determining the final outcome.
\section{Conclusions} \label{sec:conclusion} In this work we have introduced a new model for incorporating social network ties into classical stable matching theory. Specifically, we show that restricting the firms willing to consider a worker only to those employing his friends has a profound impact on the system. We defined the notion of locally stable matchings and showed that while a simple variation of the Gale-Shapley mechanism converges to a stable solution, this solution may be far from efficient; and, unlike in traditional Gale-Shapley, the initial matching plays a large role in the final outcome. In fact, if the adversary controls the initial matching, he can force some firms to be left with {\em no} workers in the final solution.
The model we propose is ripe for extensions and further analysis. To give an example, we have assumed that as employees leave the firm, it may find itself with empty slots that it cannot fill (and go out of business). However, this is precisely the time when it can start looking actively for workers, by advertising online, recruiting through headhunters, etc. This has the effect of it becoming visible to the unemployed workers in the system. Understanding the dynamics and inefficiencies of final matchings under this scenario is one interesting open question.
\subsection*{Acknowledgments} We would first like to thank Ramesh Johari for many discussions on the model studied and its static analysis. We also want to thank David Liben-Nowell for the discussions that lead to the proofs of Theorems 7 and 8. Finally, we would like to thank Ravi Kumar and Matt Jackson for many useful discussions.\\ This research was supported by the National Science Foundation and by the Defense Advanced Research Projects Agency under the ITMANET program.
\section*{Appendix}
\appendix
\section{General Model} \label{ap:general}
In this appendix we consider a general framework to include graph constraints in two-sided matching markets with stable matching as solution concept. For ease of exposition we focus on the stable marriage problem, and assume agents have ordinal preferences. In this appendix Assumption~\ref{as:preferences} is not assumed to hold true. \\ More formally, we assume $n$ men $M$ and $n$ women $W$ participate in a social network $G(M\cup W,E)$, where $uv\in E$ represents a social connection between $u$ and $v$. For each man $m$ (and woman $w$), there is a ranking $\succ_m$ (respectively $\succ_w$) over all women $W$ (resp. men $M$) such that $w\succ_m w'$ indicates that $m$ prefers being matched to $w$ than to $w'$.
The definition of matching is carried over from case 1 in Definition~\ref{def:matching} where we replace $F$ by $M$. The definition of locally stable matching is a straight forward generalization identical to that in Definition~\ref{def:localmatching}. For a given subset $S\subseteq M\cup W$ and a matching $\mu$, we call $\mu(S)\subseteq M\cup W$ the subset of men and women matched in $\mu$ to a men or women in $S$ (i.e. $x\in \mu(S)$ if and only if $\mu(x) \in S$ and $x\neq \mu(x)$).
\begin{definition}[Locally Stable Matching] \label{def:localmatchinggeneral} Let $G =(M\cup W,E)$ be the social network over the set of men $M$ and women $W$. We say that a matching $\mu$ is a {\em locally stable matching} with respect to $G$ if, for all $w\in W$ and $m\in M$ such that $(w,m)$ is a blocking pair, we have $m\notin\Gamma(w)\cup\mu(\Gamma(w))$ and $w\notin \Gamma(m)\cup\mu(\Gamma(m))$ (i.e., $m$ (resp. $w$) is not directly connected to $w$ (resp. $m$) or matched to a neighbor of $w$ (resp. neighbor of $m$)). \end{definition}
We can now see that the case of the job market (with one position per firm) corresponds to the stable marriage problem where the social network is restricted to have edges between nodes in one side of the market only. Notice that in Section~\ref{sec:static}, we focused on complete matchings $\mu$. The reason was simple: the lack of edges between the two sides of the market made the empty matching a locally stable matching. Note that in this general framework, the empty matching is not de-facto locally stable: if $(m,w)\in E$ and $m$ (resp. $w$) prefers being matched to $w$ (resp. $m$) than being unmatched, then the empty matching is not locally stable.
We now provide the main result of this appendix. We give necessary and sufficient conditions on $G$ under which the set of locally stable matchings coincides with that of global stable matchings {\em for all preferences}. This result is relevant for online social networks: the social network is observable, and membership of a node to either side of the market is available; preferences are hard if at all possible to infer.
\begin{thm} \label{thm:gsmlsm}
Let $G(M\cup W,E)$ be given. Given preferences $\succ = (\succ_x;\ x\in M\cup W)$, we denote by ${\cal M}_{\succ}$ the set of global stable matchings with respect to $\succ$. Similarly, we denote by ${\cal M}_{\succ}(G)$ the set of locally stable matchings with respect to $\succ$ and $G$.
For all $\succ$, ${\cal M}_{\succ}(G) = {\cal M}_{\succ}$ if and only if $K_{M,W}\subseteq G$, where $K_{M,W}$ is the complete bipartite graph with $M$ and $W$ as the partitions. \end{thm}
\begin{bproof}
From Definition~\ref{def:localmatchinggeneral} it is easy to see that $K_{M,W}\subseteq G$ implies that all locally stable matchings are global stable matchings as, for all $m$ (resp. $w$), $W \subseteq\Gamma(m)$ (resp. $M \subseteq\Gamma(w)$). We prove the other direction by contraposition.
Assume $K_{M,W}$ is not a subgraph of $G$, i.e. there exists $m_0\in M$ and $w_0\in W$ such that $(m_0,w_0)\notin G$. We now construct a set of preferences $\succ$ such that there exist $\mu\in{\cal M}_{\succ}(G)$ and $\mu\notin{\cal M}_{\succ}$. Consider a labeling of the set of men and women such that $M = \{m_i;\ 0\leq i<n\}$ and $W = \{w_i;\ 0\leq i<n\}$. We assume all men (resp. all women) share the same preferences $\succ_M$ (resp. $\succ_W$): $w_i \succ_M w_j$ if and only if $i>j$ (resp. $m_i \succ_W m_j$ if and only if $i>j$).
Under such preferences, the only global stable matching $\mu$ is such that, for all $0\leq i<n$, $\mu(w_i) = m_i$. Consider now the matching $\mu'$: for all $0<i<n$, $\mu'(w_i)= m_i$ and $\mu'(m_0) = m_0$ and $\mu'(w_0) = w_0$. In other words, $\mu$ and $\mu'$ coincide over $M\cup W \setminus \{m_0,w_0\}$ and leave $m_0$ and $w_0$ unmatched.
To see that $\mu'$ is in ${\cal M}_{\succ}(G)$, it suffices to prove that neither $m_0$, nor $w_0$ are in a blocking pair. First, note that all men (resp. women) other than $m_0$ (resp. $w_0$) prefer their match in $\mu'$ to $w_0$ (resp. $m_0$). Thus the only possible blocking pair $m_0$ could participate in is $(m_0,w_0)$. But $w_0\notin \Gamma(m_0)\cup\mu'(\Gamma(m_0))$, hence $(m_0,w_0)$ is not a blocking pair for $\mu'$ in $G$, which proves the result. \end{bproof}
\section{Technical Lemmas} \label{ap:technicallemmas}
In this appendix we state and prove two technical lemmas used in the paper. Lemma~\ref{lem:topo} is a necessary and sufficient condition for a directed acyclic graph to have a unique topological ordering. Lemma~\ref{lem:nphmatching} is a complexity result regarding finding maximum independent matchings.
\begin{lemma}[Topological Orderings] \label{lem:topo}
Let $D(V,E)$ be a directed acyclic graph over $n>1$ nodes. $D$ has a unique topological ordering if and only if the longest path in $D$ has length $n-1$. \end{lemma}
\begin{lemmaproof}
By induction on $n$. Base case: $n=2$. If $D$'s longest path is of length strictly less than $n-1 = 1$, then $D$ is the empty graph and any ordering of the nodes is a topological ordering. Since $n=2$, there are $2!=2$ such orderings. If $D$'s longest path is of length $n-1=1$, then $D$ has an edge between its two nodes, and the topological ordering is unique.
Assume the inductive hypothesis holds for $n>1$. Assume $|V|=n+1$. We consider two cases.\\
Case 1: there is a unique node $v$ in $D$ with indegree of zero. Then there is a path from $v$ to any other node in $V$. Hence $v$ is the first node in any topological ordering of $D$. Further, in any such $D$, all longest paths start from $v$. Hence the length of the longest path in $D$ is equal to 1 plus the length of the longest path in $D_v$, where $D_v$ is the subgraph of $D$ on $V\setminus\{v\}$. Thus $D$'s longest path is of length $n+1-1$ if and only if $D_v$'s longest path is of length $n-1$. Also, any topological ordering of $D$ consists of $v$ being the first node, and any topological ordering on $D_v$. Hence $D$ has a unique topological ordering if and only if $D_v$ has a unique topological ordering. Note that $D_v$ has $n+1-1=n$ nodes, hence we apply the induction hypothesis.
Case 2: there are at least two nodes $u\neq v$ with indegree zero. Then, since there is no path from $u$ to $v$, the length of the longest path is at most $(n+1)-1-1 < n$. We now construct two distinct topological orderings. Since $v$ has indegree zero, we choose $v$ to be the first node. As in the previous case, we can build a topological ordering on $D$ by appending a topological ordering on $D_v$, the subgraph of $D$ over $V\setminus \{v\}$. In any such topological ordering, $v$ comes before $u$. Similarly, since $u$ has indegree zero, we can choose $u$ to be the first node, and complete the topological ordering over $D$ by appending a topological ordering over $D_u$, the subgraph of $D$ over $V\setminus\{u\}$. In any such topological ordering, $u$ comes before $v$. Hence there are at least 2 topological orderings of $D$. \end{lemmaproof}
\begin{lemma}[NP-Hardness of Maximum Independent Matching] \label{lem:nphmatching}
Let $G(V,E)$ be given. It is NP-Hard to calculate the size of the maximum independent matching in $G$. \end{lemma}
\begin{lemmaproof}
By reduction from maximum independent set. Let $G(V,E)$ be given. We know it is NP-hard to find the size of its maximum independent set. Consider the graph $G'(Z,E')$ over $Z = V\cup W$ where $W$ is a distinct set of nodes such that $|W|=|V|$. We label the nodes both in $V$ and $W$ such that $V=\{v_1,\dots,v_n\}$ and $W=\{w_1,\dots,w_n\}$. The set of edges $E'$ is as follows. For each edge $(v_i,v_j)\in E$, we create four edges in $E'$: $(v_i,v_j)$, $(v_i,w_j)$, $(w_i,w_j)$ and $(w_i,v_j)$. Hence $|Z| = 2|V|$ and $|E'| = 4|E|$.
We now prove that the size of the maximum independent matching in $G'$ is the same as the size of the maximum independent set in $G$. Let ${\cal I}(G)$ and ${\cal I}(G')$ denote the size of the maximum independent set of $G$ and $G'$ respectively. Note that $G$ is a subgraph of $G'$, hence ${\cal I}(G)\leq {\cal I}(G')$. Let $I,J\subseteq \{1,\dots,n\}$ such that $\{v_i,w_j|\ i\in I,\ j\in J\}$ is a maximum independent set of $G'$. Since for all $i$, $(v_i,w_i)\in E'$, it follows that $I\cap J = \emptyset$. Also, $\Gamma_{G'}(v_i)\cup \{v_i\} = \Gamma_{G'}(w_i)\cup \{w_i\}$. Hence $\{v_i|\ i\in I\cup J\}$ is also an independent set of $G'$. This proves ${\cal I}(G)={\cal I}(G')$.
Now let ${\cal MI}(G')$ be the size of the maximum independent matching in $G'$. It is clear that ${\cal MI}(G')\leq {\cal I}(G')$ as selecting only one of the endpoints of the edges in any independent matching creates an independent set. Let again $\{v_i|\ i\in I\}$ be a maximum independent set in $G'$ (that we assumed without loss of generality to be composed of nodes only in $V$). Since for any $J\subseteq \{1,\dots,n\}$, $\{(v_j,w_j)|\ j\in J\}$ is a matching in $G'$, it follows that $\{(v_i,w_i)|\ i\in I\}$ is an independent matching in $G'$ of size $|I| = {\cal I}(G')$. Hence ${\cal MI}(G') = {\cal I}(G')$. But ${\cal I}(G') = {\cal I}(G)$, which yields ${\cal MI}(G') = {\cal I}(G)$.
Since we can construct $G'$ in linear time, finding the size of the maximum independent matching is as hard as finding the size of the maximum independent set. \end{lemmaproof}
\end{document} | arXiv | {
"id": "0910.0916.tex",
"language_detection_score": 0.8917657136917114,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\baselineskip6.5mm
\begin{abstract} We obtain an explicit uniform upper bound for the derivative of a conformal mapping of the unit disk onto a convex domain. This estimate depends only on the outer and inner radii of the domain, and on the minimum curvature radius of its boundary. Its proof is based on a M\"obius invariant metric of hyperbolic type, introduced by Kulkarni and Pinkall in 1994. \end{abstract}
\maketitle
\section{Introduction}\label{sec:intro}
Estimates for the derivatives of conformal maps, collectively known as distortion theorems, have historically been given on compact subsets of the domain~\cite[\S2.3]{Duren-book}. Uniform bounds for the derivative have received less attention. The classical theorem of Kellogg~\cite[Theorem~3.5]{Pommerenke-book} states that the derivative of a conformal map $f$ between $C^{1,\alpha}$ domains is uniformly bounded, but does not provide an explicit upper bound. Such an upper bound was recently obtained in~\cite{Kovalev2017} and was found to be useful both in the spectral theory of PDEs with applications to quantum physics~\cite{Lotoreichik}, and as a global upper bound on the integral Hardy norm of $f'$ in fluid dynamics~\cite{Hassainia}.
This paper improves the uniform upper bound on $|f'|$ from~\cite{Kovalev2017}. Our main result is stated below.
\begin{theorem*} Let $\Omega$ satisfy the $(R_O,R_I,R_C)$ condition in Definition~\ref{RadiiDefinition}. Then for any conformal map $f\colon \mathbb{D}\xrightarrow{\textnormal{onto}}\Omega$ fixing $0$ we have \[
\|f'\|_{H^\infty} \leqslant R_Ce^{2F(R_O,R_I,R_C)} \] where $F(R_O,R_I,R_C)$ is as in Theorem~\ref{DistanceBounds}. Equality is attained whenever $\Omega$ is a disk. \end{theorem*}
For comparison, the bound in~\cite{Kovalev2017} is sharp only for disks centered at $0$, that is, only when $f$ is a linear function. The main tool we use to estimate $f'$ is the Kulkarni-Pinkall metric~\cite{KulkarniPinkall}, which is defined in Section~\ref{sec:prelim}. A precise estimate for this metric is derived in Section~\ref{sec:hyperbolic}. Section~\ref{sec:derivative} contains the proof of the main result. The paper concludes with examples in Section~\ref{sec:examples}.
\section{Definitions and preliminary results}\label{sec:prelim}
Throughout this section let $\Omega\subsetneq \mathbb C$ be a simply connected domain. The \emph{hyperbolic metric}~\cite[\S4.6]{Pommerenke-book} of $\Omega$ is conformally invariant and has constant Gaussian curvature $-4$. The hyperbolic distance between $z,w\in \Omega$ is denoted $h_{\Omega}(z,w)$, and the density at $z$ by $\lambda_{\Omega}(z)$. When $\Omega$ is a disk of radius $r$ and $z$ is a point at distance $d$ from its center, we have \begin{equation}\label{hyp-density-in-disk}
\lambda_{\Omega}(z) = \frac{r}{r^2-d^2}. \end{equation} For more general $\Omega$ however, explicit formulas for $\lambda_{\Omega}$ or $h_{\Omega}$ are tied to explicit conformal maps between $\Omega$ and the unit disk---in most cases neither exist. For this reason alternative metrics are used as approximations to the hyperbolic~\cite{HKMbook}. The most common is the \emph{quasihyperbolic metric} with density $1/\text{dist}(z, \partial\Omega)$. Quasihyperbolic distance, usually denoted $k(z,w)$, employs the surprisingly effective simplification of substituting inverse distance to the boundary for the hyperbolic density. Using our $-4$ curvature convention, we have the well known comparison $\frac14 k\leqslant h \leqslant k$. The quasihyperbolic metric was used to attain the bound in~\cite{Kovalev2017}. We will improve this bound by using a more refined metric in its place.
\begin{definition}\label{def:KPmetric} Distance in the \emph{Kulkarni-Pinkall (KP) metric} between $z,w\in \Omega$ is denoted by $\texttt{KP}_{\Omega}(z,w)$, the density by $\mu_{\Omega}(z)$. If we take $\Delta$ to be the set of all disks $D$ such that $z\in D\subset \Omega$, then \begin{equation}\label{inf-def-mu} \mu_{\Omega}(z)\eqtext{def}\inf_{D\in\Delta}\lambda_D(z). \end{equation} \end{definition}
The KP density is comparable to the hyperbolic density~\cite[\S3]{HMM2005}, \begin{equation}\label{KPcomparison} \frac12 \mu \leqslant \lambda \leqslant \mu, \end{equation} again shown with the $-4$ curvature convention. Note that the KP metric gives a better approximation to the hyperbolic metric than the quasihyperbolic does.
The KP metric was introduced by Kulkarni and Pinkall in a 1994 article~\cite{KulkarniPinkall} with an emphasis on its M\"obius invariance. In different ways the KP and quasihyperbolic metrics both take advantage of the fact that the hyperbolic metric is monotone with respect to domain, that is, if $\Omega_1\subset \Omega_2 \subset\mathbb{C}$ are simply connected domains then \begin{equation}\label{eq:HyperbolicDomainMonotonic} \forall a,b\in \Omega_1,\quad h_{\Omega_1}(a,b)\geqslant h_{\Omega_2}(a,b). \end{equation} This can be seen as a consequence of the Schwarz-Pick lemma.
By~\cite[Theorem 3.5]{HMM2003} for each point $z$ in a simply connected domain $\Omega\subsetneq\mathbb C$, there exists a unique disk that attains the infimum in~\eqref{inf-def-mu}, referred to as the \textit{extremal disk} (this disk is understood in the sense of the Riemann sphere when $\Omega$ is unbounded). The extremal disk is determined by a subtle trade-off between the size of the disk and the proximity of its center to $z$.
\begin{lemma}\label{lemma-extremal-disk}~\cite[\S2.3]{HMM2005} For a simply connected domain $\Omega\subsetneq\mathbb C$ and a point $z\in \Omega$, the $\texttt{KP}_\Omega$~extremal disk for $z$ is the unique disk $D$ satisfying the condition that $z$ lies in the closure of the convex hull of $\partial D\cap \partial \Omega$ with respect to the hyperbolic metric on $D$. \end{lemma}
\begin{remark}\label{rem:ExtremalDiskDomainMonotone} Suppose $\Omega_1\subset\Omega_2$ are domains and $D$ is the $\texttt{KP}_{\Omega_2}$~extremal disk for some $z\in\Omega_2$. If $D\subset\Omega_1$, then $D$ is also the $\texttt{KP}_{\Omega_1}$~extremal disk for $z$. \end{remark}
\begin{example}\label{ex:ExtremalDiskInStrip}
The $\texttt{KP}_H$~extremal disk at $x\in\mathbb{R}$ for the infinite strip $H \eqtext{def} \{|\text{Im } z| <1\}$ is $D \eqtext{def} \{z\colon|x-z|<1\}$ with $\partial D\cap\partial H = \{x\pm i\}$. Here $(x-i,x+i)$ is a hyperbolic geodesic in $D$ and is the hyperbolic convex hull of $\partial D\cap\partial H$ in $D$. \end{example}
\begin{example}\label{ex:ExtremalDiskInSector}
Fix $\theta\in(0,\pi/2)$. The $\texttt{KP}_S$~extremal disk at $x>0$ for the sector $S\eqtext{def}\{|\text{Arg } z|<\theta\}$ is the disk $D$ with $\partial D\cap\partial S=\{xe^{\pm i\theta}\}$ and $A\eqtext{def}\{xe^{it}:|t|<\theta\}\subset D$. Here $A$ is a hyperbolic geodesic in $D$ and is the hyperbolic convex hull of $\partial D\cap\partial S$ in~$D$. \end{example}
\begin{example}\label{ex:ExtremalDiskInDisk} The $\texttt{KP}$~extremal disk for every point in a domain $D$ that is itself a disk is $D$, because the convex hull of $\partial D$ with respect to the hyperbolic metric on $D$ is all of $D$. \end{example}
\begin{example}\label{ex:HullOfCircularArc} Suppose a domain $S$ contains a disk $D$ such that $\Gamma\eqtext{def}\partial D\cap\partial S$ is a circular arc. Then the convex hull of $\Gamma$ in the hyperbolic metric on $D$ is the portion of $D$ bounded by $\Gamma$ and a circle orthogonal to $\Gamma$ at both of its endpoints. Here we use circle in the sense of the Riemann sphere so that if $\Gamma$ is a semicircle, the orthogonal circle is a line. \end{example}
Remark~\ref{rem:ExtremalDiskDomainMonotone} and the subsequent examples are included to clarify the extremal disks on the segment between centers of a stadium as described in Definition~\ref{definition-stadium}, and in the proof of Lemma~\ref{KPdistance}. Throughout the paper $\mathbb{D}$ is the unit disk and $D(a,r)\eqtext{def} \{z\colon |a-z|<r\}$.
\begin{definition}\label{RadiiDefinition} Suppose $\Omega$ is a simply connected \emph{convex} domain that contains~$0$ and has $C^{1,1}$-smooth boundary. We say that such a domain satisfies the $(R_O, R_I, R_C)$ condition if: \begin{itemize}
\item $R_O$, $R_I$, $R_C$ are all positive,
\item $R_O$ is the minimal $r$ such that $\Omega\subset D(0,r)$,
\item $R_I$ is the maximal $r$ such that $D(0,r)\subset\Omega$,
\item $\Omega$ can be expressed as a union of open disks of radius~$R_C$. \end{itemize} \end{definition}
The subscripts in Definition~\ref{RadiiDefinition} serve to indicate that $R_O$ is the \emph{outer radius}, $R_I$ the \emph{inner radius}, and $R_C$ a \emph{curvature radius}. The following remark clarifies the geometric meaning of $R_C$.
\begin{remark}\label{rem:CurvatureRadius} Suppose $\Omega$ and $R_C$ are as in Definition~\ref{RadiiDefinition}. Then for every $w\in \partial \Omega$ there exists $a\in\Omega$ such that $D(a, R_C)\subset \Omega$ and $w\in \partial D(a, R_C)$.
Indeed, one can take a sequence $z_n\to w$ of points $z_n\in \Omega$ and cover each $z_n$ with a disk $D(a_n, R_C)\subset \Omega $. The sequence $\{a_n\}$ has a convergent subsequence $\{a_{n_k}\}$. Let $a$ be its limit. Clearly $D(a, R_C)\subset \Omega$, which implies $|a-w|\geqslant R_C$. On the other hand, $|a-w|=\lim_{k\to\infty} |a_{n_k}-z_{n_k}|\leqslant R_C$. \end{remark}
We denote and define the Hardy norm of a holomorphic function $f$ on $\mathbb{D}$ by \[
\|f\|_{H^p} \eqtext{def} \sup_{0<r<1}\left(\int_0^{2\pi} |f(re^{i\theta})|^p \frac{d\theta}{2\pi} \right)^{1/p}, \quad \|f\|_{H^\infty} = \sup_{\mathbb{D}}|f|. \] Using the KP metric we will improve the derivative bound from~\cite{Kovalev2017}, which relied on the quasihyperbolic metric and can be stated as \begin{equation}\label{old-bound}
\|f'\|_{H^\infty} \leqslant R_C\exp\{2(R_O-R_C)\Phi(R_I,R_C)\} \end{equation} where \[ \Phi(a,b)\eqtext{def}\left\{\begin{array}{cc} \frac{\log a - \log b}{a-b}, &\text{ if } a\ne b\\\frac{1}{a}, &\text{ if } a=b.\end{array}\right. \] according to~\cite[Proposition~19]{Lotoreichik}. Our improved bound (Theorem~\ref{Main}) is sharp in a wider class of convex domains than~\eqref{old-bound}.
\section{Estimates for the hyperbolic metric in convex domains}\label{sec:hyperbolic}
We introduce a class of convex domains which are convenient for estimating the hyperbolic metric.
\begin{definition}\label{definition-stadium} A \emph{stadium} is the convex hull of the union of two open disks in the plane. It is denoted $\mathcal{S}(r_1,r_2,d)$ where $r_1$ and $r_2$ are the radii of the two disks and $d$ is the distance between their centers. \end{definition}
The notation $\mathcal{S}(r_1,r_2,d)$ in Definition~\ref{definition-stadium} omits the centers of the disks that form the stadium since they are usually irrelevant to the hyperbolic geometry of the domain. The centers will be given in context when relevant.
\begin{lemma}\label{lemma-stadium boundary smooth} The boundary of a stadium is $C^{1,1}$-smooth. That is, the unit speed parameterization of its boundary has Lipschitz continuous derivative. \end{lemma}
\begin{proof} The boundary of a stadium $\mathcal{S}(r_1,r_2,d)$ consists of circular arcs, possibly joined by tangent line segments. If we take $w$ to be a unit speed parameterization of the boundary and $r=\min(r_1, r_2)$, then the inequality \begin{equation}\label{eq-lip-deriv}
|w'(t_1)-w'(t_2)|\leqslant\frac{1}{r}|t_1-t_2| \end{equation} holds on each of two circular arcs. It also holds on linear segments where $w'$ is constant. It now follows that $w'$ is Lipschitz continuous. \end{proof}
\begin{definition}\label{def:infinite-sector}
Let $D_{r_1}$ and $D_{r_2}$ denote the two open disks from Definition~\ref{definition-stadium}. If $d>|r_2-r_1|$, then the boundary of the stadium $\mathcal{S}\eqtext{def} \mathcal{S}(r_1,r_2,d)$ is composed of two circular arcs and two congruent line segments. These segments can be extended to circumscribe an \emph{infinite sector} $\widehat{\mathcal{S}}$ around $\mathcal{S}$. The opening of the sector is $2\theta$ where $\theta\eqtext{def}\arcsin\frac{|r_2-r_1|}{d}$. When working with a sector $\widehat{\mathcal{S}}(r_1,r_2,d)$ it is useful to assume that after a rigid motion, $\widehat{\mathcal{S}}=\{z\colon |\text{Arg } z|<\theta\}$. \end{definition}
\begin{lemma}\label{KPdistance} Given a stadium $\mathcal{S}(R,r,d)$ where $r\leqslant R$, let $\theta=\arcsin\frac{R-r}{d}$ if $d\geqslant R-r$, and $\theta=\pi/2$ otherwise. The $\texttt{KP}_{\mathcal{S}}$ distance between the centers of $D_R$ and $D_r$ is given by \begin{subequations} \begin{alignat}{2} &\frac{d}{r}
&&\text{ if } r=R \label{eq:KPdistanceA}\\ &\text{and when $r<R$}&& \nonumber\\ &\frac12\log\frac{R+d}{R-d}
&&\text{ if } d\leqslant R\tan(\theta/2) \label{eq:KPdistanceB}\\ &\frac12\left[\cot\frac{\theta}{2}\log\left(\frac{R}{r}\cos\theta\right)+\log\frac{1+\tan(\theta/2)}{1-\tan(\theta/2)}\right] &&\text{ if } d> R\tan(\theta/2). \label{eq:KPdistanceC} \end{alignat} \end{subequations} \end{lemma}
Observe that whenever~\eqref{eq:KPdistanceC} applies we have $\tan(\theta/2)\in (0,1)$.
\begin{proof} Throughout this proof we refer to the disks $D_r$ and $D_R$ as well as their centers; these are the disks from Definition~\ref{definition-stadium}.
If $r=R$, then $\mathcal{S}$ is contained in an infinite strip of width $2r$. By Remark~\ref{rem:ExtremalDiskDomainMonotone} and in light of Example~\ref{ex:ExtremalDiskInStrip}, at every point $z$ along the segment connecting the centers the extremal disk is $D(z,r)$. Then $\mu_\mathcal{S}(z)=\lambda_{D(0,r)}(0)$, from~\eqref{hyp-density-in-disk} the density is $1/r$, and integrating this along a segment of length $d$ yields the result in~\eqref{eq:KPdistanceA}.
Next assume $d\leqslant R-r$; then $\theta=\pi/2$. In this case we clearly have $d<R \tan(\theta/2)$, and furthermore $D_r\subset D_R$ so $\mathcal{S}=D_R$. Like Example~\ref{ex:ExtremalDiskInDisk}, for every point in $\mathcal{S}$ the $\texttt{KP}_{\mathcal{S}}$~extremal disk will be $D_R$. Thus the KP distance between the centers is the KP length of a radial segment of length $d$, with the center of $D_R$ as one endpoint. This distance is equivalent to $h_{D(0,R)}(0,d)$. Evaluating this by integrating~\eqref{hyp-density-in-disk} gives the formula in~\eqref{eq:KPdistanceB}.
Now assume that $R-r<d\leqslant R\tan(\theta/2)$. We will show that the segment connecting the centers is contained in the convex hull of $\partial D_R\cap\partial\mathcal{S}$ in the hyperbolic metric on $D_R$, and thus $D_R$ is the extremal disk along the whole segment. Let $\widehat{\mathcal{S}}=\{ |\text{Arg } z|<\theta\}$ as in Definition~\ref{def:infinite-sector}. Then it is easily verified that $\partial D_R\cap\partial\widehat{\mathcal{S}}=\{Re^{\pm i\theta}\cot\theta\}$ and $\partial D_R\cap\partial\mathcal{S}$ has endpoints $\{Re^{\pm i\theta}\cot\theta\}$. The convex hull of $\partial D_R\cap\partial\mathcal{S}$ in the hyperbolic metric on $D_R$ is the portion of $D_R$ bounded by $\partial D_R\cap\partial\mathcal{S}$ and $D(0,R\cot\theta)\cap\{|\text{Arg } z|<\theta\}$ (see Example~\ref{ex:HullOfCircularArc}). The distance from the center of $D_R$, located at $R\csc\theta$, to the boundary of $D(0,R\cot\theta)$ along the real axis is $R\csc\theta-R\cot\theta=R\tan(\theta/2)$. Then because $d\leqslant R\tan(\theta/2)$, the segment is contained in the convex hull, $D_R$ is the $\texttt{KP}_{\mathcal{S}}$~extremal disk along the segment, and the center of $D_R$ is one endpoint of the segment. The $\texttt{KP}_{\mathcal{S}}$ length can again be calculated as $h_{D(0,R)}(0,d)$. This completes the result in~\eqref{eq:KPdistanceB}.
Finally, assume $d>R\tan(\theta/2)$, and thus the segment connecting the centers extends beyond the convex hull of $\partial D_R\cap\partial\mathcal{S}$. We will divide the segment into a \emph{proximal segment} $[r\csc\theta,R\cot\theta]$ and a \emph{distal segment} $[R\cot\theta,R\csc\theta]$, where proximal and distal indicate relative position with respect to the vertex at 0. The distal segment will have as its extremal disk $D_R$, and as before we calculate the length as
\begin{equation} \texttt{KP}_\mathcal{S}(R\cot\theta,R\csc\theta)=\int_0^{R(\csc\theta-\cot\theta)} \lambda_{D(0,R)}(z)\,dz =\frac12\log\frac{1+\tan(\theta/2)}{1-\tan(\theta/2)}. \end{equation}
For the proximal segment we rely on work done by Herron, Ma, and Minda~\cite[p. 331]{HMM2005}. They produced a formula for the KP metric density at any point in an infinite sector. After adjusting the notation, the curvature convention, and taking advantage of the simplification that our segment is along the central axis, the formula is
\begin{equation}\label{HMMSectorFormula} \mu_{\widehat{\mathcal{S}}}(z)=\frac{1}{2z}\cot(\theta/2). \end{equation}
We need to show that the $\texttt{KP}_{\widehat{\mathcal{S}}}$~extremal disk for every point on the proximal segment is contained in $\mathcal{S}$. Then, by Remark~\ref{rem:ExtremalDiskDomainMonotone} it will also be the extremal disk in $\mathcal{S}$. This will justify using the infinite sector formula in~\eqref{HMMSectorFormula} to give the KP density in our stadium $\mathcal{S}$. It will suffice to show that the extremal disks for the two endpoints of the proximal segment are contained in $\mathcal{S}$.
The proximal endpoint of the proximal segment is $r\csc\theta$; constructing the extremal disk in $\widehat{\mathcal{S}}$ for $r\csc\theta$ gives a disk tangent to $\partial\widehat{\mathcal{S}}$ at $\{re^{\pm i\theta}\csc\theta\}$. The disk $D_r$ is tangent to $\partial\widehat{\mathcal{S}}$ at $\{re^{\pm i \theta}\cot\theta\}$, and because $\cot\theta<\csc\theta$ in $(0,\pi)$ the $\texttt{KP}_{\widehat{\mathcal{S}}}$~extremal disk for the endpoint $r\csc\theta$ is far enough from the vertex to be contained in $\mathcal{S}$. The other endpoint is $R\cot\theta$, and we have already seen that for every $z\in\mathcal{S}$ with $|z|\geqslant R\cot\theta$, the $\texttt{KP}_\mathcal{S}$~extremal disk is $D_R$.
We now calculate the KP length of the proximal segment in $\mathcal{S}$ by integrating the density given in~\eqref{HMMSectorFormula}: \begin{equation} \int_{r\csc\theta}^{R\cot\theta} \frac{1}{2z}\cot(\theta/2)\,dz = \frac12 \cot(\theta/2)\log\left(\frac{R}{r}\cos\theta\right). \end{equation} Combining the proximal and distal lengths completes the proof, \begin{equation} \texttt{KP}_\mathcal{S}(r\csc\theta,R\csc\theta)= \frac12 \left[ \cot(\theta/2)\log\left(\frac{R}{r}\cos\theta\right)+\log\frac{1+\tan(\theta/2)}{1-\tan(\theta/2)} \right]. \qedhere \end{equation} \end{proof}
\begin{lemma}\label{lem:DistanceMonotone} Let $h(r_1,r_2,d)$ denote the hyperbolic distance between the centers in a stadium $\mathcal{S}(r_1,r_2,d)$. Then $h(r_1,r_2,d)$ is an increasing function in $d$. \end{lemma}
\begin{proof} Fix $r_1$ and $r_2$. Let $d_1<d_2$ and define $\lambda=d_2/d_1$. Dilating $\mathcal{S}(r_1,r_2,d_1)$ by a factor of $\lambda$ and observing conformal invariance of the hyperbolic metric we have \begin{equation}\label{eqn:DistanceMonotone-one} h(r_1,r_2,d_1)=h(\lambda r_1,\lambda r_2, d_2). \end{equation} Now consider $\mathcal{S}(r_1,r_2,d_2)$ and $\mathcal{S}(\lambda r_1,\lambda r_2, d_2)$. After a rigid motion, the segments connecting the centers of two stadia are coincident and $\mathcal{S}(r_1,r_2,d_2)\subset\mathcal{S}(\lambda r_1,\lambda r_2, d_2)$. Then by the monotonicity of the hyperbolic metric~\eqref{eq:HyperbolicDomainMonotonic} we have \begin{equation}\label{eqn:DistanceMonotone-two} h(\lambda r_1,\lambda r_2,d_2)\leqslant h(r_1,r_2,d_2), \end{equation} and combining~\eqref{eqn:DistanceMonotone-one} and~\eqref{eqn:DistanceMonotone-two} gives the result. \end{proof}
\begin{lemma}\label{ArcsineDefined}
Let $\Omega$ satisfy the $(R_O,R_I,R_C)$ condition in Definition~\ref{RadiiDefinition}. Then \[|R_I-R_C|\leqslant R_O-R_C.\] \end{lemma} \begin{proof} If $R_C \leqslant R_I$, then we are trying to show $R_I - R_C \leqslant R_O - R_C$. It is clear from the definitions that $R_I\leqslant R_O$, so the inequality is verified in this case.
Now assume $R_I < R_C$; we want to show $R_C-R_I \leqslant R_O-R_C$. There exists a point $\xi \in \partial\Omega$ such that $|\xi|=R_I$. The smoothness of $\partial\Omega$ implies that it must be tangent to $\partial D(0,R_I)$ at $\xi$.
Since $D(0,R_I)$ and $\partial\Omega$ are tangent at $\xi$, by Remark~\ref{rem:CurvatureRadius} there must be a disk $D(a,R_C)\subset\Omega$ with $\xi$ as a boundary point. By the definition of $R_C$ and the fact that $\Omega$ is convex, $\partial D(a,R_C)$ must be tangent to $\partial\Omega$ at $\xi$ and therefore tangent to the disk $D(0,R_I)$ at $\xi$. Since $D(a,R_C)$ is tangent to $D(0,R_I)$ at $\xi$ and $R_I<R_C$, the center $a$ lies on the line through $0$ and $\xi$, with $|a|=R_C-R_I$. Observe that $D(a,R_C)\subset\Omega\subset D(0,R_O)$, hence $|a|+R_C\leqslant R_O$; that is, $2R_C-R_I\leqslant R_O$, and thus $R_C-R_I \leqslant R_O-R_C$. \end{proof}
The necessary condition in Lemma~\ref{ArcsineDefined} turns out also to be sufficient for the existence of such $\Omega$.
\begin{lemma}\label{lemma-construction}
We can construct a domain $\Omega$ satisfying the $(R_O,R_I,R_C)$ condition for an arbitrary $R_O, R_I, R_C$ so long as they satisfy the relationship $|R_I-R_C|\leqslant R_O-R_C$. \end{lemma}
\begin{proof} If $R_I>R_C$, let $K$ be the closed Euclidean convex hull of the set $D(0,R_I-R_C)\cup\{R_O-R_C\}$. Otherwise, let $K$ be the line segment $[R_C-R_I,R_O-R_C]$. Define $\Omega=\bigcup\limits_{z\in K} D(z,R_C)$.
By construction, $\Omega$ is convex and has a $C^{1,1}$-smooth boundary. More specifically, $\Omega$ is a stadium in the sense of Definition~\ref{definition-stadium}. That $\Omega$ has the required values of $R_O$ and $R_I$ is a consequence of the fact that $-R_I$ and $R_O$ are boundary points of $\Omega$, and that $D(0,R_I)\subset\Omega\subset D(0,R_O)$. \end{proof}
Our main result of this section provides an upper bound on the hyperbolic distance from the base point 0 to any point $a$ such that $D(a,R_C)\subset\Omega$. This bound is given in terms of the outer, inner, and curvature radii of $\Omega$.
\begin{theorem}\label{DistanceBounds} Let $\Omega$ satisfy the $(R_O,R_I,R_C)$ condition in Definition~\ref{RadiiDefinition} and let $a$ be any point such that $D(a,R_C)\subset\Omega$. Let $R=\max(R_C, R_I)$, $r=\min(R_C,R_I)$, $d=R_O-R_C$, and $\theta=\arcsin\frac{R-r}{d}$. Define $F(R_O,R_I,R_C)$ as \begin{subequations} \begin{alignat}{2} &\frac{d}{R}
&&\text{ if } r=R,\label{eq:DistanceBoundsTheoremA}\\ \text{when $r<R$} & && \nonumber\\ &\frac12\log \frac{R+d}{R-d}
&&\text{ if } d\leqslant R\tan\frac{\theta}{2}\label{eq:DistanceBoundsTheoremB}\\ &\frac12\left[\cot\frac{\theta}{2}\log\left(\frac{R}{r}\cos\theta\right)+\log\frac{1+\tan(\theta/2)}{1-\tan(\theta/2)}\right]\quad &&\text{ if } d> R\tan\frac{\theta}{2}.
\label{eq:DistanceBoundsTheoremC} \end{alignat} \end{subequations} Then $h_\Omega(0, a)\leqslant F(R_O,R_I,R_C)$. \end{theorem}
\begin{proof}
First, note that Lemma~\ref{ArcsineDefined} allows us to define $\theta$ in this way. Observe that $D(a,R_C)\subset\Omega\subset D(0,R_O)$, thus $|a|+R_C\leqslant R_O$ and $|a|\leqslant d$. Since $\Omega$ is a convex domain containing the disks $D(0, R_I)$ and $D(a,R_C)$, it contains the corresponding stadium $\mathcal{S}(R_I, R_C, |a|)$. For containment the position of the stadium is important, so to be clear $\mathcal{S}$ is the convex hull of $D(0,R_I)\cup D(a,R_C)$. It follows from the domain monotonicity of the hyperbolic metric and Lemma~\ref{lem:DistanceMonotone} that \begin{equation}\label{A}
h_\Omega(0,a) \leqslant h(R_I,R_C,|a|)
= h(R,r,|a|) \leqslant h(R,r,d) \end{equation} where $h$ taking three arguments is as in Lemma~\ref{lem:DistanceMonotone}. Since the hyperbolic distance is majorized by the KP distance~\eqref{KPcomparison}, the claim follows from the explicit formulas for KP distance from Lemma~\ref{KPdistance}.
In each of the three cases we find the longest possible line segment from $0$ to an allowable $a$, construct $\mathcal{S}(R_I,R_C,R_O-R_C)=\mathcal{S}(R,r,d)\subset\Omega$ around the segment, and find its $\texttt{KP}_{\mathcal{S}}$ length. The formula in~\eqref{eq:DistanceBoundsTheoremA} corresponds to the case where the KP metric has constant density along the segment. The formula in~\eqref{eq:DistanceBoundsTheoremB} corresponds to the case where the $\texttt{KP}_{\mathcal{S}}$ extremal disk is the same at every point of the segment. The formula in~\eqref{eq:DistanceBoundsTheoremC} corresponds to the case where the extremal disk and density vary along the segment. \end{proof}
\section{Estimates for the derivative of a conformal map}\label{sec:derivative}
We are now ready to prove the main result.
\begin{theorem}\label{Main} Let $\Omega$ satisfy the $(R_O,R_I,R_C)$ condition in Definition~\ref{RadiiDefinition}. Then for any conformal map $f\colon \mathbb{D}\xrightarrow{\textnormal{onto}}\Omega$ fixing $0$ we have \begin{equation}\label{Main-theorem-formula}
\|f'\|_{H^\infty} \leqslant R_Ce^{2F(R_O,R_I,R_C)} \end{equation} where $F$ is as in Theorem~\ref{DistanceBounds}. \end{theorem}
\begin{proof}
By assumption, $\Omega$ has a smooth boundary and $f'$ exists on $\partial\mathbb{D}$. Take any number $L > R_C e^{2F(R_O,R_I,R_C)}$. It suffices to show that $|f'|\leqslant L$ in $\mathbb{D}$, which we will do by proving that this inequality holds on the boundary and then applying the maximum principle. More specifically, it suffices to show \begin{equation}\label{p1}
\limsup_{|z|\nearrow 1}\frac{\text{dist}(f(z),\partial\Omega)}{1-|z|}\leqslant L. \end{equation}
Fix $z\in\mathbb{D}$. Let $d=\text{dist}(f(z),\partial\Omega)$; since we are interested in the limit as $d\to 0$, we may assume $d<R_C$. We will show that \begin{equation}\label{p2}
\frac{d}{L} \leqslant 1-|z| \end{equation} for sufficiently small $d$, thus establishing the inequality in~\eqref{p1}.
We choose a point $w\in\partial\Omega$ such that $|f(z)-w|=d$. By Remark~\ref{rem:CurvatureRadius} there is a disk $D=D(a,R_C)$ that has $w$ on its boundary and is contained in $\Omega$. Observe that the smoothness of $\partial\Omega$ and $\partial D$ at $w$ requires that $f(z)$ lie on the radius of $D$ that ends at $w$, and therefore $|f(z)-a|=R_C-d$. Keeping in mind that the hyperbolic metric is monotone with respect to domain and that the formula for hyperbolic distance in a disk along a radius is well known, observe \begin{equation}\label{p3} h_{\Omega}(f(z),a)\leqslant h_{D(a,R_C)}(f(z),a)=\frac12\log\frac{2R_C-d}{d}<\frac12\log\frac{2R_C}{d}. \end{equation} Next we estimate $h_{\Omega}(a,0)$ using the KP estimate from Theorem~\ref{DistanceBounds}: \begin{equation}\label{p4} h_{\Omega}(a,0) \leqslant F(R_O,R_I,R_C). \end{equation} Now suppose for the sake of contradiction that~\eqref{p2} is false; this implies
\[ 1-|z|<d/L \ssp{3}{\text{ and }} 1+|z|>2-d/L.\] By conformal invariance of the hyperbolic metric, $h_{\Omega}(f(z),0)=h_\mathbb{D}(z,0)$, so
\begin{equation}\label{p5}
h_\Omega(f(z),0)=\frac12\log\frac{1+|z|}{1-|z|} > \frac12\log\frac{2-d/L}{d/L}. \end{equation}
Using the triangle inequality to combine this with~\eqref{p3} and~\eqref{p4}, we get \[ \frac12\log\frac{2-d/L}{d/L} < \frac12\log\frac{2R_C}{d}+F(R_O,R_I,R_C) \] which can be rearranged to \[ L-\frac{d}{2}<R_C e^{2F(R_O,R_I,R_C)}. \] But $R_C e^{2F(R_O,R_I,R_C)}<L$, so we have a contradiction when $d$ is sufficiently small. This contradiction proves~\eqref{p2}. \end{proof}
\section{Examples}\label{sec:examples}
\begin{example}
Let $\Omega=\{|z|<r\}$, then $R_O=R_I=R_C=r$. Theorem~\ref{Main} says that for all conformal $f\colon \mathbb{D}\xrightarrow{\textnormal{onto}}\Omega$ fixing 0, $\|f'\|_{H^\infty} \leqslant re^0=r$. The function $f(z)=rz$ shows the bound is attained in this case. \end{example}
This can be generalized to show that the bound in Theorem~\ref{Main} is sharp whenever $\Omega$ is a disk containing 0.
\begin{proposition}\label{prop-sharp in disks} Let $\Omega=D(a, r)$ with $0\leqslant |a|<r$. Then the bound in Theorem~\ref{Main} is sharp for a conformal map $f\colon \mathbb{D}\xrightarrow{\textnormal{onto}}\Omega$. \end{proposition}
\begin{proof} After a rotation about the origin, we may assume $0\leqslant a <r$. Then $\Omega$ satisfies the condition ($R_O=r+a,R_I=r-a,R_C=r$), and one can check that Theorem~\ref{Main} gives \begin{equation}\label{prop-disk-bound}
\|f'\|_{H^\infty}\leqslant r\frac{r+a}{r-a}. \end{equation}
Take \[f_1(z)=\frac{z-\frac{a}{r}}{1-\frac{a}{r}z}, \quad f_2(z)=z+\frac{a}{r}, \quad\text{and }f_3(z)=rz.\] Then the conformal mapping $f_3\circ f_2\circ f_1\colon\mathbb{D}\xrightarrow{\textnormal{onto}} D(a,r)$ fixes 0, and its derivative attains the bound in~\eqref{prop-disk-bound} at $z=1$. \end{proof}
To illustrate the improvement over the bound given in~\eqref{old-bound}, we close with two more examples.
\begin{example} One of the examples considered in~\cite{Kovalev2017} is a rounded triangle with $R_O=0.6,R_I=0.5,R_C=0.4$. For this domain, the bound~\eqref{old-bound} is
$\|f'\|_{H^\infty}\leqslant 0.977$. Theorem~\ref{Main} improves this to $\|f'\|_{H^\infty}\leqslant 0.931$. \end{example}
\begin{example} For the domain in Proposition~\ref{prop-sharp in disks} with $0\leqslant a<r$ the bound~\eqref{old-bound} is \begin{equation}\label{old-in-disks}
\|f'\|_{H^\infty} \leqslant \frac{r^3}{(r-a)^2}. \end{equation} The ratio of the two bounds~\eqref{prop-disk-bound} and~\eqref{old-in-disks} tends to $0$ as $a\to r$, indicating a substantial improvement.
For a specific example, let $a=1$ and $r=2$, so $\Omega=D(1,2)$. The bound in~\eqref{prop-disk-bound} becomes $\|f'\|_{H^\infty} \leqslant 6$, which is sharp as noted above. In contrast~\eqref{old-bound} gives $\|f'\|_{H^\infty} \leqslant 8$ for this example. \end{example}
\section*{Acknowledgments}
This paper is based on a part of a PhD thesis written by the author under the supervision of Leonid Kovalev. The author thanks the anonymous referees for many useful suggestions in revising this paper.
\end{document} | arXiv | {
"id": "2106.03808.tex",
"language_detection_score": 0.6765961647033691,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{On the intersection of homoclinic classes in intransitive sectional-Anosov flows}
\begin{abstract} We show that if $X$ is a {\em Venice mask}\index{Venice mask} (i.e.\ a nontransitive sectional-Anosov flow with dense periodic orbits, \cite{bmp}, \cite{mp}, \cite{mp2}, \cite{ls}) supported on a compact $3$-manifold, then the omega-limit set of every non-recurrent point in the unstable manifold of some singularity is a closed orbit. In addition, we prove that the intersection of two different homoclinic classes in the maximal invariant set of a sectional-Anosov flow can be decomposed as the disjoint union of singular points, a non-singular hyperbolic set, and regular points whose {\em alpha-limit set} and {\em omega-limit set} are formed by singular points or hyperbolic sets. \end{abstract}
\section{Introduction}
Dynamical systems theory seeks to describe the behavior, as time goes to infinity, of the majority of orbits in a given system. An important tool for hyperbolic sets \index{Hyperbolic set} is the well-known {\em connecting lemma} \index{Connecting lemma} \cite{hay}, \cite{ap}, \cite{bdv}. Specifically, the lemma says that if $X$ is an Anosov flow on a compact manifold $M$ and $p, q \in M$ satisfy that for all $\varepsilon> 0$ there is a trajectory from a point $\varepsilon$-close to $p$ to a point $\varepsilon$-close to $q$, then there is a point $x\in M $ such that $\alpha_X(x)=\alpha_X(p)$ and $\omega_X(x)=\omega_X(q)$.
In \cite{bm2}, a similar result was proved for sectional-Anosov flows; it is known as the {\em sectional-connecting lemma}. \index{Sectional-connecting lemma} Recall that sectional hyperbolic sets and sectional-Anosov flows were introduced in \cite{mo} and \cite{mem}, respectively, as a generalization of hyperbolic sets and Anosov flows that includes important examples such as the saddle-type hyperbolic attracting sets, the geometric and multidimensional Lorenz attractors \cite{abs}, \cite{bpv}, \cite{gw}, and certain robustly transitive sets. A fundamental hypothesis in the sectional-hyperbolic case is that the alpha-limit set of $p\in M(X)$ be non-singular. As the unstable manifold of every singularity $\sigma$ \index{Singularity} of a sectional-Anosov flow $X$ is contained in the maximal invariant set $M(X)$, it would be interesting to know what the omega-limit set of a point in $W^u_X(\sigma)$ is. In fact, this can be seen as an extension of the {\em sectional-connecting lemma}.
On the other hand, the class of Venice masks (i.e.\ intransitive sectional-Anosov flows with dense periodic orbits) is of particular interest, since their existence shows that the spectral decomposition theorem \cite{sma} is not valid in the sectional-hyperbolic case. They have been studied by various authors in recent years. The examples exhibited in \cite{bmp}, \cite{ls}, \cite{mp} are characterized by the fact that the maximal invariant set can be decomposed as the disjoint finite union of homoclinic classes. In addition, the intersection between two different homoclinic classes is contained in the closure of the union of the unstable manifolds of the singularities. Specifically, this intersection can be decomposed as the disjoint union of a singularity $\sigma$, a closed orbit $C$, and regular points whose {\em alpha-limit set} \index{Alpha-limit set} is $\sigma$ and whose {\em omega-limit set} \index{Omega-limit set} is $C$. In particular, it was proved in \cite{mp}, \cite{mp2} that every Venice mask with a unique singularity has these properties.
In search of properties which allow one to characterize the dynamics of Venice masks, \index{Venice mask} we will study the behavior of homoclinic classes \index{Homoclinic classes} and their relation with the unstable manifolds of the singularities.
Let us state our results in a more precise way.\\
Consider a Riemannian compact manifold $M$ of dimension $n$ (a {\em compact $n$-manifold} for short). $M$ is endowed with a Riemannian metric $\langle\cdot,\cdot\rangle$ and an induced norm $\lVert\cdot\rVert$.
We denote by $\partial M$ the boundary of $M$.
Let ${\cal X}^1(M)$ be the space
of $C^1$ vector fields in $M$ endowed with the
$C^1$ topology.
Fix $X\in {\cal X}^1(M)$, inwardly
transverse to the boundary $\partial M$, and denote
by $X_t$ the flow of $X$, $t\in I\!\! R$.
The {\em $\omega$-limit set} of $p\in M$ is the set $\omega_X(p)$ formed by those $q\in M$ such that $q=\lim_{n \rightarrow \infty}X_{t_n}(p)$ for some sequence $t_n\to\infty$. The {\em $\alpha$-limit set} of $p\in M$ is the set $\alpha_X(p)$ formed by those $q\in M$ such that $q=\lim_{n \rightarrow \infty}X_{t_n}(p)$ for some sequence $t_n\to -\infty$. The {\em non-wandering set} of $X$ is the set $\Omega(X)$ of points $p\in M$ such that for every neighborhood $U$ of $p$ and every $T>0$ there is $t>T$ such that $X_t(U)\cap U\neq\emptyset$. Given a compact set $\Lambda \subset M$, we say that $\Lambda$ is {\em invariant} if $X_t(\Lambda)=\Lambda$ for all $t\in I\!\! R$. We also say that $\Lambda$ is {\em transitive} if $\Lambda=\omega_X(p)$ for some $p\in \Lambda$; {\em singular} if it contains a singularity; and {\em attracting} if $\Lambda=\cap_{t>0}X_t(U)$ for some compact neighborhood $U$ of it. This neighborhood is often called an {\em isolating block}. It is well known that the isolating block $U$ can be chosen to be positively invariant, i.e., $X_t(U)\subset U$ for all $t>0$. An {\em attractor} is a transitive attracting set. An attractor is {\em nontrivial} if it is not a closed orbit.
The {\em maximal invariant} set of $X$ is defined by
$M(X)= \bigcap_{t \geq 0} X_t(M)$.
\begin{defi}\label{defhyp}
\label{hyperbolic}
A compact invariant set $\Lambda$ of $X$ is {\em hyperbolic}
if there are a continuous tangent bundle invariant decomposition
$T_{\Lambda}M=E^s\oplus E^X\oplus E^u$ and positive constants
$C,\lambda$ such that
\begin{itemize}
\item $E^X$ is the vector field's
direction over $\Lambda$.
\item $E^s$ is {\em contracting}, i.e.,
$
\lVert DX_t(x) \left|_{E^s_x}\right.\rVert
\leq Ce^{-\lambda t}$,
for all $x \in \Lambda$ and $t>0$.
\item $E^u$ is {\em expanding}, i.e.,
$
\lVert DX_{-t}(x) \left|_{E^u_x}\right.\rVert
\leq Ce^{-\lambda t},
$
for all $x\in \Lambda$ and $t> 0$.
\end{itemize} \end{defi} A compact invariant set $\Lambda$ has a {\em dominated splitting} with respect to the tangent flow if there are an invariant splitting $T_{\Lambda}M = E\oplus F$ and positive numbers $K,\lambda$ such that
$$\lVert DX_t(x)e_x\rVert\cdot \lVert f_x\rVert\leq Ke^{-\lambda t} \lVert DX_t(x)f_x\rVert\cdot \lVert e_x\rVert,\qquad \forall x\in\Lambda, t \geq 0, (e_x , f_x) \in E_x\times F_x .$$
Notice that this definition allows every compact invariant set $\Lambda$ to have a dominated splitting with respect to the tangent flow (See \cite{bamo}): Just take $E_x = T_xM$ and $F_x = 0$, for every $x\in\Lambda$ (or $E_x = 0$ and $F_x = T_x M$ for every $x\in\Lambda$).
A compact invariant set $\Lambda$ is {\em partially hyperbolic} if it has a {\em partially hyperbolic splitting}, i.e., a dominated splitting $T_{\Lambda}M = E\oplus F$ with respect to the tangent flow whose dominated subbundle $E$ is contracting in the sense of Definition \ref{defhyp}.
The Riemannian metric $\langle\cdot ,\cdot\rangle$ of $M$ induces a $2$-Riemannian metric \cite{mv}, $$\langle u, v/w\rangle_p= \langle u, v\rangle_p\cdot \langle w, w\rangle_p - \langle u, w\rangle_p\cdot \langle v, w\rangle_p,\quad \forall p\in M, \forall u, v, w \in T_p M.$$ This in turns induces a 2-norm \cite{gah} (or areal metric \cite{kat}) defined by $$\lVert u, v\rVert =\sqrt{\langle u, u/v\rangle_p} \qquad \forall p\in M, \forall u, v \in T_p M.$$
Geometrically, $\lVert u, v\rVert$ represents the area of the paralellogram generated by $u$ and $v$ in $T_p M$.
If a compact invariant set $\Lambda$ has a dominated splitting $T_{\Lambda}M = E \oplus F$ with respect to the tangent flow, then we say that its central subbundle $F$ is {\em sectionally expanding} if
$$\lVert DX_t (x)u, DX_t (x)v\rVert \geq K^{-1}e^{\lambda t} \lVert u, v\rVert, \quad\forall x \in \Lambda, u,v\in F_x, t \geq 0.$$
By a {\em sectional-hyperbolic splitting} for $X$ over $\Lambda$ we mean a partially hyperbolic splitting $T_{\Lambda}M = E\oplus F$ whose central subbundle $F$ is sectionally expanding.
\begin{defi} A compact invariant set $\Lambda$ is {\em sectional-hyperbolic} for $X$ if its singularities are hyperbolic and if there is a sectional-hyperbolic splitting for $X$ over $\Lambda$. \end{defi}
\begin{defi} \label{secflow} We say that $X$ is a {\em sectional-Anosov flow} if $M(X)$ is a sectional-hyperbolic set. \end{defi}
The Invariant Manifold Theorem [3] asserts that if $p$ belongs to a hyperbolic set $H$ of $X$, then the sets
$$W^{ss}_X(p) = \{x\in M:d(X_t(x),X_t(p))\to 0, t\to \infty\} \qquad \text{and}$$ $$W^{uu}_X(p) = \{x\in M:d(X_t(x),X_t(p))\to 0, t\to -\infty\},$$
are $C^1$ immersed submanifolds of $M$ which are tangent at $p$ to the subspaces $E^s_p$ and $E^u_p$ of $T_pM$ respectively.
$$W^{s}_X(p) = \bigcup_{t\in I\!\! R}W^{ss}_X(X_t(p))\qquad\text{and}\qquad W^{u}_X(p) = \bigcup_{t\in I\!\! R}W^{uu}_X(X_t(p))$$
are also $C^1$ immersed submanifolds tangent to $E^s_p\oplus E^X_p$ and $E^X_p\oplus E^u_p$ at $p$ respectively.
Recall that a singularity of a vector field is hyperbolic if the eigenvalues of its linear part have non zero real part.
\begin{defi} \label{ll} We say that a singularity $\sigma$ of a sectional-Anosov flow $X$ is {\em Lorenz-like} if it has three real eigenvalues $\lambda^{ss},\lambda^{s},\lambda^u$ with $\lambda^{ss}<\lambda^s<0<-\lambda^s<\lambda^u$, such that the real parts of the remaining eigenvalues are outside the compact interval $[\lambda^{s},\lambda^u]$. $W^s_X(\sigma)$ is the manifold associated to the eigenvalues with negative real part. The strong stable foliation\index{Strong stable foliation} associated to $\sigma$ and denoted by $\mathcal{F}^{ss}_X(\sigma)$, is the foliation contained in $W^s_X(\sigma)$ which is tangent to the space generated by the eigenvalues with real part less than $\lambda^{s}$. \end{defi}
\begin{defi} A periodic orbit of $X$ is the orbit of some $p$ for which there is a minimal $t > 0$ (called the period) such that $X_t(p) = p$. An orbit is called closed if it is a periodic orbit or a singularity. \end{defi}
A homoclinic orbit of a hyperbolic periodic orbit $O$ is an orbit $\gamma\subset W^s(O)\cap W^u(O)$. If additionally $T_qM = T_qW^s(O) + T_qW^u(O)$ for some (and hence all) point $q\in\gamma$, then we say that $\gamma$ is a transverse homoclinic orbit of $O$. The homoclinic class $H(O)$ of a hyperbolic periodic orbit $O$ is the closure of the union of the transverse homoclinic orbits of $O$. We say that a set $\Lambda$ is a homoclinic class if $\Lambda = H(O)$ for some hyperbolic periodic orbit $O$.
\begin{defi} A Venice mask is a sectional-Anosov flow with dense periodic orbits which is not transitive. \end{defi}
If $A$ is a compact invariant set of $X$, we denote by $Sing_X(A)$ the set of singularities of $X$ in $A$, and $Sing(X)=Sing_X(M(X))$. The closure of $B\subset M$ is denoted by $Cl(B)$. With these definitions we can state our main results.
\section{Main statements}
We show that if $X$ is a {\em Venice mask}\index{Venice mask} supported on a compact $3$-manifold, then the omega-limit set of every non-recurrent point in the unstable manifold of some singularity is a closed orbit. In addition, we prove that the intersection of two different homoclinic classes in the maximal invariant set of a sectional-Anosov flow can be decomposed as the disjoint union of singular points, a non-singular hyperbolic set, and regular points whose {\em alpha-limit set} and {\em omega-limit set} are formed by singular points or hyperbolic sets.\\
Specifically, we have the following statements.
\begin{maintheorem} \label{thH} If $X$ is a three-dimensional Venice mask and $\sigma$ is a singularity of $X$, then for every $q\in W^u_X(\sigma)$ such that $q$ is non-recurrent we have the following dichotomy: \begin{itemize} \item $\omega_X(q)\in Sing(X)$. \item $\omega_X(q)=O$, where $O$ is a hyperbolic periodic orbit. \end{itemize}
\end{maintheorem}
\begin{maintheorem} \label{thH'} The intersection of two different homoclinic classes $H_1,H_2$ in the maximal invariant set of a sectional-Anosov flow $X$ is the disjoint union of a set $S$ (possibly empty) of singularities, a non-singular hyperbolic set $H$ (possibly empty), and a set $R$ (possibly empty) of regular points \index{Regular point} such that if $q\in R$ then $\alpha_X(q)\subset H\cup S$ and $\omega_X(q)\subset H\cup S$.
\end{maintheorem}
\section{Preliminaries} \label{prelim}
We mention the following results, which are essential for proving the theorems.
\begin{thm}[\cite{mpp2}] \label{th1} Let $\Lambda$ be a sectional-hyperbolic set with dense periodic orbits. \index{Dense periodic orbits} Then, every $\sigma\in Sing_X(\Lambda)$ is Lorenz-like and satisfies $\Lambda\cap \mathcal{F}^{ss}_X(\sigma) =\{\sigma\}$. \end{thm}
We observe that $W^s_X(\sigma)\setminus \mathcal{F}^{ss}_X(\sigma)$ is decomposed into two connected components \index{Connected components} $W^{s,+}_X(\sigma)$ and $W^{s,-}_X(\sigma)$ (see figure \ref{Ws+}). Hence for a Venice mask, a regular point in $M(X)$ contained in the stable manifold \index{Stable manifold} of some singularity $\sigma$ is necessarily contained in either $W^{s,+}_X(\sigma)$ or $W^{s,-}_X(\sigma)$.
\begin{figure}
\caption{ Connected components.}
\label{Ws+}
\label{VS}
\end{figure}
\begin{lemma}[Hyperbolic lemma \cite{mpp2}] \label{lem1} A compact invariant set without singularities of a sectional-hyperbolic set is hyperbolic saddle-type. \index{Hyperbolic lemma} \end{lemma}
\begin{rk} Theorem \ref{th1} and the Hyperbolic Lemma imply that every Venice mask has singularities, and these are Lorenz-like.
\end{rk}
\begin{defi} We say that a $C^1$ vector field $X$ with hyperbolic closed orbits has the Property $(P)$ \index{Property $(P)$} if for every periodic orbit $O$ there is a singularity $\sigma$ such that \begin{equation} \label{P} W^u_X(O)\cap W^s_X(\sigma)\neq \emptyset. \end{equation}
\end{defi}
The above definition is useful due to the interesting fact below.
\begin{lemma} \label{lem2} Every point in the closure of the periodic orbits of a vector field with the Property $(P)$ is accumulated by points for which the omega-limit set is a singularity. \end{lemma}
Moreover, we have an important property.
\begin{lemma}[\cite{mp}] \label{l3} Every sectional-Anosov flow with singularities and dense periodic orbits on a compact 3-manifold has the Property $(P)$. \end{lemma}
\begin{rk} By Lemma \ref{lem2} and Lemma \ref{l3} we can assert that every Venice mask $X$ has the Property $(P)$ and $W^s(Sing(X))\cap M(X)$ is dense in $M(X)$.
\end{rk}
\begin{defi} Given $\Sigma\subset M$ we say that $q\in M$ satisfies Property $(P)_{\Sigma}$ \index{Property $(P)_{\Sigma}$} if $Cl(O^+(q))\cap\Sigma =\emptyset$ and there is an open arc \index{Open arc} $I$ in $M$ with $q\in\partial I$ such that $O^+(x)\cap\Sigma \neq\emptyset$ for every $x\in I$. \end{defi}
We conclude the preliminary statements with the following characterization. \begin{thm}[\cite{bamo2}] \label{thPsigma} Let $X$ be a $C^1$ vector field in a compact 3-manifold $M$. If $q\in M$ has sectional-hyperbolic omega-limit set $\omega(q)$, then the following properties are equivalent:
\begin{itemize} \item $\omega(q)$ is a closed orbit\index{Closed orbit}. \item $q$ satisfies $(P)_{\Sigma}$ for some closed subset $\Sigma$. \end{itemize} \end{thm}
Figure \ref{PS} exhibits the case in which the omega-limit set $\omega(q)$ of the point $q$ is a hyperbolic singularity of saddle-type.
\begin{figure}
\caption{ Property $(P)_{\Sigma}$.}
\label{PS}
\end{figure}
\section{Characterizing the omega-limit set} \label{omegalimit}
In this section we will prove the {\em Theorem \ref{thH}}. The idea is to consider a sequence of points satisfying the Property $(P)_{\Sigma}$, which approximates a point $q$ in the unstable manifold of a fixed singularity. We show that $q$ satisfies the Property $(P)_{\Sigma}$ too. Hereafter in this section, we assume that every regular point $q\in W^u(Sing(X))$ is non-recurrent.\index{Non-recurrent point}\\
First, we mention some facts of topology. Given a compact metric space $(Y,d)$, define a distance function between any point $x$ of $Y$ and any non-empty set $B$ of $Y$ by:
$$d(x,B)=\inf \{ d(x,y) | y \in B \}.$$
Now, consider the collection $\mathcal{C}(Y)=\{C\subset Y: C \text{ is a non-empty compact subset of $(Y,d)$}\}$. For $\mathcal{C}(Y)$, take the Hausdorff metric $d_H$ defined as the distance function between any two non-empty sets $A$ and $B$ of $Y$ by:
$$d_H(A,B)=\sup \{ d(x,B) | x \in A \}.$$
\begin{lemma} \label{l4} Let $\{A_n:n\in{\Bbb N}\}$ be a sequence of closed sets contained in a compact metric space $(Y,d)$, such that $A_n\to A$ in the Hausdorff metric induced by $d$. Then $\partial A_n\to \partial A$. \end{lemma}
From now on in this section, let $M$ be a compact Riemannian 3-manifold, and let $X$ be a Venice mask on $M$. So, for a hyperbolic point $p$ of $X$, $W^s_X(p)$ is just denoted by $W^s(p)$; the same with $s$ interchanged with $u$.\\
\subsection{Existence of singular partitions} \label{singpart}
We introduce the following definition which can also be found in \cite{bau} and \cite{lec}, and extends the notion given in \cite{mp1}. \\
A cross section \index{Cross section} of $X$ is a codimension one submanifold $S$ transverse to $X$. We denote the interior and the boundary (in topological sense) of $S$ by $Int(S)$ and $\partial S$ respectively. If $\mathcal{R} = \{S_1 ,\cdots , S_k \}$ is a collection of cross sections we still denote by $\mathcal{R}$ the union of its elements. Moreover
$$\partial \mathcal{R} :=\bigcup_{i=1}^k\partial S_i\qquad \text{and} \qquad Int(\mathcal{R}) :=\bigcup_{i=1}^k Int(S_i).$$
The size of $\mathcal{R}$ will be the sum of the diameters of its elements.
\begin{defi} A singular partition \index{Singular partition} of an invariant set $H$ of a vector field $X$ is a finite disjoint collection $\mathcal{R}$ of cross sections of $X$ such that $H\cap\partial\mathcal{R}=\emptyset$ and
$$ H\cap Sing(X)=\{y\in H: X_t(y)\notin \mathcal{R}, \forall t\in{\Bbb R}\}. $$ \end{defi}
For a Lorenz-like singularity \index{Lorenz-like singularity} $\sigma$, the center unstable manifold \index{Center unstable manifold} $W_X^{cu}(\sigma)$ associated is divided by $W^u(\sigma)$ and $W^s(\sigma)\cap W^{cu}(\sigma)$ in the four sectors $s_{11}$, $s_{12}$, $s_{21}$, $s_{22}$. $\pi: V_{\sigma}\to W^{cu}(\sigma)$ is the projection defined in a neighborhood $V_{\sigma}$ of $\sigma$. Figure \ref{cusigma} exhibits the case when $\pi(M(X)\cap V_{\sigma})$ intersects $s_{11}$ and $s_{12}$.
\begin{figure}
\caption{ Center unstable manifold of $\sigma$.}
\label{cusigma}
\end{figure}
\begin{lemma} \label{lem41} Consider $\sigma$ a Lorenz-like singularity of a Venice mask $X$, and $O$ a hyperbolic periodic orbit satisfying $Cl(W^u(O))\cap W^{s,+}(\sigma)\neq\emptyset$ and $Cl(W^u(O))\cap W^{s,-}(\sigma)\neq\emptyset$. Moreover, $\pi(Cl(W^u(O))) \cap s_{1i}\neq\emptyset$ and $\pi(Cl(W^u(O)))\cap s_{2i}\neq\emptyset$ for some $i\in\{1,2\}$. If $q$ is a regular point in $W^u(\sigma)\cap Cl(s_{1i})\cap Cl(s_{2i})$, then $O=\omega_X(q)$. \end{lemma}
\begin{proof} We take $q\in W^u(\sigma)$ a regular point close to $\sigma$. We assert that $q\in W^s(O)$. Indeed, if we suppose that is not the case, we will get a contradiction. \\
So, we assume $q\in W^u(\sigma)\setminus W^s(O)$. Then, there is a sequence $p_n^-\to q$ such that $p_n^-\in W^u(O)$ for all $n$. In addition, $\{O_X(p_n^-):n\in{\Bbb N}\}$ accumulates some regular point $p^-$ in $W^{s,-}(\sigma)$ or in $W^{s,+}(\sigma)$. We can suppose the accumulation occurs at some point of $W^{s,-}(\sigma)$. Also, we can take $\{p_n^+:n\in{\Bbb N}\}\subset W^u(O)$ to be a sequence such that $p_n^+\to q$. Moreover, $\{O_X(p_n^+):n\in{\Bbb N}\}$ accumulates $\sigma$ and some point $p^+$ in $W^{s,+}(\sigma)$. We have $p_n^+,p_n^-\notin W^u(\sigma)$ for all $n$. On the other hand, $q\in Cl(W^u(O))$ and the invariance of $W^u(\sigma)$ imply $O_X(q)\subset Cl(W^u(O))$. But $Cl(W^u(O))$ is a closed set, therefore $Cl(O_X(q))\subset Cl(W^u(O))$. Applying the compactness of $Cl(W^u(O))$ and the Tubular Flow Box Theorem \index{Tubular Flow Box Theorem} \cite{dmp} in a neighborhood of $O^+(q)$ we obtain that $\{O^+(p_n^+):n\in{\Bbb N}\}$ and $\{O^+(p_n^-):n\in{\Bbb N}\}$ accumulate every point in $W^u(\sigma)$ close to $\omega_X(q)$. \\
As $O$ and $\omega_X(q)$ are invariant closed sets, they are disjoint and $d(x,\omega_X(q))>0$ for all $x\in O$. This implies that there exists $\varepsilon>0$ such that every point $y$ close to $\omega_X(q)$ satisfies $d(y,O)>\varepsilon$. Moreover $y\notin O_X(q)$ and, $\{O^+(p_n^+):n\in{\Bbb N}\}$, $\{O^+_X(p_n^-):n\in{\Bbb N}\}$ accumulate $y$. The positive orbits of $p_n^+$ and $p_n^-$ cannot intersect $\omega_X(q)$. So, we have two possibilities: either some orbit intersects $O_X(q)$, or no orbit does. The first case means that there is a point $w\in W^u(\sigma)\cap W^u(O)$, which is absurd. So, neither orbit intersects $O_X(q)$. Now, $q$ is a non-recurrent point. Then, $\{O^+_X(p_n^+):n\in{\Bbb N}\}$ does not accumulate on $W^{s,+}(\sigma)$. But this contradicts the choice of the sequences. Therefore $q\in W^s(O)$. So, we conclude $O=\omega_X(q)$.
\end{proof}
From {\em Lemma} \ref{lem41} we obtain the following corollary.\\
\begin{clly} \label{clly5} Consider $\sigma$ a Lorenz-like singularity of a Venice mask $X$, and $O$ a hyperbolic periodic orbit satisfying $W^u(O)\cap W^{s,+}(\sigma)\neq\emptyset$ and $W^u(O)\cap W^{s,-}(\sigma)\neq\emptyset$. Let $q$ be a regular point in $W^u(\sigma)\cap Cl(W^u(O))$ and let $\{p_n:n\in{\Bbb N}\}\subset Cl(W^u(O))\cap W^s(O)$ be a sequence such that $p_n\to q$. Then $p_n\in O_X(q)$ for all $n$ large. \end{clly}
\begin{proof} For this it is sufficient to observe that $O_X(q)$ is contained in $W^s(O)$.
\end{proof}
\begin{rk} \label{rk3} {\em Corollary} \ref{clly5} says that for $i\in\{1,2\}$ and for every hyperbolic periodic orbit $O$ of $X$, it is not possible to have $H(O)\cap s_{1i}\neq\emptyset$ and $H(O)\cap s_{2i}\neq\emptyset$ simultaneously. \end{rk}
\begin{lemma} \label{lem6} Let $\sigma$ be a singularity of a Venice mask $X$, and let $O$ be a hyperbolic periodic orbit such that $W^u(O)\cap W^s(\sigma)\neq\emptyset$. Then for $q\in W^u(\sigma)\setminus\{\sigma\}$, $\omega_X(q)$ has singular partitions \index{Singular partition} of arbitrarily small size. \end{lemma}
\begin{proof} We adapt the proof of {\em Theorem 17} given in \cite{lec}. Observe that $\omega_X(q)$ is sectional-hyperbolic. Therefore, if $\omega_X(q)$ is a closed orbit, then {\em Theorem} \ref{thPsigma} implies that $q$ satisfies the property $(P)_{\Sigma}$ \index{Property $(P)_{\Sigma}$} for some closed subset $\Sigma$. Moreover, we can apply {\em Theorem 16} in \cite{lec} to conclude that $\omega_X(q)$ has singular partitions of arbitrarily small size.\\
Hereafter, we assume $\omega_X(q)$ is not a closed orbit. By {\em Proposition 3} in \cite{lec} it is sufficient to prove that for all $z\in \omega_X(q)$ there is a cross section $\Sigma_z$ close to $z$ such that $z\in Int(\Sigma_z)$ and $\omega_X(q)\cap\partial\Sigma_z= \emptyset$.\\
We assert that $\omega_X(q)$ cannot contain any local strong stable manifold. \index{Strong stable manifold} Indeed, we first assume that $\omega_X(q)$ has no singularities. By the {\em Hyperbolic lemma}, \index{Hyperbolic lemma} it is hyperbolic saddle-type. Suppose that $\omega_X(q)$ contains a local strong stable manifold. Then, by {\em Lemma 11} in \cite{lec}, $q$ would be a recurrent point. Therefore using {\em Lemma 5.6} in \cite{mpa1}, there is $x^*\in Per(X)\cap \omega_X(q)$ such that $q\in W^s_X(x^*)$. This means that $\omega_X(q)$ is a periodic orbit, which contradicts our assumption. Now, if $\omega_X(q)$ is a sectional-hyperbolic set with singularities, applying the {\em Main Theorem} in \cite{mor}, $\omega_X(q)$ cannot contain any local strong stable manifold.\\
We can fix a foliated rectangle of small diameter $R_z^0$ such that $z\in Int(R_z^0)$ and $\omega_X(q)\cap\partial^hR_z^0 =\emptyset$. By {\em Theorem} \ref{th1}, the intersection of $W^u(O)$ with $W^s(\sigma)$ occurs in some connected component $W^{s,+}(\sigma)$ or $W^{s,-}(\sigma)$ (or both). We initially assume the intersection occurs in $W^{s,+}(\sigma)$.\\
Since $z\in\omega_X(q)$ and the omega-limit set is not a closed orbit, we have that the positive orbit of $q$ intersects either only one or the two connected components of $R_z^0\setminus \mathcal{F}^s(z,R^0_z)$. \\
Assuming the intersection occurs in just one component, we shall consider the following cases:\\
\begin{itemize}
\item $W^{s,-}(\sigma)\cap M(X)=\emptyset$.
Using this and linear coordinates around $\sigma$, we can construct an open interval $I^+ = I^+_q\subset W^u(O)$, contained in a suitable cross section through $q\in W^u(\sigma)\setminus \{\sigma\}$, with $q\in\partial I^+$. As $W^u(O)\cap W^{s,+}(\sigma)$ is dense in $W^{u}(O)$ we have that $I^+\cap W^{s,+}(\sigma)$ is dense in $I^+$.
It is possible to assume that $I^+$ is contained in that component of $R_z^0\setminus \mathcal{F}^s(z,R^0_z)$. This is because the positive orbit of $q$ carries the positive orbit of $I^+$ into such a component. Furthermore, the stable manifolds through $I^+$ form a subrectangle $R_I^+$ in there. So, $W^{s,+}(\sigma)\cap R_I^+$ is dense in $R_I^+$.
Now, as in {\em Theorem 17} of \cite{lec}, we suppose $\omega_X(q)\cap Int(R_I^+)\neq\emptyset$ to obtain a contradiction. By hypothesis, the omega-limit set of $q$ is not a periodic orbit. Then {\em Lemma 5.6} in \cite{mpa1} implies that the positive orbit of $q$ cannot intersect $\mathcal{F}^s(q,R^0_z)$ infinitely many times. Now, if it intersects $R_I^+$, then by the density of $W^{s,+}(\sigma)\cap R_I^+$ in $R_I^+$, we can assert that the positive orbit of a point $p$ in $W^{s,-}(\sigma)$ would intersect $R_I^+$. Therefore $p\in Cl(W^u(O))\subset M(X)$, from which we get a contradiction. So $\omega_X(q)\cap Int(R_I^+)=\emptyset$.
To continue, we choose a point $z'\in Int(R_I^+)$ and a point $z''$ in the connected component $R_z^0\setminus \mathcal{F}^s (z, R_z^0 )$ not intersected by the positive orbit of $q$. The desired rectangle $\Sigma_z$ is a subrectangle of $R^0_z$ bounded by $\mathcal{F}^s(z',R_z^0)$ and $\mathcal{F}^s(z'',R_z^0)$.\\
\item $W^s(\sigma)\cap W^u(O)\subset W^{s,+}(\sigma)$ and $W^s(\sigma)\cap W^u(O')\subset W^{s,-}(\sigma)$ for some hyperbolic periodic orbit $O'\neq O$.
In this way, we have the hypotheses of {\em Theorem 17} in \cite{lec}. Therefore there exists an interval $I^-\subset W^u(O')$ contained in that component of $R_z^0\setminus \mathcal{F}^s(z,R^0_z)$, such that $q\in\partial I^-$ and $I^-\cap W^{s,-}(\sigma)$ is dense in $I^-$. The stable manifolds through $I=I^+\cup\{q\}\cup I^-$ form a subrectangle $R_I$ in there, with $Int(R_I)\cap\omega_X(q)= \emptyset$. So, the existence of $\Sigma_z$ is guaranteed as in the last item. \\
\item $W^{s,+}(\sigma)\cap W^u(O)\neq\emptyset$ and $W^{s,-}(\sigma)\cap W^u(O)\neq\emptyset$.
We assert that there are $O_1,O_2$ hyperbolic periodic orbits such that, $W^s(\sigma)\cap W^u(O_1)\subset W^{s,+}(\sigma)$ and $W^s(\sigma)\cap W^u(O_2)\subset W^{s,-}(\sigma)$. Indeed, we take $q_1\in W^{s,+}(\sigma)\cap W^u(O)$ and $q_2\in W^{s,-}(\sigma)\cap W^u(O)$.
As $M(X)$ is a union of homoclinic classes and $W^u(O)\subset M(X)$, there are hyperbolic periodic orbits $O_1,O_2$ satisfying $q,q_1\in H(O_1)$ and $q,q_2\in H(O_2)$. Therefore $O_X(q_1)\subset H(O_1)$ and $O_X(q_2)\subset H(O_2)$. Moreover, since the homoclinic classes are closed sets we have that $\sigma$ and $O$ are in $H(O_1)\cap H(O_2)$. From {\em Remark} \ref{rk3} it follows that $H(O_1)\cap W^s(\sigma)\subset W^{s,+}(\sigma)$ and $H(O_2)\cap W^s(\sigma)\subset W^{s,-}(\sigma)$. On the other hand, let $W^+(O)$ be the connected component of $W^u(O)\setminus O$ containing $q_1$; then $W^+(O) \subset H(O_1)$. Analogously, for $W^-(O)$, the connected component of $W^u(O)\setminus O$ containing $q_2$, we have $W^-(O)\subset H(O_2)$. Therefore $W^u(O_1)\cap W^s(\sigma)\subset W^{s,+}(\sigma)$ and $W^u(O_2)\cap W^s(\sigma) \subset W^{s,-}(\sigma)$. Again we have the hypotheses of {\em Theorem 17} in \cite{lec}.\\
\item $W^{s,+}(\sigma)\cap W^u(O)\neq\emptyset$ and $W^{s,-}(\sigma)\cap H(O)\neq\emptyset$.
It is not possible by {\em Corollary} \ref{clly5}.
\item $W^{s,+}(\sigma)\cap W^u(O)\neq\emptyset$, $W^{s,-}(\sigma)\cap Cl(W^u(O'))\neq\emptyset$ and $q\in Cl(W^u(O'))$, where $O'$ is a hyperbolic periodic orbit of $X$.
From last item $O'\notin H(O)$. As $X$ satisfies the Property $(P)$\index{Property $(P)$}, there is $\sigma'\in Sing(X)$ such that $W^u(O')\cap W^s(\sigma')\neq\emptyset$. If $\sigma'=\sigma$ then $W^u(O')$ intersects $W^{s,+}(\sigma)$ or $W^{s,-}(\sigma)$. Observe that those alternatives were already analyzed. If $\sigma'\neq\sigma$, then we can obtain an interval $J^-$ such that $J^-\subset W^u(O')$ and $J^-\cap W^s(\sigma')$ is dense in $J^-$. Moreover we can assume $W^{s}(\sigma)\cap W^u(O)\subset W^{s,+}(\sigma)$ to obtain an interval $I^+$ such that $I^+\subset W^u(O)$ and $I^+\cap W^{s,+}(\sigma)$ is dense in $I^+$. Since $O'\notin H(O)$, follows that $W^u(O')\nsubseteq H(O)$. Therefore $W^u(O')$ cannot intersect $W^{s,+}(\sigma)$. In this way, there is an open arc $I^-\subset \bigcup_{t\geq 0}X_t(J^-)$ such that $q\in\partial I^-$. $I^-$ works such as in second item. The stable manifolds \index{Stable manifold} throught $I=I^+\cup\{q\}\cup I^-$ generates a subrectangle $R_I$. This acts such as {\em Theorem} 17 in \cite{lec}.
\end{itemize}
Now assume the positive orbit intersects both components of $R_z^0\setminus\mathcal{F}^s(z,R_z^0)$. Therefore we take $I$ (or $I^+$ to first case) with the positive orbit as before to obtain two subrectangles $R_I^t$ and $R_I^b$, like $R_I$ (or $R_I^+$ to first case), in each component. Then we select two points $z'\in Int(R_I^t )$ and $z''\in Int(R_I^b)$ and define $\Sigma_z$ as the rectangle in $R_z^0$ bounded by $\mathcal{F}^s(z', R_z^0)$ and $\mathcal{F}^s(z'', R_z^0)$.
From {\em Proposition} 3 in \cite{lec} we conclude the result.
\end{proof}
We recall the concept of {\em singular cross section} \index{Singular cross section} that appears in \cite{mp2}. For a disjoint collection of rectangles $\mathcal{S}=\{S_1,\cdots,S_l\}$ we denote $\mathcal{S}^o=\mathcal{S}\setminus\partial\mathcal{S}$ and $\partial^{\ast}\mathcal{S}=\bigcup_{S\in\mathcal{S}} \partial^{\ast} S$ for $\ast=h,v,o$.
\begin{defi} A singular cross section of $X$ is a finite disjoint collection $\mathcal{S}$ of foliated rectangles with $M(X)\cap\partial ^h S =\emptyset$ such that for every $S\in\mathcal{S}$ there is a leaf $l_S$ of $\mathcal{F}^s$ in $S^o$ such that the return time $t_S(x)$ for $x\in S\cap Dom(\Pi_S )$ goes uniformly to infinity as $x$ approaches $l_S$.
We define the singular curve \index{Singular curve} of $\mathcal{S}$ as the union,
$$l_{\mathcal{S}}=\bigcup_{S\in\mathcal{S}}l_S.$$
\end{defi}
\begin{prop} \label{prop31} Let $q$ be a regular point \index{Regular point} in $W^u(\sigma)$, with $\sigma$ a singularity of a Venice mask $X$, and let $O$ be a hyperbolic periodic orbit such that $W^u(O)\cap W^s(\sigma)\neq\emptyset$. Then $\omega_X(q)$ \index{Omega-limit set} is a closed orbit\index{Closed orbit}. \end{prop}
\begin{proof} If $\omega_X(q)$ is a singularity, then we are done. Hereafter, we assume that $\omega_X(q)$ is not a singularity. From {\em Lemma \ref{lem6}} it follows that $\omega_X(q)$ has singular partitions of arbitrarily small size. On the other hand, let $T_U M=\hat{F}_U^s\oplus \hat{F}_U^c$ be a continuous extension of the sectional-hyperbolic splitting \index{Sectional-hyperbolic splitting} $T_{\omega_X(q)}M=F_{\omega_X(q)}^s\oplus F_{\omega_X(q)}^c$ of $\omega_X(q)$ to a neighborhood $U$ of $\omega_X(q)$. Let $I$ be an arc tangent to $\hat{F}_U^c$, transverse to $X$, with $q$ as boundary point. {\em Theorem 18} in \cite{lec} guarantees, for every singular partition \index{Singular partition} $\mathcal{R}=\{S_1,\cdots S_k\}$ of $\omega_X(q)$, the existence of $S\in\mathcal{R}$, $\delta>0$, a sequence $q'_1,q'_2,\cdots\in S$ in the positive orbit of $q$, and a sequence of intervals $J_1',J_2'\cdots\subset S$ in the positive orbit of $I$ with $q_j'$ as a boundary point of $J_j'$, such that $length (J_j')\geq\delta$ for all $j=1,2,3,\cdots$.\\
We can assume $I=J_1'$. As $q, q'_j\in M(X)$ and $X$ is a Venice mask, we can use {\em Lemma \ref{lem2}} to obtain a sequence $\{q_n:n\in{\Bbb N}\}\subset M$ such that $q_n\to q$ and $\omega(q_n)$ is a singularity for every $n$. As $X$ has only finitely many singularities, we can take $\omega(q_n)=\{\sigma'\}$ for all $n$, for some $\sigma'\in Sing(X)$. If $q_n\in W^u(\sigma)$ for all $n$, then $\omega(q)=\{\sigma'\}$, which contradicts our assumption. Therefore $q_n\notin W^u(\sigma)$ for any $n$. We can take $q_n$ such that $q_n\in S$ for all $n$.\\
On the other hand, for $\sigma'$ the following two alternatives are possible: either $\sigma'\in\omega_X(q)$, or $\sigma'\notin\omega_X(q)$. We begin by considering $\sigma'\in\omega_X(q)$. {\em Lemma 14} in \cite{lec} asserts that $O^+(q)\cap\mathcal{R}=\{\hat{q}_1,\hat{q}_2,\cdots\}$ is an infinite sequence ordered in a way that $\Pi(\hat{q}_n)= \hat{q}_{n+1}$, and the existence of a curve $c_n\subset W^s(Sing(X)\cap \omega_X(q))\cap B_{\delta}(\hat{q}_n)$ such that
$$B_{\delta}^+(\hat{q}_n)\subset Dom(\Pi)\qquad \text{and}\qquad \Pi |_{B_{\delta}^+(\hat{q}_n)}\quad \text{is} \quad C^1, $$
where $B_{\delta}^+(\hat{q}_n)$ denotes the connected component \index{Connected component} of $B_{\delta}(\hat{q}_n)\setminus c_n$ containing $\hat{q}_n$.
In particular, we can reduce $\delta$ to obtain $\Pi_S=\Pi|_S$ such that
$$(\Pi_S)|_{B_{\delta}^+(q)}\quad\textit{is $C^1$}.$$
However $W^s(\sigma')$ accumulates $q$ on $S$, so we obtain a contradiction.
Therefore the first alternative cannot occur. We conclude $\sigma'\notin \omega_X(q)$.\\
Hartman-Grobman's Theorem \index{Hartman-Grobman's Theorem} implies the existence of a neighborhood $V_{\sigma'}$ of $\sigma'$, where the flow is $C^0$-conjugated to its linear part. Let $\eta>0$ be such that $V_{\sigma'}\subset B_{\eta}(\sigma')$ and $O^+(q)\cap V_{\sigma'}=\emptyset$. From {\em Lemma 2.2} in \cite{mp2} there are singular cross sections $\Sigma^+,\Sigma^-\subset V_{\sigma'}$ such that every orbit of $M(X)$ passing close to some point in $W^{s,+}(\sigma')$ (respectively $W^{s,-}(\sigma')$) intersects $\Sigma^+$(respectively $\Sigma^-$). Moreover {\em Lemma 2.3} in \cite{apu} guarantees the existence of two disks $\Lambda^+, \Lambda^-\subset V_{\sigma'}$ transverse to $X$ such that for $B_{\varepsilon}(\sigma')\subset V_{\sigma'}$, and for any point $x\in B_{\varepsilon}(\sigma')$, there are two numbers $t_- < 0 < t_+$ with $X_{t_-}(x) \in \Sigma^+\cup\Sigma^-$ and $X_{t_+}(x)\in \Lambda^+\cup\Lambda^-$. In addition, $X_t(x)\in V_{\sigma'}$ for all $t\in(t_-, t_+)$. See Figure \ref{singcross}.\\
\begin{figure}\label{singcross}
\end{figure}
As $q_n\to q$, we can take a sequence of open arcs $I_1,I_2,\cdots$ with $q_n$ as a boundary point of $I_n$ such that $Cl(I_n)$ converges to $Cl(I)$. In particular, we can assume $\delta\leq length (I_n)<\epsilon$ for all $n=1,2,3,\cdots$ and $diam(S)=\epsilon$. In addition, we can take $I_n\subset S$ for all $n$. On the other hand, $q_n\in W^s(\sigma')$ implies that $O^+(q_n)$ intersects $\Sigma^+\cup\Sigma^-$. Assume that the intersection occurs in $\Sigma^+$ for all $n$. As we can choose the singular partition of arbitrarily small size and $q$ is non-recurrent, there is $\varepsilon'>0$ such that $diam(\mathcal{R})=\varepsilon'$ and $O^+(s_n)\cap \Sigma^+\neq\emptyset$ for all $s_n\in I_n$. \\
We assert that $q$ satisfies the property $(P)_{\Sigma}$, where $\Sigma=\Sigma^+$. Indeed, from $O^+(q)\cap V_{\sigma'} =\emptyset$ it follows that $O^+(q)\cap \Sigma^+=\emptyset$. Now, for $x\in I$ there are $\beta_1, \beta_2>0$ such that $B_{\beta_1}(x)\cap \partial I=\emptyset$, $B_{\beta_2}(x)\cap \{q_l\}=\emptyset$ and $B_{\beta_2}(x)\cap I_l\neq\emptyset$ for all $l$ large. We define $\beta=\min\{\beta_1,\beta_2\}$. Let $\{x_l\}_l$ be a sequence with $x_l\in I_l\cap B_{\beta}(x)$ such that $x_l\to x$. As in \cite{lec}, we define the {\em holonomy map}\index{Holonomy map} $\Pi_{S,\Sigma^+}$ from $S$ to $\Sigma^+$ by
$$ Dom(\Pi_{S,\Sigma^+})=\{y\in S: X_t(y)\in\Sigma^+\textit{ for some $t>0$}\} $$
and
$$ \Pi_{S,\Sigma^+}(y)=X_{t_{S,\Sigma^+}(y)}(y), $$
where $t_{S,\Sigma^+}(y)=\inf\{t>0: X_t(y)\in\Sigma^+\}$.\\
Therefore $x_l\in Dom(\Pi_{S,\Sigma^+})$ for all $l$. From {\em Lemma 19} and {\em Theorem 22} in \cite{lec} it follows that $x\in Dom(\Pi_{S,\Sigma^+})$.\\
Finally, {\em Theorem \ref{thPsigma}} implies that $\omega_X(q)$ is a closed orbit. As we assume $\omega_X(q)$ not being a singularity, then we conclude that the omega-limit set of $q$ is a periodic orbit.
\end{proof}
\subsection{Property $(P_{\sigma'})_q^+$}
\begin{defi} Let $\sigma,\sigma'\in Sing(X)$ and $q$ be a regular point in $W^u(\sigma)$. We say that an open arc $I\subset M$ satisfies the Property $(P_{\sigma'})_q^+$ if $q\in \partial I$ and $I\cap W^{s,+}(\sigma')$ is dense in $I$. In a similar way, an open arc $J\subset M$ satisfies the Property $(P_{\sigma'})_q^-$ if $q\in \partial J$ and $J\cap W^{s,-}(\sigma')$ is dense in $J$. \index{Property $(P_{\sigma'})_q^+$} \end{defi}
\begin{prop} \label{prop41} Let $O$ be a hyperbolic periodic orbit of a Venice mask $X$. Assume $\sigma'\in Sing(X)$ satisfying $\emptyset \neq W^u(O)\cap W^s(\sigma')\subset W^{s,+}(\sigma')$. Let $q$ be a regular point with $q\in W^u(\sigma)\cap Cl(W^u(O))$, for some $\sigma\in Sing(X)$. Then there is an open arc satisfying the Property $(P_{\sigma'})_q^+$. The same interchanging $+$ by $-$. \end{prop}
\begin{proof} Let $p\in W^u(\sigma')$ be a regular point. We assert that there is an open interval $J$ satisfying the Property $(P_{\sigma'})_p^+$. Indeed, $\sigma'$ and $p$ are contained in $Cl(W^u(O))$. As $W^u(O)$ intersects $W^{s,+}(\sigma')$, then $W^u(O)\cap W^{s}(\sigma')$ is dense in $W^{s,+}(\sigma')$. Consider an open arc $J\subset W^u(O)$ with $p\in \partial J$. So, the density of $W^u(O)\cap W^{s,+}(\sigma')$ in $W^{u}(O)$ implies that $J\cap W^{s,+}(\sigma')$ is dense in $J$. \\
If $\sigma=\sigma'$, then we obtain the desired result. Now, we consider $\sigma\neq\sigma'$. From {\em Lemma \ref{prop31}} it follows that the omega-limit set of every point in $W^u(\sigma')$ is a closed orbit. Now, take two points $p_1,p_2$, one on each branch of $W^u(\sigma')\setminus\{\sigma'\}$. We analyze the following cases, which are illustrated in Figure \ref{omegacases}.
\begin{figure}
\caption{ Proof Proposition \ref{prop41}}
\label{omegacases}
\end{figure}
\begin{itemize}
\item $\omega_X(p_1)$ is a singularity. Let $\sigma_1$ be a singularity with $\omega_X(p_1)=\{\sigma_1\}$. If $\omega_X(p_1)=\{\sigma'\}$, then $\omega_X(p_2)\neq\{\sigma'\}$. Indeed, $\omega_X(p_1)=\{\sigma'\}=\omega_X(p_2)$ implies either $W^u(O)\cap W^s(\sigma)\neq\emptyset$ or $Cl(W^u(O))\cap W^s(\sigma)\neq\emptyset$. But $W^u(O)\cap W^s(\sigma)=\emptyset$ by hypothesis. Moreover $\sigma\in Cl(W^u(O))$. So, $\sigma_1\neq\sigma'$.
Let $w\in W^u(\sigma')\cap W^s(\sigma_1)$ be a point in $O_X^+(p_1)$ close to $\sigma_1$. Using it and linear coordinates around $\sigma_1$, we can construct an open interval $J_1\subset\bigcup_{t\geq 0}X_t(J)\subset W^u(O)$ contained in a suitable cross section through $w$, such that $w\in\partial J_1$. From the {\em Inclination lemma} \cite{dmp}, it follows that $W^u(O)$ accumulates points in some branch of $W^u(\sigma_1)$. Therefore, for $q_1\in (W^u(\sigma_1)\cap Cl(W^u(O)))\setminus\{\sigma_1\}$ there is an open arc $I_1$ such that $I_1\subset\bigcup_{t\geq 0}X_t(J_1)$ and $q_1\in\partial I_1$. The density of $W^{s,+}(\sigma')\cap W^u(O)$ in $W^u(O)$ implies the density of $W^{s,+}(\sigma')\cap I_1$ in $I_1$. Then $I_1$ satisfies $(P_{\sigma'})_{q_1}^+$.
\item When the omega-limit set of $p_1$ and $p_2$ are respectively hyperbolic periodic orbits $O_1,O_2$, we have that $W^u(O_i)$ intersects the stable manifold of some singularity $\sigma_i$ of $X$, $i=1,2$. We first assume $\sigma_1=\sigma_2=\sigma'$. That intersection cannot just only occurs in $W^s(\sigma')$ because of this would imply $\sigma\notin Cl(W^u(O_1)\cup W^u(O_2))$ and $Cl(W^u(O))\subset Cl(W^u(O_1)\cup W^u(O_2))$. But $\sigma\in Cl(W^u(O))$ which produces a contradiction. Therefore we can assume that $W^u(O_1)\cap W^s(\sigma_1)\neq\emptyset$ with $\sigma_1\neq\sigma'$.
Applying the {\em Inclination lemma}\index{Inclination lemma}, $Cl(W^u(O))$ and $\bigcup_{t\geq 0}X_t(J)$ intersect $W^s(\sigma_1)$ transversally. Again, let $w\in W^u(O)\cap W^s(\sigma)$ be a point in $\bigcup_{t\geq 0}X_t(J)$ close to $\sigma_1$. Using it and linear coordinates around $\sigma_1$, we can construct an open interval $J_1\subset W^u(O)$ contained in a suitable cross section through $w$. $J_1\setminus\{w\}$ is formed by two open arcs $J_1^+,J_1^-\subset W^u(O)$. Therefore, for $q_1\in W^u(\sigma_1)\setminus\{\sigma_1\}$ there is an open arc $I_1$ such that $q_1\in\partial I_1$ and either $I_1\subset\bigcup_{t\geq 0}X_t(J_1^+)$ or $I_1\subset\bigcup_{t\geq 0}X_t(J_1^-)$. The density of $W^{s,+}(\sigma')\cap W^u(O)$ in $W^{s,+}(\sigma')$ implies the density of $W^{s,+}(\sigma')\cap I_1$ in $I_1$. Then $I_1$ satisfies $(P_{\sigma'})_{q_1}^+$.
\end{itemize}
If $\sigma_1=\sigma$, then the result is obtained. Otherwise, we apply a similar process to $\sigma_1$ to get $\sigma_3\in Sing(X)$ with $\sigma_3\notin\{\sigma',\sigma_1\}$, and an open arc $I_3\subset Cl(W^u(O))$ such that $I_3$ satisfies the Property $(P_{\sigma'})_{q_3}^+$.
As $\sigma\in Cl(W^u(O))$ and $X$ just has finitely many singularities, we conclude the existence of some open arc satisfying the Property $(P_{\sigma'})_{q}^+$ for $q\in W^u(\sigma)\cap Cl(W^u(O))$.
\end{proof}
\subsection{Proof of Theorem \ref{thH}}
It is sufficient to prove the existence of singular partitions\index{Singular partition} of arbitrarily small size.
Let $q$ be a regular point in $W^u(\sigma)$, where $\sigma\in Sing(X)$.\\
As $M(X)$ is union of homoclinic classes, there is a hyperbolic periodic orbit $O$ such that $\sigma$ and $q$ are contained in the homoclinic class associated to $O$, denoted by $H(O)$. In addition $H(O)$ intersects only one or the two connected components $W^{s,+}(\sigma),W^{s,-}(\sigma)$ of $W^s(\sigma)\setminus\mathcal{F}^{ss}_X (\sigma)$. We begin by analyzing the intersection in $W^{s,+}(\sigma)$. On the other hand, $X$ satisfies the Property $(P)$. This implies that there is a singularity $\sigma'\in Sing(X)$ with $W^u(O)\cap W^s(\sigma')\neq\emptyset$. By {\em Theorem} \ref{th1}, the intersection of $W^u(O)$ with $W^s(\sigma')$ is either only one or the two connected components $W^{s,+}(\sigma'),W^{s,-}(\sigma')$ of $W^s(\sigma')\setminus\mathcal{F}^{ss}_X(\sigma')$. If $\sigma=\sigma'$ then from {\em Lemma \ref{lem6}} follows the existence of singular partitions of arbitrarily small size. Hereafter, we assume $\sigma\neq \sigma'$ and $W^{s,+}(\sigma')\cap W^u(O)\neq\emptyset$.\\
If $Cl(W^u(O))\cap W^{s,-}(\sigma')\neq\emptyset$, then {\em Lemma} \ref{lem41} and {\em Proposition} \ref{prop31} imply that for some $p\in W^u(\sigma')\cap Cl(W^u(O))$, $O=\omega_X(p)$ and $H(O)\subset Cl(W^u(\sigma'))$. But $q\notin W^u(\sigma')$. This contradicts $q\in H(O)$. So, $Cl(W^u(O))\cap W^{s,-}(\sigma')=\emptyset$. {\em Proposition} \ref{prop41} guarantees the existence of an open arc $I^+\subset M$ satisfying the Property $(P_{\sigma'})_q^+$. \\
We suppose $\omega_X(q)$ is not a periodic orbit. Let $z$ be a point in $\omega_X(q)$. In a similar way as {\em Lemma} \ref{lem6}, we fix a foliated rectangle of small diameter $R_z^0$ such that $z\in Int(R_z^0)$ and $\omega_X(q)\cap\partial^h R^0_z =\emptyset$. The positive orbit of $q$ intersects either only one or the two connected components of $R_z^0\setminus\mathcal{F}^s(z, R_z^0)$.
Assume the intersection is occurring in just one component only.
Now, we analyze the following cases:
\begin{itemize}
\item $q\notin H(O')$ for all hyperbolic periodic orbit $O'$ of $X$ such that $H(O')\cap W^{s,-}(\sigma)\neq\emptyset$.
The existence of the singular partitions of arbitrarily small size is obtained such as the first case in {\em Lemma} \ref{lem6}.
\item There is a sequence $\{p_n\}_n\subset W^u(O)$ such that $p_n\to p\in W^{s,-}(\sigma)$, and there is a sequence $\{q_n\}$ such that $q_n\in O_X(p_n)$ and $q_n\to q$.
From {\em Lemma} \ref{lem41} follows that $\omega_X(q)=O$. But this contradicts our assumption that the omega-limit set is not a periodic orbit.
\item For some periodic orbit $O'\neq O$, there is a sequence $\{p_n:n\in{\Bbb N}\}\subset W^u(O')$ such that $p_n\to p\in W^{s,-}(\sigma)$, and there is a sequence $\{q_n:n\in{\Bbb N}\}$ satisfying $q_n\in O_X(p_n)$ and $q_n\to q$.
Again, {\em Lemma} \ref{lem41} implies that $W^u(O')$ does not intersect the open arc $I^+$. From Property $(P)$, there is $\sigma''\in Sing(X)$ such that $W^u(O')\cap W^s(\sigma'')\neq\emptyset$. Then for some $r\in W^u(\sigma'')$ there is an interval $J^-\subset W^u(O')$, such that $r\in\partial J^-$ and $J^-\cap W^s(\sigma'')$ is dense in $J^-$. Also there is an open arc $I^-\subset \bigcup_{t\geq 0}X_t(J^-)$ satisfying $q\in\partial I^-$. Therefore $I^-\subset W^u(O')$ and $I^-\cap W^s(\sigma'')$ is dense in $I^-$. In addition, $W^{s,+}(\sigma)\cap I^-=\emptyset$. The stable manifolds through $I=I^+\cup\{q\}\cup I^-$ generate a subrectangle $R_I$. This rectangle acts as in {\em Lemma} 17 in \cite{lec}.
\end{itemize}
The existence of the singular partition of arbitrarily small size is obtained as in {\em Lemma} \ref{lem6}.\\
If the intersection of $O^+_X(q)$ with $R^0_z$ occurs in both connected components of $R^0_z\setminus\mathcal{F}^s(z,R^0_z)$, then we proceed such as {\em Lemma} \ref{lem6} to get a cross section $\Sigma_z$ with $z\in\Sigma_z$ and $\partial\Sigma_z\cap\omega_X(q)=\emptyset$.
In this way, {\em Proposition} 3 in \cite{lec} implies the existence of the singular partition of arbitrarily small size for $\omega_X(q)$.
Finally, we follow the proof of {\em Proposition} \ref{prop31} to conclude that $\omega_X(q)$ is a closed orbit.
\section{Intersection of homoclinic classes} \label{inthomcla}
In this section we are interested in the study of the intersection of homoclinic classes in a sectional-Anosov flow. We follow some ideas developed in \cite{bamo} to obtain {\em Theorem \ref{thH'}}. More specifically, we prove that in this context, this intersection can be decomposed into three specific sets: a non-singular hyperbolic set, finitely many singularities and regular orbits joining them. Recall that an invariant set is nontrivial if it does not reduce to a single orbit. The conclusion of {\em Theorem \ref{thH'}} is obvious when $H_1$ or $H_2$ are trivial invariant sets. Hereafter, $H_1$ and $H_2$ are two non trivial different homoclinic classes in $M(X)$. Let $\Lambda$ be the intersection between $H_1$ and $H_2$. We start with the following lemma.
\begin{lemma} \label{lem7} Assume that there is a singularity $\sigma\in\Lambda$, then for $\delta>0$ small, every sequence $\{x_n:n\in{\Bbb N}\}\subset\Lambda\cap B_{\delta}(\sigma)$ such that $x_n\to\sigma$ is contained in $W^s(\sigma)\cup W^u(\sigma)$. \end{lemma}
\begin{proof} We suppose by contradiction that there is a sequence $\{x_n:n\in{\Bbb N}\}\subset\Lambda\cap B_{\delta}(\sigma)$ such that $x_n\to\sigma$ and $x_n\notin W^s(\sigma)\cup W^u(\sigma)$ for all $n$.
So, we obtain two sequences $x^s_n$ and $x_n^u$, in the orbit of $x_n$ such that $x_n^s\to y^s$ and $x_n^u\to y^u$ for some $y^s\in W^s(\sigma)\setminus\{\sigma\}$ and $y^u\in W^u(\sigma)\setminus \{\sigma\}$ close to $\sigma$. Let $O_1,O_2$ be two orbits such that $H(O_1)=H_1$ and $H(O_2)=H_2$. Then there exist sequences $\{p_n:n\in{\Bbb N}\}\subset (W^u(O_1)\cap W^s(O_1))$ and $\{q_n:n\in{\Bbb N}\}\subset (W^u(O_2)\cap W^s(O_2))$ satisfying $p_n\to x_n^s$ and $q_n\to x_n^s$. We can assume $p_n\notin H_2$ for all $n$. This means that $p_n\to x^s$ and $q_n\to x^s$ too. The behavior of the orbits of $x_n$, $p_n$ and $q_n$ nearby $\sigma$, are as described in Figure \ref{lemmaint}.
Since homoclinic classes have density of periodic points \cite{haka}, for each $n$ we have that $p_n$ and $q_n$ are approximated respectively by a sequence of periodic orbits $\{O_1^{mn}:m\in{\Bbb N}\}$ and $\{O_2^{mn}:m\in{\Bbb N}\}$. Define the map $\pi:B_{\delta}(\sigma)\to W^{cu}(\sigma)$ such as in {\em Subsection \ref{singpart}}. Observe that $\{\pi(W^u(O_1^{mn})):m\in{\Bbb N}\}$ and $\{\pi(W^u(O_2^{mn})):m\in{\Bbb N}\}$ accumulate $y^s$ in the same sector $s_{ij}$ of $W^{cu}(\sigma)$. It follows from {\em Lemma} 3.1 in \cite{carmo} that these sequences can be chosen in a way that, for $i=1,2$ and for all $n,m$, $W^s(O_i^{nm})$ is uniformly bounded away from zero. This implies that for $m_1,m_2,n_1,n_2$ large, $W^u(O_1^{m_1n_1})\cap W^s(O_2^{m_2n_2})\neq\emptyset$. Consider $x\in W^u(O_1^{n_1m_1})\cap W^s(O_2^{m_2n_2})$. As $O_1^{m_1n_1}\subset (H_1\setminus H_2)$ and $O_2^{m_2n_2} \subset H_2$, then there is $x^*\in O_X(x)$ such that $x^*\in\Lambda$. But $\Lambda$ is an invariant closed set, then $O_1^{m_1n_1}\subset Cl(O_X(x))=Cl(O_X(x^*))\subset \Lambda$. However $O_1^{m_1n_1}\nsubseteq H_2$ and $\Lambda\subset H_2$, which is a contradiction.
We conclude $x_n\in W^s(\sigma)\cup W^u(\sigma)$ for all $n\in{\Bbb N}$.
\end{proof}
\begin{figure}
\caption{ {\em Lemma} \ref{lem7}}
\label{lemmaint}
\end{figure}
\subsection{Proof of Theorem \ref{thH'}}
Theorem \ref{thH'} gives a description about the set $\Lambda$.
\begin{proof} The idea of the proof is the same as that given in {\em Lemma 3.3} of \cite{bamo}. It follows from {\em Lemma \ref{lem7}} that there is $\delta>0$ such that $\Lambda\cap B_{\delta}(\sigma)\subset W^s(\sigma)\cup W^u(\sigma)$, and the balls $B_{\delta}(\sigma)$ are pairwise disjoint for every $\sigma\in\Lambda\cap Sing(X)=S$. Define
$$H=\bigcap_{(t,\sigma)\in {\Bbb R}\times S}X_t(\Lambda\setminus B_{\delta}(\sigma)).$$
By construction, $H$ is a non-singular, \index{Non-singular set} compact invariant sectional-hyperbolic set. So, applying {\em Lemma \ref{lem1}} we have that $H$ is hyperbolic. Now define $R=\Lambda\setminus(S\cup H)$. For $x\in R$ there is $(t,\sigma)\in{\Bbb R}\times S$ with $X_t(x)\in B_{\delta}(\sigma)$, and by {\em Lemma \ref{lem7}} $X_t(x)\in W^s(\sigma)\cup W^u(\sigma)$.
If $x\in W^u(\sigma)$ we obtain $\alpha(x)\subset H\cup S$. Assume $X_s(x)\notin \bigcup_{\rho\in S} B_{\delta}(\rho)$ for all $s\geq 0$, then $\omega(x)\subset H$. Now, if there is $(s,\rho)\in{\Bbb R}\times S$ such that $X_s(x)\in B_{\delta}(\rho)$, then $x\in W^s(\rho)$, so $\omega(x)\subset H\cup S$.
With a similar argument we have $\alpha(x)\subset H\cup S$ and $\omega(x)\subset H\cup S$ for $x\in W^s(\sigma)$. So, we conclude the result.
\end{proof}
\section{Some conjectures}
In the course of the study developed in this work, several questions have arisen. All known examples of Venice masks are characterized by the fact that the maximal invariant set is the finite union of homoclinic classes and the intersection between two different homoclinic classes $H_1$ and $H_2$ is contained in $Cl(W^u(Sing(X)))$. Moreover, every regular point $q\in W^u(Sing(X))\cap H_1\cap H_2$ is non-recurrent.
Consider a Venice mask $X$ supported on a compact 3-manifold $M$. Let $H_1$ and $H_2$ be two different homoclinic classes in $M(X)$ and let $\Lambda$ be the intersection between $H_1$ and $H_2$. Assume the decomposition of $\Lambda$ given in {\em Theorem \ref{thH'}}, that is, $\Lambda=S\cup H\cup R$. \\
We announce the following conjecture.
\begin{conj} \label{conj1} Every regular point $q\in R$ is non-recurrent. \end{conj}
From {\em Lemma \ref{lem7}} we have that for $\delta>0$ small, $x\in B_{\delta}(\sigma)$ implies $x\in W^s(\sigma)\cup W^u(\sigma)$ for some $\sigma\in S$. If $x\in W^u(\sigma)$ then $\alpha(x)=\{\sigma\}$. Now we take $x\in W^s(\sigma)\setminus W^u(\sigma)$. Therefore we shall consider two cases, either $\alpha(x)=\{\rho\}$ for some $\rho\in S$ or $\alpha(x)\subset H$. In the first case, we obtain the desired result. If we prove that the second case cannot occur, then the following conjecture would be true.
\begin{conj} \label{conj2} $\Lambda\subset Cl(W^u(Sing(X)))$. \end{conj}
Let us state a direct consequence of the hyperbolic Lemma \ref{lem1}, which appears in \cite{lec}.
\begin{clly} Every periodic orbit of a sectional-Anosov flow on a compact manifold is hyperbolic. In particular, all such flows have countably many closed orbits. \end{clly}
This implies that the maximal invariant set of every Venice mask is a union of countably many homoclinic classes. So, if {\em Conjecture \ref{conj1}} and {\em Conjecture \ref{conj2}} are true, then it would be possible to establish the following statement.
\begin{conj} \label{conj3} The maximal invariant set of every Venice mask is finite union of homoclinic classes. \end{conj}
\begin{proof} Let $X$ be a Venice mask supported on a compact 3-manifold $M$. Then $X$ has finitely many singularities, say $n$. Let $H_1$, $H_2$ be two different homoclinic classes associated to $M(X)$. From Conjectures \ref{conj1} and \ref{conj2} it is possible to apply Theorem \ref{thH} to conclude that for each singularity $\sigma$ of $X$, $Cl(W^u(\sigma))=\{\sigma\}\cup W^u(\sigma)\cup C_{\sigma}$, where the union is disjoint and $C_{\sigma}$ is a closed orbit. On the other hand, the branches of $W^u(\sigma)$ are uni-dimensional. Therefore Conjecture \ref{conj2} implies $H_1\cap H_2$ has just only a finite number of possibilities to occur. Moreover, at most three homoclinic classes can contain the branch of the unstable manifold of some singularity.
This finishes the proof.
\end{proof}
\flushleft H. M. S\'anchez\\ Instituto de Matem\'atica, Universidade Federal do Rio de Janeiro\\ Rio de Janeiro, Brazil\\ E-mail: hmsanchezs@unal.edu.co
\end{document} | arXiv | {
"id": "1704.02045.tex",
"language_detection_score": 0.6921567320823669,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Pointwise Bounds and Blow-up for\\ Nonlinear Fractional Parabolic Inequalities}
\author{Steven D. Taliaferro\\ Department of Mathematics\\ Texas A\&M University\\ College Station, TX 77843-3368\\ USA\\ {\tt stalia@math.tamu.edu}}
\date{} \maketitle
\begin{abstract} We investigate pointwise upper bounds for nonnegative solutions $u(x,t)$ of the nonlinear initial value problem \begin{equation}\label{0.1}
0\leq(\partial_t-\Delta)^\alpha u\leq u^\lambda \quad\text{ in }\mathbb{R}^n \times\mathbb{R},\,n\geq1, \end{equation} \begin{equation}\label{0.2}
u=0\quad\text{in }\mathbb{R}^n\times(-\infty,0) \end{equation} where $\lambda$ and $\alpha$ are positive constants. To do this we first give a definition---tailored for our study of \eqref{0.1}, \eqref{0.2}---of fractional powers of the heat operator $(\partial_t-\Delta)^\alpha :Y\to X$ where $X$ and $Y$ are linear spaces whose elements are real valued functions on $\mathbb{R}^n \times\mathbb{R}$ and $0<\alpha<\alpha_0$ for some $\alpha_0$ which depends on $n$, $X$ and $Y$.
We then obtain, when they exist, optimal pointwise upper bounds on $\mathbb{R}^n \times(0,\infty)$ for nonnegative solutions $u\in Y$ of the initial value problem \eqref{0.1}, \eqref{0.2} with particular emphasis on those bounds as $t\to0^+$ and as $t\to\infty$.
\noindent 2010 Mathematics Subject Classification. 35B09, 35B33, 35B44, 35B45, 35K58, 35R11, 35R45.
\noindent {\it Keywords}. Blow-up, Pointwise bounds, Fractional heat operator, Parabolic. \end{abstract}
\iffalse
\renewcommand{R\'esum\'e} \begin{abstract}{R\'esum\'e} \begin{abstract} Nous \'etudions
des majorations ponctuelles pour les solutions positives $u(x,t)$ du
probl\`eme de valeurs initiales \begin{equation}\label{0.3}
0\leq(\partial_t-\Delta)^\alpha u\leq u^\lambda \quad\text{ dans }\mathbb{R}^n \times\mathbb{R},\,n\geq1, \end{equation} \begin{equation}\label{0.4}
u=0\quad\text{dans }\mathbb{R}^n\times(-\infty,0) \end{equation} o\`u $\lambda$ et $\alpha$ sont des constantes strictement positives. Pour ce faire nous donnons tout d'abord une d\'efinition--adapt\'ee \`a notre \'etude de \eqref{0.3}, \eqref{0.4}--- des puissances fractionnaires de l'op\'erateur de la chaleur $(\partial_t-\Delta)^\alpha :Y\to X$ ou $X$ et $Y$ sont des espaces vectoriels dont les \'el\'ements sont des fonctions \`a valeurs r\'eelles sur $ \mathbb{R}^n \times\mathbb{R}$ et $0<\alpha<\alpha_0$ pour $\alpha_0$ d\'ependant de $n$, $X$ et $Y$.
Nous obtenons ensuite, lorsqu'elles existent, des majorations ponctuelles optimales sur $\mathbb{R}^n \times(0,\infty)$ pour les solutions positives $u\in Y$ du probl\`eme de valeurs initiales \eqref{0.3}, \eqref{0.4}, et un soin tout particulier est port\'e \`a ces majorations lorsque $t\to0^+$ et lorsque $t\to\infty$. \end{abstract}
\fi
\section{Introduction}\label{sec1} In this paper we study pointwise upper bounds for nonnegative solutions $u(x,t)$ of the nonlinear inequalities \begin{equation}\label{1.2}
0\leq(\partial_t-\Delta)^\alpha u\leq u^\lambda \quad\text{in }\mathbb{R}^n \times\mathbb{R},\,n\geq1, \end{equation} satisfying the initial condition \begin{equation}\label{1.3}
u=0\quad \text{in }\mathbb{R}^n \times(-\infty,0) \end{equation} where $\lambda$ and $\alpha$ are positive constants.
To do this, we first give in Section \ref{sec2} a definition---appropriate for our analysis of the initial value problem \eqref{1.2}, \eqref{1.3}---of fractional powers of the heat operator \begin{equation}\label{1.1}
(\partial_t-\Delta)^\alpha :Y\to X \end{equation} where $\Delta$ is the Laplacian with respect to $x\in\mathbb{R}^n$, $X$ and $Y$ are linear spaces whose elements are real valued functions on $\mathbb{R}^n \times\mathbb{R}$, and $0<\alpha<\alpha_0$ for some $\alpha_0 >0$ which depends on $n$, $X$ and $Y$.
With the definition of \eqref{1.1} in hand, we obtain, when they exist, optimal pointwise upper bounds on $\mathbb{R}^n\times(0,\infty)$ for nonnegative solutions $u\in Y$ of the initial value problem \eqref{1.2}, \eqref{1.3} with particular emphasis on these bounds as $t\to0^+$ and as $t\to\infty$. These results are stated in Section \ref{sec3} and proved in Section \ref{sec8}.
Since the operator \eqref{1.1} is nonlocal, we must require the initial condition \eqref{1.3} to hold in $\mathbb{R}^n \times(-\infty,0)$ (not just in $\mathbb{R}^n \times \{0\}$) and nonnegative solutions of \eqref{1.2}, \eqref{1.3} may not tend pointwise to zero as $t\to 0^+$ (see Theorem \ref{thm3.5}) even though they satisfy the initial condition \eqref{1.3}.
Of course any estimates we obtain for nonnegative solutions of \eqref{1.2}, \eqref{1.3} also hold for nonnegative solutions of the initial value problem consisting of \eqref{1.3} and the {\it equation} \[
(\partial_t-\Delta)^\alpha u=u^\lambda \quad \text{in }\mathbb{R}^n \times\mathbb{R}. \]
According to our results in Section \ref{sec3} there are essentially only three possibilities for the solutions of \eqref{1.2}, \eqref{1.3} depending on $X$, $Y$, $\lambda$, and $\alpha$:
\begin{enumerate} \item The only solution is $u\equiv 0$ in $\mathbb{R}^n\times\mathbb{R}$; \item There exist sharp nonzero pointwise bounds for solutions as
$t\to 0^+$ and as $t\to\infty$; \item There do not exist pointwise bounds for solutions as
	$t\to 0^+$ and as $t\to\infty$. \end{enumerate} All possibilities can occur. For the precise statements of possibilities (i), (ii), and (iii) see Theorem \ref{thm3.1}, Theorems \ref{thm3.2}--\ref{thm3.4}, and Theorems \ref{thm3.5} and \ref{thm3.6}, respectively.
The operator \eqref{1.1} is a fully fractional heat operator as opposed to time fractional heat operators in which the fractional derivatives are only with respect to $t$, and space fractional heat operators, in which the fractional derivatives are only with respect to $x$.
Some recent results for nonlinear PDEs containing time (resp. space) fractional heat operators can be found in \cite{AV,A,ACV,DVV,K,
KSVZ,M,OD,SS,VZ,ZS} (resp. \cite{AABP,AMPP,BV,CVW,DS,FKRT,GW,JS,MT,PV,V,VV,VPQR}). We know of no results for nonlinear PDEs containing the fully fractional heat operator \eqref{1.1}. However results for linear PDEs containing \eqref{1.1}, including in particular \[ (\partial_t-\Delta)^\alpha u=f, \] where $f$ is a given function, can be found in \cite{ACM,NS,SK,ST}.
\section{Definition and properties of fully fractional heat operators}\label{sec2} In this section we give a well-motivated definition of the fully fractional heat operator \eqref{1.1}, suitable for our study of the initial value problem \eqref{1.2}, \eqref{1.3}, and then give some of its properties.
Some of the material in this section is inspired by---and can be viewed as the parabolic analog of---the material in \cite[Sec. 5.1]{S} concerning the fractional Laplacian.
Since for functions $u:\mathbb{R}^n \times\mathbb{R}\to\mathbb{R},\,n\geq1$, which are sufficiently smooth and small at infinity we have
$$((\partial_t-\Delta)u)\ \widehat{ }\ (y,s)=(|y|^2 -is)\widehat{u}(y,s),$$ where $\ \widehat{ }\ $ is the Fourier transform operator on $\mathbb{R}^n \times\mathbb{R}$ given by $$\widehat{u}(y,s)=\iint_{\mathbb{R}^n \times\mathbb{R}}e^{i(y,s)\cdot(x,t)}u(x,t)\, dx \, dt,$$ the fractional heat operator $(\partial_t-\Delta)^\alpha ,\,\alpha>0$, is formally defined in \cite[Chapter 2]{SP} by \begin{equation}\label{2.1}
	((\partial_t-\Delta)^\alpha u)\ \widehat{ }\ (y,s)=(|y|^2 -is)^\alpha \widehat{u}(y,s). \end{equation} If $f=(\partial_t-\Delta)^\alpha u$ then from \eqref{2.1} and the fact (see \cite[Theorem 2.2]{SP} and Theorem \ref{thm2.1}(i) below) that
$$\widehat{\Phi}_\alpha(y,s)=(|y|^2 -is)^{-\alpha}\quad\text{for }0<\alpha<(n+2)/2$$ in the sense of tempered distributions where \begin{equation}\label{2.2}
\Phi_\alpha (x,t)=\frac{t^{\alpha-1}}{\Gamma(\alpha)}\,
\frac{1}{(4\pi t)^{n/2}}e^{-|x|^2/(4t)}\raisebox{2pt}{$\chi$}_{(0,\infty)}(t), \end{equation} we formally get $$\widehat{u}=\widehat{\Phi}_\alpha \widehat{f}.$$ Hence by the convolution theorem we formally find that \begin{equation}\label{2.3}
u=J_\alpha f:=\Phi_\alpha *f \end{equation} where $*$ is the convolution operation in $\mathbb{R}^n \times\mathbb{R}$. Since $\Phi_\alpha (x,t)=0$ for $t\leq0$ we have \begin{equation}\label{2.4}
J_\alpha f(x,t)=\iint_{\mathbb{R}^n \times(-\infty,t)}\Phi_\alpha (x-\xi,t-\tau)f(\xi,\tau)\, d\xi \, d\tau . \end{equation}
By part (ii) of the following theorem, equations \eqref{2.1} and \eqref{2.3} are equivalent in the sense that \[
(J_\alpha f)\ \widehat{ }\ =(|y|^2 -is)^{-\alpha} \widehat{f}\quad\text{for }f\in L^1 (\mathbb{R}^n \times\mathbb{R}) \text{ and } 0<\alpha<(n+2)/2 \] in the sense of tempered distributions.
\begin{thm}\label{thm2.1}
Suppose $0<\alpha<(n+2)/2$.
\begin{enumerate}
\item[(i)] The Fourier transform of $\Phi_\alpha (x,t)$ is the
function $(|y|^2 -is)^{-\alpha}$ in the sense that
$$\iint_{\mathbb{R}^n \times\mathbb{R}}\Phi_\alpha (x,t)\widehat{\varphi}(x,t)\, dx \, dt=\iint_{\mathbb{R}^n \times\mathbb{R}}(|y|^2-is)^{-\alpha}\varphi(y,s)\, dy \, ds$$
	for all $\varphi\in S$ where $S$ is the Schwartz class of rapidly decreasing functions.
	\item[(ii)] The identity $(J_\alpha f)\ \widehat{ }\ (y,s)=(|y|^2 -is)^{-\alpha}\widehat{f}(y,s)$ holds in the sense that
\begin{equation}\label{2.5}
\iint_{\mathbb{R}^n \times\mathbb{R}}J_\alpha f(x,t)\widehat{g}(x,t)\, dx \, dt=\iint_{\mathbb{R}^n \times\mathbb{R}}(|y|^2 -is)^{-\alpha}\widehat{f}(y,s)g(y,s)\, dy \, ds
\end{equation}
for all $f\in L^1 (\mathbb{R}^n \times\mathbb{R})$ and all $g\in S$.
\end{enumerate} \end{thm}
Motivated by these formal calculations, we will now define the operator $(\partial_t-\Delta)^\alpha$ as the inverse of a linear operator \begin{equation}\label{2.6}
J_\alpha :X\to Y \end{equation} where $J_\alpha$ is defined by \eqref{2.4} and \eqref{2.2} and $X$ and $Y$ are linear spaces whose elements are functions $f:\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ such that the operator \eqref{2.6} has the following properties: \begin{enumerate}
\item[(P1)] it makes sense because the integral in \eqref{2.4}
defines a real valued measurable function on
$\mathbb{R}^n\times\mathbb{R}$ for all $f\in X$,
\item[(P2)] it is one-to-one and onto, and
\item[(P3)] if $u=J_\alpha f$ then $f=0$ in $\mathbb{R}^n \times(-\infty,0)$ if and only if $u=0$ in $\mathbb{R}^n \times(-\infty,0)$. \end{enumerate} Property (P3) will be needed to handle the initial condition \eqref{1.3}. The domain of $J_\alpha$ is usually taken to be $L^p (\mathbb{R}^n \times\mathbb{R}),\,1\leq p<\frac{n+2}{2\alpha}$ (see \cite[Section 9.2]{SK}). However since the region of integration for the integral \eqref{2.4} is not $\mathbb{R}^n \times\mathbb{R}$ but rather $\mathbb{R}^n \times(-\infty,t)$, we see that more natural and less restrictive choices for the domain and range of $J_\alpha$ are \begin{align}\label{2.9}
X^p &:=\bigcap_{T\in\mathbb{R}}L^p (\mathbb{R}^n \times\mathbb{R}_T )\\ \label{2.10}
Y^p_\alpha &:=J_\alpha (X^p ) \end{align} respectively, where $\mathbb{R}_T =(-\infty,T)$. By \eqref{2.9} we mean $X^p$ is the set of all measurable functions $f:\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ such that
$$\| f\|_{L^p (\mathbb{R}^n \times\mathbb{R}_T )}<\infty\quad\text{for all }T\in\mathbb{R}.$$ The notation in \eqref{2.9} should be interpreted similarly elsewhere in this paper.
According to the following two theorems the formal operator \begin{equation}\label{2.11}
J_\alpha :X^p \to Y^p_\alpha , \end{equation} where $X^p$ and $Y^p_\alpha$ are defined in \eqref{2.9} and \eqref{2.10}, satisfies properties (P1)--(P3) provided either \begin{equation}\label{2.12}
\left(p>1 \text{ and } 0<\alpha<\frac{n+2}{2p}\right)\quad\text{or} \quad\left(p=1 \text{ and } 0<\alpha\leq\frac{n+2}{2p}\right). \end{equation}
When $p$ and $\alpha$ satisfy \eqref{2.12}, part (i) of the following theorem shows that the operator \eqref{2.11} satisfies (P1) and parts (ii) and (iii) give some of its properties.
\begin{thm}\label{thm2.2}
Suppose $p$ and $\alpha$ are real numbers satisfying \eqref{2.12} and $f\in X^p$. Then
\begin{enumerate}
\item[(i)] $J_\alpha f,\,J_\alpha |f|\in
L^{p}_{\text{loc}}(\mathbb{R}^n \times\mathbb{R})$ and
\item[(ii)] $J_\beta (J_\gamma f)=J_\alpha f$ in $L^{p}_{\text{\rm loc}}
(\mathbb{R}^n \times\mathbb{R})$ whenever $\beta>0,\,\gamma>0$,
and $\beta+\gamma=\alpha$. \end{enumerate} If in addition $\alpha>1$ then \begin{enumerate} \item[(iii)] $HJ_\alpha f=J_{\alpha-1}f$ in $\mathcal{D}^\prime (\mathbb{R}^n \times\mathbb{R})$ where $H=\partial_t-\Delta$ is the heat operator. \end{enumerate} \end{thm}
\begin{rem}\label{rem2.1}
Theorem \ref{thm2.2}(i) can be improved to $J_\alpha f\in L^{q}_{\text{loc}}(\mathbb{R}^n \times\mathbb{R})$ when
$$1<p<\frac{n+2}{2\alpha} \quad\text{ and }\quad \frac{1}{q}=\frac{1}{p}-\frac{2\alpha}{n+2}.$$
This can be seen by applying Gopala Rao \cite[Theorem 3.1]{GR} to the function $f_T$ defined in the proof of Theorem \ref{thm2.2} in Section \ref{sec6}. \end{rem}
According to the following theorem, if $p$ and $\alpha$ satisfy \eqref{2.12} then the operator \eqref{2.11} satisfies properties (P2) and (P3) where $X^p$ and $Y^p_\alpha$ are defined by \eqref{2.9} and \eqref{2.10}.
\begin{thm}\label{thm2.3}
Suppose $p$ and $\alpha$ are real numbers satisfying \eqref{2.12}. Then
\begin{enumerate}
\item[(i)] the operator \eqref{2.11} is one-to-one and onto, and
\item[(ii)] if
\begin{equation}\label{2.13}
f\in X^p \text{ and } T\in\mathbb{R}
\end{equation}
then
$$f|_{\mathbb{R}^n \times\mathbb{R}_T} =0\quad\text{if and only if}
\quad (J_\alpha f)|_{\mathbb{R}^n \times\mathbb{R}_T}=0.$$
\end{enumerate} \end{thm}
By the results in this section, the following definition is natural and makes sense. \begin{defn}\label{def2.1}
Suppose $p$ and $\alpha$ are real numbers satisfying \eqref{2.12} and $X^p$ and $Y^p_\alpha$ are defined by \eqref{2.9} and \eqref{2.10}. Then the operator
\begin{equation}\label{2.14}
(\partial_t-\Delta)^\alpha :Y^p_\alpha \to X^p
\end{equation}
is defined to be the inverse of the operator \eqref{2.11}. \end{defn}
\begin{rem}\label{rem2.2} The functions $\mu_T:X^p\to\mathbb{R}$, $T\in\mathbb{R}$, defined by
$\mu_T(f)=\|f\|_{L^p(\mathbb{R}^n \times\mathbb{R}_T)}$, form a separating family of seminorms on $X^p$ which turns $X^p$ into a locally convex topological vector space (see for example \cite[Theorem 1.37]{R}). Thus assuming \eqref{2.12} and defining a subset $O'$ of $Y^p_\alpha$ to be open if $O'=J_\alpha(O)$ for some open set $O\subset X^p$, we see by Theorem \ref{thm2.3}(i) that $Y^p_\alpha$ is also a locally convex topological vector space and the operator \eqref{2.14} is a homeomorphism. \end{rem}
We conclude this section by investigating \[\lim_{a\to0^+}(\partial_t -a^2 \Delta)^\alpha \quad\text{ and }\quad\lim_{b\to0^+}(b\partial_t -\Delta)^\alpha\] where $\alpha>0$.
To do this we first repeat the above procedure with $\partial_t -\Delta$ replaced with $b\partial_t -a^2 \Delta$ where $a$ and $b$ are positive constants. The end result after defining \begin{equation}\label{J1}
J_{\alpha,a,b}:X^p \to Y^{p}_{\alpha,a,b}:=J_{\alpha,a,b}(X^p ) \end{equation} by \[J_{\alpha,a,b}f=\Phi_{\alpha,a,b}*f,\] where $a,b,\alpha,p$ are positive constants satisfying \eqref{2.12} and $$\Phi_{\alpha,a,b}(x,t)=\frac{1}{a^n b}\Phi_\alpha \left(\frac{x}{a},\frac{t}{b}\right),$$ is the following modified version of Definition~\ref{def2.1}.
\begin{defn}
Suppose $a,b,p$ and $\alpha$ are positive constants satisfying
\eqref{2.12} and $X^p$ and $Y^{p}_{\alpha,a,b}$ are defined in \eqref{2.9} and \eqref{J1}. Then the operator
$$(b\partial_t -a^2 \Delta)^\alpha :Y^{p}_{\alpha,a,b}\to X^p$$
is defined to be the inverse of the operator \eqref{J1}. \end{defn}
The following theorem states in what sense \[(\partial_t -a^2 \Delta)^\alpha \to\partial^{\alpha}_{t}\quad\text{ as }a\to0^+\] where we formally define the equation \[\partial^{\alpha}_{t}u=f\] to mean \[u=J_{\alpha,0,1}f\] where \[(J_{\alpha,0,1}f)(x,t):=\int^{t}_{-\infty}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}f(x,\tau)\,d\tau\] is the Riemann-Liouville integral of $f$ with respect to $t$ of order $\alpha$ with base point $-\infty$.
\begin{thm}\label{thm2.4}
Suppose $\alpha>0$ and
$f:\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ is a continuous
function with compact support. Then
\[J_{\alpha,a,1}f\to J_{\alpha,0,1}f\quad\text{ as }a\to0^+\]
uniformly on compact subsets of $\mathbb{R}^n \times\mathbb{R}$. \end{thm}
The following theorem states in what sense $$(b\partial_t -\Delta)^\alpha \to(-\Delta)^\alpha \quad\text{ as }b\to0^+$$ where we formally define the equation $$(-\Delta)^\alpha u=f$$ to mean $$u=J_{\alpha,1,0}f$$ where
\[(J_{\alpha,1,0}f)(x,t):=\frac{1}{\gamma(n,\alpha)}\int_{\mathbb{R}^n}\frac{f(y,t)\,dy}{|x-y|^{n-2\alpha}}\] is the Riesz potential of $f$ with respect to $x$ of order $\alpha$. Here \begin{equation}\label{R0}
\gamma(n,\alpha)=\frac{4^\alpha \pi^{n/2}\Gamma(\alpha)}{\Gamma(n/2-\alpha)}. \end{equation}
\begin{thm}\label{thm2.5}
Suppose $0<2\alpha<n$ and
$f:\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ is a continuous
function with compact support. Then
$$J_{\alpha,1,b}f\to J_{\alpha,1,0}f\quad\text{ as }b\to0^+$$
uniformly on compact subsets of $\mathbb{R}^n \times\mathbb{R}$. \end{thm}
\section{Results for fully fractional initial value problems}\label{sec3} In this section we state our results concerning pointwise bounds for nonnegative solutions \begin{equation}\label{3.1}
u\in Y^p_\alpha \end{equation} of the fully fractional initial value problem \begin{equation}\label{3.2}
0\leq(\partial_t-\Delta)^\alpha u\leq u^\lambda \quad\text{in } \mathbb{R}^n \times\mathbb{R},\,n\geq1, \end{equation} \begin{equation}\label{3.3}
u=0\quad\text{in } \mathbb{R}^n \times(-\infty,0) \end{equation} where $\lambda>0$ and, as in Definition \ref{def2.1} of the operator \eqref{2.14}, $\alpha$ and $p$ satisfy \eqref{2.12}.
\begin{rem}\label{rem3.1}
If $\alpha$ and $p$ satisfy \eqref{2.12} and $u$ satisfies \eqref{3.1} and the first inequality in \eqref{3.2} then
$$f:=(\partial_t-\Delta)^\alpha u\geq0\quad\text{in } \mathbb{R}^n \times\mathbb{R}$$
and hence $u=J_\alpha f\geq0$ in $\mathbb{R}^n \times\mathbb{R}$ by
\eqref{2.4}. Thus the assumption that $u$ be nonnegative can be
omitted when studying \eqref{3.1}--\eqref{3.3}. \end{rem}
In order to state our results we first note that for each fixed $p\geq1$ the open first quadrant of the $\lambda\alpha$-plane is the union of the following pairwise disjoint sets. \begin{align*}
&A=\left\{(\lambda,\alpha):\lambda\geq1 \text{ and } \alpha>\dfrac{n+2}{2p}\left(1-\dfrac{1}{\lambda}\right)\right\}\\
&B=\left\{(\lambda,\alpha):0<\lambda<1 \text{ and } \alpha>0\right\}\\
&C=\left\{(\lambda,\alpha):\lambda>1 \text{ and } 0<\alpha<\dfrac{n+2}{2p}\left(1-\dfrac{1}{\lambda}\right)\right\}\\
&D=\left\{(\lambda,\alpha):\lambda>1 \text{ and } \alpha=\dfrac{n+2}{2p}\left(1-\dfrac{1}{\lambda}\right)\right\}. \end{align*}
Note that $A$, $B$, and $C$ are two dimensional regions in the $\lambda\alpha$-plane whereas $D$ is the curve separating $A$ and $C$. (See Figure \ref{fig1}.) Our results in this section deal with solutions of \eqref{3.1}--\eqref{3.3} when $(\lambda,\alpha)$ is in $A$, $B$, or $C$. We have no results when $(\lambda,\alpha)\in D$.
\begin{figure}
\caption{Graphs of the regions $A$, $B$, and $C$.}
\label{fig1}
\end{figure}
The following theorem deals with the case that $(\lambda,\alpha)\in A$.
\begin{thm}\label{thm3.1}
Suppose $\alpha$ and $p$ satisfy \eqref{2.12}, $(\lambda, \alpha)\in A$, and $u$ satisfies \eqref{3.1}--\eqref{3.3}. Then
$$u=(\partial_t-\Delta)^\alpha u=0\quad\text{almost everywhere in }\mathbb{R}^n \times\mathbb{R}.$$ \end{thm}
The following three theorems deal with the case $(\lambda,\alpha)\in B$.
\begin{thm}\label{thm3.2}
Suppose $\alpha$ and $p$ satisfy \eqref{2.12}, $(\lambda,\alpha)\in B$, and $u$ satisfies \eqref{3.1}--\eqref{3.3}. Then for all $T>0$ we have
\begin{equation}\label{3.4}
\| u\|_{L^\infty (\mathbb{R}^n \times(0,T))}\leq(MT^\alpha )^{\frac{1}{1-\lambda}}
\end{equation}
and
\begin{equation}\label{3.5}
\|(\partial_t-\Delta)^\alpha u\|_{L^\infty (\mathbb{R}^n \times(0,T))}\leq(MT^\alpha )^{\frac{\lambda}{1-\lambda}}
\end{equation}
where
\begin{equation}\label{3.6}
M=M(\alpha,\lambda)=\frac{\Gamma(\frac{\alpha\lambda}{1-\lambda}+1)}{\Gamma(\alpha+\frac{\alpha\lambda}{1-\lambda}+1)}
\end{equation} where $\Gamma$ is the Gamma function. \end{thm}
By the following theorem the bounds \eqref{3.4} and \eqref{3.5} in Theorem \ref{thm3.2} are optimal.
\begin{thm}\label{thm3.3}
Suppose $\alpha$ and $p$ satisfy \eqref{2.12}, $(\lambda,\alpha)\in B,\,T>0$, and $N<M$ where $M$ is given by \eqref{3.6}. Then there exists a solution
$$u\in Y^p_\alpha \cap C(\mathbb{R}^n \times\mathbb{R})$$
of \eqref{3.2}, \eqref{3.3} such that
$$(\partial_t-\Delta)^\alpha u\in L^p (\mathbb{R}^n \times\mathbb{R})\cap C(\mathbb{R}^n \times\mathbb{R}),$$
$$u(0,t)\geq(Nt^\alpha )^{\frac{1}{1-\lambda}}\quad\text{for }0<t<T$$
and
$$(\partial_t-\Delta)^\alpha u(0,t)=(Nt^\alpha )^{\frac{\lambda}{1-\lambda}}\quad\text{for }0<t<T.$$ \end{thm}
Although the estimates \eqref{3.4} and \eqref{3.5} are optimal there still remains the question as to whether there is a \emph{single} solution which has the same size as these estimates as $t\to\infty$. By the following theorem there is such a solution.
\begin{thm}\label{thm3.4}
Suppose $\alpha$ and $p$ satisfy \eqref{2.12} and $(\lambda,\alpha)\in B$. Then there exists $N>0$ and $u\in Y^p_\alpha$ satisfying \eqref{3.2}, \eqref{3.3} such that
$$u(x,t)\geq(Nt^\alpha )^{\frac{1}{1-\lambda}}\quad\text{for }(x,t)\in\Omega$$
and
$$(\partial_t-\Delta)^\alpha u(x,t)\geq(Nt^\alpha )^{\frac{\lambda}{1-\lambda}}\quad\text{for }(x,t)\in\Omega$$
where $\Omega=\{(x,t)\in\mathbb{R}^n\times\mathbb{R} :|x|^2 <t\}$. \end{thm}
According to the following theorem, if $(\lambda,\alpha)\in C$ then solutions of \eqref{3.1}--\eqref{3.3} admit no bounds as $t\to0^+$, either in the pointwise (i.e.\ $L^\infty$) sense or in the $L^q$ sense when $q>p$.
Moreover by Theorem \ref{thm3.6} the same is true as $t\to\infty$ provided $q\in[q_0 ,\infty]$ for some $q_0 =q_0(n,\alpha,\lambda)>p$.
\begin{thm}\label{thm3.5}
Suppose $\alpha$ and $p$ satisfy \eqref{2.12}
$$(\lambda,\alpha)\in C \quad\text{ and }\quad q\in(p,\infty].$$
Then there exists a solution $u\in Y^p_\alpha$ of \eqref{3.2}, \eqref{3.3} and a sequence $\{t_j \}\subset(0,1)$ such that
$$\lim_{j\to\infty}t_j =0$$
and
$$\|u^\lambda\|_{L^q(R_j )}=\|(\partial_t-\Delta)^\alpha u\|_{L^q (R_j )}=\infty\quad\text{for }j=1,2,...,$$
where
\begin{equation}\label{3.7}
R_j =\{(x,t)\in\mathbb{R}^n \times\mathbb{R}:|x|<\sqrt{t_j} \text{ and } t_j <t<2t_j \}.
\end{equation} \end{thm}
\begin{thm}\label{thm3.6}
Suppose $\alpha$ and $p$ satisfy \eqref{2.12},
$$(\lambda,\alpha)\in C \quad\text{ and }\quad q\in\left[\frac{n+2}{2\alpha}\left(1-\frac{1}{\lambda}\right),\infty\right].$$
Then there exists a solution $u\in Y^p_\alpha$ of \eqref{3.2}, \eqref{3.3} and a sequence $\{t_j \}\subset(1,\infty)$ such that
$$\lim_{j\to\infty}t_j =\infty$$
and
$$\|u^\lambda\|_{L^q(R_j )}=\|(\partial_t-\Delta)^\alpha u\|_{L^q
(R_j )}=\infty \quad\text{for } j=1,2,...,$$
where $R_j$ is given in \eqref{3.7}. \end{thm}
\section{$J_\alpha$ version of fully fractional initial value problems}\label{sec4} In order to prove our results stated in Section \ref{sec3}, we will first reformulate them in terms of the inverse $J_\alpha$ of the fractional heat operator \eqref{2.14} as follows.
Suppose that $\lambda>0$ and, as assumed in Definition \ref{def2.1} and Theorems \ref{thm3.1}--\ref{thm3.6}, that $p$ and $\alpha$ satisfy \eqref{2.12}. Then, by Theorem \ref{thm2.3}, $u$ satisfies \eqref{3.1}--\eqref{3.3} if and only if $f:=(\partial_t-\Delta)^\alpha u$ satisfies \begin{equation}\label{4.1}
f\in X^p \end{equation} \begin{equation}\label{4.2}
0\leq f\leq(J_\alpha f)^\lambda \quad\text{in } \mathbb{R}^n \times\mathbb{R} \end{equation} \begin{equation}\label{4.3}
f=0\quad\text{in } \mathbb{R}^n \times(-\infty,0). \end{equation}
Thus the two problems \eqref{3.1}--\eqref{3.3} and \eqref{4.1}--\eqref{4.3} are equivalent under the transformation $u=J_\alpha f$ when $p$ and $\alpha$ satisfy \eqref{2.12}. This restriction on $p$ and $\alpha$ was imposed so that $J_\alpha f$ would be defined pointwise in $\mathbb{R}^n \times\mathbb{R}$ for all $f\in X^p$. If $p\geq1$ and $\alpha>0$ do not satisfy \eqref{2.12}, that is, if \begin{equation}\label{4.3.5} \left(p>1 \text{ and } \alpha\geq\frac{n+2}{2p}\right)\quad\text{or} \quad \left(p=1
\text{ and } \alpha>\frac{n+2}{2p}\right) \end{equation} then $J_\alpha f$ is generally not defined pointwise as an extended real valued function for $f\in X^p$. (However it can be defined for all $f$ in the subspace $L^p (\mathbb{R}^n \times\mathbb{R})$ of $X^p$ as a distribution on a certain subspace of the Schwartz space $S$; see \cite[Sec 9.2.5]{SK}.)
Even though $J_\alpha f$ is generally not defined pointwise as an extended real valued function for $f\in X^p$ when $p$ and $\alpha$ satisfy \eqref{4.3.5}, it is defined pointwise as a nonnegative extended real valued function for all \emph{nonnegative} functions $f\in X^p$ for all $p\geq1$ and $\alpha>0$ because then the integrand of $J_\alpha f$ is a nonnegative function. Hence, since $f$ is nonnegative in the problem \eqref{4.1}--\eqref{4.3}, we see that the problem \eqref{4.1}--\eqref{4.3} makes sense for all $p\geq 1$ and $\alpha>0$ when $J_\alpha$ is defined in the pointwise sense, which is the sense in which we will define it in this section. However $J_\alpha$, when restricted to the set $X^{p}_{+}$ of all nonnegative functions $f\in X^p$, is not one-to-one when $p$ and $\alpha$ satisfy \eqref{4.3.5}. Thus our results in this section for the problem \eqref{4.1}--\eqref{4.3} when $p\geq1$ and $\alpha>0$ will yield corresponding results for the problem \eqref{3.1}--\eqref{3.3} only when $p$ and $\alpha$ satisfy \eqref{2.12}.
In view of these remarks, we will consider in this section solutions \begin{equation}\label{4.4}
f\in X^p \end{equation} of the following $J_\alpha$ version of the fully fractional initial value problem \eqref{3.2}, \eqref{3.3}: \begin{equation}\label{4.5}
0\leq f\leq K(J_\alpha f)^\lambda \quad\text{in } \mathbb{R}^n \times\mathbb{R},\,n\geq1 \end{equation} \begin{equation}\label{4.6}
f=0\quad\text{in } \mathbb{R}^n \times(-\infty, 0) \end{equation} where \begin{equation}\label{4.7}
p\in[1,\infty) \quad\text{ and }\quad K,\lambda,\alpha\in(0,\infty) \end{equation} are constants, $X^p$ is defined by \eqref{2.9}, and $J_\alpha$ is given by \eqref{2.4}.
Under the equivalence of problems \eqref{3.1}--\eqref{3.3} and \eqref{4.1}--\eqref{4.3} discussed above, the following Theorems \ref{thm4.1}--\ref{thm4.6}, when restricted to the case that $p$ and $\alpha$ satisfy \eqref{2.12} and $ K=1$, clearly imply Theorems \ref{thm3.1}--\ref{thm3.6} in Section \ref{sec3}. We will prove Theorems \ref{thm4.1}--\ref{thm4.6} in Section \ref{sec8}.
\begin{thm}\label{thm4.1}
Suppose $(\lambda,\alpha)\in A$ and $f,p$, and $ K$ satisfy \eqref{4.4}--\eqref{4.7}. Then
\begin{equation}\label{4.8}
f=J_\alpha f=0\quad\text{almost everywhere in }\mathbb{R}^n \times\mathbb{R}.
\end{equation} \end{thm}
\begin{thm}\label{thm4.2}
Suppose $(\lambda,\alpha)\in B$ and $f,p$, and $ K$ satisfy \eqref{4.4}--\eqref{4.7}. Then for all $b>0$ we have
\begin{equation}\label{4.9}
\| f\|_{L^\infty (\mathbb{R}^n \times(0,b))}\leq K^{\frac{1}{1-\lambda}}(Mb^\alpha )^{\frac{\lambda}{1-\lambda}}
\end{equation}
and
\begin{equation}\label{4.10}
\| J_\alpha f\|_{L^\infty (\mathbb{R}^n \times(0,b))}\leq K^{\frac{1}{1-\lambda}}(Mb^\alpha )^{\frac{1}{1-\lambda}}
\end{equation}
where
\begin{equation}\label{4.11}
M=M(\alpha,\lambda)=\frac{\Gamma(\frac{\alpha\lambda}{1-\lambda}+1)}{\Gamma(\alpha+\frac{\alpha\lambda}{1-\lambda}+1)}.
\end{equation} \end{thm}
\begin{thm}\label{thm4.3}
Suppose $p$ and $ K$ satisfy \eqref{4.7}, $(\lambda,\alpha)\in B$, $T>0$, and $0<N<M$ where $M$ is given by \eqref{4.11}. Then there exists a solution
\begin{equation}\label{4.12}
f\in L^p (\mathbb{R}^n \times\mathbb{R})\cap C(\mathbb{R}^n \times\mathbb{R})
\end{equation}
of \eqref{4.5}, \eqref{4.6} such that
\begin{equation}\label{4.12.5}
J_\alpha f\in C(\mathbb{R}^n \times\mathbb{R})
\end{equation}
\begin{equation}\label{4.13}
f(0,t)= K^{\frac{1}{1-\lambda}}(Nt^\alpha )^{\frac{\lambda}{1-\lambda}}\quad\text{for }0<t<T
\end{equation}
and
\begin{equation}\label{4.14}
J_\alpha f(0,t)\geq K^{\frac{1}{1-\lambda}}(Nt^\alpha )^{\frac{1}{1-\lambda}}\quad\text{for }0<t<T.
\end{equation} \end{thm}
\begin{thm}\label{thm4.4}
Suppose $p$ and $ K$ satisfy \eqref{4.7} and $(\lambda,\alpha)\in B$. Then there exists $N>0$ and
$$f\in X^p$$
satisfying \eqref{4.5}, \eqref{4.6} such that
\begin{equation}\label{4.15}
f(x,t)\geq K^{\frac{1}{1-\lambda}}(Nt^\alpha )^{\frac{\lambda}{1-\lambda}}\quad\text{for }|x|^2 <t
\end{equation}
and
\begin{equation}\label{4.16}
J_\alpha f(x,t)\geq K^{\frac{1}{1-\lambda}}(Nt^\alpha )^{\frac{1}{1-\lambda}}\quad\text{for }|x|^2 <t.
\end{equation} \end{thm}
\begin{thm}\label{thm4.5}
Suppose $p$ and $ K$ satisfy \eqref{4.7}, \begin{equation}\label{4.17} (\lambda,\alpha)\in C\quad\text{ and }\quad q\in(p,\infty]. \end{equation} Then there exists a solution \begin{equation}\label{4.18}
f\in L^p (\mathbb{R}^n \times\mathbb{R})
\end{equation}
of \eqref{4.5}, \eqref{4.6} and a sequence $\{t_j \}\subset(0,1)$ such that
$$\lim_{j\to\infty}t_j =0$$
and
\begin{equation}\label{4.19}
\| f\|_{L^q (R_j )}=\infty\quad\text{for }j=1,2,...,
\end{equation}
where \begin{equation}\label{4.19.5} R_j =\{(x,t)\in\mathbb{R}^n
\times\mathbb{R}:|x|<\sqrt{t_j} \text{ and } t_j <t<2t_j \}. \end{equation} \end{thm}
\begin{thm}\label{thm4.6}
Suppose $p$ and $ K$ satisfy \eqref{4.7},
\begin{equation}\label{4.20}
(\lambda,\alpha)\in C \quad\text{ and }\quad \frac{n+2}{2\alpha}(1-\frac{1}{\lambda})\le q\le\infty.
\end{equation}
Then there exists a solution
\begin{equation}\label{4.21}
f\in X^p
\end{equation}
of \eqref{4.5}, \eqref{4.6} and a sequence $\{t_j \}\subset(1,\infty)$ such that
$$\lim_{j\to\infty}t_j =\infty$$
and
\begin{equation}\label{4.22}
\| f\|_{L^q (R_j )}=\infty\quad\text{for }j=1,2,...,
\end{equation}
where $R_j$ is given in \eqref{4.19.5}.
\end{thm}
\section{Preliminary results for fully fractional heat operators}\label{sec5} In this section we provide some lemmas needed for the proofs of our results in Section \ref{sec2} concerning the fully fractional heat operator \eqref{2.14}.
The following lemma is needed for the proof of Theorem \ref{thm2.2}. \begin{lem}\label{lem5.1}
Suppose $\alpha,\beta>0$. Then
\begin{equation}\label{5.1}
\Phi_{\alpha+\beta}=\Phi_\alpha *\Phi_\beta \quad\text{in } \mathbb{R}^n \times\mathbb{R}
\end{equation}
where $\Phi_\alpha$ is defined in \eqref{2.2}. \end{lem}
\begin{proof}
Since
\begin{align}\label{5.2}
\notag &\Phi_\alpha *\Phi_\beta (x,t)=\int^{\infty}_{-\infty}\int_{\xi\in\mathbb{R}^n}\Phi_\alpha (x-\xi,t-\tau)\Phi_\beta (\xi,\tau)\, d\xi \, d\tau\\
&=
\begin{cases}
0 & \text{for }(x,t)\in\mathbb{R}^n \times(-\infty,0]\\
\int^{t}_{0}\int_{\xi\in\mathbb{R}^n}\Phi_\alpha (x-\xi,t-\tau)\Phi_\beta (\xi,\tau)\, d\xi \, d\tau & \text{for }(x,t)\in\mathbb{R}^n \times(0,\infty),
\end{cases}
\end{align}
we see that \eqref{5.1} holds in $\mathbb{R}^n \times(-\infty,0]$.
Using the well-known facts that \begin{equation}\label{5.2.5}
\widehat{\Phi}_\alpha (\cdot,t)(y)=\frac{t^{\alpha-1}}{\Gamma(\alpha)}e^{-t|y|^2}\quad\text{for }t>0 \text{ and } y\in\mathbb{R}^n
\end{equation}
and
\begin{equation}\label{5.3}
\int^{t}_{0}\frac{(t-\tau)^{\alpha-1}\tau^{\beta-1}}{\Gamma(\alpha)\Gamma(\beta)}d\tau =\frac{t^{\alpha+\beta-1}}{\Gamma(\alpha+\beta)}\quad\text{for }t,\alpha,\beta>0,
\end{equation}
and assuming we can interchange the order of integration in the following calculation (we will justify this after the calculation) we obtain for $t>0$ and $y\in\mathbb{R}^n$ that
\begin{align}
\notag &(\Phi_\alpha *\Phi_\beta )\widehat{\phantom{\Phi}}(\cdot,t)(y)\\
\label{5.4}
&=\int_{x\in\mathbb{R}^n}e^{ix\cdot y}\int^{t}_{0}\Biggl(\int_{\xi\in\mathbb{R}^n}\Phi_\alpha
(x-\xi,t-\tau)\Phi_\beta (\xi,\tau)\, d\xi\Biggr)\, d\tau\, dx\\
\notag &=\int^{t}_{0}\Biggl(\int_{x\in\mathbb{R}^n}e^{ix\cdot y}\Biggl(\int_{\xi\in\mathbb{R}^n}\Phi_\alpha (x-\xi,t-\tau)\Phi_\beta (\xi,\tau)\,d\xi\Biggr)dx\Biggr)d\tau\\
\notag &=\int^{t}_{0}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}e^{-|y|^2
(t-\tau)}\frac{\tau^{\beta-1}}{\Gamma(\beta)}e^{-|y|^2
\tau}d\tau\quad\text{(by the convolution theorem)}\\
\notag &=e^{-|y|^2 t}\int^{t}_{0}\frac{(t-\tau)^{\alpha-1}\tau^{\beta-1}}{\Gamma(\alpha)\Gamma(\beta)}d\tau\\
&=e^{-t|y|^2}\frac{t^{\alpha+\beta-1}}{\Gamma(\alpha+\beta)}=\widehat{\Phi}_{\alpha+\beta}(\cdot,t)(y)\label{5.5}.
\end{align}
This calculation is justified by Fubini's theorem and the fact that
the integral \eqref{5.4} with $e^{ix\cdot y}$ replaced with $1$ is, by Fubini's theorem for nonnegative functions and \eqref{5.3}, equal to
\begin{align*}
&\int^{t}_{0}\int_{\xi\in\mathbb{R}^n}\biggl(\int_{x\in\mathbb{R}^n}\Phi_\alpha (x-\xi,t-\tau)dx\Biggr)\Phi_\beta (\xi,\tau)\, d\xi \, d\tau\\
&=\int^{t}_{0}\int_{\xi\in\mathbb{R}^n}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}\Phi_\beta (\xi,\tau)\, d\xi \, d\tau\\
&=\frac{t^{\alpha+\beta-1}}{\Gamma(\alpha+\beta)}\quad\text{for }t>0 \text{ and } y\in\mathbb{R}^n .
\end{align*}
It follows now from \eqref{5.5} that \eqref{5.1} holds in $\mathbb{R}^n \times(0,\infty)$. \end{proof}
The following lemma is needed for the proof of Lemma \ref{lem5.3} which in turn is needed for the proof of Theorem \ref{thm2.3}. \begin{lem}\label{lem5.2}
Suppose $f\in L^1 (-\infty,0)$ and $0<\alpha\leq1$. Then
$$g(t):=\int^{t}_{-\infty}(t-\tau)^{\alpha-1}|f(\tau)|d\tau<\infty\quad\text{ for almost all }t\in(-\infty,0).$$ \end{lem}
\begin{proof}
The lemma is clearly true if $\alpha=1$. Hence we can assume $0<\alpha<1$. Since
\begin{align*}
\int^{0}_{-\infty}&(-t)^{-\alpha}g(t)\,dt
=\int^{0}_{-\infty}(-t)^{-\alpha}\int^{t}_{-\infty}(t-\tau)^{\alpha-1}|f(\tau)|\, d\tau \, dt\\
&=\int^{0}_{-\infty}|f(\tau)|\biggl(\int^{0}_{\tau}(-t)^{(1-\alpha)-1}(t-\tau)^{\alpha-1}dt\Biggr)d\tau\\
&=\Gamma(1-\alpha)\Gamma(\alpha)\int^{0}_{-\infty}|f(\tau)|\,d\tau<\infty,
\end{align*}
where we have used \eqref{5.3}, we see that
$g(t)<\infty$ for almost all $t\in(-\infty,0)$. \end{proof}
\begin{lem}\label{lem5.3}
Suppose $f\in L^1 (\mathbb{R}^n \times(-\infty,0)),\,\alpha\in(0,1]$, and $y\in\mathbb{R}^n$. Then for almost all $t\in(-\infty,0)$ we have \[ \widehat{J_\alpha f}(\cdot,t)(y) =\int^{t}_{-\infty}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}
e^{-|y|^2 (t-\tau)} \widehat{f}(\cdot,\tau)(y)\,d\tau. \] \end{lem}
\begin{proof}
By Fubini's theorem for nonnegative functions and Lemma \ref{lem5.2} we find for almost all $t\in(-\infty,0)$ that
\begin{align*}
\int_{x\in\mathbb{R}^n}&|e^{ix\cdot
y}|\int^{t}_{-\infty}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}\int_{\xi\in\mathbb{R}^n}\Phi_1(x-\xi,t-\tau)|f(\xi,\tau)| \,d\xi\, d\tau \,dx\\
&=\int^{t}_{-\infty}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}\Biggl(\int_{\xi\in\mathbb{R}^n}|f(\xi,\tau)|d\xi\Biggr)d\tau<\infty.
\end{align*}
Hence by Fubini's theorem, the convolution theorem for Fourier
transforms, and \eqref{5.2.5}, we see for almost all $t\in(-\infty,0)$ that
\begin{align*} \widehat{J_\alpha f}(\cdot,t)(y)
&=\int^{t}_{-\infty}\int_{x\in\mathbb{R}^n}e^{ix\cdot y}\int_{\xi\in\mathbb{R}^n}\Phi_\alpha(x-\xi,t-\tau)f(\xi,\tau)\,d\xi\, dx\,d\tau\\
&=\int^{t}_{-\infty}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}e^{-|y|^2
(t-\tau)}
\widehat{f}(\cdot,\tau)(y)\,d\tau.
\end{align*} \end{proof}
\section{Fully fractional heat operator proofs}\label{sec6} In this section we prove our fully fractional heat operator results which we stated in Section \ref{sec2}.
\begin{proof}[Proof of Theorem \ref{thm2.1}] Part (i) was proved by Sampson \cite[Theorem 2.2]{SP}. We prove part (ii) in two steps.
\noindent\underline{Step 1.} Suppose $f,g\in S$. Let $(x,t)\in\mathbb{R}^n \times\mathbb{R}$ be momentarily fixed and define $\varphi\in S$ by $$\varphi(y,s)=f(x+y,t+s).$$ Then $$\widehat{\widehat{\varphi}}(y,s)=(2\pi)^{n+1}\varphi(-y,-s)=(2\pi)^{n+1}f(x-y,t-s)$$ and $$\widehat{\varphi}(y,s)=e^{-ix\cdot y-its}\widehat{f}(y,s).$$ Thus by part (i) with $\varphi$ replaced with $\widehat{\varphi}$ we get \begin{align}\label{6.1}
\notag (2\pi)^{n+1}J_\alpha f(x,t)&=(2\pi)^{n+1}\iint_{\mathbb{R}^n \times\mathbb{R}}\Phi_\alpha (y,s)f(x-y,t-s)\, dy \, ds\\
\notag &=\iint_{\mathbb{R}^n \times\mathbb{R}}\Phi_\alpha (y,s)\widehat{\widehat{\varphi}}(y,s)\, dy \, ds\\
\notag &=\iint_{\mathbb{R}^n \times\mathbb{R}}(|y|^2 -is)^{-\alpha}\widehat{\varphi}(y,s)\, dy \, ds\\
&=\iint_{\mathbb{R}^n \times\mathbb{R}}(|y|^2 -is)^{-\alpha}\widehat{f}(y,s)e^{-ix\cdot y-its}\, dy \, ds. \end{align} Multiplying \eqref{6.1} by $\widehat{g}(x,t)/(2\pi)^{n+1}$, integrating the resulting equation with respect to $(x,t)$, and interchanging the order of integration in the resulting integral on the RHS, which is allowed by Fubini's theorem and the fact that \begin{equation}\label{6.1.5}
\iint_{||y|^2-is|\le 1}||y|^2-is|^{-\alpha}dy\,ds<\infty\quad\text{for } 0<\alpha<(n+2)/2, \end{equation} we get \eqref{2.5}.
\noindent\underline{Step 2.} Suppose $f\in L^1 (\mathbb{R}^n \times\mathbb{R})$ and $g\in S$. Then $\widehat{g}\in S$ and $\widehat{f}\in C(\mathbb{R}^n \times\mathbb{R}) \cap L^\infty (\mathbb{R}^n\times\mathbb{R} )$. Since $S$ is dense in $L^1 (\mathbb{R}^n \times\mathbb{R})$ there exists $\{f_j \}\subset S$ such that $f_j \to f$ in $L^1 (\mathbb{R}^n \times\mathbb{R})$ and by Step 1 \begin{equation}\label{6.2}
\iint_{\mathbb{R}^n \times\mathbb{R}}J_\alpha f_j (x,t)\widehat{g}(x,t)\, dx \, dt=\iint_{\mathbb{R}^n \times\mathbb{R}}(|y|^2 -is)^{-\alpha}\widehat{f}_j (y,s)g(y,s)\, dy \, ds. \end{equation} Since
$$\|\widehat{f}_j -\widehat{f}\|_{L^\infty (\mathbb{R}^n
\times\mathbb{R})}\leq\| f_j -f\|_{L^1 (\mathbb{R}^n
\times\mathbb{R})}\to 0\quad\text{as }j\to\infty$$ we have \begin{align*}
\Biggl|&\iint_{\mathbb{R}^n \times\mathbb{R}}(\widehat{f}_j (y,s)-\widehat{f}(y,s))(|y|^2 -is)^{-\alpha}g(y,s)\, dy \, ds\Biggr|\\
&\leq\|\widehat{f}_j -\widehat{f}\|_{L^\infty (\mathbb{R}^n \times\mathbb{R})}\iint_{\mathbb{R}^n \times\mathbb{R}}||y|^2 -is|^{-\alpha}|g(y,s)|\, dy \, ds\\ &\to0\quad\text{as }j\to\infty \end{align*} by \eqref{6.1.5}. Thus the RHS of \eqref{6.2} tends to the RHS of \eqref{2.5} as $j\to\infty$.
Also, defining $h(x,t)=|\widehat{g}(-x,-t)|$ we have \begin{align*}
\Big|\iint_{\mathbb{R}^n \times\mathbb{R}}J_\alpha (f_j
&-f)(x,t)\widehat{g}(x,t)
\, dx \, dt\Big|\\
&\leq\iint_{\mathbb{R}^n \times\mathbb{R}}\iint_{\mathbb{R}^n
\times\mathbb{R}}
\Phi_\alpha (x-y,t-s)|(f_j -f)(y,s)|\, dy \, ds\,|\widehat{g}(x,t)|\, dx \, dt\\
&=\iint_{\mathbb{R}^n \times\mathbb{R}}|(f_j -f)(y,s)| (\Phi_\alpha*h)(-y,-s) \,dy\,ds\\ &\to 0\quad\text{as }j\to\infty \end{align*} because noting that $h\in L^1(\mathbb{R}^n\times\mathbb{R})\cap L^\infty(\mathbb{R}^n\times\mathbb{R})$, \begin{align}
\notag \|\Phi_\alpha \raisebox{2pt}{$\chi$}_{\mathbb{R}^n \times(0,1)}\|_{L^1 (\mathbb{R}^n \times\mathbb{R})}&=\int^1_0\frac{t^{\alpha-1}}{\Gamma(\alpha)}\int_{x\in\mathbb{R}^n}\Phi_1(x,t)\, dx \, dt\\
\label{6.2.5} &=\int^1_0\frac{t^{\alpha-1}}{\Gamma(\alpha)}dt<\infty \quad\text{for }\alpha>0, \end{align} and $\Phi_\alpha\raisebox{2pt}{$\chi$}_{\mathbb{R}^n\times(1,\infty)}\in L^\infty(\mathbb{R}^n\times\mathbb{R})$ for $\alpha<(n+2)/2$ we find that \[ \Phi_\alpha *h =\Phi_\alpha\raisebox{2pt}{$\chi$}_{\mathbb{R}^n\times(0,1)}*h +\Phi_\alpha\raisebox{2pt}{$\chi$}_{\mathbb{R}^n\times(1,\infty)}*h\in L^\infty(\mathbb{R}^n\times\mathbb{R}) \] by Young's inequality. Thus the LHS of \eqref{6.2} tends to the LHS of \eqref{2.5} as $j\to\infty$. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm2.2}]
Since \[ \bigcap_{T\in\mathbb{R}}L^{p}_{\text{loc}}(\mathbb{R}^n \times \mathbb{R}_T ) =L^{p}_{\text{loc}}(\mathbb{R}^n \times\mathbb{R}) \] and since
$(J_\alpha f_T )|_{\mathbb{R}^n \times\mathbb{R}_T}
=(J_\alpha f)|_{\mathbb{R}^n \times\mathbb{R}_T}$, where $f_T =f\raisebox{2pt}{$\chi$}_{\mathbb{R}^n \times \mathbb{R}_T}$, to prove (i), (ii) and (iii) it suffices to prove for all $T\in\mathbb{R}$ that \begin{enumerate}
\item[(i)$^\prime$] $J_\alpha f_T ,\,J_\alpha |f_T |\in L^{p}_{\text{loc}}(\mathbb{R}^n \times \mathbb{R}_T )$
\item[(ii)$^\prime$] $J_\beta J_\gamma f_T =J_\alpha f_T \quad\text{in } L^{p}_{\text{loc}}(\mathbb{R}^n \times\mathbb{R}_T )$ whenever $\beta>0,\,\gamma>0$, and $\beta+\gamma=\alpha$\\
and
\item[(iii)$^\prime$] $HJ_\alpha f_T =J_{\alpha-1}f_T \quad\text{in } D^\prime (\mathbb{R}^n \times\mathbb{R}_T)$ when $\alpha>1$. \end{enumerate} To do this, let $T\in\mathbb{R}$ be fixed. Since $f\in X^p \subset L^p (\mathbb{R}^n \times \mathbb{R}_T )$ we have \begin{equation}\label{6.3}
f_T \in L^p (\mathbb{R}^n \times\mathbb{R}). \end{equation}
\noindent\underline{Proof of (i)$^\prime$.} Since $|J_\alpha f_T |\leq J_\alpha |f_T |$, to prove (i)$^\prime$ it suffices to prove only that \begin{equation}\label{6.4}
J_\alpha |f_T |\in L^{p}_{\text{loc}}(\mathbb{R}^n \times\mathbb{R}_T ). \end{equation} By \eqref{2.3} we have \begin{equation}\label{6.5}
J_\alpha |f_T |=u_1 +u_2 , \end{equation} where
$$u_1 =(\Phi_\alpha \raisebox{2pt}{$\chi$}_{\mathbb{R}^n \times(0,1)})*|f_T| \quad\text{and}\quad u_2 =(\Phi_\alpha \raisebox{2pt}{$\chi$}_{\mathbb{R}^n \times(1,\infty)})*|f_T|.$$ It follows from \eqref{6.2.5}, \eqref{6.3}, and Young's inequality that \[ u_1 \in L^p (\mathbb{R}^n \times\mathbb{R}). \] Thus to complete the proof of \eqref{6.4} and hence of (i)$^\prime$ it suffices to show \begin{equation}\label{6.6}
u_2 \in L^\infty (\mathbb{R}^n \times\mathbb{R}). \end{equation} To do this we consider two cases.
\noindent\underline{Case I.} Suppose $1<p<\frac{n+2}{2\alpha}$. Let $q$ be the conjugate H\"older exponent for $p$. Then \[\frac{1}{q}=1-\frac{1}{p}<1-\frac{2\alpha}{n+2}=\frac{n+2-2\alpha}{n+2}\] and thus making the change of variables $\sqrt{\frac{q}{4s}}y=z$ we obtain \begin{align*}
\|\Phi_\alpha \raisebox{2pt}{$\chi$}_{\mathbb{R}^n \times(1,\infty)}\|&^{q}_{L^q
(\mathbb{R}^n \times\mathbb{R})}
=C(n,\alpha,q)\int^{\infty}_{1}\int_{y\in\mathbb{R}^n}s^{(\alpha-1-n/2)q}e^{-\frac{q}{4s}|y|^2}\, dy \, ds\\
&=C(n,\alpha,q)\int^{\infty}_{1}s^{(\alpha-1-n/2)q+n/2}\int_{z\in\mathbb{R}^n}e^{-|z|^2}dz\,ds
<\infty. \end{align*} Hence \eqref{6.6} follows from \eqref{6.3} and Young's inequality.\\
\noindent\underline{Case II.} Suppose $1=p\leq\frac{n+2}{2\alpha}$. Then \begin{align*}
\Phi_\alpha \raisebox{2pt}{$\chi$}_{\mathbb{R}^n \times(1,\infty)}(y,s)&\leq C(n,\alpha)s^{\alpha-1-n/2}\raisebox{2pt}{$\chi$}_{\mathbb{R}^n \times(1,\infty)}(y,s)\\
&\leq C(n,\alpha)\quad\text{for }(y,s)\in\mathbb{R}^n \times\mathbb{R}. \end{align*} Thus \eqref{6.6} follows from \eqref{6.3} and so the proof of (i)$^\prime$ is complete.\\
\noindent\underline{Proof of (ii)$^\prime$.} Using Fubini's theorem for nonnegative functions and Lemma \ref{lem5.1} we have \begin{align*}
J_\beta (J_\gamma |f_T |)(x,t)&=\iint_{\mathbb{R}^n \times\mathbb{R}}\Phi_\beta (x-\xi,t-\tau)\iint_{\mathbb{R}^n \times\mathbb{R}}\Phi_\gamma (\xi-\eta,\tau-\zeta)|f_T (\eta,\zeta)|\, d\eta \, d\zeta \, d\xi \, d\tau\\
&=\iint_{\mathbb{R}^n \times\mathbb{R}}\Phi_{\beta+\gamma}(x-\eta,t-\zeta)|f_T (\eta,\zeta)|\, d\eta \, d\zeta\\
&=(J_\alpha |f_T |)(x,t)<\infty\quad\text{a.e. in }\mathbb{R}^n \times\mathbb{R} \end{align*}
by part (i)$^\prime$. Hence by Fubini's theorem the above calculation can be repeated with $|f_T|$ replaced with $f_T$ which gives (ii)$^\prime$.
\noindent\underline{Proof of (iii)$^\prime$.} By (i)$^\prime$ we have \begin{equation}\label{6.7}
J_\alpha |f_T| ,\,J_{\alpha-1}|f_T |\in L^{p}_{\text{loc}}(\mathbb{R}^n \times\mathbb{R}_T )\subset D^\prime (\mathbb{R}^n \times\mathbb{R}_T ). \end{equation} Let $\varphi\in C^{\infty}_{0}(\mathbb{R}^n \times\mathbb{R}_T )$. Then noting that \begin{equation}\label{6.8}
\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Phi_1 (x-\eta,t-\zeta)H^* \varphi(x,t)\, dx \, dt=\varphi(\eta,\zeta)\quad\text{for }(\eta,\zeta)\in\mathbb{R}^n \times\mathbb{R}_T \end{equation} where $H^* =-\partial_t-\Delta$ and assuming we can interchange the order of integration in the following calculation (we will justify this after the calculation) it follows from Lemma \ref{lem5.1} that \begin{align} \label{6.9}&(H(J_\alpha f_T ))(\varphi)=(J_\alpha f_T )(H^* \varphi)\\ \notag &=\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Biggl(\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Phi_\alpha (x-\xi,t-\tau)f_T (\xi,\tau)\, d\xi \, d\tau\Biggr)H^* \varphi(x,t)\, dx \, dt\\ \notag&=\iint_{\mathbb{R}^n \times\mathbb{R}_T}\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Biggl(\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Phi_1 (x-\eta,t-\zeta)\Phi_{\alpha-1}(\eta-\xi,\zeta-\tau)\, d\eta \, d\zeta\Biggr)\\ \notag&\phantom{=\iint_{\mathbb{R}^n \times\mathbb{R}_T}}\times f_T (\xi,\tau)\, d\xi \, d\tau H^* \varphi(x,t)\, dx \, dt\\ \notag&=\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Biggl(\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Biggl(\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Phi_1 (x-\eta,t-\zeta)H^* \varphi(x,t)\, dx \, dt\Biggr)\Phi_{\alpha-1}(\eta-\xi,\zeta-\tau)\, d\eta \, d\zeta\Biggr)\\ \label{6.10}&\phantom{=\iint_{\mathbb{R}^n \times\mathbb{R}_T}}\times f_T (\xi,\tau)\, d\xi \, d\tau \\ \notag&=\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Biggl(\iint_{\mathbb{R}^n \times\mathbb{R}_T}\Phi_{\alpha-1}(\eta-\xi,\zeta-\tau)f_T (\xi,\tau)\, d\xi \, d\tau\Biggr)\varphi(\eta,\zeta)\, d\eta \, d\zeta\\ \notag&=(J_{\alpha-1}f_T )(\varphi). \end{align}
To justify this calculation, it suffices by Fubini's theorem to show that the integral \eqref{6.10}, with $f_T$ and $H^* \varphi$ replaced with $|f_T |$ and $|H^* \varphi|$, is finite. However, in the same way that \eqref{6.10} was obtained from \eqref{6.9}, we see that this modified integral equals
$$\iint_{\mathbb{R}^n \times\mathbb{R}_T}(J_\alpha |f_T|)(x,t)|H^* \varphi|(x,t)\, dx \, dt<\infty$$ by \eqref{6.7}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm2.3}]
Clearly (ii) implies (i). We now prove (ii). Suppose \eqref{2.13}. It follows from \eqref{2.4} that
$$f|_{\mathbb{R}^n \times\mathbb{R}_T}=0\text{ implies }(J_\alpha f)|_{\mathbb{R}^n \times\mathbb{R}_T}=0.$$ Conversely suppose \begin{equation}\label{6.11}
(J_\alpha f)|_{\mathbb{R}^n \times\mathbb{R}_T}=0. \end{equation} To complete the proof of (ii) it suffices to prove \begin{equation}\label{6.12}
f|_{\mathbb{R}^n \times\mathbb{R}_T}=0. \end{equation} By Theorem \ref{thm2.2}(iii) and mathematical induction, we can, without loss of generality, assume for the proof of \eqref{6.12} that \begin{equation}\label{6.13}
0<\alpha\leq1. \end{equation} Moreover, by translating we can assume \begin{equation}\label{6.14}
T=0. \end{equation} We divide the proof of \eqref{6.12} into two cases.\\
\noindent\underline{Case I.} Suppose \eqref{2.12}$_2$ holds. Then \begin{equation}\label{6.15}
1=p\leq\frac{n+1}{2\alpha}. \end{equation} Let \begin{equation}\label{6.16}
F(y,t)=\widehat{f}(\cdot,t)(y)\quad \text{for }(y,t)\in\mathbb{R}^n \times(-\infty,0). \end{equation} By \eqref{2.13} and \eqref{6.15} we have \begin{equation}\label{6.17}
f\in L^1 (\mathbb{R}^n \times(-\infty,0)) \end{equation} and thus \[f(\cdot,t)\in L^1 (\mathbb{R}^n )\quad\text{for almost all }t\in(-\infty,0)\] which implies \[F(\cdot,t)\in C(\mathbb{R}^n )\quad\text{for almost all }t\in(-\infty,0).\] Also, by \eqref{6.17} \begin{align}\label{6.18}
\notag\| F(y,\cdot)\|_{L^1 (-\infty,0)}&=\int^{0}_{-\infty}\left|\int_{\mathbb{R}^n}e^{ix\cdot y}f(x,t)\,dx\right|\,dt\\
&\leq\| f\|_{L^1 (\mathbb{R}^n \times(-\infty,0))}<\infty\quad\text{ for all }y\in\mathbb{R}^n . \end{align}
\noindent\underline{Case I(a).} Suppose $\alpha=1$. Then by \eqref{6.17}, \eqref{6.11}, and Lemma \ref{lem5.3} we have for each $y\in\mathbb{R}^n$ that
$$\int^{t}_{-\infty}e^{|y|^2 \tau}F(y,\tau)\,d\tau=e^{|y|^2 t}\int^{t}_{-\infty}e^{-|y|^2 (t-\tau)}F(y,\tau)\,d\tau=0$$ for almost all $t\in(-\infty,0)$. Hence, by \eqref{6.18} and the measure-theoretic fundamental theorem of calculus, we get $F=0$ in $L^1 (\mathbb{R}^n \times(-\infty,0))$, which together with \eqref{6.16} implies \eqref{6.12}.
\noindent\underline{Case I(b).} Suppose $0<\alpha<1$. To handle this case we hold $y\in\mathbb{R}^n \backslash\{0\}$ fixed and define \begin{equation}\label{6.19}
F_0 (t):=F(y,t). \end{equation} Then by \eqref{6.18} \begin{equation}\label{6.20}
F_0 \in L^1 (-\infty,0). \end{equation} From \eqref{6.17}, \eqref{6.11}, and Lemma \ref{lem5.3} we have \begin{equation}\label{6.21}
g(t):=\int^{t}_{-\infty}(t-\tau)^{\alpha-1}e^{|y|^2 \tau}F_0 (\tau)d\tau=0 \end{equation} for almost all $t\in(-\infty,0)$. On the other hand, assuming we can interchange the order of integration in the following calculation (we will justify this after the calculation), we find for $b\in\mathbb{R}$ that \begin{align}\label{6.22}
\notag &\int^{0}_{-\infty}\Biggl(\int^{0}_{t}(\zeta-t)^{-\alpha}\cos b\zeta \,d\zeta\Biggr)g(t)\,dt\\
\notag &=\int^{0}_{-\infty}e^{|y|^2 \tau}F_0 (\tau)\Biggl(\int^{0}_{\tau}\cos b\zeta\Biggl(\int^{\zeta}_{\tau}(t-\tau)^{\alpha-1}(\zeta-t)^{-\alpha}dt\Biggr)d\zeta\Biggr)d\tau\\
&=C(\alpha)\int^{0}_{-\infty}e^{|y|^2 \tau}F_0 (\tau) \Biggl(\int^{0}_{\tau}\cos b\zeta \,d\zeta\Biggr)d\tau \end{align} because making the change of variables $t=\zeta-(\zeta-\tau)s$ we see that $$\int^{\zeta}_{\tau}(t-\tau)^{\alpha-1}(\zeta-t)^{-\alpha}dt=\int^{1}_{0}(1-s)^{\alpha-1}s^{-\alpha}ds=C(\alpha).$$
The calculation \eqref{6.22} is justified by Fubini's theorem and the fact that if we replace $\cos b\zeta$ and $g(t)$ with $|\cos b\zeta|$ and
$$g_0 (t)=\int^{t}_{-\infty}(t-\tau)^{\alpha-1}e^{|y|^2 \tau}|F_0 (\tau)|\,d\tau$$ respectively in the above calculation we get by Fubini's theorem for nonnegative functions that \begin{align*}
&\int^{0}_{-\infty}\int^{0}_{t}(\zeta-t)^{-\alpha}|\cos b\zeta|\,d\zeta\, g_0(t)\,dt\\
&\leq C(\alpha)\int^{0}_{-\infty}e^{|y|^2 \tau}|F_0 (\tau)|
\Biggl(\int^{0}_{\tau}|\cos b\zeta|\,d\zeta\Biggr)d\tau\\
&\leq C(\alpha)\int^{0}_{-\infty}(-\tau)e^{|y|^2 \tau}|F_0 (\tau)|\,d\tau<\infty \end{align*} by \eqref{6.20}.
It follows now from \eqref{6.21}, \eqref{6.22} and \eqref{6.19} that
$$0=\int^{0}_{-\infty}e^{|y|^2 \tau}F(y,\tau)\sin b\tau \,d\tau$$ for all $y\in\mathbb{R}^n \backslash\{0\}$ and all $b\in\mathbb{R}$. Thus, since the Fourier sine transform is one-to-one on $L^1 (-\infty,0)$, we have $F(y,\cdot)=0$ in $L^1 (-\infty,0)$ for all $y\in\mathbb{R}^n \backslash\{0\}$. Hence by Fubini's theorem, $F=0$ in $L^1 (\mathbb{R}^n \times(-\infty,0))$, which together with \eqref{6.16} and \eqref{6.14} implies \eqref{6.12}.\\
\noindent\underline{Case II.} Suppose \eqref{2.12}$_1$ holds. Let $f_T =f\raisebox{2pt}{$\chi$}_{\mathbb{R}^n \times\mathbb{R}_T}$ and $u=J_\alpha f_T$. Then by \eqref{2.13} we have $$f_T \in L^p (\mathbb{R}^n \times\mathbb{R}),$$ and by \eqref{2.4} and \eqref{6.11} we have \begin{equation}\label{6.23}
u=0\quad\text{in } \mathbb{R}^n \times\mathbb{R}_T . \end{equation} Let $J^{-\alpha}_{\varepsilon}u$ be as defined in Theorem \ref{thmA.1}. By \eqref{6.23} we have for $l>\alpha$ that $(\Delta^{l}_{y,\tau}u)(x,t)=0$ for $(x,t)\in\mathbb{R}^n \times\mathbb{R}_T$ and $(y,\tau)\in\mathbb{R}^n \times(0,\infty)$. Thus for $\varepsilon>0$ we have $$J^{-\alpha}_{\varepsilon}u=0\quad\text{in } \mathbb{R}^n \times\mathbb{R}_T .$$ Hence \eqref{6.12} follows from Theorem \ref{thmA.1}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm2.4}]
For $a,\tau>0$ and $\delta\geq0$ we have
\begin{align}\label{L1}
\notag \int_{|\xi|>\delta}\Phi_{\alpha,a,1}(\xi,\tau)\,d\xi
\notag &=\int_{|\xi|>\delta}\frac{\tau^{\alpha-1}}{\Gamma(\alpha)}\,\frac{1}{(4\pi a^2 \tau)^{n/2}}e^{-\frac{|\xi|^2}{4a^2 \tau}}\,d\xi\\
&=\frac{\tau^{\alpha-1}}{\Gamma(\alpha)}\,\frac{1}{\pi^{n/2}}\int_{|\eta|>\frac{\delta}{\sqrt{4a^2 \tau}}}e^{-|\eta|^2}d\eta.
\end{align}
In particular, taking $\delta=0$ we find that
\begin{equation}\label{L2}
\int_{\mathbb{R}^n}\Phi_{\alpha,a,1}(\xi,\tau)\,d\xi=\frac{\tau^{\alpha-1}}{\Gamma(\alpha)}\quad\text{ for }a,\tau>0.
\end{equation}
Let $\Omega$ be a compact subset of $\mathbb{R}^n \times\mathbb{R}$. Choose $T>0$ such that
\begin{equation}\label{L3}
f=0\quad\text{ on }\mathbb{R}^n \times\mathbb{R}_{-T}
\end{equation}
and
\begin{equation}\label{L4}
\Omega\subset\mathbb{R}^n \times\mathbb{R}_T .
\end{equation}
Let $\varepsilon>0$. Since $f$ is uniformly continuous on $\mathbb{R}^n \times\mathbb{R}$ there exists $\delta>0$ such that
\begin{equation}\label{L5}
|f(x-\xi,\zeta)-f(x,\zeta)|<\varepsilon
\end{equation}
whenever $x,\xi\in\mathbb{R}^n ,\zeta\in\mathbb{R}$, and $|\xi|<\delta$.
Let $(x,t)\in\Omega$. Then $t<T$ and thus for $\tau\geq2T$ we have
$$t-\tau<T-2T=-T.$$
Hence for $a>0$ we have by \eqref{L3} and \eqref{L2} that
\begin{align}\label{L6}
\notag &|(J_{\alpha,a,1}f-J_{\alpha,0,1}f)(x,t)|\\
\notag &\leq\int^{2T}_{0}\left|\int_{\mathbb{R}^n}\Phi_{\alpha,a,1}(\xi,\tau)f(x-\xi,t-\tau)\,d\xi-\frac{\tau^{\alpha-1}}{\Gamma(\alpha)}f(x,t-\tau)\right|d\tau\\
\notag &=\int^{2T}_{0}\left|\int_{\mathbb{R}^n}\Phi_{\alpha,a,1}(\xi,\tau)(f(x-\xi,t-\tau)-f(x,t-\tau))\,d\xi\right|d\tau\\
&\leq K_1 (x,t)+K_2 (x,t)
\end{align}
where
$$K_1 (x,t)=\int^{2T}_{0}\int_{|\xi|<\delta}\Phi_{\alpha,a,1}(\xi,\tau)|f(x-\xi,t-\tau)-f(x,t-\tau)|\,d\xi\,d\tau$$
and
$$K_2 (x,t)=\int^{2T}_{0}\int_{|\xi|>\delta}\Phi_{\alpha,a,1}(\xi,\tau)|f(x-\xi,t-\tau)-f(x,t-\tau)|\,d\xi\,d\tau.$$
From \eqref{L5} and \eqref{L2} we conclude that
$$K_1 (x,t)\leq\varepsilon\int^{2T}_{0}\left(\int_{\mathbb{R}^n}\Phi_{\alpha,a,1}(\xi,\tau)\,d\xi\right)d\tau=\varepsilon\int^{2T}_{0}\frac{\tau^{\alpha-1}}{\Gamma(\alpha)}d\tau$$
and letting $M=2\| f\|_{L^\infty (\mathbb{R}^n \times\mathbb{R})}$ and using \eqref{L1} we obtain
$$K_2 (x,t)\leq M\int^{2T}_{0}\left(\int_{|\xi|>\delta} \Phi_{\alpha,a,1}(\xi,\tau)\,d\xi\right)d\tau\leq M\left(\int^{2T}_{0}\frac{\tau^{\alpha-1}}{\Gamma(\alpha)}d\tau\right)C(n,a,\delta,T)$$
where
$$C(n,a,\delta,T)=\frac{1}{\pi^{n/2}}\int_{|\eta|>\frac{\delta}{\sqrt{8a^2 T}}}e^{-|\eta|^2}d\eta\to0\quad\text{ as }a\to0^+ .$$
The theorem therefore follows from \eqref{L6}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm2.5}]
For $b>0,\delta>0$, and $\xi\in\mathbb{R}^n \backslash\{0\}$ we have
\begin{align}
\notag \int^{\infty}_{\delta}\Phi_{\alpha,1,b}(\xi,\tau)\,d\tau
&=\int^{\infty}_{\delta}
\frac{(\tau/b)^{\alpha-1}}{\Gamma(\alpha)}\,\frac{1}{(4\pi\tau/b)^{n/2}}e^{-\frac{b|\xi|^2}{4\tau}}d\tau/b\\
\notag &=\int^{\infty}_{\delta}\frac{1}{\Gamma(\alpha)(4\pi)^{n/2}}\left(\frac{\tau}{b}\right)^{\alpha-1-n/2}e^{-\frac{b|\xi|^2}{4\tau}}\frac{1}{b}\,d\tau\\
\notag &=\int^{\frac{b|\xi|^2}{4\delta}}_{0}\frac{1}{\Gamma(\alpha)(4\pi)^{n/2}}\left(\frac{|\xi|^2}{4\zeta}\right)^{\alpha-1-n/2}e^{-\zeta}\frac{|\xi|^2}{4\zeta^2}\,d\zeta\\
\notag &=\frac{(|\xi|^2 /4)^{\alpha-n/2}}{\Gamma(\alpha)(4\pi)^{n/2}}\int^{\frac{b|\xi|^2}{4\delta}}_{0}\zeta^{n/2-\alpha-1}e^{-\zeta}d\zeta\\
\label{R1} &=\frac{|\xi|^{2\alpha-n}}{4^\alpha \pi^{n/2}\Gamma(\alpha)}\int^{\frac{b|\xi|^2}{4\delta}}_0\zeta^{n/2-\alpha-1}e^{-\zeta}d\zeta\\
\label{R2} &\leq\left(\frac{|\xi|^{2\alpha-n}}{4^\alpha \pi^{n/2}\Gamma(\alpha)}\right)\frac{1}{n/2-\alpha}\left(\frac{b|\xi|^2}{4\delta}\right)^{n/2-\alpha}
=C(n,\alpha)\left(\frac{b}{\delta}\right)^{n/2-\alpha}.
\end{align}
Moreover, letting $\delta\to0^+$ in \eqref{R1} we obtain
\begin{equation}\label{R3}
\int^{\infty}_{0}\Phi_{\alpha,1,b}(\xi,\tau)\,d\tau=\frac{|\xi|^{2\alpha-n}}{\gamma(n,\alpha)}\quad\text{ for }b>0\text{ and }\xi\neq0,
\end{equation}
where $\gamma$ is given in \eqref{R0}.
Let $\Omega$ be a compact subset of $\mathbb{R}^n \times\mathbb{R}$. Choose $R>0$ such that
\begin{equation}\label{R4}
f=0\quad\text{ on }(\mathbb{R}^n \backslash B_R (0))\times\mathbb{R}
\end{equation}
and
\begin{equation}\label{R5}
\Omega\subset B_R (0)\times\mathbb{R}.
\end{equation}
Let $\varepsilon>0$. Since $f$ is uniformly continuous on $\mathbb{R}^n \times\mathbb{R}$ there exists $\delta>0$ such that
\begin{equation}\label{R6}
|f(\eta,t-\tau)-f(\eta,t)|<\varepsilon
\end{equation}
whenever $\eta\in\mathbb{R}^n$, $t,\tau\in\mathbb{R}$, and $|\tau|<\delta$.
Let $(x,t)\in\Omega$. Then $|x|<R$ and thus for $|\xi|\geq2R$ we have
$$|x-\xi|\geq|\xi|-|x|>2R-R=R.$$
Hence for $b>0$ we find by \eqref{R4} and \eqref{R3} that
\begin{align}\label{R7}
\notag &|(J_{\alpha,1,b}f-J_{\alpha,1,0}f)(x,t)|\\
\notag &\le \int_{|\xi|<2R}\left|\int^{\infty}_{0}\Phi_{\alpha,1,b}(\xi,\tau)f(x-\xi,t-\tau)\,d\tau-\frac{f(x-\xi,t)}{\gamma(n,\alpha)|\xi|^{n-2\alpha}}\right|d\xi\\
\notag &=\int_{|\xi|<2R}\left|\int^{\infty}_{0}\Phi_{\alpha,1,b}(\xi,\tau)(f(x-\xi,t-\tau)-f(x-\xi,t))\,d\tau\right|d\xi\\
&\leq K_1 (x,t)+K_2 (x,t)
\end{align}
where
$$K_1 (x,t)=\int_{|\xi|<2R}\int^{\delta}_{0}\Phi_{\alpha,1,b}(\xi,\tau)|f(x-\xi,t-\tau)-f(x-\xi,t)|\,d\tau \,d\xi$$
and
$$K_2 (x,t)=\int_{|\xi|<2R}\int^{\infty}_{\delta}\Phi_{\alpha,1,b}(\xi,\tau)|f(x-\xi,t-\tau)-f(x-\xi,t)|\,d\tau \,d\xi.$$
From \eqref{R6} and \eqref{R3} we conclude
$$K_1 (x,t)\leq\varepsilon\int_{|\xi|<2R}\left(\int^{\infty}_{0}\Phi_{\alpha,1,b}(\xi,\tau)\,d\tau\right)d\xi=\varepsilon\int_{|\xi|<2R}\frac{d\xi}{\gamma(n,\alpha)|\xi|^{n-2\alpha}}$$
and letting $M=2\| f\|_{L^\infty (\mathbb{R}^n \times\mathbb{R})}$ and using \eqref{R2} we obtain
\begin{align*}
K_2 (x,t)&\leq M\int_{|\xi|<2R}\left(\int^{\infty}_{\delta}\Phi_{\alpha,1,b}(\xi,\tau)\,d\tau\right)d\xi\\
&\leq MC(n,\alpha)\left(\frac{b}{\delta}\right)^{n/2-\alpha}|B_{2R}(0)|\to0\quad\text{ as }b\to0^+.
\end{align*}
The theorem therefore follows from \eqref{R7}. \end{proof}
\section{Preliminary results for $J_\alpha$ problems}\label{sec7} In this section we provide some lemmas needed for the proofs of our results in Section \ref{sec4} dealing with solutions of the $J_\alpha$ problem \eqref{4.4}--\eqref{4.7}.
Let $\Omega=\mathbb{R}^n \times(a,b)$ where $n\geq1$ and $a<b$. Lemmas \ref{lem7.1} and \ref{lem7.2}
give estimates for the convolution \begin{equation}\label{7.1}
(V_{\alpha,\Omega}f)(x,t)=\iint_{\Omega}\Phi_\alpha (x-\xi,t-\tau)f(\xi,\tau)\, d\xi \, d\tau \end{equation} where $\alpha>0$ and $\Phi_\alpha$ is defined in \eqref{2.2}.
\begin{rem}\label{rem7.1}
Note that if $f:\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ is a nonnegative measurable function such that $\| f\|_{L^\infty (\mathbb{R}^n \times\mathbb{R}_a)}=0$, then
$$V_{\alpha,\Omega}f=J_\alpha f\quad\text{in } \Omega:=\mathbb{R}^n \times(a,b).$$ \end{rem}
\begin{lem}\label{lem7.1}
For $\alpha>0,\,\Omega=\mathbb{R}^n \times(a,b)$ and $f\in L^\infty (\Omega)$ we have
$$\| V_{\alpha,\Omega}f\|_{L^\infty (\Omega)}\leq\frac{(b-a)^\alpha}{\Gamma(\alpha+1)}\| f\|_{L^\infty (\Omega)}.$$ \end{lem}
\begin{proof}
The lemma is obvious if $\| f\|_{L^\infty (\Omega)}=0$. Hence we can assume $\| f\|_{L^\infty (\Omega)}>0$. Then for $(x,t)\in\Omega$
\begin{align*}
\frac{|(V_{\alpha,\Omega}f)(x,t)|}{\| f\|_{L^\infty (\Omega)}}&\leq\int^{t}_{a}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}\overbrace{\Biggl(\int_{\xi\in\mathbb{R}^n}\Phi_1 (x-\xi,t-\tau)d\xi\Biggr)}^{=1}d\tau\\
&=-\frac{(t-\tau)^\alpha}{\Gamma(\alpha+1)}\Biggr|^{\tau=t}_{\tau=a}=\frac{(t-a)^\alpha}{\Gamma(\alpha+1)}\\
&\leq\frac{(b-a)^\alpha}{\Gamma(\alpha+1)}.
\end{align*} \end{proof}
\begin{lem}\label{lem7.2}
Let $p,q\in[1,\infty]$, $\alpha$, and $\delta$ satisfy
\begin{equation}\label{7.2}
0\leq\delta:=\frac{1}{p}-\frac{1}{q}<\frac{2\alpha}{n+2}<1.
\end{equation}
Then $V_{\alpha,\Omega}$ maps $L^p (\Omega)$ continuously into $L^q (\Omega)$ and for $f\in L^p (\Omega)$ we have
$$\| V_{\alpha,\Omega}f\|_{L^q (\Omega)}\leq M\| f\|_{L^p (\Omega)}$$
where
$$M=C(b-a)^{\frac{2\alpha-(n+2)\delta}{2}}\text{ for some constant }C=C(n,\alpha,\delta).$$ \end{lem}
\begin{proof}
Define $r\in[1,\infty)$ by
\begin{equation}\label{7.3}
1-\frac{1}{r}=\delta
\end{equation}
and define $P_\alpha ,\bar f:\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ by
\[ P_\alpha (x,t)=\Phi_\alpha (x,t)\raisebox{2pt}{$\chi$}_{(0,b-a)}(t) \]
and
$$\bar f(x,t)=
\begin{cases}
f(x,t) & \text{if }(x,t)\in\Omega\\
0 & \text{elsewhere.}
\end{cases}$$
Since for $t\in(a,b)$ and $\tau\in(a,t)$ we have $t-\tau\in(0,b-a)$ we see for $(x,t)\in\Omega$ that
\begin{align}\label{7.4}
\notag V_{\alpha,\Omega}f(x,t)&=\int^{t}_{a}\int_{\xi\in\mathbb{R}^n}P_\alpha (x-\xi,t-\tau)f(\xi,\tau)\, d\xi \, d\tau\\
\notag &=\iint_\Omega P_\alpha (x-\xi,t-\tau)f(\xi,\tau)\, d\xi \, d\tau\\
&=(P_\alpha *\bar f)(x,t)
\end{align}
where $*$ is the convolution operation in $\mathbb{R}^n \times\mathbb{R}$.
Also since
$$\int_{\mathbb{R}^n}e^{-r|x|^2 /(4t)}dx=\left(\frac{4\pi t}{r}\right)^{n/2}$$
we have by \eqref{7.2} and \eqref{7.3} that
\begin{align*}
\| P_\alpha \|_{L^r (\mathbb{R}^n \times\mathbb{R})}&=\frac{1}{\Gamma(\alpha)(4\pi)^{n/2}}\biggl(\int^{b-a}_{0}t^{r(\alpha-1-n/2)}\biggl(\int_{x\in\mathbb{R}^n}e^{-r|x|^2 /(4t)}dx\Biggr)dt\Biggr)^{1/r}\\
&=C(n,\alpha,r)\Biggl(\int^{b-a}_{0}t^{r(\alpha-1-n/2)+\frac{n}{2}}dt\Biggr)^{1/r}\\
&=C(n,\alpha,r)(b-a)^{\frac{2\alpha-(n+2)\delta}{2}}.
\end{align*}
Thus by \eqref{7.4}, \eqref{7.2}, \eqref{7.3}, and Young's inequality we have
\begin{align*}
\| V_{\alpha,\Omega} f\|_{L^q (\Omega)}&=\| P_\alpha *\bar f\|_{L^q (\Omega)}\leq\| P_\alpha *\bar f\|_{L^q (\mathbb{R}^n \times\mathbb{R})}\\
&\leq\| P_\alpha \|_{L^r (\mathbb{R}^n \times\mathbb{R})}\|\bar f\|_{L^p (\mathbb{R}^n \times\mathbb{R})}\\
&\leq C(b-a)^{\frac{2\alpha-(n+2)\delta}{2}}\| f\|_{L^p (\Omega)}.
\end{align*} \end{proof}
\begin{lem}\label{lem7.3}
Suppose $f,\,p$, and $ K$ satisfy \eqref{4.4}--\eqref{4.7} and $(\lambda,\alpha)\in A\cup B$. Then
$$f\in X^\infty .$$ \end{lem}
\begin{proof}
Let $T>0$ be fixed. Then $f\in L^p (\mathbb{R}^n \times \mathbb{R}_T )$
and to complete the proof it suffices to show
\begin{equation}\label{7.5}
f\in L^\infty (\mathbb{R}^n \times(0,T)).
\end{equation}
We consider two cases.\\
\noindent\underline{Case I.} Suppose $0<\alpha<\frac{n+2}{2p}$. Then
$$0<\lambda<\frac{n+2}{n+2-2\alpha p}$$
and thus there exists $\varepsilon=\varepsilon(n,\lambda,\alpha,p)>0$ such that
\[ \varepsilon<2\alpha p,\qquad 2\varepsilon<n+2-2\alpha p, \quad\text{ and }\quad \lambda<\frac{n+2}{n+2-2\alpha p+2\varepsilon}. \]
Suppose
\begin{equation}\label{7.6}
f\in L^{p_0}(\mathbb{R}^n \times(0,T))\quad \text{ for some }p_0 \in\left[p,\frac{n+2}{2\alpha}\right).
\end{equation} Then letting \[ q=\frac{(n+2)p_0}{n+2-2\alpha p_0 +\varepsilon} \] we have \[ \frac{1}{p_0}-\frac{1}{q}=\frac{2\alpha}{n+2}-\frac{\varepsilon}{(n+2)p_0} \in \left(0,\frac{2\alpha}{n+2}\right). \] Hence by \eqref{4.6}, Remark \ref{rem7.1}, and Lemma \ref{lem7.2} we see that
\[J_\alpha f\in L^q (\mathbb{R}^n \times(0,T)).\]
Thus by \eqref{4.5} we find that
\begin{equation}\label{7.7}
0\leq f\leq K(J_\alpha f)^\lambda \in L^{q/\lambda}(\mathbb{R}^n \times(0,T)).
\end{equation}
Since
\begin{align*}
\frac{q/\lambda}{p_0}&=\frac{n+2}{\lambda(n+2-2\alpha p_0 +\varepsilon)}\geq\frac{n+2-2\alpha p+2\varepsilon}{n+2-2\alpha p_0 +\varepsilon}\\
&\geq\frac{n+2-2\alpha p+2\varepsilon}{n+2-2\alpha p+\varepsilon}=C(n,\lambda,\alpha,p)>1
\end{align*}
we see that starting with $p_0 =p$ and iterating a finite number of times the process of going from \eqref{7.6} to \eqref{7.7} yields
$$f\in L^{p_0}(\mathbb{R}^n \times(0,T))\quad\text{ for some }p_0 >\frac{n+2}{2\alpha}.$$
Hence \eqref{7.5} follows from \eqref{4.5} and Lemma \ref{lem7.2}.\\
\noindent\underline{Case II.} Suppose $\alpha\geq\frac{n+2}{2p}$. Clearly there exists $\widehat{\alpha}\in(0,\frac{n+2}{2p})$ such that $(\lambda,\widehat{\alpha})\in A\cup B$. Then for $(x,t),(\xi,\tau)\in\mathbb{R}^n \times(0,T)$ we have
\begin{align*}
\frac{\Phi_\alpha (x-\xi,t-\tau)}{\Phi_{\widehat{\alpha}}(x-\xi,t-\tau)}&=(t-\tau)^{\alpha-\widehat{\alpha}}\Gamma(\widehat{\alpha})/\Gamma(\alpha)\\
&\leq T^{\alpha-\widehat{\alpha}}\Gamma(\widehat{\alpha})/\Gamma(\alpha)\\
&=C(T,\alpha,\widehat{\alpha}).
\end{align*}
Thus for $(x,t)\in\mathbb{R}^n \times(0,T)$ we have
$$J_\alpha f(x,t)\leq C(T,\alpha,\widehat{\alpha})J_{\widehat{\alpha}}f(x,t)$$
and hence by \eqref{4.5} we see that
$$0\leq f\leq K C(T,\alpha,\widehat{\alpha})^\lambda
(J_{\widehat{\alpha}}f)^\lambda \quad\text{almost everywhere in }\mathbb{R}^n \times(0,T).$$
It follows therefore from Case I that $f$ satisfies \eqref{7.5}. \end{proof}
\begin{lem}\label{lem7.4}
Suppose $x\in\mathbb{R}^n$ and $t,\tau\in(0,\infty)$ satisfy
\begin{equation}\label{7.8}
|x|^2 <t \quad\text{and}\quad \frac{t}{4}<\tau<\frac{3t}{4}.
\end{equation}
Then
$$\int_{|\xi|^2 <\tau}\Phi_1 (x-\xi,t-\tau)\,d\xi\geq C(n)>0$$
where $\Phi_\alpha$ is defined by \eqref{2.2}. \end{lem}
\begin{proof}
Making the change of variables $z=\frac{x-\xi}{\sqrt{4(t-\tau)}}$, letting $e_1 =(1,0,...,0)$, and using \eqref{7.8} and \eqref{2.2} we find that
\begin{align*}
\int_{|\xi|^2 <\tau}\Phi_1 (x-\xi,t-\tau)\,d\xi
&=\frac{1}{\pi^{n/2}}\int_{|z-\frac{x}{\sqrt{4(t-\tau)}}|<\frac{\sqrt{\tau}}{\sqrt{4(t-\tau)}}}e^{-|z|^2}dz\\
&\geq\frac{1}{\pi^{n/2}}\int_{|z-\frac{\sqrt{t}}{\sqrt{4(t-\tau)}}e_1 |<\frac{\sqrt{\tau}}{\sqrt{4(t-\tau)}}}e^{-|z|^2}dz\\
&\geq\frac{1}{\pi^{n/2}}\int_{|z-e_1 |<\frac{1}{2\sqrt{3}}}e^{-|z|^2}dz\\
&=C(n)>0
\end{align*}
where in this calculation we used the fact that the integral of $e^{-|z|^2}$ over a ball is decreased if the absolute value of the center of the ball is increased or the radius of the ball is decreased. \end{proof}
\begin{lem}\label{lem7.5}
For $\tau<t\leq T$ and $|x|\leq\sqrt{T-t}$ we have
$$\int_{|\xi|<\sqrt{T-\tau}}\Phi_1(x-\xi,t-\tau)\,d\xi\geq C$$
where $C=C(n)$ is a positive constant. \end{lem}
\begin{proof}
Making the change of variables $z=\frac{x-\xi}{\sqrt{t-\tau}}$ and letting $e_1 =(1,0,...,0)$ we get
\begin{align}
\notag
\int_{|\xi|<\sqrt{T-\tau}}\Phi_1(x-\xi,t-\tau)\,d\xi&=\frac{1}{(4\pi)^{n/2}}
\frac{1}{(t-\tau)^{n/2}}\int_{|\xi|<\sqrt{T-\tau}}e^{-\frac{|x-\xi|^2}{4(t-\tau)}}d\xi\notag\\
&=\frac{1}{(4\pi)^{n/2}}\int_{|z-\frac{x}{\sqrt{t-\tau}}|<\frac{\sqrt{T-\tau}}{\sqrt{t-\tau}}}
e^{-|z|^2/4}dz \label{7.9}\\
&\geq\frac{1}{(4\pi)^{n/2}}\int_{|z-\frac{\sqrt{T-\tau}}{\sqrt{t-\tau}}e_1|<\frac{\sqrt{T-\tau}}{\sqrt{t-\tau}}}e^{-|z|^2/4}dz \label{7.10}\\
&\geq\frac{1}{(4\pi)^{n/2}}\int_{|z-e_1
|<1}e^{-|z|^2/4}dz, \label{7.11}
\end{align}
where the last two inequalities need some explanation. Since
$|x|\leq\sqrt{T-t}<\sqrt{T-\tau}$, the center of the ball of integration
in \eqref{7.9} is closer to the origin than the center of the ball
of integration in \eqref{7.10}. Thus, since the integrand is a
decreasing function of $|z|$, we obtain \eqref{7.10}. Since
$\sqrt{T-\tau}\geq\sqrt{t-\tau}$, the ball of integration in \eqref{7.10}
contains the ball of integration in \eqref{7.11} and hence
\eqref{7.11} holds. \end{proof}
\begin{lem}\label{lem7.6}
Suppose $\alpha>0$, $\gamma>0$, $p\geq1$, and
\[f_0
(x,t)=\left(\frac{1}{t}\right)^{\frac{n+2}{2p}-\gamma}\raisebox{2pt}{$\chi$}_{\Omega_0}(x,t)\quad\text{
where }\Omega_0 =\{(x,t)\in \mathbb{R}^n\times\mathbb{R}:|x|^2 <t\}.\]
Then $f_0 \in X^p$ and
\[C_1 \left(\frac{1}{t}\right)^{\frac{n+2}{2p}-\gamma-\alpha}\leq J_\alpha f_0 (x,t)\leq C_2 \left(\frac{1}{t}\right)^{\frac{n+2}{2p}-\gamma-\alpha}\quad\text{for }(x,t)\in\Omega_0\]
where $C_1$ and $C_2$ are positive constants depending only on $n,\alpha,\gamma$, and $p$. \end{lem}
\begin{proof}
For $T>0$ we have
\begin{align*}
\| f_0 \|^{p}_{L^p(\mathbb{R}^n \times\mathbb{R}_T )}&=\int^{T}_{0}\int_{|x|<\sqrt{t}}\left(\frac{1}{t}\right)^{\frac{n+2}{2}-\gamma p}\, dx \, dt\\
&=C(n)\int^{T}_{0}t^{\gamma p-1}dt<\infty
\end{align*} because $\gamma p>0$. Hence $f_0\in X^p$.
Also for $(x,t)\in \mathbb{R}^n\times (0,\infty)$ we have \begin{align}
\notag J_\alpha f_0 (x,t)&=\int^{t}_{-\infty}\int_{\xi\in\mathbb{R}^n}\Phi_\alpha (x-\xi,t-\tau)f_0 (\xi,\tau)\, d\xi \, d\tau\\
\label{7.12}&=\frac{1}{\Gamma(\alpha)}\int^{t}_{0}(t-\tau)^{\alpha-1}\left(\frac{1}{\tau}\right)^{\frac{n+2}{2p}-\gamma}\biggl(\int_{|\xi|^2 <\tau}\Phi_1(x-\xi,t-\tau)\,d\xi\Biggr)d\tau. \end{align} Hence by Lemma \ref{lem7.4} we see for $(x,t)\in\Omega_0$ that \begin{align*}
J_\alpha f_0 (x,t) &\geq C(n,\alpha)\int^{3t/4}_{t/4}(t-\tau)^{\alpha-1}\left(\frac{1}{\tau}\right)^{\frac{n+2}{2p}-\gamma}d\tau\\ &=C(n,\alpha)t^{\alpha-\frac{n+2}{2p}+\gamma}\int^{3/4}_{1/4}(1-s)^{\alpha-1}\left(\frac{1}{s}\right)^{\frac{n+2}{2p}-\gamma}ds\quad\text{where }\tau=ts\\
&=C(n,\alpha,\gamma,p)t^{\alpha-\frac{n+2}{2p}+\gamma}.
\end{align*}
Moreover for $(x,t)\in\mathbb{R}^n \times(0,\infty)$ and $0<\tau<t/2$ we have
\begin{align*}
\int_{|\xi|^2 <\tau}\Phi_1
(x-\xi,t-\tau)\,d\xi&=\frac{1}{\pi^{n/2}}
\int_{|z-\frac{x}{\sqrt{4(t-\tau)}}|<\frac{\sqrt{\tau}}{\sqrt{4(t-\tau)}}}e^{-|z|^2}dz \quad\text{where }z=\frac{x-\xi}{\sqrt{4(t-\tau)}}\\
&\leq\frac{|B_1(0)|}{\pi^{n/2}} \Biggl(\frac{\sqrt{\tau}}{\sqrt{4(t-\tau)}}\Biggr)^n
\end{align*}
and for $(x,t)\in\mathbb{R}^n \times(0,\infty)$ and $t/2<\tau<t$ we have
$$\int_{|\xi|^2 <\tau}\Phi_1 (x-\xi,t-\tau)\,d\xi\leq\int_{\mathbb{R}^n}\Phi_1 (x-\xi,t-\tau)\,d\xi=1.$$
Thus by \eqref{7.12} for $(x,t)\in\mathbb{R}^n \times(0,\infty)$ we have
\begin{align*}
J_\alpha f_0 (x,t)&\leq C(n,\alpha)\Biggl[\int^{t/2}_{0}(t-\tau)^{\alpha-1}\left(\frac{1}{\tau}\right)^{\frac{n+2}{2p}-\gamma}\left(\frac{\tau}{t-\tau}\right)^{n/2}d\tau\\
&\phantom{\leq C(n,\alpha)+}+\int^{t}_{t/2}(t-\tau)^{\alpha-1}\left(\frac{1}{\tau}\right)^{\frac{n+2}{2p}-\gamma}d\tau\Biggr]\\
&=C(n,\alpha)t^{\alpha-\frac{n+2}{2p}+\gamma}\Biggl[\int^{1/2}_{0}(1-s)^{\alpha-1}\left(\frac{1}{s}\right)^{\frac{n+2}{2p}-\gamma}\left(\frac{s}{1-s}\right)^{n/2}ds\\
&\phantom{=C(n,\alpha)t^{\alpha-\frac{n+2}{2p}+\gamma}+}+\int^{1}_{1/2}(1-s)^{\alpha-1}\left(\frac{1}{s}\right)^{\frac{n+2}{2p}-\gamma}ds\Biggr]\\
&=C(n,\alpha,\gamma,p)t^{\alpha-\frac{n+2}{2p}+\gamma}
\end{align*}
because $\alpha$ and $\gamma$ are positive. \end{proof}
\begin{lem}\label{lem7.7}
Suppose $\alpha>0$, $\gamma\in\mathbb{R}$, $0\leq t_0 <T,\,p\in[1,\infty)$, and
\[ f(x,t)=\left(\frac{1}{T-t}\right)^{\frac{n+2}{2p}-\gamma}\raisebox{2pt}{$\chi$}_\Omega(x,t) \]
where
$$\Omega=\{(x,t)\in\mathbb{R}^n \times(t_0 ,T):|x|<\sqrt{T-t}\}.$$
Then
$$J_\alpha f(x,t)\geq C\left(\frac{1}{T-t}\right)^{\frac{n+2}{2p}-\gamma-\alpha}$$
for $(x,t)\in\Omega^+ :=\{(x,t)\in\Omega:\frac{T+t_0}{2}<t<T\}$ where $C=C(n,\alpha,\gamma,p)>0$. Moreover,
\begin{equation}\label{7.13}
f\in L^p (\mathbb{R}^n \times\mathbb{R})\text{ if and only if }\gamma>0
\end{equation}
and in this case
\begin{equation}\label{7.14}
\| f\|^{p}_{L^p (\mathbb{R}^n \times\mathbb{R})}=C(n)\int^{T-t_0}_{0}s^{\gamma p-1}ds.
\end{equation} \end{lem}
\begin{proof}
Since
\begin{align*}
\| f\|^{p}_{L^p (\mathbb{R}^n \times\mathbb{R})}&=\int^{T}_{t_0}\int_{|x|<\sqrt{T-t}}(T-t)^{\gamma p-\frac{n+2}{2}}\, dx \, dt\\
&=C(n)\int^{T}_{t_0}(T-t)^{\gamma p-1}dt=C(n)\int^{T-t_0}_{0}s^{\gamma p-1}ds
\end{align*}
we see that \eqref{7.13} and \eqref{7.14} hold.
Let $r=\frac{n+2}{2p}-\gamma-\alpha$. Then for $(x,t)\in\Omega$ we have
\begin{align*}
J_\alpha
f(x,t)&=\int^{t}_{t_0}(T-\tau)^{-r-\alpha}\int_{|\xi|<\sqrt{T-\tau}}\Phi_\alpha
(x-\xi,t-\tau)\, d\xi \, d\tau\\
&=C\int^{t}_{t_0}(T-\tau)^{-r-\alpha}(t-\tau)^{\alpha-1}\Biggl(\int_{|\xi|<\sqrt{T-\tau}}\Phi_1 (x-\xi,t-\tau)\,d\xi\Biggr)d\tau\\
&\geq C\int^{t}_{t_0}(T-\tau)^{-r-\alpha}(t-\tau)^{\alpha-1}d\tau, \quad\text{ by Lemma \ref{lem7.5},}\\
&=C(T-t)^{-r}g\biggl(\frac{t-t_0}{T-t}\biggr)
\end{align*}
where $g(z)=\int^{z}_{0}(\zeta+1)^{-r-\alpha}\zeta^{\alpha-1}d\zeta$ and where we made the change of variables $t-\tau=(T-t)\zeta$. Thus
$$J_\alpha f(x,t)\geq C(T-t)^{-r}\quad\text{for }(x,t)\in\Omega^+$$
because $\frac{t-t_0}{T-t}>1$ in $\Omega^+$. \end{proof}
\section{Proofs of results for $J_\alpha$ problems}\label{sec8} In this section we prove our results stated in Section \ref{sec4} concerning pointwise bounds for nonnegative solutions $f$ of \eqref{4.4}--\eqref{4.7}. As explained in Section \ref{sec4}, these results immediately imply Theorems \ref{thm3.1}--\ref{thm3.6} in Section \ref{sec3}.
\begin{rem}\label{rem8.1}
The function $g:\mathbb{R}^n \times\mathbb{R}\to[0,\infty)$ defined by
$$g(x,t)=g(t)=
\begin{cases}
(Mt^\alpha )^{\frac{\lambda}{1-\lambda}} & \text{for }t>0\\
0 & \text{for }t\leq0,
\end{cases}$$
where $\alpha>0$, $0<\lambda<1$, and $M=M(\alpha,\lambda)$ is defined in \eqref{4.11}, satisfies
\begin{equation}\label{8.1}
g=(J_\alpha g)^\lambda \quad\text{in } \mathbb{R}^n \times\mathbb{R}
\end{equation}
which can be verified using \eqref{5.3}. Even though $g\notin X^p$
for all $p\geq1$, it will be useful in our analysis of solutions of
\eqref{4.5}, \eqref{4.6} which are in $X^p$ for some $p\geq1$. \end{rem}
\begin{rem}\label{rem8.2}
It will be convenient to scale \eqref{4.5} as follows. Suppose $
K,\lambda,\alpha,T\in(0,\infty)$, $\lambda\neq1$, and $f,\bar f:\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ are nonnegative measurable functions such that $f=\bar f=0$ in
$\mathbb{R}^n \times(-\infty,0)$ and
$$f(x,t)= K^{\frac{1}{1-\lambda}}T^{\frac{\alpha\lambda}{1-\lambda}}\bar f(\bar x,\bar t)$$
where
$$x=T^{1/2}\bar x \quad\text{ and }\quad t=T\bar t.$$
Then $f$ satisfies
$$0\leq f\leq K(J_\alpha f)^\lambda \quad\text{in } \mathbb{R}^n \times\mathbb{R}$$
if and only if $\bar f$ satisfies
$$0\leq\bar f\leq(J_\alpha \bar f)^\lambda \quad\text{in } \mathbb{R}^n \times\mathbb{R}.$$
Moreover
$$\frac{f(x,t)}{ K^{\frac{1}{1-\lambda}}t^{\frac{\alpha\lambda}{1-\lambda}}}=\frac{\bar f(\bar x,\bar t)}{\bar t^{\frac{\alpha\lambda}{1-\lambda}}}\quad\text{for }(x,t)\in\mathbb{R}^n \times(0,\infty)$$
and
$$\frac{J_\alpha f(x,t)}{ K^{\frac{1}{1-\lambda}}t^{\frac{\alpha}{1-\lambda}}}=\frac{J_\alpha \bar f(\bar x,\bar t)}{\bar t^{\frac{\alpha}{1-\lambda}}}\quad\text{for }(x,t)\in\mathbb{R}^n \times(0,\infty).$$ \end{rem}
\begin{proof}[Proof of Theorem \ref{thm4.1}]
Suppose for contradiction that \eqref{4.8} is false. Then there
exists $T>0$ such that \[
\|f\|_{L^\infty(\mathbb{R}^n \times\mathbb{R}_T)}>0. \] Hence by \eqref{4.6} there exists $t_0\in[0,T)$ such that \[
\| f\|_{L^\infty (\mathbb{R}^n \times\mathbb{R}_t )} \begin{cases}
=0 & \text{for }t\leq t_0\\
>0 & \text{for }t>t_0 . \end{cases} \] Thus by Remark \ref{rem7.1}, we have for all $b>t_0$ that $$J_\alpha f=V_{\alpha, \Omega_b}f\quad\text{in } \Omega_b$$ where $\Omega_b =\mathbb{R}^n \times (t_0 ,b)$ and $V_{\alpha,\Omega}$ is defined by \eqref{7.1}. Also, by Lemma \ref{lem7.3},
$$\| f\|_{L^\infty (\Omega_b)}\leq\| f\|_{L^\infty (\Omega_T )}<\infty\quad\text{for }t_0 <b<T.$$ It follows therefore from \eqref{4.5} and Lemma \ref{lem7.1} that for $t_0 <b<T$ we have
$$ 0<K^{-1}\leq\frac{\| V_{\alpha, \Omega_b}f\|^{\lambda}_{L^\infty (\Omega_b)}}{\| f\|_{L^\infty (\Omega_b )}}\leq\Biggl(\frac{(b-t_0 )^\alpha}{\Gamma(\alpha+1)}\Biggr)^\lambda \| f\|^{\lambda-1}_{L^\infty (\Omega_b )}\to0\quad\text{ as }b\to t^{+}_{0}$$ because $\lambda\geq1$. This contradiction proves Theorem \ref{thm4.1}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm4.2}]
By Remark \ref{rem8.2} with $T=1$ we can assume $ K=1$. For $b>0$ we have by Lemma \ref{lem7.3} that $$f\in L^\infty (\mathbb{R}^n \times\mathbb{R}_b )$$ and by \eqref{4.5}, \eqref{4.6}, Remark \ref{rem7.1} with $a=0$, and Lemma \ref{lem7.1} that
$$\| f\|_{L^\infty (\Omega_b )}\leq\| J_\alpha f\|^{\lambda}_{L^\infty (\Omega_b )}\leq\Biggl(\frac{b^\alpha}{\Gamma(\alpha+1)}\| f\|_{L^\infty (\Omega_b )}\Biggr)^\lambda$$ where $\Omega_b =\mathbb{R}^n \times(0,b)$. Thus, since $0<\lambda<1$, we see that \begin{equation}\label{8.2}
\| f\|_{L^\infty (\Omega_b
)}\leq\Biggl(\frac{b^\alpha}{\Gamma(\alpha+1)}\Biggr)^{\frac{\lambda}{1-\lambda}} \quad\text{for all }b>0. \end{equation} Define $\{\gamma_j \}\subset(0,\infty)$ by $\gamma_1 =1$ and \begin{equation}\label{8.3}
\gamma_{j+1}=(\bar M\gamma_j )^\lambda ,\,j=1,2,...,\quad\text{ where }\bar M=\Gamma(\alpha+1)M. \end{equation} Then, since $0<\lambda<1$, we see that \begin{equation}\label{8.4}
\gamma_j \to\bar M^{\frac{\lambda}{1-\lambda}}\quad\text{ as }j\to\infty. \end{equation} Suppose for some positive integer $j$ that \begin{equation}\label{8.5}
\| f\|_{L^\infty (\Omega_b )}\leq\gamma_j
\Biggl(\frac{b^\alpha}{\Gamma(\alpha+1)}\Biggr)^{\frac{\lambda}{1-\lambda}}\quad\text{for
all }b>0. \end{equation} Then for $b>0$ and $(x,t)\in\Omega_b$ we find from \eqref{4.5} and \eqref{5.3} that \begin{align}\label{8.6}
\notag f(x,t)&\leq(J_\alpha f(x,t))^\lambda \\
\notag &\leq\Biggl(\int^{t}_{0}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}\Biggl(\int_{\xi\in\mathbb{R}^n}\Phi_1(x-\xi,t-\tau)\,d\xi\Biggr)\| f\|_{L^\infty (\Omega_\tau )}d\tau\Biggr)^\lambda \\
\notag &\leq\Biggl(\int^{t}_{0}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}\gamma_j \Biggl(\frac{\tau^\alpha}{\Gamma(\alpha+1)}\Biggr)^{\frac{\lambda}{1-\lambda}}d\tau\Biggr)^\lambda \\
\notag &=\Biggl(\gamma_j \frac{1}{\Gamma(\alpha)\Gamma(\alpha+1)^{\frac{\lambda}{1-\lambda}}}\int^{t}_{0}(t-\tau)^{\alpha-1}\tau^{\frac{\alpha\lambda}{1-\lambda}}d\tau\Biggr)^\lambda \\
\notag &=\Biggl(\gamma_j \frac{\Gamma(\alpha)\Gamma(\frac{\alpha\lambda}{1-\lambda}+1)t^{\alpha+\frac{\alpha\lambda}{1-\lambda}}}{\Gamma(\alpha)\Gamma(\alpha+1)^{\frac{\lambda}{1-\lambda}}\Gamma(\alpha+\frac{\alpha\lambda}{1-\lambda}+1)}\Biggr)^\lambda \\
\notag &=\Biggl(\gamma_j \frac{Mt^{\frac{\alpha}{1-\lambda}}}{\Gamma(\alpha+1)^{\frac{\lambda}{1-\lambda}}}\Biggr)^\lambda =\Biggl(\gamma_j \frac{\bar Mt^{\frac{\alpha}{1-\lambda}}}{\Gamma(\alpha+1)^{\frac{1}{1-\lambda}}}\Biggr)^\lambda \\
&=\gamma_{j+1}\Biggl(\frac{t^\alpha}{\Gamma(\alpha+1)}\Biggr)^{\frac{\lambda}{1-\lambda}}. \end{align} Thus
$$\| f\|_{L^\infty (\Omega_b
)}\leq\gamma_{j+1}\Biggl(\frac{b^\alpha}{\Gamma(\alpha+1)}\Biggr)^{\frac{\lambda}{1-\lambda}}\quad\text{for all }b>0.$$ Hence \eqref{4.9} follows inductively from \eqref{8.2}--\eqref{8.5}.
Finally, repeating the calculation \eqref{8.6} with $\gamma_j =\gamma_{j+1}=\bar M^{\frac{\lambda}{1-\lambda}}$ we get $$(J_\alpha f(x,t))^\lambda \leq\bar M^{\frac{\lambda}{1-\lambda}}\Biggl(\frac{t^\alpha}{\Gamma(\alpha+1)}\Biggr)^{\frac{\lambda}{1-\lambda}}\quad\text{for }(x,t)\in\Omega_b$$ which proves \eqref{4.10}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm4.3}]
By Remark \ref{rem8.2} we can assume $ K=T=1$. For $(x,t)\in\mathbb{R}^n \times\mathbb{R}$ and $\delta\in(0,1)$ let \begin{equation}\label{8.7}
g_\delta (x,t)=g_\delta (t)=\psi_\delta (t)g(t) \end{equation} where $g$ is as in Remark \ref{rem8.1} and $\psi_\delta \in C^\infty (\mathbb{R}\to[0,1])$ satisfies $$\psi_\delta (t)= \begin{cases}
1 & \text{if }t\leq1\\
0 & \text{if }t\geq1+\delta. \end{cases}$$ Then for $1\leq t\leq1+\delta$ \begin{align*}
J_\alpha g(t)-J_\alpha g_\delta (t)&=\int^{t}_{1}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}g(\tau)(1-\psi_\delta (\tau))\,d\tau\\
&\leq\int^{t}_{1}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}g(\tau)\,d\tau\leq g(1+\delta)\int^{t}_{1}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}\,d\tau\\
&=g(1+\delta)\frac{(t-1)^\alpha}{\Gamma(\alpha+1)}\leq g(2)\frac{\delta^\alpha}{\Gamma(\alpha+1)} \end{align*} and thus by \eqref{8.1} we have for $1\leq t\leq1+\delta$ that \begin{align*}
\frac{J_\alpha g_\delta (t)}{J_\alpha g(t)}&=\frac{J_\alpha g(t)-(J_\alpha g(t)-J_\alpha g_\delta (t))}{g(t)^{1/\lambda}}\\
&\geq1-\frac{g(2)\delta^\alpha}{\Gamma(\alpha+1)g(1)^{1/\lambda}}\\
&=1-C(\alpha,\lambda)\delta^\alpha \geq\sqrt{\frac{N}{M}} \end{align*} provided we choose $\delta=\delta(\alpha,\lambda,N)\in(0,1)$ sufficiently small. Hence for $1\leq t\leq1+\delta$ we see from \eqref{8.1} that \begin{equation}\label{8.8}
g_\delta (t)\leq g(t)=(J_\alpha g(t))^\lambda \leq\left(\frac{M}{N}\right)^{\lambda/2}(J_\alpha g_\delta (t))^\lambda \end{equation} which by \eqref{8.7} and \eqref{8.1} holds for all other $t$ as well.
Next let $\varphi(x)=e^{-\psi(x)}$ where $\psi(x)=\sqrt{1+|x|^2}-1$. Then for $\varepsilon\in(0,1),\,\gamma>1$, and $|\xi-x|<\gamma\sqrt{2}$ we have
$$\frac{\varphi(\varepsilon\xi)}{\varphi(\varepsilon x)}=e^{-(\psi(\varepsilon\xi)-\psi(\varepsilon x))}\geq e^{-\varepsilon|\xi-x|}\geq e^{-\varepsilon\gamma\sqrt{2}}.$$ Thus defining $f_\varepsilon :\mathbb{R}^n \times\mathbb{R}\to[0,\infty)$ by $$f_\varepsilon (x,t)=\varphi(\varepsilon x)\left(\frac{N}{M}\right)^{\frac{\lambda}{1-\lambda}}g_\delta (t)$$
we find for $|\xi-x|<\gamma\sqrt{2}$ and $\tau\in\mathbb{R}$ that $$f_\varepsilon (\xi,\tau)\geq\varphi(\varepsilon x)e^{-\varepsilon\gamma\sqrt{2}}\left(\frac{N}{M}\right)^{\frac{\lambda}{1-\lambda}}g_\delta (\tau).$$ Thus for $(x,t)\in\mathbb{R}^n \times(0,2)$ we have \begin{equation}\label{8.9}
J_\alpha f_\varepsilon (x,t)\geq\varphi(\varepsilon x)e^{-\varepsilon\gamma\sqrt{2}}\left(\frac{N}{M}\right)^{\frac{\lambda}{1-\lambda}}\int^{t}_{0}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}g_\delta (\tau)\int_{|\xi-x|<\gamma\sqrt{2}}\Phi_1 (x-\xi,t-\tau)\, d\xi \, d\tau. \end{equation} But for $x,\xi\in\mathbb{R}^n$ and $0<\tau<t<2$ we find making the change of variables $z=\frac{x-\xi}{\sqrt{4(t-\tau)}}$ that \begin{align*}
\int_{|\xi-x|<\gamma\sqrt{2}}\Phi_1(x-\xi,t-\tau)\,d\xi&\geq\int_{|\xi-x|<\gamma\sqrt{t-\tau}}\frac{1}{(4\pi(t-\tau))^{n/2}}e^{-\frac{|x-\xi|^2}{4(t-\tau)}}d\xi\\
&=\frac{1}{\pi^{n/2}}\int_{|z|<\gamma/2}e^{-|z|^2}dz=:I(\gamma)\to1 \end{align*} as $\gamma\to\infty$. Thus by \eqref{8.9} and \eqref{8.8} we have for $(x,t)\in\mathbb{R}^n \times(0,1+\delta)$ that \begin{align}\label{8.10}
\notag \frac{(J_\alpha f_\varepsilon (x,t))^\lambda}{f_\varepsilon (x,t)}&\geq\frac{\varphi(\varepsilon x)^\lambda e^{-\varepsilon\gamma\lambda\sqrt{2}}(\frac{N}{M})^{\frac{\lambda^2}{1-\lambda}}I(\gamma)^\lambda (J_\alpha g_\delta (t))^\lambda}{\varphi(\varepsilon x)(\frac{N}{M})^{\frac{\lambda}{1-\lambda}}g_\delta (t)}\\
&\geq\left(\frac{M}{N}\right)^{\lambda/2}I(\gamma)^\lambda e^{-\varepsilon\gamma\lambda\sqrt{2}}. \end{align} So first choosing $\gamma$ so large that $(\frac{M}{N})^{\lambda/2}I(\gamma)^\lambda >1$ and then choosing $\varepsilon>0$ so small that \eqref{8.10} is greater than 1 we see that $f:=f_\varepsilon$ satisfies \eqref{4.5} in $\mathbb{R}^n \times(0,1+\delta)$. Thus, since $g_\delta (t)$ and hence $f(x,t)$ is identically zero in $\mathbb{R}^n \times((-\infty,0]\cup[1+\delta,\infty))$, we see that $f$ satisfies \eqref{4.5}, \eqref{4.6}.
From the exponential decay of $\varphi(x)$ as $|x|\to\infty$, we see that $f$ satisfies \eqref{4.12}. Also since $f$ is uniformly continuous and bounded on $\mathbb{R}^n \times\mathbb{R}$ and $$\int^{b}_{a}\int_{\mathbb{R}^n}\Phi_\alpha (x,t)\, dx \, dt=\frac{1}{\Gamma(\alpha+1)}(b^\alpha -a^\alpha )\quad\text{for }a<b,$$ we easily check that \eqref{4.12.5} holds.
Finally, since \[f(0,t)=\left(\frac{N}{M}\right)^{\frac{\lambda}{1-\lambda}}g(t)\quad\text{for }0\leq t\leq1\] we find that \eqref{4.13} holds and thus \eqref{4.14} follows from \eqref{4.5}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm4.4}]
By Remark \ref{rem8.2} with $T=1$ we can assume $ K=1$. Define $\bar f:\mathbb{R}^n \times\mathbb{R}\to[0,\infty)$ by \begin{equation}\label{8.11}
\bar f(x,t)=g(t)\raisebox{2pt}{$\chi$}_{\{|x|^2 <t\}}(x,t) \end{equation} where $g$ is defined in Remark \ref{rem8.1}. Then for $(x,t)\in\mathbb{R}^n \times(0,\infty)$ we have
$$J_\alpha \bar f(x,t)=\int^{t}_{0}\frac{(t-\tau)^{\alpha-1}}{\Gamma(\alpha)}\Biggl(\int_{|\xi|^2 <\tau}\Phi_1 (x-\xi,t-\tau)\,d\xi\Biggr)g(\tau)\,d\tau.$$
Thus by Lemma \ref{lem7.4} we see for $|x|^2 <t$ that \begin{align}\label{8.12}
\notag J_\alpha \bar f(x,t)&\geq C(n,\alpha,\lambda)\int^{3t/4}_{t/4}(t-\tau)^{\alpha-1}\tau^{\frac{\alpha\lambda}{1-\lambda}}d\tau\\
\notag &=C(n,\alpha,\lambda)t^{\frac{\alpha}{1-\lambda}}\\
\notag &=C(n,\alpha,\lambda)g(t)^{1/\lambda}\\
&=C(n,\alpha,\lambda)\bar f(x,t)^{1/\lambda} \end{align}
which also holds in $(\mathbb{R}^n \times\mathbb{R})\backslash\{|x|^2\le t\}$ because $\bar f=0$ there. Thus letting $f=L\bar f$ where $$L=C^{\frac{\lambda}{1-\lambda}}$$ and $C=C(n,\alpha,\lambda)$ is as in \eqref{8.12}, we find that $f$ satisfies \eqref{4.4}--\eqref{4.6}.
It follows from \eqref{8.11} and the definitions of $g$ and $f$ that there exists $N>0$ such that \eqref{4.15} holds. Thus, since $f$ solves \eqref{4.5} we obtain \eqref{4.16}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm4.5}]
Since $|R_j|<\infty$, to prove Theorem \ref{thm4.5} it suffices to
show for each $\varepsilon\in(0,1)$ that the conclusion of Theorem
\ref{thm4.5} holds for some \begin{equation}\label{one} q\in(p,p+\varepsilon). \end{equation} So let $\varepsilon\in(0,1)$. By \eqref{4.17}$_1$, there exists $q$ satisfying \eqref{one} such that \begin{equation}\label{two} \alpha<\frac{n+2}{2q}\left(1-\frac{1}{\lambda}\right). \end{equation} Define $f_0 :\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ by \begin{equation}\label{8.15}
f_0 (x,t)=\left(\frac{1}{t}\right)^{r}\raisebox{2pt}{$\chi$}_{\Omega_0}(x,t) \end{equation} where
\[\Omega_0 =\{(x,t)\in\mathbb{R}^n \times\mathbb{R}:|x|^2 <t<1\}\] and \begin{equation}\label{four} r:=\frac{n+2}{2q}<\frac{n+2}{2p} \end{equation} by \eqref{one}. Then by \eqref{four} and Lemma \ref{lem7.6} we have \begin{equation}\label{8.16}
f_0 \in L^p (\mathbb{R}^n \times\mathbb{R}) \end{equation} and \begin{equation}\label{8.17}
J_\alpha f_0 (x,t)\geq C\left(\frac{1}{t}\right)^{r-\alpha}\quad\text{for }(x,t)\in\Omega_0 \end{equation} where, throughout this entire proof, $C=C(n,\lambda,\alpha,p,q)$ is a positive constant whose value may change from line to line.
Let $\{T_j \}\subset(0,1/2)$ be a sequence such that $$T_{j+1}<T_j /4\qquad j=1,2,...$$ and define \begin{equation}\label{seven}
t_j=T_j/2. \end{equation} Then \begin{equation}\label{8.20}
\Omega_j :=\{(y,s)\in\mathbb{R}^n \times\mathbb{R}:|y|<\sqrt{T_j -s} \text{ and } t_j <s<T_j \}\subset R_j \subset\Omega_0 \end{equation} and thus defining $f_j :\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ by \begin{equation}\label{nine} f_j (x,t)=(T_j -t)^{-r}\raisebox{2pt}{$\chi$}_{\Omega_j}(x,t) \end{equation} we obtain from \eqref{four} and Lemma \ref{lem7.7} that \begin{equation}\label{8.21}
\| f_j \|^{p}_{L^p (\mathbb{R}^n \times\mathbb{R})} =C(n)\int^{T_j -t_j}_{0}s^{(\frac{n+2}{2p}-r)p-1}ds\
\to0\quad\text{as }j\to\infty, \end{equation} \begin{equation}\label{8.22}
\| f_j \|_{L^q (R_j )}=\| f_j \|_{L^q (\mathbb{R}^n\times\mathbb{R})} =\infty\quad\text{for }j=1,2,..., \end{equation} and \begin{equation}\label{8.23}
J_\alpha f_j (x,t)\geq C\left(\frac{1}{(T_j -t)}\right)^{r-\alpha} \quad\text{for }(x,t)\in\Omega^{+}_{j} \end{equation} where \[ \Omega^{+}_{j}=\{(x,t)\in\Omega_j :\frac{3T_j}{4}<t<T_j\}. \]
It follows from \eqref{8.15} and \eqref{8.17} that \[
\frac{f_0 (x,t)}{(J_\alpha f_0 (x,t))^\lambda}\leq
Ct^{(r-\alpha)\lambda-r} \quad\text{for }(x,t)\in\Omega_0 \] and from \eqref{two} and \eqref{four} that the exponent \begin{equation}\label{8.22.5} (r-\alpha)\lambda-r=\lambda[r(1-1/\lambda)-\alpha]>0. \end{equation} Thus \begin{equation}\label{8.24}
\sup_{\Omega_0}\frac{f_0}{(J_\alpha f_0)^\lambda} \leq C \end{equation} and by \eqref{8.20} \begin{equation}\label{8.25}
\sup_{\Omega_j}\frac{f_0}{(J_\alpha f_0)^\lambda}\leq CT_j^{(r-\alpha)\lambda-r}<1 \end{equation} by taking a subsequence.
By \eqref{nine}, \eqref{8.23}, and \eqref{8.22.5} we have \begin{align}\label{8.26}
\notag \sup_{\Omega^{+}_{j}}\frac{f_j}{(J_\alpha f_j )^\lambda}&\leq C\sup_{(x,t)\in\Omega^{+}_{j}}(T_j -t)^{(r-\alpha)\lambda-r}\\
&\leq C(T_j -t_j )^{(r-\alpha)\lambda-r}<1 \end{align} by taking a subsequence.
It follows from \eqref{8.15}, \eqref{nine}, \eqref{8.20}, and \eqref{seven} that \begin{equation}\label{8.27}
\sup_{\Omega_j}\frac{f_0}{f_j}=\sup_{(x,t)\in\Omega_j} \frac{(T_j -t)^{r}}{t^{r}}\leq\frac{(T_j -t_j )^{r}}{t^{r}_{j}}=1 \end{equation} and letting $\Omega^{-}_{j}=\Omega_j \backslash\Omega^{+}_{j}$ we see from \eqref{nine}, \eqref{8.17}, \eqref{8.20}, and \eqref{8.22.5} that \begin{align}\label{8.28}
\notag \sup_{\Omega^{-}_{j}}\frac{f_j}{(J_\alpha f_0 )^\lambda} &\leq
C\sup_{(x,t)\in\Omega^{-}_{j}}\frac{t^{(r-\alpha)\lambda}}{(T_j
-t)^{r}} \leq C\frac{T^{(r-\alpha)\lambda}_{j}}{(T_j/4)^{r}}\\
&=CT^{(r-\alpha)\lambda-r}_{j}<\frac{1}{2} \end{align} by taking a subsequence.
Taking an appropriate subsequence of $f_j$ and letting $$f=f_0 +\sum^{\infty}_{j=1}f_j$$ we find from \eqref{8.16} and \eqref{8.21} that $f$ satisfies \eqref{4.18}.
In $\Omega^{+}_{j}$ we have by \eqref{8.25} and \eqref{8.26} that \begin{align*}
f&=f_0 +f_j \leq(J_\alpha f_0 )^\lambda +(J_\alpha f_j )^\lambda \\
&\leq(J_\alpha (f_0 +f_j ))^\lambda \leq(J_\alpha f)^\lambda . \end{align*} In $\Omega^{-}_{j}$ we have by \eqref{8.27} and \eqref{8.28} that $$f=f_0 +f_j \leq2f_j \leq(J_\alpha f_0 )^\lambda \leq(J_\alpha f)^\lambda .$$ In $\Omega_0 \backslash\cup^{\infty}_{j=1}\Omega_j$ we have by \eqref{8.24} that $$f=f_0 \leq C(J_\alpha f_0 )^\lambda \leq C(J_\alpha f)^\lambda.$$ In $(\mathbb{R}^n \times\mathbb{R})\backslash\Omega_0 ,\,f=0\leq(J_\alpha f)^\lambda$. Thus, after scaling $f$, we see that $f$ is a solution of \eqref{4.5}, \eqref{4.6}. Also \eqref{4.19} holds by \eqref{8.22}. \end{proof}
\begin{proof}[Proof of Theorem \ref{thm4.6}]
By \eqref{4.20}$_1$, there exists a unique number $\gamma\in(0,\frac{n+2}{2p}-\alpha)$ such that \begin{equation}\label{8.29}
\lambda=\frac{\frac{n+2}{2p}-\gamma}{\frac{n+2}{2p}-\alpha-\gamma}. \end{equation} Let $f_0$ and $\Omega_0$ be as in Lemma \ref{lem7.6}. Then by \eqref{8.29} and Lemma \ref{lem7.6} we have \begin{equation}\label{8.30}
f_0 \in X^p \end{equation} and \begin{equation}\label{8.31}
f_0 \leq C(J_\alpha f_0 )^\lambda \quad\text{in } \mathbb{R}^n \times\mathbb{R} \end{equation} where in this proof $C=C(n,\lambda,\alpha,p)$ is a positive constant whose value may change from line to line. Let $\{T_j \},\,\{t_j \}\subset(2,\infty)$ satisfy $$T_{j+1}\geq4T_j \quad\text{ and }\quad T_j =2t_j$$ and define $f_j :\mathbb{R}^n \times\mathbb{R}\to\mathbb{R}$ by \begin{equation}\label{8.32}
f_j (x,t)=\Biggl(\frac{1}{T_j -t}\Biggr)^{\frac{n+2}{2p}-\gamma}\raisebox{2pt}{$\chi$}_{\Omega_j}(x,t) \end{equation} where
$$\Omega_j :=\{(x,t)\in\mathbb{R}^n \times(T_j /2,T_j ):|x|<\sqrt{T_j -t}\}.$$ Then \begin{equation}\label{8.33}
\Omega_j \subset R_j \subset\Omega_0 , \quad\Omega_j \cap\Omega_k =\emptyset\quad\text{for }j\neq k, \end{equation} \begin{equation}\label{8.34}
\inf\,\{t:(x,t)\in\Omega_j \}=T_j /2\to\infty\quad\text{as }j\to\infty, \end{equation} and by \eqref{8.32}, \eqref{8.29}, and Lemma \ref{lem7.7} we have \begin{equation}\label{8.35}
f_j \in L^p (\mathbb{R}^n \times\mathbb{R}) \end{equation} and $$f_j \leq C(J_\alpha f_j )^\lambda \quad\text{in } \Omega^{+}_{j}$$ where $$\Omega^{+}_{j}=\{(x,t)\in\Omega_j:\frac{3T_j}{4}<t<T_j \}.$$ It follows therefore from \eqref{8.31} that \begin{equation}\label{8.36}
f_0 +f_j \leq C((J_\alpha f_0 )^\lambda +(J_\alpha f_j )^\lambda )\leq C(J_\alpha (f_0 +f_j ))^\lambda \quad\text{in } \Omega^{+}_{j}. \end{equation} In $\Omega^{-}_{j}:=\Omega_j \backslash\Omega^{+}_{j}$ we have $$\frac{f_j}{f_0}=\Biggl(\frac{t}{(T_j -t)}\Biggr)^{\frac{n+2}{2p}-\gamma}\leq\Biggl(\frac{\frac{3}{4}T_j}{\frac{1}{4}T_j}\Biggr)^{\frac{n+2}{2p}-\gamma}=3^{\frac{n+2}{2p}-\gamma}$$ and thus we obtain from \eqref{8.31} that \begin{equation}\label{8.37}
f_0 +f_j \leq Cf_0 \leq C(J_\alpha f_0 )^\lambda \leq C(J_\alpha (f_0 +f_j ))^\lambda \quad\text{in } \Omega^{-}_{j}. \end{equation} Let $f=f_0 +\sum^{\infty}_{j=1}f_j$. Then clearly $f$ satisfies \eqref{4.6} and by \eqref{8.30}, \eqref{8.35}, and \eqref{8.34} we see that $f$ satisfies \eqref{4.21}.
In $\Omega_j$ we have by \eqref{8.33}$_2$, \eqref{8.36}, and \eqref{8.37} that $$f=f_0 +f_j \leq C(J_\alpha (f_0 +f_j ))^\lambda \leq C(J_\alpha f)^\lambda$$ and in $(\mathbb{R}^n \times\mathbb{R})\backslash\cup^{\infty}_{j=1}\Omega_j$ we have by \eqref{8.31} that $$f=f_0 \leq C(J_\alpha f_0 )^\lambda \leq C(J_\alpha f)^\lambda .$$ Thus after scaling $f$, we find that $f$ satisfies \eqref{4.5}.
Since $|R_j |<\infty$, we can for the proof of \eqref{4.22} assume instead of \eqref{4.20}$_2$ that $$q=\frac{n+2}{2\alpha}(1-\frac{1}{\lambda})$$ and hence by \eqref{8.29} we get $$\frac{n+2}{2p}-\gamma=\frac{\alpha}{1-\frac{1}{\lambda}}=\frac{n+2}{2q}.$$ Consequently from \eqref{8.33}$_1$, \eqref{8.32}, and Lemma \ref{lem7.7} we find that
$$\| f\|_{L^q (R_j )}\geq\| f_j \|_{L^q (\Omega_j )}=\infty \quad\text{for }j=1,2,...$$ which proves \eqref{4.22}. \end{proof}
\appendix \section{Appendix} For the proof of Theorem \ref{thm2.3}(ii) we will need the following result due to Nogin and Rubin \cite{NR} concerning the inversion of the operator $J_\alpha$ in the framework of the spaces $L^p (\mathbb{R}^n \times\mathbb{R})$. See also \cite[Theorem 9.24]{SK}.
\begin{thm}\label{thmA.1}
Suppose $0<\alpha<\frac{n+2}{2p},\,1<p<\infty$, and $u=J_\alpha f$ with $f\in L^p (\mathbb{R}^n \times\mathbb{R})$. Then
$$\lim_{\varepsilon\to 0^{+}}J^{-\alpha}_{\varepsilon}u=f\quad\text{in } L^p (\mathbb{R}^n \times\mathbb{R})$$
where
\begin{equation}\label{A.1}
J^{-\alpha}_{\varepsilon}u(x,t)=C(n,\alpha,l)\iint_{\mathbb{R}^n \times(\varepsilon,\infty)}\frac{(\Delta^{l}_{y,\tau}u)(x,t)}{\tau^{1+\alpha}}e^{-\frac{|y|^2}{4}}\, dy \, d\tau
\end{equation}
and
\begin{equation}\label{A.2}
(\Delta^{l}_{y,\tau}u)(x,t) =\sum^{l}_{k=0}(-1)^k \binom{l}{k}u(x-y\sqrt{k\tau},t-k\tau),\quad l>\alpha.
\end{equation} \end{thm}
\noindent {\bf Acknowledgments}
\noindent The author thanks the anonymous referee for very helpful comments.
\end{document} | arXiv | {
"id": "1901.09964.tex",
"language_detection_score": 0.4626210629940033,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{An approximate diffusion process for environmental stochasticity in infectious disease transmission modelling} \author{Sanmitra Ghosh $^{1}$, Paul J. Birrell$^{2,1}$, Daniela De Angelis$^{1,2}$\\ \textit{\normalsize{$^{1}$MRC Biostatistics Unit, University of Cambridge, Cambridge, UK}}\\ \textit{\normalsize{$^{2}$UK Health Security Agency, London, UK}} } \date{} \maketitle \section*{Abstract} Modelling the transmission dynamics of an infectious disease is a complex task. Not only is it difficult to accurately model the inherent non-stationarity and heterogeneity of transmission, but it is nearly impossible to describe, mechanistically, changes in extrinsic environmental factors including public behaviour and seasonal fluctuations. An elegant approach to capturing environmental stochasticity is to model the force of infection as a stochastic process. However, inference in this context requires solving a computationally expensive ``missing data'' problem, using data-augmentation techniques. We propose to model the time-varying transmission-potential as an approximate diffusion process using a path-wise series expansion of Brownian motion. This approximation replaces the ``missing data'' imputation step with the inference of the expansion coefficients: a simpler and computationally cheaper task. We illustrate the merit of this approach through two examples: modelling influenza using a canonical SIR model, and the modelling of the COVID-19 pandemic using a multi-type SEIR model.
\section{Introduction} \label{sec1}
Mathematical modelling of the complex dynamics of infectious diseases remains an essential tool to inform public health policies during epidemic outbreaks. The major focus of such modelling work is describing the intrinsic transmission dynamics and the flow of individuals between compartments that segregate the population as per their disease state. However, an epidemic is also driven by a number of extrinsic factors, including population mobility, social cycles (e.g. holidays), non-pharmaceutical interventions, and climatic variations \citep{breto2009time}. In a compartmental model, such factors are often introduced explicitly through the description of the hazard (force) of infection when information about these external drivers is available \citep{knock2021key,keeling2020fitting,davies2020effects}. However, while it is impossible to fully account for all extrinsic factors influencing transmission, ignoring this epistemic uncertainty, often known as ``environmental stochasticity'', leads to a structural mis-specification of the model, a ``model discrepancy''. Model discrepancy can lead to mis-calibrated models that underestimate uncertainty and produce biased predictions \citep{Brynjarsd_ttir_2014}. An elegant approach to account for the un-modelled model discrepancy is to represent the force of infection as a stochastic process. For example, \cite{dureau2013capturing,cazelles2018accounting} use a diffusion process for this purpose, while \cite{birrell2021real} use a discrete time stochastic process. Parameter estimation for such stochastic models is, however, challenging. Inference, particularly in a Bayesian context, requires estimation of the joint posterior distribution of both the latent path of the stochastic process and the model parameters. 
Estimation using a Markov chain Monte Carlo (MCMC) algorithm, involves sampling the realisation of the stochastic process, a high dimensional object, often through \textit{data-augmentation} techniques, which incur a hefty computational cost \citep{de2015four}. As a result efficient calibration of a compartmental model, which embeds a stochastic process, has received significant attention in the literature \citep[e.g.][]{fuchs2013inference,sottinen2008application} with the goal of alleviating the computational bottleneck associated with the inference of the stochastic process.
In this paper we propose a new approach to the calibration problem through the use of a path-wise approximation of a diffusion process.
Specifically, we apply a truncated Fourier expansion of a Brownian motion to obtain the approximation. Application of this series expansion turns the task of inferring a high dimensional latent diffusion sample path into the task of inferring a smaller dimensional object, the expansion coefficients, which can be carried out without data-augmentation. This method is also applicable in the context of discrete time processes that converge to a diffusion in the continuous time limit. Such processes can be approximated by first carrying out the series expansion of the limiting diffusion and then applying a suitable time discretisation. We validate the proposed method against a data augmentation technique carried out using a particle MCMC sampler proposed in \cite{dureau2013capturing}, using a dataset from an influenza outbreak in a boarding school. We then apply this method to fit a model of COVID-19 spread in England during the first wave.
\section{Background: Epidemic models with a time-varying transmission-potential}\label{sec:Epidemic models with time-varying coefficients}
We consider the canonical SIR (Susceptible-Infected-Removed) model \citep{anderson1992infectious} to introduce the stochastic modelling framework, although the methodology can be applied to other more complex compartmental models.
In the SIR model the compartments denote the number of susceptible ($S$), infected ($I$), and recovered ($R$) people in a population subjected to an epidemic at time $t$. For a population of size $N$, the SIR model is defined by the following ODE system: \begin{equation}\label{eq: basic SIR} \frac{d S_t}{d t} = -\beta S_t \frac{I_t}{N}, \quad \frac{d I_t}{d t}= \beta S_t \frac{I_t}{N} - \gamma I_t, \quad \frac{d R_t}{d t} = \gamma I_t, \end{equation} where $\lambda=\beta \frac{I_t}{N}$ is the force of infection, describing the generation of infections with a transmission-potential $\beta$, between susceptible individuals and the fraction, $I_t /N$, of infectious individuals. The expected period spent in the compartment is given by $\gamma^{-1}$. The individual compartment sizes sum to $N = S_t + I_t + R_t$.
To include environmental stochasticity we introduce a time-varying $\beta_t$ \citep[e.g.][]{ellner1998noise,martinez2015unraveling,cauchemez2008likelihood,cauchemez2008estimating,cazelles1997using}
to mitigate model discrepancy, leading to a reformulation of the model in Eq~(\ref{eq: basic SIR}): \begin{equation}\label{eq: basic SIR sde} \begin{aligned} dx_t &= a(x_t,\xib)dt + b(x_t,\xib) dW_t\\ \beta_t &= g(x_t)\\ \frac{d S_t}{d t} &= - \beta_t S_t \frac{I_t}{N}, \quad \frac{d I_t}{d t}= \beta_t S_t \frac{I_t}{N} - \gamma I_t, \quad \frac{d R_t}{d t} = \gamma I_t, \end{aligned} \end{equation} where $x_t$ follows a diffusion process described by an It\^o stochastic differential equation (SDE) \citep{oksendal2013stochastic} with drift $a(\cdot)$, and diffusion $b(\cdot)$ functions parameterised by the vector $\xib$; $W_t$ is a standard Brownian motion; and $g(\cdot)$ is a nonlinear transformation that enforces $\beta_t>0$, such as exponential or inverse-logit transformation. Here we make some mild assumptions about $a(\cdot)$ and $b(\cdot)$ such as, for example, being locally Lipschitz with a linear growth bound \citep{oksendal2013stochastic} to ensure a non-explosive solution.
Inference for the stochastic model in Eq~(\ref{eq: basic SIR sde}) within a Bayesian framework, requires inference of the latent sample path $\bv{x}$ of the diffusion $x_t$, which is indirectly observed through the time evolution of the disease states: $S_t,I_t,R_t$. This is a missing data problem that can be addressed through data-augmentation based MCMC methods \citep[e.g.][]{fuchs2013inference, dureau2013capturing} in which a high resolution (in time) Euler-Maruyama discretisation of $x_t$ is sampled along with the model parameters. Such MCMC methods incur high computational costs and have reduced efficiency in terms of mixing and speed of convergence. In what follows we will investigate a scalable approximation of $x_t$ that is faster to sample.
\section{Methods}\label{sec:Series approximation of a diffusion process}
Following \cite{LyonsSS12,luo2006wiener,pmlr-v151-ghosh22a}, we carry out a Fourier expansion of a Brownian motion $W_t$ and obtain a smooth path-wise series approximation. Using this approximation of a Brownian motion, we can in turn approximate the SDE for $x_t$ with a random ODE. Inference of $x_t$ can then be carried out by inferring coefficients of this ODE, without requiring data-augmentation.
\subsection{Fourier expansion of Brownian motion}\label{sec:Fourier expansion of Brownian motion}
Within a time interval $[0,T]$, where $T$ is the length of the time horizon within which an epidemic is analysed, the Fourier expansion of a Brownian motion $W_t$ is given by \citep{luo2006wiener}: \begin{equation}\label{eq: Brownian series}
W_t = \sum_{i=1}^\infty\Big(\int_{0}^T\phi_i(s)dW_s\Big)\int_{0}^t\phi_i(u)du. \end{equation} where $\{\phi_i\}_{i=1}^{\infty}$ is a complete orthonormal basis of $L^2[0,T]$ (see Appendix A for derivation). For example this can be the generalised Fourier cosine basis \citep{lyons2014series} given by \begin{equation}\label{eq: KL}
\phi_i(t)=( 2/T)^{1/2}\cos\{(2i-1)\pi t/2T\}. \end{equation} We will use the shorthand $Z_i=\int_{0}^T\phi_i(s)dW_s$. Since the basis functions $\{\phi_i\}$ are deterministic and orthonormal, it follows from standard results of It\^o calculus that $Z_i\sim \mathcal{N}(0,1)$ \citep{luo2006wiener}. By truncating the infinite series in Eq~(\ref{eq: Brownian series}) to $n$-terms we obtain a path-wise approximation of the Brownian motion $W_t$ given by \begin{equation}\label{eq:approximate Brown}
\hat{W}_t = \sum_{i=1}^n Z_i\int_{0}^t\phi_i(u)du. \end{equation}
\subsection{Approximating a SDE with a random ODE}\label{sec:Approximating a SDE with a random ODE}
Taking derivative of $\hat{W}_t$ with respect to time we obtain the following approximation to white noise, the derivative of Brownian motion, given by \begin{equation}\label{Wong-Zakai}
\frac{d\hat{W}_t}{dt}=\sum_{i=1}^n Z_i \phi_i(t). \end{equation}
Now, let us replace the It\^o SDE in Eq~(\ref{eq: basic SIR sde}) with the following Stratonovich SDE \citep{oksendal2013stochastic} \begin{equation}\label{eq: strat}
dx_t = a'(x_t,\xib)dt + b(x_t,\xib) \circ dW_t, \end{equation} where $(\circ)$ denotes a Stratonovich integral \citep{oksendal2013stochastic} with respect to $W_t$. The It\^o SDE in Eq~(\ref{eq: basic SIR sde}) and the Stratonovich SDE given above are equivalent \citep{oksendal2013stochastic} if \begin{equation}\label{eq: ito-strat}
a'(x_t,\xib) = a(x_t,\xib) - \frac{1}{2}\,b(x_t,\xib)\frac{\partial b(x_t,\xib)}{\partial x_t}. \end{equation} By substituting the term $dW_t$ in Eq~(\ref{eq: strat}) with the approximation $d\hat{W}_t$ in Eq~(\ref{Wong-Zakai}), we obtain the following (random) ODE: \begin{equation}\label{eq: strat ode}
\frac{d\hat{x}_t}{dt}=a'(\hat{x}_t,\xib) + b(\hat{x}_t,\xib)\sum_{i=1}^n Z_i \phi_i(t). \end{equation} The work of \cite{wongzakai} shows that as $n\rightarrow\infty$ the solution $\hat{x}_t$ of the above ODE will converge to the solution $x_t$ of the Stratonovich SDE Eq~(\ref{eq: strat}) which, given the choice of $a'(\cdot)$ in Eq~(\ref{eq: ito-strat}), is an equivalent representation of the It\^o SDE in Eq~(\ref{eq: basic SIR sde}). Thus, the series approximation $\hat{x}_t$ of the solution $x_t$ of an It\^o SDE converges to the solution of an equivalent Stratonovich SDE.
Next, we discuss the implications of the above approximation with regards to inference.
\subsection{Inference using the series approximation}\label{sec:Inference using the series approximation}
Using the path-wise series approximation of a diffusion process $x_t$, presented in the previous sections, we can re-write the canonical SIR model in Eq~(\ref{eq: basic SIR sde}) as a system of coupled ODEs given by \begin{equation}\label{eq: SIR fourier} \begin{aligned} \frac{d\hat{x}_t}{dt} &=a'(\hat{x}_t,\xib) + b(\hat{x}_t,\xib)\sum_{i=1}^n Z_i \phi_i(t)\\ \beta_t &= g(\hat{x}_t)\\ \frac{d S_t}{d t} &= - \beta_t S_t \frac{I_t}{N}, \quad \frac{d I_t}{d t}= \beta_t S_t \frac{I_t}{N} - \gamma I_t, \quad \frac{d R_t}{d t} = \gamma I_t, \end{aligned} \end{equation} where $a'(\cdot)$ is given by Eq~(\ref{eq: ito-strat}). Note that the randomness in the above model is now encapsulated in the expansion coefficients $\bv{Z} =(Z_1,\ldots,Z_n)$. Inference in this model is then relegated to the inference of all the parameters: $\bv{Z}, \xib, \gamma$, and the initial values: $x_0,S_0,I_0,R_0$. We denote the vector of the parameters governing the dynamics as $\thb=(\xib, \gamma)$. We denote the state vector evolving in continuous time by $\Xb_t = (x_t,S_t,I_t,R_t)$, and by $\Xb_0 =(x_0,S_0,I_0,R_0)$ the vector of initial values.
In order to explain the inferential framework based on the series approximation, in Eq~(\ref{eq: SIR fourier}), we assume that the available data $\bv{y}_{t_{1:m}}=(y_{t_{1}},\ldots, y_{t_{m}})$ are the noisy observations of the state $I_t$ at $m$ time-points. Here we are simply considering prevalence data for the ease of exposition, however the same idea can be extended to more complex observational models where the observed data only provide partial (and often indirect) information of the states $\Xb_t$ \citep{birrell2021real}.
The inferential goal is to learn the posterior distribution of all the unknown quantities, given the data $\bv{y}_{t_{1:m}}$. We place priors $p(\thb)$, $p(\bv{Z})$, $p(\Xb_0)$ on the parameters, expansion coefficients and the initial values. Note that, by construction, the $\bv{Z} = (Z_1, \ldots, Z_n)$ have an independent standard Normal prior, see Section \ref{sec:Fourier expansion of Brownian motion}. We then numerically solve Eq~(\ref{eq: SIR fourier}) to obtain a likelihood $p(\bv{y}_{t_{1:m}}|\bv{I}_{t_{1:m}}, \bv{\epsilon})$, based on the noise assumption, where $\bv{I}_{t_{1:m}}$ is the numerical solution of the state $I_t$ evaluated at the $m$ time-points, and $\bv{\epsilon}$ are the parameters of the chosen data distribution. The posterior distribution, up to a normalisation constant, follows from the Bayes rule: \begin{equation}\label{eq: pos ode approx diff}
p(\thb,\bv{Z},\Xb_0|\bv{y}_{t_{1:m}}) \propto p(\bv{y}_{t_{1:m}}|\bv{I}_{t_{1:m}}, \bv{\epsilon})p(\thb)p(\bv{Z})p(\Xb_0)p(\bv{\epsilon}). \end{equation} Samples from the posterior distribution can be obtained using MCMC. The samples of the latent approximate diffusion path $\hat{\xb}$ are simply the numerical solution of the ODE for $\hat{x}_t$ evaluated using samples of $\thb,\bv{Z},\Xb_0$ from the posterior distribution.
Note that if we had described $\beta_t$ using a SDE, then to sample the latent diffusion $\xb$ we would have had to use data-augmentation. This involves imputing the sample path of the latent diffusion at the time-points of observations $t_{1:m}$ as well as at time-points in-between the observations using, say, the Euler-Maruyama scheme \citep{kloedenplatenbook}. If one chooses $l$ time-points between $t_m$ and $t_{m-1}$ then the MCMC sampler would target $m(l+1) - l$ random variables (including $x_0$) related to the diffusion. Using the proposed approximation we have replaced the inference of $m(l+1) - l$ variables with $n$, which is a simpler inference problem if $n<m(l+1) - l$. Below we show that choosing a value of $n$ substantially smaller than $m(l+1) - l$ still renders an estimate of the posterior distribution that is a reliable approximation to the true posterior.
\section{Evaluation}\label{sec:Evaluation}
To evaluate the proposed approximation method we fit the model in Eq~(\ref{eq: SIR fourier}) to the data of an outbreak of influenza at a boarding school \citep{jackson2013school} (see Fig~\ref{ouppc compare real} (a)), on the number of infections for a period of $T=14$ days among a population of size $N = 763$. This dataset is publicly available in the R package \texttt{outbreaks} \citep{ob}. This dataset was previously used in \cite{del2015sequential,ryder18a} to fit a SIR model under assumption that the time varying transmission-potential can be modelled using an Ornstein–Uhlenbeck (OU) process. The model in \cite{del2015sequential} is similar to the stochastic model introduced in Eq~(\ref{eq: basic SIR sde}). Using the OU SDE for $x_t$ we can write the model in Eq~(\ref{eq: basic SIR sde}) as: \begin{equation}\label{eq: SIR OU} \begin{aligned} dx_t &= (\xi_1 - \xi_2 x_t)dt + \xi_3 dW_t \\ \beta_t &= \exp(x_t)\\ \frac{d S_t}{d t} &= - \beta_t S_t \frac{I_t}{N}, \quad \frac{d I_t}{d t}= \beta_t S_t \frac{I_t}{N} - \gamma I_t, \quad \frac{d R_t}{d t} = \gamma I_t, \end{aligned} \end{equation} where $\xib=(\xi_1,\ldots, \xi_3)$ denotes the parameter vector of the OU SDE.
Here we specifically want to compare the outcome of inference using the true OU diffusion used above (\textbf{SDE}) with its series approximation (\textbf{SA}), leading to a model such as in Eq~(\ref{eq: SIR fourier}), given by \begin{equation}\label{eq:OU series}
\frac{d\hat{x}_t}{dt} =(\xi_1 - \xi_2 \hat{x}_t) + \xi_3\sum_{i=1}^n Z_i\phi_i(t), \end{equation} where we have chosen the generalised Fourier basis Eq~(\ref{eq: KL}) as the functions $\phi_i(t)$.
For the \textbf{SDE} model the latent sample path $\bv{x}$, the diffusion parameters $\xib$, initial value $x_0$ and the parameter $\gamma$ were also estimated together with the initial susceptibility, $s_0 = S(t=0)/N$, assuming the initial recovered fraction $r_0=0$ and thus $i_0 = 1 - s_0$. As this is count data we have specified a Poisson likelihood: \begin{equation}
y_{t_i}|\thb, \bv{x}, \Xb_0 \sim \operatorname{Poisson}(I_{t_i}), \quad i=1, \ldots, m, \end{equation} where in this case $\Xb_0=(x_0,s_0)$. For the \textbf{SA} model we used the inferential framework introduced in the previous section and used the Poisson likelihood as above: \begin{equation}
y_{t_i}|\thb, \bv{Z}, \Xb_0 \sim \operatorname{Poisson}(I_{t_i}). \end{equation}
We chose a weakly-informative prior for the parameters governing the dynamics $\xi_1,\ldots, \xi_3, \gamma \sim \Gamma(2,2)$. For $s_0$ we chose a $\operatorname{Beta}(2,1)$ prior, since we expect the true value to be near or greater than $2/3$, and for the initial value of the diffusion we used a prior $x_0 \sim \mathcal{N}\Big(\xi_1/\xi_2,\frac{\xi^2_3}{2 \xi_2}\Big)$, which is the stationary distribution of the OU diffusion.
For the \textbf{SDE} model, data-augmentation using a particle filter was employed to sample the `true' diffusion's path, following \cite{dureau2013capturing}, and produce an unbiased estimate of the likelihood. Parameters $\gamma,\xib,\Xb_0$ were estimated using the Metropolis-Hastings (MH) algorithm, with an adaptive random-walk proposal based on algorithm 4 of \cite{andrieu2008tutorial}. See Appendix B in S1 text for further details on this proposal mechanism. The likelihood estimate produced by the particle filter was used in the acceptance step of the MH algorithm. This particle-marginal Metropolis-Hastings (PMMH) MCMC scheme for jointly updating the latent diffusion path along with the parameters has been shown to have superior performance \citep{dureau2013capturing} when compared to other data-augmentation approaches. For the PMMH, we used a Bootstrap particle filter \citep{gordon1995bayesian}, where the particles are propagated using Euler-Maruyama discretisation, and set the number of particles to $1000$. Following \cite{del2015sequential}, we carried out the Euler-Maruyama iterations with a stepsize $\delta t = 0.1$, leading to $l=9$ time-points between two observations.
For the \textbf{SA} model we used the Metropolis-Hastings algorithm with the same adaptive random-walk proposal (RWMH) used with the PMMH scheme and the Euler method to numerically solve the ODE adopting the same step-size that is used with the Euler-Maruyama scheme for the SDE.
Note that inference for the \textbf{SDE} model using PMMH will be substantially more computationally heavy compared to the inference for the ODE based \textbf{SA} model, irrespective of the value of $n$. This is due to the particle filter requiring multiple evaluations of the Euler-Maruyama scheme at each MCMC iteration. Even when parallelised, the particle filter will be bottlenecked by a weight-updating step (see \cite{gordon1995bayesian} for details) requiring message-passing across processes. The Euler scheme for solving the ODE in Eq~(\ref{eq:OU series}), in comparison, is evaluated once every iteration of a Metropolis-Hastings algorithm targeting the posterior distribution in Eq~(\ref{eq: pos ode approx diff}).
A crucial parameter for the proposed method is the number of basis functions $n$. If a value of $n$ produces a close match between the marginal densities of the true and approximate diffusion at the end of the analysis period $T$ then the approximation will be valid throughout the course of the epidemic. In this case $T=14$. In Fig~\ref{Figure:wv compare} we compare the time $T$ marginal densities $p(\hat{x}_t)|_{t=T}$ obtained by solving the ODE in Eq~(\ref{eq:OU series}) associated with the \textbf{SA}, and $p(x_t)|_{t=T}$ obtained from the original OU diffusion, both based on some trial parameters sampled from the prior. The value $n=15$ produces a close match between the marginal densities. We defer further discussion of the effect of $n$ on estimation to section \ref{sec:Experiment 2}.
\begin{figure*}
\caption{Comparison between the marginal density of the OU SDE at time $T=14$, with that obtained through the series approximation upon varying the number of basis functions $n=3,5,10,15$.}
\label{Figure:wv compare}
\end{figure*}
\subsection{Results: comparison between true and approximate diffusion}\label{sec:Experiments}
We fitted the two models, \textbf{SDE} and \textbf{SA} respectively, using the associated algorithms as described above to the influenza dataset. We ran two chains of both PMMH, for the \textbf{SDE} model, and RWMH, for the \textbf{SA} one, for $10^6$ iterations where the first $5 \times 10^5$ iterations were discarded as burn-in and the remaining samples thinned to obtain $1000$ samples from the posterior distribution. The running times were $15907$ and $2397$ seconds for the PMMH and RWMH with $n=15$, respectively. We implemented a vectorised particle filter and the Euler solver for the ODE using \texttt{Jax} \citep{jax2018github}. The adaptive MCMC algorithm was implemented using \texttt{Python}.
We notice a good agreement between the parameter estimates obtained using the \textbf{SDE} and \textbf{SA} counterparts (see Fig~\ref{oumarg compare real}). Furthermore, in Fig~\ref{ouppc compare real} we compare the goodness-of-fit and display the posterior distribution of the latent diffusion paths $p(\bv{x}|\bv{y}_{t_{1:m}})$ and $p(\hat{\bv{x}}|\bv{y}_{t_{1:m}})$, corresponding to the \textbf{SDE} and \textbf{SA}. Additionally, for aid of visualisation, we have also plotted draws from the (posterior) sample paths for both models in Fig~\ref{ouppc compare real} (c) and (d).
\begin{figure}
\caption{Comparison of the posterior marginal densities of the parameters obtained using the \textbf{SDE} and the \textbf{SA} (with $n=15$ basis functions). These densities are summarised using a kernel density estimate.}
\label{oumarg compare real}
\end{figure}
We observe a good agreement between the epidemic curves obtained using the \textbf{SDE} and the \textbf{SA}, but for the posterior distribution of the latent diffusion paths the credible intervals are narrower for the \textbf{SA}. The \textbf{SA}, due to the truncation of the infinite series expansion, produces smoother paths, slightly underestimating the volatility of the latent diffusion path. On closer inspection of the posterior means (Fig~\ref{ouppc compare real} (b)), it is noticeable that the latent diffusion paths drop and increase again in the period between the $4$-th and $9$-th day, around the peak, indicating sudden changes in the transmission-potential. These changes are reflected in the estimates of both \textbf{SDE} and \textbf{SA}. After the $9$-th day, the variability in the latent paths increases for both \textbf{SDE} and \textbf{SA} and the posterior means match closely. This is expected since after the peak, when the epidemic is receding, a large change in $\beta_t$ will have negligible effect on the case counts.
\begin{figure}
\caption{\textbf{Influenza dataset}: Goodness-of-fit (a); posterior distribution of the latent diffusion paths corresponding to the \textbf{SDE} and \textbf{SA} counterparts (b), with densities summarised by the mean (solid lines) and $95\%$ credible intervals (broken lines); and samples from the posterior distribution of the latent diffusion paths, \textbf{SDE} (c) and \textbf{SA} (d)}
\label{ouppc compare real}
\end{figure}
These results were confirmed in a simulation study where the simulated datasets mimicked this influenza dataset (see Appendix C).
\subsection{Sensitivity to the choice of $n$}\label{sec:Experiment 2}
In Fig~\ref{Figure:wv compare} we noticed that the marginal distribution of the latent diffusion path and its series approximation start agreeing beyond $n\geq10$ terms. It is worth investigating whether such a threshold exists for the posterior distributions obtained using the \textbf{SDE} and the \textbf{SA}. We did this by further comparing the joint posterior distribution $p(\thb,\Xb_0|\yb)$, from \textbf{SDE} and \textbf{SA} while varying $n$. Note that $\thb$ and $\Xb_0$ are quantities which were estimated using both the \textbf{SDE} and \textbf{SA} counterparts, and thus the joint posterior of these was chosen for comparison. For this comparison we estimated the posterior distribution by fitting the \textbf{SA} repeatedly with the number of basis functions set to $n=3,5,10,15,20,25,30$. To compare the posterior distributions, we used the maximum mean discrepancy (MMD) metric \citep{gretton2012kernel}, a divergence metric that can be calculated using samples from the distributions. See Appendix H for further details on this metric.
In Fig~\ref{oummd compare} we plot the MMD between the posteriors from \textbf{SDE} and \textbf{SA} for increasing $n$. For $n \geq 10$ we found good agreement between the two posteriors, consistent with the results from comparing the marginal densities (Fig~\ref{Figure:wv compare}). This reinforces our approach of choosing the number of basis functions by comparing marginals of the latent process, while using the \textbf{SA}. We summarise the runtimes of MCMC with the RWMH proposal for each choice of $n$ in Table~\ref{runtimes}, noting that the increase in the runtimes as we varied $n$ was negligible, especially when compared to the PMMH with \textbf{SDE}.
\begin{figure}
\caption{MMD between the joint posterior distributions of the parameters $\thb$ and initial values $\Xb_0$ from \textbf{SDE} and \textbf{SA} (for different $n$).}
\label{oummd compare}
\end{figure}
\begin{table}[t] \caption{\bf Runtimes (rounded to nearest integer), in seconds, of MCMC for \textbf{SA}, as a function of the number of basis functions $n$, in comparison with the runtime of \textbf{SDE}. These were run on a $3.6$ GHz machine with $16$ GB memory.} \label{runtimes} \begin{center}
\begin{tabular}{|ll|l|l|l|l|l|l|l|}
\hline
\multicolumn{9}{|c|}{\bf Runtimes in seconds} \\ \hline \textbf{SA} with & $n=3$ & $n=5$ & $n=10$ & $n=15$ & $n=20$ & $n=25$ & $n=30$ & \textbf{SDE}\\ \hline & $2280$ &$2282$ & $2337$ & $2397$ & $2470$ & $2538$ & $2637$ & $15907$\\ \hline \end{tabular}
\end{center} \end{table}
\section{Application: modelling COVID-19 outbreak in England}\label{sec:Application: modelling COVID-19 outbreak in England}
Our proposed method of modelling the time-varying transmission-potential as an approximate diffusion can also be applied to a discrete time stochastic process that converges to a diffusion in the continuous time limit. For example, an AR$(1)$ process converges to an OU diffusion. Thus, if one is already using an AR$(1)$ process to model the transmission-potential, then a discretised version of the series approximation of the OU diffusion, the ODE in Eq~(\ref{eq:OU series}), can be chosen as its replacement.
To exemplify the application of the series expansion method in replacing a discrete time stochastic process, we have chosen to fit a compartmental model whose dynamics are described as a set of first order difference equations, to data from the first wave of the COVID-19 outbreak in England, between February and August 2020 \citep{birrell2021real}. This model captures the effect of unknown extrinsic factors on the force of infection through a time-varying transmission-potential modelled as a Gaussian random-walk. We introduce the model of \cite{birrell2021real} in what follows and introduce an alternative formulation using the series approximation of Brownian motion.
\subsection{Transmission model for COVID-19}\label{sec:The transmission model}
This is an age and spatially structured transmission model, stratifying the population into $n_A=7$ age groups and $n_r=7$ regions. Within each region, the transmission dynamics are governed by a system of first order difference equations: \begin{equation}\label{eqn:determ.dynam} \begin{aligned}
S_{r, t_k, a} &= S_{r, t_{k - 1}, a}\left(1 - \lambda_{r, t_{k - 1}, a}\delta t\right)\\
E^1_{r, t_k, a} &= E^1_{r, t_{k - 1}, a}\left(1 - \frac{2\delta t}{d_L}\right) + S_{r, t_{k - 1}, a}\lambda_{r, t_{k - 1}, a}\delta t\\
E^2_{r, t_k, a} &= E^2_{r, t_{k - 1}, a}\left(1 - \frac{2\delta t}{d_L}\right) + E^1_{r, t_{k - 1}, a}\frac{2\delta t}{d_L}\\
I^1_{r, t_k, a} &= I^1_{r, t_{k - 1}, a}\left(1 - \frac{2\delta t}{d_I}\right) + E^2_{r, t_{k - 1}, a}\frac{2\delta t}{d_L}\\
I^2_{r, t_k, a} &= I^2_{r, t_{k - 1}, a}\left(1 - \frac{2\delta t}{d_I}\right) + I^1_{r, t_{k - 1}, a}\frac{2\delta t}{d_I}\\ \end{aligned} \end{equation} where: $S_{r, t_k, a}$, $E^d_{r, t_k, a}$, $I^d_{r, t_k, a}, d = 1, 2$ represent the time $t_k, k = 1, \ldots, K,$ partitioning of the population of individuals in a region $r, r = 1, \ldots, n_r$, in age-group $a, a = 1, \ldots, n_A$, into $S$ (susceptible), $E$ (exposed) and $I$ (infectious) disease states. The average period spent in the exposed and infectious states are given by the parameters $d_L$ and $d_I$ respectively; and $\lambda_{r,t_k,i}$ is the time- and age-varying rate with which susceptible individuals become infected, the force of infection. Time steps of $\delta t = 0.5$ days are chosen to be sufficiently small relative to the latent and infectious periods. Following \cite{birrell2011bayesian} the initial conditions of the system states $S,E^1,E^2,I^1,I^2$ at $t_0$ are given by region-specific parameters $\psi_r$ and $I_{0,r}$, describing the initial exponential growth and the initial number of infectious individuals, respectively. New infections are generated as
\begin{equation}\label{eq:nni}
\nni_{r,t_k,a} = S_{r,t_k,a}\lambda_{r,t_k,a} \delta t,
\end{equation} where $\lambda_{r,t_k,a} \delta t$ is driven over time by a region-specific time-varying potential, $\beta_{t_k,r}$, which moderates the rate at which effective contacts take place. This region-specific transmission-potential captures the discrepancy between how actual contacts take place between the age groups, and that encoded by a set of time-varying contact matrices. We refer the reader to \cite{birrell2021real} for further details on the model dynamics and parameterisation.
Over time $\beta_{t_k,r}$ is not allowed to vary unconstrained and a smoothing is imposed by assuming, {\it a priori} that its evolution follows a Gaussian random-walk process with volatility $\sigma_{\beta_{t}}$: \begin{equation}\label{eq: random ealk beta fast} \begin{aligned}
\log\left(\beta_{t_k,r}\right) &\sim \mathcal{N}\left(\log\left(\beta_{t_{k-1},r}\right),\sigma_{\beta_{t}}^2\right), \quad \text{if $t_k > \tlock$},\\
\log\left(\beta_{t_k,r}\right) &= 0, \quad \text{if $t_k \leq \tlock$}, \end{aligned} \end{equation} where $\tlock$ indicates the time-point corresponding to the lockdown introduced in England on \myrd{23} March $2020$. This random-walk formulation requires the inference of the high-dimensional (due to the choice of $\delta t$) sample path of this process, an extremely challenging task using MCMC. We will discuss this inferential difficulty later in Section \ref{sec: Challenge of sampling the latent contact-rate: an alternative formulation}. To restrict the dimensionality of the process, in \cite{birrell2021real} this transmission-potential is assumed to be piecewise constant with weekly changepoints, and its values at these changepoints modelled as a random-walk. Denote $w_k \equiv w(t_k)$ the week in which time $t_k$ falls. Then the time evolution of the transmission-potential is modelled at a slower weekly time-scale: \begin{equation}\label{eq: random ealk beta} \begin{aligned}
\log\left(\beta_{w_k,r}\right) &\sim \mathcal{N}\left(\log\left(\beta_{w_{k-1},r}\right),\sigma_{\beta_{w}}^2\right), \quad \text{if $t_k > \tlock + 7/\delta t$},\\
\log\left(\beta_{w_k,r}\right) &= 0, \quad \text{if $t_k \leq \tlock+ 7/\delta t$},\\
\beta_{t_k,r} &= \beta_{w_k,r}, \end{aligned} \end{equation} as a Gaussian random-walk, with volatility $\sigma_{\beta_{w}}$, following the first week of the lockdown. Realisation of the process, for each region, can then be obtained by sampling the vector $\Delta \bv{\beta}_r$ of all the weekly increments $\Delta\beta_{w_{k},r}=\log\left(\beta_{w_{k},r}\right)-\log\left(\beta_{w_{k-1},r}\right)$. It was assumed in \cite{birrell2021real} that the contact matrices sufficiently described how actual contacts took place between different age groups prior to the lockdown and thus $\beta_{w_k,r} = 1$ over that period.
\subsection{Inference}
To fit the model, using a Bayesian framework, surveillance data of age- and region-specific counts of deaths in people with a lab-confirmed COVID-19 diagnosis between \myst{17} February and \myst{1} August was used. Furthermore, serological data from NHS Blood and Transplant (NHSBT), informing the fraction of the population carrying COVID-19 antibodies, were also used.
Following \cite{birrell2021real} the number of observed deaths $y^d_{r,t_k,a}$ on day $t_k$, in age group $a$, and in region $r$ follows a negative binomial distribution: \begin{equation}
y^d_{r,t_k,a}|d_I,p_a,\psi_r,I_{0_{r}},\Delta \bv{\beta}_r,\eta \sim \textrm{NegBin}\left(\mu_{r,t_k,a}, \eta\right), \end{equation} where the mean $ \mu_{r,t_k,a} = p_a \sum_{l=0}^k f_{k - l} \nni_{r, t_l, a}$ is derived using Eq~(\ref{eq:nni}), an assumed-known distribution of the time from infection to death from COVID-19, $f$, and an age-specific infection-fatality ratio $p_a$. Here $\eta$ is a dispersion parameter such that $\mathbb{E}y^d_{r,t_k,a} = \mu_{r,t_k,a}$ and $\textrm{Var}\left(y^d_{r,t_k,a}\right) = \mu_{r,t_k,a}\left(1 + \eta\right)$.
If, on day $t_k$, $n_{r,t_k,a}$ blood samples are taken from individuals in region $r$ and age-group $a$, and the observed number of positive tests is $y^s_{r,t_k,a}$, then \begin{equation}
y^s_{r,t_k,a}|d_I,\psi_r,I_{0_{r}},\Delta \bv{\beta}_r,\ksens,\kspec \sim \textrm{Bin}\left(n_{r,t_k,a}, \ksens \left(1 - \frac{S_{r,t_k,a}}{N_{r,a}}\right) + \left(1 - \kspec\right)\frac{S_{r,t_k,a}}{N_{r,a}}\right), \end{equation} where $\ksens$ and $\kspec$ parametrise the sensitivity and the specificity of the serological testing process, and $S_{r,t_k,a}$ is obtained by solving the difference equations in Eq~(\ref{eqn:determ.dynam}). $N_{r,a}$ is the total population in age-group $a$ and region $r$.
The unknown quantities that need to be inferred can be divided into two groups: (i) Global parameters $\thb_g = (\eta,d_I,p_1,\ldots,p_{n_{A}},\ksens,\kspec,\sigma_{\beta_{w}})$ shared between regions, and (ii) regional parameters specific to each region: $\thb_r = (\psi_r,I_{0_{r}},\Delta \bv{\beta}_r)$. After placing the same priors as was used in \cite{birrell2021real} (and listed in Appendix E), the posterior distribution of the unknown quantities is as follows: \begin{equation}\label{eq:posterior}
p(\thb_g,\thb_1, \ldots, \thb_{n_{r}}|\bv{y^d},\bv{y^s})\propto p(\thb_g)\prod_{k=1}^K \prod_{a=1}^{n_{A}} \prod_{r=1}^{n_{r}} p(y^d_{r,t_k,a}|\thb_g,\thb_r)p(y^s_{r,t_k,a}|\thb_g,\thb_r)p(\thb_r), \end{equation} where we denote by $\bv{y^d},\bv{y^s}\in \mathbb{R}^{K \times {n_A} \times n_{r}}$ the data for all time-points, ages and regions corresponding to deaths and sero-positive tests, respectively.
\subsubsection{Sampling from the posterior}
Sampling from the posterior distribution Eq~(\ref{eq:posterior}) is challenging due to the large number of random-walk increments corresponding to all regions and weeks since lockdown. MCMC with a vanilla RWMH proposal, as applied in \cite{birrell2021real}, due to the linear scaling of convergence time with increasing dimensions
mixes poorly and requires a large number of iterations ($\approx 10^7$) of the Markov chain before convergence is reached. To improve convergence we instead used a random-scan Metropolis-within-Gibbs (MwG) algorithm that circumvents the updating of a large parameter vector at each iteration. This MwG algorithm exploits the independence between the regional parameters. Our proposed sampling strategy consists of sampling alternately, at each MCMC iteration, from the posterior of the global parameters conditioned on all the regional ones: (i) $p(\thb_g|\thb_1,\ldots,\thb_{n_{r}},\bv{y^d},\bv{y^s})$, and regional parameters for one randomly chosen region conditioned on the global ones (since the regional parameters are conditionally independent of any other region's parameters): (ii) $p(\thb_{r^{*}}|\thb_g,\bv{y^d},\bv{y^s})$, where $r^{*}\sim \operatorname{Uniform}(1,n_r)$. Samples from each of these conditional distributions are obtained using an adaptive RWMH move with the same adaptation mechanism used in Section \ref{sec:Experiments}. The pseudocode for this MwG algorithm is furnished in Appendix F.
\subsubsection{An alternative formulation}\label{sec: Challenge of sampling the latent contact-rate: an alternative formulation}
The number of region-specific random-walk increments $\Delta\beta_{w_{k},r}$ that needs to be sampled increases with time. The performance of the MwG algorithm starts deteriorating and exhibiting poor mixing and slow convergence, as this number becomes large. This limits dramatically the usefulness of this model in the context of a real-time application.
For the model in Eq~(\ref{eq: random ealk beta}), this problem can be tackled by increasing the time between two successive changepoints thus reducing the number of increments to be sampled for a period of analysis. This is however driven by computational convenience, and it would be more meaningful to learn these changes from data. We could model the time evolution of the transmission-potential at a faster time-scale, for example as in Eq~(\ref{eq: random ealk beta fast}). However, in this case the number of random-walk increments, to be sampled per region, equals the number of time-points between lockdown and the end of analysis date. Any MCMC sampler, that uses a RWMH proposal, would struggle severely to move efficiently in such a high-dimensional parameter space.
To alleviate these problems we propose to model the transmission-potential as a Brownian motion $W_{t,r}$ with volatility $\sigma_{\beta_{t}}$ evolving in continuous time $t$ and apply the series approximation as follows: \begin{equation} \begin{aligned} \beta_{t,r} &= \sigma_{\beta_{t}}\sum_{i=1}^{n} Z_i\int_{0}^t\phi_i(u)du \\ &= \sigma_{\beta_{t}} \sum_{i=1}^n Z_i \frac{2(2T)^{1/2}}{(2i-1)\pi}\sin\{(2i-1)\pi t/2T\} \approx \sigma_{\beta_{t}} W_t, \end{aligned} \end{equation} where the second equality follows from choosing $\phi_i$ as given in Eq~(\ref{eq: KL}) and carrying out the integration. We can then discretise this approximation using the same time-step of $\delta t$ that is used for the compartmental dynamics to obtain the following path-wise (discrete time) approximation: \begin{equation}\label{eq: brown daily}
\beta_{t_k,r} = \sigma_{\beta_{t}} \sum_{i=1}^n Z_{i,r} \frac{2(2T)^{1/2}}{(2i-1)\pi}\sin\{(2i-1)\pi t_k/2T\}, \end{equation} where $T$ is the number of days between lockdown and analysis date. Note that in this formulation the problem of sampling a large vector of increments $\Delta \bv{\beta}_r$ is reduced to that of sampling an $n$-dimensional vector of the coefficients $\bv{Z}_r=(Z_{1,r},\ldots,Z_{n,r})$. From the comparison of the time $T$ marginal distributions of the true and approximate Brownian motion, as for the OU process (see Fig~\ref{Figure:wv compare}), we found $n=10$ to produce a good path-wise approximation. Thus, we used $n=10$ for the subsequent comparative evaluations. The regional parameter vector, $\thb_r = (\psi_r,I_{0_{r}},\bv{Z}_r)$, now contains the expansion coefficients instead of the random-walk increments $\Delta \bv{\beta}_r$.
\subsection{Results: comparative evaluations}\label{sec:results covid}
We ran the MwG algorithm to target the posterior distribution in Eq~(\ref{eq:posterior}) while using the random-walk based piecewise constant transmission-potential in Eq~(\ref{eq: random ealk beta}) and the Brownian motion approximation (BMA) in Eq~(\ref{eq: brown daily}). In both cases we ran $3 \times 10^6$ iterations, discarded the first half of the iterations as burn-in and subsequently thinned the remaining samples to obtain $1000$ samples. We implemented the epidemic model in \texttt{C++}. The MwG algorithm was implemented using \texttt{Python}.
Fig~\ref{coronappc compare} (a) compares, for the two alternative choices of modelling the transmission-potential, the posterior predictive distributions of the death data aggregated across all ages and regions with the observed data (see Appendix G for region-specific plots). Clearly the goodness-of-fit is indistinguishable between the two models. In Fig~\ref{coronappc compare} (b) we show summaries of the posterior distributions of the latent infections $p(\bv{ \nni}|\bv{y^d},\bv{y^s})$, aggregated across all ages and regions (region-wise infections are shown in Appendix G) again showing close consistency across models, with the exception of a few days immediately following the lockdown where the number of infections estimated by the BMA is slightly higher.
\begin{figure}
\caption{Goodness-of-fit of daily death data (a) and the inferred latent infections (b), produced using the random-walk (magenta lines) and BMA (orange lines). These densities are summarised by the mean (solid lines) and $95\%$ credible intervals (broken lines). The black line indicates the day of lockdown in England \myrd{23} March, 2020.}
\label{coronappc compare}
\end{figure}
Following \cite{birrell2021real}, we also obtain estimates of the effective region-specific reproduction number $R_{t_k,r}$, their weighted average $R_{t,E}$ representing the reproduction number for all of England, (formulae for these are given in Appendix D in S1 text). In Fig~\ref{rtppc compare} we show the posterior distributions for $p(R_{t,E}|\bv{y^d},\bv{y^s})$, using the two alternative models. It is evident that the estimate obtained from the BMA appears to be smoother than what is obtained using the piecewise constant model, more realistically reflecting the actual transmission process that happens in continuous time. In Table~\ref{Table2} we present infection-fatality ratio estimates from the two models, again showing close agreement across models. \begin{figure}
\caption{Posterior mean (solid lines) and $95\%$ credible intervals (broken lines) for the all England reproduction number $R_{t,E}$.}
\label{rtppc compare}
\end{figure}
\begin{table}[!h] \centering \caption{\bf Posterior mean and $95\%$ credible intervals for the age-specific infection-fatality ratio from the random-walk and BMA models of transmission-potential. }\label{Table2}
\begin{tabular}{|c|c|c|} \hline
\textbf{Age group (yrs)} & \textbf{Random-walk} & \textbf{BMA} \\
\hline
\multirow{2}{1.3cm}{$<5$} &0.0009\% & 0.0009\%\\
&(0.0002\%--0.0022\%) & (0.00007\%--0.0019\%)\\
\hline
\multirow{2}{1.3cm}{5--14}& 0.0014\% & 0.0014\%\\
&(0.0008\%--0.0022\%) & (0.0006\%--0.0022\%)\\
\hline
\multirow{2}{1.3cm}{15--24}& 0.0046\% & 0.0044\%\\
&(0.0032\%--0.0062\%) & (0.0029\%--0.0060\%)\\
\hline
\multirow{2}{1.3cm}{25--44}& 0.0311\% & 0.0299\%\\
&(0.0281\%--0.0345\%) & (0.0257\%--0.0341\%)\\
\hline
\multirow{2}{1.3cm}{45--64}& 0.4653\% & 0.4488\%\\
& (0.4412\%--0.4901\%) & (0.4001\%--0.4976\%)\\
\hline
\multirow{2}{1.3cm}{65--74}& 3.0992\% & 2.9831\%\\
& (2.9576\%--3.2600\%) & (2.6609\%--3.3052\%)\\
\hline
\multirow{2}{1.3cm}{$>74$}& 17.8161\% & 17.1086\%\\
&(16.9632\%--18.6098\%) & (15.3604\%--18.8568\%)\\
\hline \end{tabular} \end{table}
\paragraph{Computational gains:}
The MwG algorithm took around $78$ hours to finish for both models of the transmission-potential. However, the BMA allows inference at a faster time-scale producing a smoother estimate of $R_{t,E}$ avoiding artificial model assumptions. Such an inference would be computationally infeasible if using a random-walk model at the more granular time-scale as in Eq~(\ref{eq: random ealk beta fast}), given the poor scaling of the RWMH proposal in high dimensions. Thus, using the series approximation we were able to extract more information about the transmission-potential and reproduction-ratio in comparison to the piecewise constant model, while incurring the same computational expense.
Had we used the random-walk model in Eq~(\ref{eq: random ealk beta fast}), we would have had to further partition each of the regional parameter block in separate chunks to accommodate a large vector of increments $\Delta\beta_{t_{k},r}=\log\left(\beta_{t_{k},r}\right)-\log\left(\beta_{t_{k-1},r}\right)$. Consequently, multiple Gibbs moves would have been necessary to update all the increments for a randomly chosen region. This, in turn, would have increased the number of likelihood computations, involving the computationally expensive updates of the transmission model, exponentially at each MCMC iteration.
\section{Discussion}\label{sec:Discussion}
By modelling the force of infection as a function of a time-varying transmission-potential we can incorporate extrinsic, un-modelled effects in the description of the transmission process within a compartmental model. Describing this transmission-potential, in turn, as a stochastic process, a diffusion in particular, we can inject environmental stochasticity in an otherwise deterministic model. In this paper we proposed a path-wise approximation of a diffusion process as an alternative to modelling the dynamics of the transmission-potential as a SDE. Through the path-wise approximation we arrive at a random ODE approximating the SDE. As a function of its parameters, the path (solution) of an ODE is completely deterministic. As a result inference of the transmission-potential is simplified, with no need to solve a missing data problem using a computationally expensive data-augmentation procedure.
We demonstrate the efficacy of the proposed path-wise approximation using two epidemic models. In the first one, an influenza model, we replaced an OU SDE with an equivalent path-wise approximation. We noticed similar inference outcomes in terms of parameter estimates and goodness-of-fit using the SDE and its ODE approximation. However, for the latter we observed orders-of-magnitude improvement in computational efficiency.
We then applied the path-wise approximation to replace a Gaussian random-walk with a discretised path-wise approximation of Brownian motion to model the transmission-potential within a compartmental model of COVID-19 pandemic spread in England. Again we noticed consistent estimates of crucial unknown quantities such as infection-fatality rate, latent infections and a time-varying estimate of the reproduction number. In addition, the path-wise approximation allows the transmission-potential to be modelled at a more granular time-scale providing a smooth estimate of the effective reproduction number. This would be impossible to achieve using the random-walk model due to an exorbitant computational burden.
As an alternative to using our path-wise approximation of Brownian motion to model the transmission-potential, at a faster time-scale, we could have used a different MCMC algorithm, such as the No-U-Turn sampler \citep{hoffman2014no}, that is known to perform well for high dimensional problems. This algorithm proposes a move based on the gradient of the target density. Evaluating gradients, however, for the COVID-19 model is challenging as this requires, in addition to extra computations, a complete re-implementation of the model using an automatic differentiation package. However, for modelling studies where such re-implementation is straightforward, we would like to point out that by applying a path-wise approximation of a diffusion process we are left with the task of sampling from a posterior distribution with a standard Gaussian prior (over the coefficients). The No-U-Turn sampler generally excels at this task.
In this paper we have used simple diffusion models whose transition densities are known analytically. However, if additional prior information about the force of infection is available, then such information can be incorporated in more complex nonlinear SDEs as models of the time-varying transmission-potential. Our methodology can be seamlessly applied in such cases to arrive at a path-wise approximation of such complex diffusion processes.
\section{Software} \label{sec5}
Code and data supporting the experiment with the SIR model in Section \ref{sec:Evaluation}, and the code for running the COVID-19 model in Section \ref{sec:Application: modelling COVID-19 outbreak in England}, are available at \url{https://github.com/sg5g10/envstoch}. Requests to access the non-publicly available data used for the COVID-19 model in Section 5 are handled by the UKHSA Office for Data Release (ODR) \url{https://www.gov.uk/government/publications/accessing-ukhsa-protected-data}.
\section*{Appendix A: Fourier expansion of Brownian motion}\label{sec: Fourier expansion of Brownian motion} By the definition of an It\^o integral, within a time interval $[0,T]$ a standard Brownian motion can be written as \citep{luo2006wiener,LyonsSS12}: \begin{equation}\label{eq: Brownian indicator}
W_t = \int_{0}^t dW_s = \int_{0}^T \mathbb{I}_{[0,t]}(s)dW_s, \end{equation} where $\mathbb{I}_{[0,t]}(\cdot)$ is the indicator function. Suppose $\{\phi_i\}_{i=1}^{\infty}$ is a complete orthonormal basis of $L^2[0,T]$. We can interpret $\mathbb{I}_{[0,t]}$ as an element of $L^2[0,T]$, and expand it in terms of the basis functions: \begin{equation}\label{eq:brown innerp} \begin{aligned}
\mathbb{I}_{[0,t]}(s)&=\sum_{i=1}^{\infty} \left\langle \mathbb{I}_{[0,t]}(\cdot),\phi_i(\cdot)\right\rangle\phi_i (s)\\ &=\sum_{i=1}^{\infty} \Big(\int_{0}^t\phi_i(u)du\Big)\phi_i (s).
\end{aligned} \end{equation} Substituting (\ref{eq:brown innerp}) into (\ref{eq: Brownian indicator}) we see that: \begin{equation}\label{eq: Brownian series}
W_t = \sum_{i=1}^\infty\Big(\int_{0}^T\phi_i(s)dW_s\Big)\int_{0}^t\phi_i(u)du. \end{equation}
\section*{Appendix B: Adaptive MCMC}\label{sec:appendix a}
In an adaptive MCMC algorithm the optimal values of the proposal density's parameters are learnt on the fly using past samples from the Markov chain. Different mechanisms can be used to adapt or learn the parameters of the proposal. \cite{andrieu2008tutorial} proposed a general framework for constructing adaptive MCMC algorithms that rely on the \textit{stochastic approximation} method \citep{robbins1951stochastic} for learning the proposal's parameters on the fly.
Consider in general the proposal density $q_{\phi}(\thb^{j+1}|\thb^j)$ parameterised by $\phi$. Let us also define a suitable objective function \begin{equation} h(\phi):= \mathbb{E}^{\phi}\big[H(\phi,\thb^0,\thb^1,\ldots,\thb^{j},\thb^{j+1})\big], \end{equation}
that expresses some measure of the statistical performance of the Markov chain in its stationary regime. The expectation is with respect to a $\phi$ dependent distribution. For example, the coerced acceptance probability is often used as the objective: \begin{equation}\label{eq: coerced}
H(\phi,\thb^0,\thb^1,\ldots,\thb^{j},\thb^{j+1})=\underbrace{\mathrm{min}\left\{1,\frac{\pi(\thb^{j+1})}{\pi(\thb^{j})}\frac{q_{\phi}(\thb^{j}|\thb^{j+1})}{q_{\phi}(\thb^{j+1}|\thb^{j})}\right\}}_{=:\alpha^j}-\bar{\alpha}, \end{equation}
where $\pi(\thb)$ is the target distribution and $\bar{\alpha}$ is the approximate optimal expected acceptance probability in the stationary regime. For the Gaussian proposal $q:= \mathcal{N}(\thb^{j+1}|\thb^{j},\bv{\Sigma}^{j})$, with its parameter $\phi$ being the covariance $\bv{\Sigma}^{j}$, the following objective function: \begin{equation}\label{eq: moment matching}
H(\bv{\Sigma}^{j},\thb^{j+1}) = \thb^{j+1}\thb^{{j+1}^{'}} - \bv{\Sigma}^{j}, \end{equation} corresponds to matching the moments of the proposal with that of the target. Here by $a^{'}$ we denote the transpose of the vector $a$.
Optimal exploration of $\pi(\thb)$ can thus be formulated as finding the root $\bar{\phi}$ of the following equation: $h(\phi)=0$. The challenge here is to devise an algorithm to find the roots of $h(\phi)$, which involves both integration and optimisation. \cite{andrieu2008tutorial} suggested using the stochastic approximation method \citep{robbins1951stochastic} which is tailored to this situation: \begin{equation} \begin{aligned}
\phi^{j+1} &= \phi^{j} + \delta^{j+1} H(\phi^j,\thb^0,\thb^1,\ldots,\thb^{j},\thb^{j+1})\\
&= \phi^{j} + \delta^{j+1}h(\phi) + \delta^{j+1} H(\phi^j,\thb^0,\thb^1,\ldots,\thb^{j},\thb^{j+1}) - \delta^{j+1}h(\phi)\\
&= \phi^{j} + \delta^{j+1}h(\phi) + \delta^{j+1}\xi^{j+1}, \end{aligned} \end{equation} where $\xi^{j+1}:=\big[H(\phi^j,\thb^0,\thb^1,\ldots,\thb^{j},\thb^{j+1})-h(\phi)\big]$ is usually referred to as the \textit{noise term} and $\delta^j$ is a decreasing sequence (a step-size parameter). If the noise term $\xi^{j+1}$ averages to zero as $j\rightarrow \infty$, the above recursion will converge to the root $\bar{\phi}$ (or at least oscillate around it) when the following conditions hold: \begin{equation}
\sum_{j=0}^{\infty} \delta^j = \infty \quad \text{and} \quad \sum_{j=0}^{\infty} (\delta^j)^2 < \infty. \end{equation}
Combining the above objective functions and using the stochastic approximation we have the following recursions for adapting a random-walk proposal with a global scaling $\lambda^{j}$, $ \mathcal{N}(\thb^{j+1}|\thb^{j},\lambda^{j}\bv{\Sigma}^{j})$, as \citep{andrieu2008tutorial}: \begin{equation}\label{eq: adaptive MCMC} \begin{aligned} \log(\lambda^{j+1}) &= \log(\lambda^{j}) + \delta^{j+1}(\alpha^{j+1} - \bar{\alpha}) \\ \bv{\mu}^{j+1} &= \bv{\mu}^{j} + \delta^{j+1}(\thb^{j+1} - \bv{\mu}^{j}) \\ \bv{\Sigma}^{j+1} &= \bv{\Sigma}^{j} + \delta^{j+1}(\thb^{j+1}\thb^{{j+1}^{'}} - \bv{\Sigma}^{j}), \end{aligned} \end{equation} where the recursion in the first equation, trying to adapt the global scaling, is based on the coerced acceptance probability objective in \eqref{eq: coerced} and the following two equations are minimising the moment matching objective in \eqref{eq: moment matching}.
By choosing a decreasing sequence $\{ \delta^{j}\}_{j=0}^\infty$ of step-sizes it is ensured that the adaptation declines over time, also known as \textit{vanishing adaptation} \citep{andrieu2008tutorial}, and the Markov chain converges to the correct stationary distribution. For all the experiments we have consistently used the following schedule: \begin{equation}
\delta^{j}= j^{-0.6}, \end{equation} which was shown to work particularly well for nonlinear differential equation models in \cite{johnstone2016uncertainty}.
\section*{Appendix C: Simulation study for influenza epidemic} When using a real dataset we do not know the ground truth of the estimated quantities. Thus, we have also carried out a detailed simulation study where we have used simulated datasets that mimic the influenza epidemic used in the main text. We generated three simulated epidemics using the model in Equation (2.2), in the main text, over the same time period $T=14$ days, and used the same population size $N=763$, as the real influenza epidemic. We chose parameter values that generate an epidemic curve similar to the real dataset. These generative parameter values are shown in Figures \ref{oumarg compare}--\ref{oumarg3 compare}. We then proceeded to fit the two alternative models using the inferential setup discussed in the main text.
\begin{figure}
\caption{\textbf{Simulated dataset 1}: Posterior marginal densities of the parameters obtained using the \textbf{SDE} and the \textbf{SA} (with $n=15$ basis function). These densities are summarised using a kernel density estimate. The black line in each of the plots demarcate the generative parameter value.}
\label{oumarg compare}
\end{figure}
\begin{figure}
\caption{\textbf{Simulated dataset 2}: Posterior marginal densities.}
\label{oumarg2 compare}
\end{figure} \begin{figure}
\caption{\textbf{Simulated dataset 3}: Posterior marginal densities.}
\label{oumarg3 compare}
\end{figure}
In Figure \ref{oumarg compare}--\ref{oumarg3 compare} we compare the marginal densities of the parameters obtained using the \textbf{SDE} and \textbf{SA} counterparts, for each of the simulated datasets. Clearly the estimates match well and generative parameter values are recovered.
\begin{figure}
\caption{\textbf{Simulated dataset 1}: Goodness-of-fit (a); posterior distribution of the latent diffusion paths corresponding to the \textbf{SDE} and \textbf{SA} counterparts (b), with densities summarised by the mean (solid lines) and $95\%$ credible intervals (broken lines); and samples from the posterior distribution of the latent diffusion paths, \textbf{SDE} (c) and \textbf{SA} (d)}
\label{ouppc compare}
\end{figure} \begin{figure}
\caption{\textbf{Simulated dataset 2}: Comparison of the goodness-of-fit}
\label{ouppc2 compare}
\end{figure} \begin{figure}
\caption{\textbf{Simulated dataset 3}: Comparison of the goodness-of-fit}
\label{ouppc3 compare}
\end{figure}
Furthermore, in Figure \ref{ouppc compare}--\ref{ouppc3 compare} we compare the goodness-of-fit. As was found for the real dataset, we observe little disagreement between the epidemic curves obtained using the \textbf{SDE} and the \textbf{SA}, but for the posterior distribution of the latent diffusion paths we noticed, for all the datasets, that the credible intervals are narrower for the \textbf{SA}. For all these datasets, the posterior means, and the draws of the sample path, of the two models match well.
\section*{Appendix D: Calculating a time-varying reproduction number}
The estimate of the contact-rate $\beta_{t_k,r}$ is used to derive an estimate of a time-varying reproduction number. Firstly, using the formula of \cite{wearing2005appropriate}, the initial reproduction number $R_{0,r}$ is estimated as follows: \begin{equation}
R_{0,r}= \psi_r d_I \frac{\left (\frac{\psi_r d_L}{2} + 1 \right)^2}{1 - \frac{1}{\left ( \frac{\psi_r d_I}{2} + 1 \right )^2}}. \end{equation} Over time the value of the reproduction number will change as contact patterns shift and the supply of susceptible individuals deplete. The time-$t$ reproduction number is then estimated using the following formula: \begin{equation}\label{eq: rt formula}
R_{t_k,r} = \begin{cases} R_{0,r} \frac{R^*_{t_k,r}}{R^*_{0,r}} & \text{if $t_k < \tlock$}\\
\beta_{t_k,r}R_{0,r} \frac{R^*_{t_k,r}}{R^*_{0,r}} & \text{if $t_k \geq \tlock$}
\end{cases} \end{equation} where $\tlock$ indicates the time-point corresponding to the lockdown. $R^*_{t_k,r}$ is the dominant eigenvalue of the time $t_k$ next-generation matrix, $\Lambda_{k,r}$, with elements:
\begin{equation}
\left(\Lambda_{k,r}\right)_{ij} = S_{r,{t_k},i} \vec{C}^{t_k}_{r,ij} d_I,
\end{equation} where $\vec{C}^{t_k}_{r,ij}$ is a region-specific time-varying contact matrix, see \cite{birrell2021real} for further details on these matrices.
To get an `all England' value for $R_{t_k,E}$ a weighted average of the regional $R_{t_k,r}$ is calculated, where the weights are given by the sum of the infections in each region: \begin{equation}\label{eq: all england rt} R_{t_k,E} = \frac{\sum_r R_{t_k, r} \sum_i \nni_{r,t_k,i}}{\sum_r \sum_i \nni_{r,t_k,i}}. \end{equation}
\section*{Appendix E: Priors for the COVID-19 model}
The priors for the global and regional parameters for the COVID-19 model are listed in Table \ref{tbl:pars}. We used the same priors as were used in \cite{birrell2021real}. Note that we also used the same prior for the volatility of both the piecewise constant random-walk and the Brownian motion model of the transmission-potential.
\begin{table}[!ht] \caption{Model parameters with assumed prior distributions or fixed values, as was used in \cite{birrell2021real}. \label{tbl:pars}} {\tabcolsep=4.25pt \begin{tabular}{p{7.3cm}p{7.3cm}} Name&Prior source\\ \toprule
Over-dispersion, $\eta$ &Uninformative $\operatorname{Gamma}(1, 0.2)$.\\
Mean infectious period, $d_I$ &2 + $\operatorname{Gamma}(1.43, 0.549)$.\\
Infection-fatality rate for age $<5$: $p_1$ & $\operatorname{Beta}(1,62110.8012)$.\\
Infection-fatality rate for age, $5-14$: $p_2$ &$\operatorname{Beta}(1,23363.4859)$.\\
Infection-fatality rate for age $15-24$: $p_3$ &$\operatorname{Beta}(1,5290.0052)$.\\
Infection-fatality rate for age $25-44$: $p_4$ &$\operatorname{Beta}(1,1107.6474)$.\\
Infection-fatality rate for age $45-64$: $p_5$ &$\operatorname{Beta}(1,120.9512)$.\\
Infection-fatality rate for age $65-74$: $p_6$ &$\operatorname{Beta}(1,31.1543)$.\\
Infection-fatality rate for age $>74$: $p_7$ &$\operatorname{Beta}(9.5,112)$.\\
Serological test sensitivity, $\ksens$& $\operatorname{Beta}(71.5,29.5)$.\\
Serological test specificity, $\kspec$& $\operatorname{Beta}(777.5,9.5)$.\\
Exponential growth, $\psi_r$ & $\operatorname{Gamma}(31.36, 224)$.\\
Log of initial infectives, $\log{I_{0,r}}$ & $\mathcal{N}(-17.5, 1.25^2)$.\\
Volatility of transmission-potential, $\sigma_{\beta_{w}},\sigma_{\beta_{t}}$&$\operatorname{Gamma}(1, 100)$.\\
\hline
Mean latent period, $d_L$&3 days (fixed not estimated).\\ \bottomrule \end{tabular}} \end{table}
\section*{Appendix F: Pseudocode of the MwG algorithm}
The pseudocode listed in Algorithm \ref{alg:MwG} describes the Metropolis-within-Gibbs algorithm for sampling from the posterior distribution $p(\thb_g,\thb_1,\ldots,\thb_{n_{r}}|\bv{y^d},\bv{y^s})$ of the global $\thb_g$ and regional $\thb_1,\ldots,\thb_{n_{r}}$ parameters of the COVID-19 model. For each parameter group $\thb_g,\thb_1,\ldots,\thb_{n_{r}}$ we use a proposal with a different set of parameters that are adapted through the mechanism described in \eqref{eq: adaptive MCMC}.
\begin{algorithm} \small
\caption{A random-scan adaptive Metropolis-within-Gibbs sampler}
\label{alg:MwG} \begin{algorithmic}
\STATE {\bfseries Input:} Number of iterations $J$; data $\bv{y^d},\bv{y^s}$; optimal acceptance rate $\bar{\alpha}$.
\STATE Initialise the regional $\thb^{0}_1,\ldots,\thb^{0}_{n_{r}}$ and global parameters $\thb^{0}_g$.
\STATE Initialise the regional proposal parameters $\lambda^{0}_1,\ldots,\lambda^{0}_{n_{r}}$, $\bv{\mu}^{0}_1,\ldots,\bv{\mu}^{0}_{n_{r}}$ and $\bv{\Sigma}^{0}_1,\ldots,\bv{\Sigma}^{0}_{n_{r}}$.
\STATE Initialise the global proposal's parameters $\lambda^{0}_g$, $\bv{\mu}^{0}_g$ and $\bv{\Sigma}^{0}_g$.
\FOR{$j=0$ {\bfseries to} $J-1$}
\STATE \textbf{Global move}:
\begin{enumerate}
\item Draw $\thb^*_g \sim \mathcal{N}(\thb^j_g,\lambda^{j}_g\bv{\Sigma}^j_g)$ and set $\thb^{j+1}_g=\thb^*_g$ with probability $\alpha^j_g=\operatorname{min}\Big\{1,\frac{p(\thb^*_g|\bv{y^d},\bv{y^s})}{p(\thb^j_g|\bv{y^d},\bv{y^s})}\Big\}$, otherwise $\thb^{j+1}_g=\thb^j_g$.
\end{enumerate}
\STATE \textbf{Regional move}: \begin{enumerate}
\item Draw $r^{*}\sim \operatorname{Uniform}(1,n_r)$.
\item Draw $\thb^*_{r^{*}}\sim \mathcal{N}(\thb^j_{r^{*}},\lambda^{j}_{r^{*}}\bv{\Sigma}^j_{r^{*}})$
and set $\thb^{j+1}_{r^{*}}=\thb^*_{r^{*}}$ with probability $\alpha^j_{r^{*}}=\operatorname{min}\Big\{1,\frac{p(\thb^*_{r^{*}}|\bv{y^d},\bv{y^s})}{p(\thb^j_{r^{*}}|\bv{y^d},\bv{y^s})}\Big\}$, otherwise $\thb^{j+1}_{r^{*}}=\thb^{j}_{r^{*}}$.
\item Set $\thb^{j+1}_{{n_{r}}\setminus{r^{*}}}= \thb^j_{{n_{r}}\setminus{r^{*}}}$, where the symbol $A\setminus a$ denotes all elements of the set $A$ except $a$.
\end{enumerate}
\STATE \textbf{Adaptation}: \begin{enumerate}
\item Adapt global proposal's parameters:\\
\begin{equation}
\begin{aligned}
\log(\lambda^{j+1}_g) &= \log(\lambda^{j}_g) + \delta^j(\alpha^j_g - \bar{\alpha}) \\
\bv{\mu}^{j+1}_g &= \bv{\mu}^j_g + \delta^j(\thb^{j+1}_g - \bv{\mu}^j_g) \\
\bv{\Sigma}^{j+1}_g &= \bv{\Sigma}^j_g + \delta^j(\thb^{j+1}_g\thb^{{j+1}^{'}}_g - \bv{\Sigma}^j_g).
\end{aligned}
\end{equation}
\item Adapt proposal's parameters for region ${r^{*}}$:\\
\begin{equation}
\begin{aligned}
\log(\lambda^{j+1}_{r^{*}}) &= \log(\lambda^{j}_{r^{*}}) + \delta^j(\alpha^j_{r^{*}} - \bar{\alpha}) \\
\bv{\mu}^{j+1}_{r^{*}} &= \bv{\mu}^j_{r^{*}} + \delta^j(\thb^{j+1}_{r^{*}}- \bv{\mu}^j_{r^{*}}) \\
\bv{\Sigma}^{j+1}_{r^{*}} &= \bv{\Sigma}^j_{r^{*}} + \delta^j(\thb^{j+1}_{r^{*}}\thb^{{j+1}^{'}}_{r^{*}} - \bv{\Sigma}^j_{r^{*}}).
\end{aligned}
\end{equation}
\item Set $\lambda^{j+1}_{{n_{r}}\setminus{r^{*}}}= \lambda^j_{{n_{r}}\setminus{r^{*}}}$, $\bv{\mu}^{j+1}_{{n_{r}}\setminus{r^{*}}}= \bv{\mu}^j_{{n_{r}}\setminus{r^{*}}}$ and $\bv{\Sigma}^{j+1}_{{n_{r}}\setminus{r^{*}}}= \bv{\Sigma}^j_{{n_{r}}\setminus{r^{*}}}$.
\end{enumerate}
\ENDFOR
\STATE {\bfseries Output:} $\{\thb^j_g,\thb^j_1,\ldots,\thb^j_{n_{r}}\}_{j=0}^{J-1} $. \end{algorithmic} \end{algorithm}
\section*{Appendix G: Goodness-of-fit as per regions of England}
In Figure \ref{gof1} -- \ref{gof7} we show the posterior predictive distributions of the number of deaths and the posterior distribution of the latent infection for each region respectively. We have aggregated the results across ages.
\section*{Appendix H: Maximum mean discrepancy}
For any given probability distribution $\mathbb{P}$ on a domain $\mathcal{X}$ its kernel embedding is defined as $\mu_{\mathbb{P}} =\mathbb{E}_{X \sim \mathbb{P}} k(\cdot,X)$ \citep{muandet2017kernel}, an element of the reproducing kernel Hilbert space $\mathcal{H}$ associated with a positive definite kernel function $k:\mathcal{X} \times \mathcal{X} \rightarrow \mathbb{R}$. Such an embedding exists for any $\mathbb{P}$ whenever $k$ is bounded. Given two probability distributions $\mathbb{P}$ and $\mathbb{Q}$ the maximum mean discrepancy (MMD) is the Hilbert space distance between their kernel embeddings $\mu_{\mathbb{P}}$ and $\mu_{\mathbb{Q}}$. Considering that we have two sets of samples $\{X_i\}_{i=1}^n$ and $\{Y_i\}_{i=1}^m$ from corresponding distributions $\mathbb{P}$ and $\mathbb{Q}$ respectively, then the MMD between $\mathbb{P}$ and $\mathbb{Q}$ is given by \citep{gretton2012kernel} \begin{equation}\label{eq:MMD} \begin{aligned}
MMD^2(\mathbb{P},\mathbb{Q})&=|| \mu_{\mathbb{P}} - \mu_{\mathbb{Q}}||_{\mathcal{H}}^2\\
&=\frac{1}{n(n-1)}\sum_{i=1}^n \sum_{j\ne i}^n k(X_i,X_j) + \frac{1}{m(m-1)}\sum_{i=1}^m \sum_{j\ne i}^m k(Y_i,Y_j)-\frac{2}{nm}\sum_{i=1}^n \sum_{j=1}^m k(X_i,Y_j). \end{aligned} \end{equation} The $MMD^2(\mathbb{P},\mathbb{Q})=0$ iff $\mathbb{P}=\mathbb{Q}$, following the properties of kernel embedding. The kernel embedding captures all the necessary information about a distribution \citep{muandet2017kernel}, thus the distance between the two embeddings would naturally highlight the discrepancy more efficiently in the tail regions of the distributions under comparison. In this paper we used an exponentiated quadratic kernel given by \begin{equation}\label{eq: rbf kernel}
k(X,X')= \exp{\Big(-\frac{||X - X'||^2}{\rho^2}\Big)}, \end{equation} where $\rho$ is a hyperparameter. We set $\rho$ to the median distance among the samples.
\begin{figure}
\caption{Goodness-of-fit of daily death data (a) and the inferred latent infections (b), produced using the random-walk (magenta lines) and BMA (orange lines) for the region \textbf{East of England}. These densities are summarised by the mean (solid lines) and $95\%$ credible intervals (broken lines). The black line indicates the day of lockdown in England \myrd{23} March, 2020.}
\label{gof1}
\end{figure}
\begin{figure}
\caption{Goodness-of-fit of daily death data (a) and the inferred latent infections (b) for the region \textbf{North West}. }
\end{figure}
\begin{figure}
\caption{Goodness-of-fit of daily death data (a) and the inferred latent infections (b) for the region \textbf{Midlands}.}
\end{figure}
\begin{figure}
\caption{Goodness-of-fit of daily death data (a) and the inferred latent infections (b) for the region \textbf{London}. }
\end{figure}
\begin{figure}
\caption{Goodness-of-fit of daily death data (a) and the inferred latent infections (b) for the region \textbf{North East and Yorkshire}.}
\end{figure}
\begin{figure}
\caption{Goodness-of-fit of daily death data (a) and the inferred latent infections (b) for the region \textbf{South East}. }
\end{figure}
\begin{figure}
\caption{Goodness-of-fit of daily death data (a) and the inferred latent infections (b) for the region \textbf{South West}. }
\label{gof7}
\end{figure}
\end{document} | arXiv | {
"id": "2208.14363.tex",
"language_detection_score": 0.7407561540603638,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Error propagation in loss- and failure-tolerant quantum computation schemes}
\author{Peter P. Rohde} \email[]{rohde@physics.uq.edu.au} \homepage{http://www.physics.uq.edu.au/people/rohde/} \affiliation{Centre for Quantum Computer Technology, Department of Physics\\ University of Queensland, Brisbane, QLD 4072, Australia}
\author{Timothy C. Ralph} \affiliation{Centre for Quantum Computer Technology, Department of Physics\\ University of Queensland, Brisbane, QLD 4072, Australia}
\author{William J. Munro} \affiliation{Hewlett-Packard Laboratories, Filton Road, Stoke Gifford, Bristol BS34 8QZ, UK} \date{\today}
\frenchspacing
\begin{abstract} Qubit loss and gate failure are significant obstacles for the implementation of scalable quantum computation. Recently there have been several proposals for overcoming these problems, including schemes based on parity and cluster states. While effective at dealing with loss and gate failure, these schemes typically lead to a blow-out in effective depolarizing noise rates. In this supplementary paper we present a detailed analysis of this problem and techniques for minimizing it. \end{abstract}
\pacs{03.67.Lx,42.50.-p}
\maketitle
\section{Introduction} Quantum computation has the potential to solve computational problems intractable on classical computers \cite{bib:NielsenChuang00}. A major obstacle facing the experimental realization of quantum computing is the introduction of errors. In some architectures the dominant errors are qubit loss and gate failure. This is especially the case for photonic schemes, such as linear optics quantum computing \cite{bib:KLM01}, where these effects arise due to physical loss of photons, source and detector inefficiencies, and the inherent non-determinism of multi-qubit gates. This has motivated the development of recent schemes for tolerating qubit loss \cite{bib:RalphHayes05,bib:Varnava05} and gate failure \cite{bib:Duan05,bib:BarrettKok05}.
These schemes achieve loss/failure tolerance through the introduction of redundant encoding. This provides multiple attempts to perform the relevant operations, suppressing loss/failure rates. However, redundancy also introduces additional opportunities for noise to be introduced, increasing effective error rates. In general this results in an exponential blow-out in depolarizing noise \cite{bib:RohdeRalphMunro07}. This problem of error blow-out can be minimized by modifying the schemes. However, in general this significantly reduces their loss/failure tolerance.
In a variety of contexts this is a serious problem. When embedded into a fault tolerant quantum computing architecture it significantly reduces the effective fault tolerant threshold. In a loss-tolerant quantum memory scenario it quickly reduces the memory to a depolarizing (i.e. classical) channel. In the context of state preparation strategies, which have applications beyond quantum computing, it results in the preparation of highly mixed states.
We demonstrate this principle by example of three well known protocols for tolerating qubit loss and gate failure, and expand on the work of Ref. \cite{bib:RohdeRalphMunro07} by deriving analytic expressions for effective error rates. We also examine techniques for minimizing the error scaling problem. We begin with a general discussion of the relevant error propagation properties of cluster (or graph) states in section \ref{sec:graph_state_disc}. Next we discuss a gate-failure tolerant approach to preparing cluster states in section \ref{sec:failure}. In principle this scheme can tolerate arbitrary non-zero gate success probabilities, at the expense of a polynomial overhead in physical resource requirements. We demonstrate that effective error rates increase exponentially with gate failure probability, which places practical limitations on the failure tolerance of this scheme. We present a more general state preparation strategy, which allows us to reduce this effect at the expense of an overhead in physical and temporal resource requirements. In section~\ref{sec:cluster} we consider a recent scheme for tolerating qubit loss in the cluster state model for quantum computation. In principle this scheme can tolerate extremely high loss rates -- up to 50\%. We demonstrate that, like the previous scheme, this protocol has the effect of magnifying the effects of depolarizing noise, which places practical limitations on realistically tolerable loss rates. We discuss a method for minimizing this effect, relying on majority voting techniques. However, doing so significantly reduces the loss tolerance of the scheme. Again we derive expressions for effective error and loss rates. Finally, in section \ref{sec:parity} we consider the loss-tolerant parity scheme. As before, this scheme is shown to magnify depolarizing noise. We discuss a potential solution to this, also based on a majority voting technique. We conclude in section \ref{sec:conclusion}.
Our results indicate that in realistic scenarios the loss/failure tolerance of specialized schemes is likely to be significantly lower than is possible in principle. Consequently, such schemes are likely to be limited to dealing with comparatively modest levels of loss and failure.
\section{Discussion of graph state properties} \label{sec:graph_state_disc} We begin with a general discussion of the behavior of graph states (in this paper we use the terms `graph state' and `cluster state' interchangeably) under qubit loss and single qubit errors. We assume familiarity with the cluster state model for quantum computation and suggest the unfamiliar reader refer to Refs.~\cite{bib:Raussendorf01,bib:Raussendorf03,bib:Nielsen06}. In general such behavior is highly topology-dependent. We focus on two topologies of particular interest -- linear graphs, and fully connected graphs (which are locally equivalent to the maximally entangled GHZ states).
\subsection{The stabilizer representation of graph states} An $n$ qubit graph state can be expressed in terms of a set of $n$ stabilizers, one for each vertex in the graph. The stabilizers take the form \begin{equation} \hat{S}_i = \hat{X}_i \bigotimes_{j\in v(i)} \hat{Z}_j, \end{equation} where $i$ denotes a qubit, $v(i)$ denotes the neighborhood of $i$, and $\hat{X}_i$ and $\hat{Z}_i$ denote the usual Pauli bit-flip and phase-flip operators respectively, acting on qubit $i$.
The stabilizer representation is particularly well suited to understanding the propagation of errors through graph states, which we consider in the following subsections.
\subsection{Recoverability from qubit loss} In general, a graph state which has suffered the loss of a qubit (or alternately any form of located error) can be partially recovered by measuring the qubits adjacent to the lost qubit in the $Z$-eigenbasis. This feature of graph states enables, for example, the gate-failure tolerant construction of Refs.~\cite{bib:Duan05,bib:BarrettKok05} that we discuss in the next section.
Because this recovery operation requires measuring the neighborhood of the affected qubit, it is highly topology-dependent. Consider a linear graph of length $n$. When the $m^\mathrm{th}$ qubit is lost one can recover two linear graphs of length $m-2$ and $n-m-2$ by measuring the qubits adjacent to the lost qubit in the $Z$-eigenbasis. For a connected graph $K_n$ (i.e. a GHZ state) the behavior is very different. Because the graph is fully connected, the recovery operation requires measuring every remaining qubit, which completely destroys the system. Thus, for linear graphs, under qubit loss much of the entanglement is preserved, whereas for GHZ states it is completely destroyed.
\subsection{Error propagation under $Z$-measurement} We now consider the situation where we perform a sequence of $Z$-measurements on a graph state. This situation arises naturally in many graph state preparation strategies where unwanted qubits are removed from the graph via $Z$-measurement. In general, an error in the $Z$-measurement outcome (i.e. an $X$-error) will propagate $Z$-errors onto the neighbors of the affected qubit. This property follows directly from the stabilizer representation. Suppose we have a graph state $\ket\psi$ which is subject to an $\hat{X}$ error at location $i$, \begin{equation} \hat{X}_i\ket\psi = \hat{X}_i\hat{S}_i\ket\psi = \bigotimes_{j\in v(i)} \hat{Z}_j \ket\psi. \end{equation}
Consider a linear graph where we measure a qubit in the $Z$-eigenbasis. This has the effect of dividing the graph into two smaller linear graphs. If an $X$-error was present on the measured qubit, $Z$-errors will propagate onto the end qubits of each of the newly created linear graphs. Importantly, if a sequence of $Z$-measurements is performed along a linear graph segment, only errors affecting the terminating qubits will propagate onto the remaining state. Thus, there is no accumulation of errors as we perform the measurement sequence.
For the fully connected graph $K_n$, $Z$-measurement of a single qubit reduces the graph to $K_{n-1}$ and propagates a correlated $Z$-error onto all of the remaining qubits (i.e. $Z^{\otimes n-1}$). It can easily be verified from the stabilizer representation that this is equivalent to a $Y$-error acting on any one qubit in the remaining state. If we again consider the situation where we perform a sequence of $Z$-measurements, unlike the linear graphs we now have a situation where the propagated $Y$-errors accumulate. Specifically, the net probability of a $Y$-error being propagated onto the remaining state is the probability of an odd number of measurement errors occurring. This exhibits exponential dependence on the number of measured qubits.
\subsection{Error propagation under $X$-measurement} \label{sec:X_prop_rules} Next we consider the situation where we perform $X$-measurements on qubits in a graph state. First consider the linear graph. We can quickly establish the error propagation properties from the following two circuit identities. The first states that a $Z$-error acting on a qubit which is subsequently measured in the $X$-eigenbasis is equivalent to an $X$-error on a neighboring qubit. In the circuit model this can be represented as, \begin{displaymath} \xymatrix @*=<0em> @C=1em @R=.7em{ & \ket{+} & & \ctrl{1} & \gate{Z} & \measure{X} & = & & \ket{+} & & \ctrl{1} & \qw & \measure{X} \\ & \ket{+} & & \ctrl{-1} & \qw & \qw & & & \ket{+} & & \ctrl{-1} & \gate{X} & \qw \\} \end{displaymath} The second identity states that when a $Z$-error is introduced onto a graph state qubit, and both that qubit and its neighbor are measured in the $X$-eigenbasis, this is equivalent to a $Z$-error acting on the third qubit along the chain, \begin{displaymath} \xymatrix @*=<0em> @C=1em @R=.7em{ & \ket{+} & & \ctrl{1} & \qw & \gate{Z} & \measure{X} & & & \ket{+} & & \ctrl{1} & \qw & \qw & \measure{X} \\ & \ket{+} & & \ctrl{-1} & \ctrl{1} & \qw & \measure{X} & = & & \ket{+} & & \ctrl{-1} & \ctrl{1} & \qw & \measure{X} \\ & \ket{+} & & \qw & \ctrl{-1} & \qw & \qw & & & \ket{+} & & \qw & \ctrl{-1} & \gate{Z} & \qw} \end{displaymath} Suppose we have the situation where we wish to sequentially measure a series of qubits from a linear graph in the $X$-eigenbasis. We are interested in how errors propagate onto the terminating qubit, which we refer to as the `root' qubit. From the above two circuit identities we can establish the following. When a $Z$-error is introduced onto qubits which are an even number of qubits away from the root qubit, after measurement this is equivalent to a $Z$-error acting on the root qubit. 
Secondly, when a $Z$-error is introduced onto a qubit an odd number of qubits away from the root qubit, after measurement this is equivalent to an $X$-error acting on the root qubit. In both of these cases the propagated errors accumulate and the net error probability is the probability of an odd number of the respective errors being propagated - i.e. exponential dependence.
For the fully connected graph the situation is similar to before. Namely, an $X$-measurement on $K_n$ reduces it to $K_{n-1}$, and an incorrect measurement result propagates a $Y$-error onto one of the remaining qubits. As before, if a sequence of measurements is performed this leads to an accumulation of errors with exponential dependence on the number of measured qubits.
\section{Scalable probabilistic quantum computing and state preparation schemes} \label{sec:failure} In some quantum computing architectures gate failure is a significant problem. This undermines our ability to perform scalable quantum computing, because the probability of a computation succeeding drops exponentially with the size of the circuit. Recently schemes have been suggested for tolerating gate failure \cite{bib:Duan05,bib:BarrettKok05}. We specifically consider the scheme of Ref.~\cite{bib:Duan05}, which describes a technique for constructing cluster states, a resource for universal quantum computation, using physical resources that grow polynomially with the size of the desired cluster, thus allowing for `efficient' quantum computation. In principle this scheme can tolerate arbitrary non-zero gate success probabilities.
The scheme is an example of a `divide-and-conquer' approach to state preparation. A related proposal is Nielsen's \cite{bib:Nielsen04} \emph{micro-cluster} approach. We now briefly review this scheme. Ordinarily, using deterministic gates, if we wish to prepare, say, a square lattice cluster, we begin with a lattice of qubits initially prepared in the $\ket{+}=(\ket{0}+\ket{1})/\sqrt{2}$ state and perform a sequence of {\sc CPHASE} gates between nearest neighbors. Using non-deterministic gates this is clearly not possible since the success probability is exponentially small.
This scheme overcomes this problem by utilizing a resource of `+'-clusters. These consist of a central node, which will ultimately belong to the prepared cluster (see Fig.~\ref{fig:failure}), from which emanate four linear chain clusters of length $n_l$. Using non-deterministic gates these states can be prepared off-line in advance. Utilizing this resource we proceed to bond two +-clusters together by performing a {\sc CPHASE} gate between the ends of their arms, as shown in Fig.~\ref{fig:failure}a. If this fails we can recover the remainder of the +-clusters by performing $Z$-measurements on the qubits neighboring the ones to which we applied the {\sc CPHASE} gate. Thus, a gate failure reduces arm length by two qubits. We can now reattempt this bonding using what is left of the arms. Thus, an arm length of $n_l$ allows for $n_l/2$ bonding attempts. When bonding finally succeeds (assuming it does) any left-over arm qubits may be removed by measuring them in the $X$-eigenbasis, as shown in Fig.~\ref{fig:failure}b. This leaves us with a cluster state where the two central qubits from the original +-clusters are now neighbors.
It is clear that this structure provides redundancy in the bonding process, giving us multiple attempts at bonding qubits together, thereby suppressing the effective gate failure rate. It is shown in Ref.~\cite{bib:Duan05} that this procedure generalizes to the construction of arbitrarily large square lattice clusters. Furthermore, it is shown that the required arm length of the resource of +-clusters scales as, \begin{equation} \label{eq:arm_len_scale} n_l\approx \frac{2}{p_g}\ln\left(\frac{2N}{\varepsilon}\right), \end{equation} where $p_g$ is the success probability of the {\sc CPHASE} gate, $N$ is the desired number of qubits in the final cluster state, and $\varepsilon$ the tolerated probability of failing to prepare the cluster state. \begin{figure}
\caption{Gate-failure-tolerant approach to constructing cluster states. The fundamental building block is the `+'-cluster. This has a central node (shown in gray) which will ultimately belong to the constructed square lattice. The central node is bonded to four linear chain clusters, each of length $n_l$. These `arms' provide redundancy, allowing multiple bonding attempts. To grow a cluster, rather than bond two cluster qubits together directly, we utilize +-clusters and attempt to bond them starting at the ends of the arms (a). Whenever this fails we lose two qubits from the respective arms, but can recover the remainder of the cluster by measuring the neighboring qubits in the $Z$-eigenbasis. We can keep reattempting the gate until there are no qubits remaining in the arms. When bonding succeeds we have the two desired cluster nodes with some remaining arm qubits left between them. These are removed by measuring them in the $X$-eigenbasis (b).}
\label{fig:failure}
\end{figure}
\subsection{Analysis of error propagation} This scheme relies on sequential $X$-measurements along a linear graph segment to remove left-over qubits. Thus we can use the error propagation rules from section \ref{sec:X_prop_rules}. From these rules we know that a $Z$-error will be propagated onto the root qubit if an odd number of $Z$-errors occurred among the half of the measured qubits lying an even number of qubits from the root, and similarly an $X$-error results from the odd-distance half. Thus, the effective error rates can be expressed as, \begin{eqnarray} p_X'&=&\sum_{\substack{0\leq i\leq n_X/2 \\ i\in 2\mathbb{Z}+1}}\binom{n_X/2}{i}{p_X}^i(1-p_X)^{n_X/2-i}\nonumber\\ p_Z'&=&\sum_{\substack{0\leq i\leq n_X/2 \\ i\in 2\mathbb{Z}+1}}\binom{n_X/2}{i}{p_Z}^i(1-p_Z)^{n_X/2-i}, \end{eqnarray} where $p_X$ and $p_Z$ are the physical $X$ and $Z$-error rates, and $n_X$ is the number of measured qubits.
Thus, the effective $X$- and $Z$-error rates exhibit exponential dependence on $n_X$, which, from Eq.~\ref{eq:arm_len_scale}, is inversely proportional to gate success probability. Thus, while this scheme can tolerate arbitrary non-zero gate success probabilities in principle, in practice noise blow-out places a practical limitation on gate success probability.
\subsection{Minimizing the effects of error propagation} We now discuss a technique for minimizing the effects of error blow-out in divide-and-conquer based state preparation strategies. The approach is effectively to trade an increase in physical resource requirements for a reduction in accumulated error rates. This is achieved by beginning with larger resource states, which are prepared using a `single-shot' approach, i.e. probabilistically prepared in one attempt.
Consider the Duan \emph{et al.} scheme. Referring to Fig.~\ref{fig:resource}, we begin with a resource of clusters of the form shown in (a). We then fuse two such clusters together and measure out the redundant qubits to produce a resource of clusters of form (b). Similarly, fusing two of these clusters together and removing the redundant qubits yields a cluster of form (c). Suppose the initial resource of +-clusters is produced using a single-shot approach. Thus, we assume the initial resource states do not suffer from accumulated errors. Ordinarily a (b)-type cluster suffers error accumulation associated with the measurement of redundant qubits from two fused +-clusters. This can obviously be avoided by instead beginning with a resource of (b) clusters, directly prepared using a single-shot approach. Doing so avoids the measurement of the interstitial redundant qubits and the associated accumulation of errors. Obviously this idea can be used to an arbitrary extent, allowing for further suppression of error accumulation effects. However, the degree to which this approach can be employed is practically limited by the exponential scaling of single-shot preparation.
This technique effectively allows us to tailor a strategy which presents an arbitrary tradeoff between the single-shot and divide-and-conquer strategies, where the level of tradeoff is limited by the gate failure rate. The tradeoff between competing resources is clear. For a given bound on the effective error rate, using larger resource states allows us to tolerate higher local error rates, since error accumulation is reduced. However, because the resource states are prepared using a single-shot approach, preparing larger resource states requires physical and temporal resources growing exponentially with the size of the resource state, and polynomially with gate failure probability. This places fundamental limitations on practically tolerable gate failure rates. \begin{figure}
\caption{Different resource states that can be employed in the scalable construction of cluster states using non-deterministic gates.}
\label{fig:resource}
\end{figure}
A simple numerical example is illustrative. From Ref.~\cite{bib:Duan05}, constructing a 100 qubit cluster state with 10\% success probability, using {\sc CPHASE} gates that operate with 99\% success probability requires a resource of +-clusters with arm length $n_l\approx 11$. Suppose we construct the resource states using a single-shot approach to prevent accumulation of errors. Then the preparation of each resource +-cluster succeeds with probability $p_\mathrm{success}={p_\mathrm{gate}}^{4n_l}\approx 0.64$. The next step in the protocol is to join these +-clusters together to form clusters of type (b). We are in a high $p_\mathrm{gate}$ regime, so the average number of redundant qubits that must be measured away is $\approx n_l$. With a physical depolarizing rate of $p_\mathrm{error}=10^{-3}$, after measurement of the redundant qubits, this results in an effective depolarizing rate of $p_\mathrm{eff}=1.1\times 10^{-2}$, an order of magnitude increase. Alternately, we could produce the type-(b) clusters directly as a resource. In this case the single-shot success probability is $p_\mathrm{success}={p_\mathrm{gate}}^{6n_l+1}\approx 0.51$. However, now there are no accumulated errors associated with joining the +-clusters, so the effective error rate at this stage of the protocol is just the physical error rate of $10^{-3}$.
While this particular example exhibits a very significant reduction in the effective error rate, it is clear that we are limited to a high $p_\mathrm{gate}$ regime, as a result of the large exponents. Now the tradeoff becomes evident. For lower values of $p_\mathrm{gate}$, we lose our ability to directly prepare type-(b) clusters, and for yet lower values of $p_\mathrm{gate}$, to prepare +-clusters. Beyond this we have to resort to preparing the +-clusters non-deterministically, but we may still attempt to prepare the initial linear clusters via single-shot, and so on. While it is obvious that this approach is limited, the benefits of shifting as much of the state preparation into single-shot construction are clear.
\subsection{Discussion} While we have analyzed a specific state preparation strategy, these results are likely to be applicable to other divide-and-conquer type strategies, which have applications beyond quantum computing. For example, Kieling \emph{et al.} \cite{bib:Kieling06} recently investigated optimal strategies for constructing cluster states using non-deterministic gates. Their analysis focussed on minimizing physical resource requirements, and was entirely classical. However, error blow-out is affected by more than just resource requirements. First, as we have demonstrated, it is highly dependent upon the preparation strategy. Second, as we know from our discussion in section \ref{sec:graph_state_disc}, it is also highly topology dependent. Thus, asking questions like ``which strategy minimizes the required number of gate operations?'' or ``which strategy minimizes physical resource requirements?'' overlook this important issue. We suggest that future investigations of state preparation strategies adopt a more rigorous definition of `optimal' which includes consideration of error propagation effects.
Let us qualify this statement further by considering two simple example state preparation strategies. First, consider a pure single-shot strategy. Clearly, when using probabilistic gates this approach has an exponentially small success probability and therefore requires exponentially large resources. This is one extreme of the example discussed above, where we shift the entire preparation into single-shot. Although this strategy has exponentially low success probability, it does not require the introduction of \emph{any} redundant qubits and therefore will not accumulate additional errors associated with unwanted qubits that must be measured out. Second, consider the other extreme, a divide-and-conquer approach, where we probabilistically build up a large cluster from numerous small clusters that are prepared offline in advance. The +-cluster approach discussed, as well as micro-cluster approaches, are examples of this. In general, this type of strategy exhibits polynomial resource requirements. However, while this strategy is superior from a resource perspective, it is inferior from an error propagation perspective, since it necessarily introduces redundant qubits which must be removed via measurement. In this simple comparison of two extreme cases we see that physical resource requirements and error accumulation are directly competing parameters.
\section{Loss-tolerant cluster states} \label{sec:cluster} We now examine the Varnava \emph{et al.} \cite{bib:Varnava05} scheme for loss-tolerant cluster state quantum computation. Here a cluster qubit is replaced with a `tree' cluster. This structure facilitates multiple attempts at \emph{indirect measurement} of the lost qubit, suppressing effective loss rates. Indirect measurement is a feature of cluster states that follows from the stabilizer representation. The stabilizers impose correlations in measurement outcomes of cluster state qubits. Indirect measurement exploits these correlations to infer the measurement result of a lost qubit using only the measurement results of correlated qubits, explained in Fig.~\ref{fig:cluster}. \begin{figure}
\caption{Using a tree cluster to perform indirect $Z$-measurement of a lost qubit. An indirect $Z$-measurement is performed by measuring a qubit below the lost qubit in the $X$-eigenbasis, and each of the connected qubits below that in the $Z$-eigenbasis. If the $X$-measurement fails, we can make another attempt on the next branch. If any of the $Z$-measurements fail they can be indirectly measured by moving further down the tree.}
\label{fig:cluster}
\end{figure}
Indirect measurement via the tree structure exhibits similar unfavorable error scaling characteristics to the previous example. Specifically, an indirect measurement outcome will be incorrect if an odd number of the involved measurements were incorrect. Thus, the effective error rate of the lost root qubit scales up exponentially with the number of measurements made. Furthermore, high loss tolerance is achieved by using larger trees. Therefore higher loss tolerance implies higher error rates, because more qubits are measured during the indirect measurement. A numerical example is illustrative. Based on results from Ref.~\cite{bib:Varnava05}, achieving an effective loss rate of $\varepsilon_\mathrm{eff}\approx 10^{-3}$ given a physical loss rate of $\varepsilon_\mathrm{loss}=0.2$, requires tree clusters with roughly $Q\approx 1000$ qubits. Suppose an indirect measurement requires measuring half the tree on average. This will magnify a physical error rate of $p_\mathrm{error}\approx 10^{-3}$ to an effective error rate on the indirectly measured qubit of $p_\mathrm{eff}\approx 0.32$, an increase of more than two orders of magnitude.
We now consider an approach for minimizing this effect, again relying on a majority voting technique. In fact, this modification does not require any changes to the tree structure, but only a change in the protocol. The tree is characterized by branching parameters $\{b_1,b_2,\dots,b_d\}$ (i.e. the number of branches emanating from each node of the respective level), where $d$ is the depth of the tree. To indirectly measure the lost root qubit we have as many attempts as there are branches at the first level of the tree, $b_1$. In the original protocol we simply keep attempting indirect measurement via the different branches until one succeeds. We now modify the protocol as follows. We simultaneously perform indirect measurement via \emph{all} available branches. From the ones which succeed we perform a majority vote to determine the correct indirect measurement outcome. If indirect measurement is performed in parallel via $b_1$ branches, the probability of an error propagating into the measurement outcome scales as $p_\mathrm{eff}=\textsc{Exp}^{-1}(b_1)$ with $p_\mathrm{single}$, the probability of any single indirect measurement being incorrect. On the other hand, $p_\mathrm{single}$ scales as $p_\mathrm{single}=\textsc{Exp}(\textsc{Poly}[b_2,\dots,b_d])$ with $p_\mathrm{error}$. Therefore, for an appropriate choice of branching parameters $\{b_i\}$, one expects that exponential error scaling can be eliminated.
We now analyze this modification in detail. For simplicity we begin by presenting the analysis for a simple two-level tree structure with uniform branching parameters. We then generalize the analysis to arbitrary tree structures.
\subsection{Two-level error propagation analysis} We begin by considering a two-level tree structure with uniform branching parameters $b=b_1=b_2$. $p_\mathrm{local}$ and $p_\mathrm{loss}$ are the local error and loss rates respectively.
A single indirect measurement (i.e. along just one of the branches) succeeds if \emph{none} of the qubits involved were lost, \begin{equation} p_\mathrm{im-success}=(1-p_\mathrm{loss})^{b+1}. \end{equation} The effective loss rate is the probability that all $b$ indirect measurements fail, \begin{equation} p_\mathrm{loss}'=(1-p_\mathrm{im-success})^b. \end{equation} The probability of an error occurring during a single indirect measurement is the probability that an odd number of measurement errors occur on qubits involved in the indirect measurement, \begin{equation} p_\mathrm{im-error}=\sum_{i\in 2\mathbb{Z}+1}\binom{b}{i}{p_\mathrm{local}}^i(1-p_\mathrm{local})^{b-i}. \end{equation}
The probability that $m$ indirect measurements succeed is \begin{equation} p_\mathrm{success}(m)=\binom{b}{m}{p_\mathrm{im-success}}^m(1-p_\mathrm{im-success})^{b-m}. \end{equation} The probability of an error being introduced after majority voting, given that $m$ indirect measurements succeed is \begin{equation} p_\mathrm{error}(m)=\sum_{i>m/2}\binom{m}{i}{p_\mathrm{im-error}}^i(1-p_\mathrm{im-error})^{m-i}. \end{equation} The overall probability of an error being introduced after majority voting is \begin{equation} p_\mathrm{error}=\sum_{i=1}^b p_\mathrm{success}(i)p_\mathrm{error}(i). \end{equation}
\subsection{General error propagation analysis} The general analysis proceeds along the same lines as the two-level case. However, now the relationships are defined recursively. We use the notation $p^{(i)}$ to denote a probability at level $i$ of the tree structure.
Imagine we wish to measure the $Z$-observable of a qubit at level $i$. There are two ways in which this can succeed: either the photon was \emph{not} lost, and it can be measured directly, or it \emph{was} lost, but it is successfully measured indirectly. The probability that the latter case succeeds is the probability that indirect measurement succeeds ($p_\mathrm{im-success}^{(i+1)}$) via \emph{any} of the underlying $b_i$ routes. Each of these indirect measurements will succeed if the qubit at level $i+1$ (which we measure in the $X$-eigenbasis) was \emph{not} lost, and the $Z$-measurements on each of the qubits below that, at level $i+2$, succeed. Thus, \begin{eqnarray} p_\mathrm{Z-success}^{(i)}&=&(1-p_\mathrm{loss})+p_\mathrm{loss}\left[1-\left(1-p_\mathrm{im-success}^{(i)}\right)^{b_i}\right]\nonumber\\ p_\mathrm{im-success}^{(i)}&=&(1-p_\mathrm{loss}){p_\mathrm{Z-success}^{(i+2)}}^{b_{i+1}} \end{eqnarray} where $i$ is odd. The probability that $m$ indirect measurements succeed is \begin{equation} p_\mathrm{im-success}^{(i)}(m)=\binom{b_i}{m}{p_\mathrm{im-success}^{(i)}}^{m}\left(1-p_\mathrm{im-success}^{(i)}\right)^{b_i-m}. \end{equation} This reasoning applies for all levels $i$, except the top level ($i=1$) and bottom level ($i=d$). At the top level the root qubit has definitely been lost, so the expression reduces to \begin{equation} p_\mathrm{Z-success}^{(1)}=1-\left(1-p_\mathrm{im-success}^{(1)}\right)^{b_1}. \end{equation} At the bottom level, indirect measurement is not possible, so $Z$-measurements will only succeed if the qubits have not been lost \begin{equation} p_\mathrm{Z-success}^{(d)}=1-p_\mathrm{loss}. \end{equation} Now let us turn our attention to error propagation. Again there are two possibilities. If the photon was \emph{not} lost, and measured directly, the probability of an error being picked up at that level is the local error rate. 
If the photon \emph{was} lost, an error will be picked up if the majority of the underlying indirect measurements suffered errors. \begin{eqnarray} p_\mathrm{maj-error}^{(i)}(m)&=&\sum_{j>m/2}\binom{m}{j}{p_\mathrm{im-error}^{(i)}}^{j}\left(1-p_\mathrm{im-error}^{(i)}\right)^{m-j}\nonumber\\ p_\mathrm{maj-error}^{(i)}&=&\sum_{m=0}^{b_i}p_\mathrm{im-success}^{(i)}(m)p_\mathrm{maj-error}^{(i)}(m)\nonumber\\ p_\mathrm{error}^{(i)}&=&(1-p_\mathrm{loss})p_\mathrm{local}+p_\mathrm{loss}p_\mathrm{maj-error}^{(i)}.\nonumber\\ \end{eqnarray} where $p_\mathrm{maj-error}^{(i)}(m)$ is the probability of the majority vote being incorrect given that $m$ indirect measurements are performed, $p_\mathrm{maj-error}^{(i)}$ is the net probability of the majority vote being incorrect, and $p_\mathrm{error}^{(i)}$ the total error probability. It is worth noting here that where indirect measurement is possible $p_\mathrm{maj-error}^{(i)}\leq p_\mathrm{local}$. This suggests a further optimization to our measurement strategy. Namely, even when a qubit is present, we should always preferentially measure through indirect measurement, rather than through direct measurement. This scenario can be modeled through a trivial modification of the previous equation.
Finally, the probability of an error being introduced during any given indirect measurement is the probability that an \emph{odd} number of measurement errors are introduced onto the involved qubits. There are two possibilities, either a local error occurs on the qubit being measured in the $X$-eigenbasis and an even number of errors occur on the qubits measured in the $Z$-eigenbasis, or no error occurs on the qubit being measured in the $X$-eigenbasis and an odd number of errors occur on the qubits measured in the $Z$-eigenbasis. Thus, \begin{eqnarray} p_\mathrm{im-error}^{(i)}=p_\mathrm{local}\sum_{j\in 2\mathbb{Z}}\binom{b_{i+1}}{j}{p_\mathrm{error}^{(i+2)}}^j\left(1-p_\mathrm{error}^{(i+2)}\right)^{b_{i+1}-j}\nonumber\\ +(1-p_\mathrm{local})\sum_{j\in 2\mathbb{Z}+1}\binom{b_{i+1}}{j}{p_\mathrm{error}^{(i+2)}}^j\left(1-p_\mathrm{error}^{(i+2)}\right)^{b_{i+1}-j}.\nonumber\\ \end{eqnarray}
\subsection{Discussion} In the modified gate failure tolerant protocol we observed a tradeoff between failure and depolarizing rates. In this example we see a similar effect. In the modified protocol loss rates determine the \emph{effective} value of $b_1$. That is, the number of indirect measurements that succeed at the first level of the tree depends on the loss rate. Thus, higher loss rates imply lower confidence in the majority vote and therefore lower tolerance against depolarizing noise. This undermines the otherwise very high loss tolerance promised by this scheme and introduces a direct tradeoff between these two error types. Let us consider a simple numerical example to illustrate this point. We analyzed a simple two-level tree structure with branching parameters $b_1=b_2=3$. This structure improves the effective loss rate for $\varepsilon\lesssim 0.195$. That is, below this threshold the effective loss rate is lower than the physical loss rate. Using the original protocol, without majority voting, this loss rate would increase a physical error rate of $p_\mathrm{error}=10^{-3}$ to an effective error rate on the lost qubit of $p_\mathrm{eff}\approx 4\times 10^{-3}$. With the introduction of majority voting this reduces to $p_\mathrm{eff}\approx 1.7\times 10^{-3}$. More importantly, there is a `break-even' point on the physical loss rate, below which there is no degradation in the effective error rate (i.e. $p_\mathrm{eff}\leq p_\mathrm{error}$). In this example this occurs at $\varepsilon\approx 0.1$, roughly half the in-principle loss tolerance rate. This leads to several conclusions. First, with the addition of majority voting this scheme is useful not only as a loss-tolerance technique, but also as a quantum error correction technique. Second, if we do not wish effective error rates to suffer, tolerable loss rates are significantly reduced.
\section{Loss tolerant parity states} \label{sec:parity} Finally we consider the Ralph \emph{et al.} \cite{bib:RalphHayes05} loss tolerant parity state scheme. In this scheme there are two levels of encoding. At the lower level, computational basis states are encoded as equal superpositions of odd or even parity states, \begin{eqnarray} \ket{0}_L&=&(\ket{+}^{\otimes n}+\ket{-}^{\otimes n})/\sqrt{2}\nonumber\\ \ket{1}_L&=&(\ket{+}^{\otimes n}-\ket{-}^{\otimes n})/\sqrt{2} \end{eqnarray} where $\ket\pm=(\ket{0}\pm\ket{1})/\sqrt{2}$, and $n$ is the level of parity encoding. These parity states are locally equivalent to maximally entangled GHZ states, and can therefore be regarded as $K_n$ graph states. Above the parity encoding is a level of $q$-fold redundant encoding, \begin{equation} \ket\psi_L=\alpha\ket{0}_L^{\otimes q}+\beta\ket{1}_L^{\otimes q}. \end{equation} One of the fundamental operations in this architecture is \emph{re-encoding}. Here new qubits are fused onto an existing parity state and the old ones measured out. This operation is used to `refresh' lost qubits, and is also required for the implementation of some quantum logic gates. The re-encoding procedure proceeds as follows. A `root' node is chosen, onto which a new redundant parity state is fused. All of the remaining qubits in the same level of redundant encoding as the root node are measured in the $Z$-eigenbasis. Additionally, a single qubit from every other level of the redundancy is measured in the $X$-eigenbasis. See Fig.~\ref{fig:parity}. Refer to Ref.~\cite{bib:RalphHayes05} for a detailed description. \begin{figure}
\caption{Re-encoding in the parity state scheme. A root qubit is chosen from the initial encoded state. Every other qubit in the same level of the redundant encoding is measured in the $Z$-eigenbasis, and a single qubit from each of the other levels is measured in the $X$-eigenbasis. The remaining root qubit is used to construct a new encoded state.}
\label{fig:parity}
\end{figure}
Upon re-encoding, $N=n+q-1$ qubits must be measured. Let us consider how errors propagate in this context. As before, we will assume that physical qubits are all subject to independent depolarizing noise characterized by error probability $p$. Because the state is maximally entangled, upon measurement the residual state will be depolarized if \emph{any} of the $N$ measured qubits were depolarized. Thus, the effective error rate on the residual state is given by \begin{equation} p'=1-(1-p)^N. \end{equation} Again we see exponential dependence of the effective error rate on the number of qubits measured, and therefore the size of the parity state. In the same vein as the previous examples, this significantly undermines the loss tolerance of the scheme since practical error tolerance requirements effectively limit us to using small parity states.
We now briefly describe a technique for suppressing error propagation. We rely on the same technique as previously -- we introduce redundant qubits to facilitate majority voting. Due to the complexity of analyzing this scheme we do not provide a quantitative analysis. However it should be clear from the discussion that the described modification ought to exhibit similar features to the previous examples.
We modify the scheme by introducing a new layer of redundant encoding underneath the parity states. Specifically, \begin{equation} \ket{0}\to\ket{0}^{\otimes m},\ \ket{1}\to\ket{1}^{\otimes m}. \end{equation} This new layer of redundant encoding is introduced asymmetrically. The redundancy is applied to all but one qubit from each parity state. The non-redundant qubits are where fusions are attempted. Thus, under the modified protocol only one fusion attempt per parity state is possible. This requires increasing $q$ to provide sufficient fusion opportunities to suppress loss to the required level. When unwanted parity qubits are measured out in the $Z$-eigenbasis the new level of redundancy allows for majority voting on each measurement. Furthermore, when parity qubits are measured in the $X$-eigenbasis, rather than measure a \emph{single} qubit from the respective parity state (as per the original protocol), we measure \emph{all} qubits and perform a majority vote on the outcomes. Note that this only requires a change in the protocol, and does not require changes to the encoding. It is obvious that these modifications will suppress measurement errors. However, while this is effective at suppressing noise rates, it does so at the expense of loss-tolerance. When any parity qubit is measured in the $X$-eigenbasis, if \emph{any} of the underlying redundant qubits were lost, the qubit will be dephased and the measurement result randomized. Thus, for an $X$-measurement to be successful, \emph{all} underlying redundant qubits must be present. As per the previous examples, this presents us with a tradeoff between loss and error rates. Higher loss rates imply a lower success probability when performing $X$-measurements. Thus, when a parity state is measured in the $X$-eigenbasis, the confidence of the majority vote is reduced, thereby increasing effective noise rates.
\section{Conclusion} \label{sec:conclusion} We have considered three well known protocols for tolerating gate failure and qubit loss. We demonstrated that while very effective at dealing with these particular error types, these schemes have the undesirable effect of magnifying other error types, namely depolarizing noise. In each case we discussed techniques for minimizing this effect. However, this introduces a tradeoff between loss/failure and depolarizing rates. In practical situations we always need some degree of tolerance against both these error types. This implies that in realistic scenarios these schemes are unlikely to achieve the loss/failure tolerance they are capable of in principle.
While we specifically considered three well-known protocols, we believe our results are likely to be applicable to other related protocols. For example, other state preparation strategies such as Nielsen's micro-cluster approach \cite{bib:Nielsen04} ought to exhibit similar error propagation properties, since they also rely on measuring out redundant qubits.
In conclusion, when designing gate failure or loss tolerant quantum computing protocols, it is extremely important to be mindful of error propagation characteristics. As we have demonstrated, specialized schemes which ignore these effects often boast misleadingly high loss and failure tolerance. While there are techniques for minimizing this problem, they significantly reduce the loss/failure tolerance of these schemes. Nonetheless, such schemes may be very useful for tolerating modest levels of loss and gate failure.
\begin{acknowledgments} We thank Michael Nielsen for the discussion that motivated this work, and Henry Haselgrove and Alex Hayes for helpful discussions. The use of majority voting in tree clusters to suppress error rates was first recognized by Daniel Browne, Terry Rudolph and Michael Varnava. This work was supported by the Australian Research Council and QLD State Government. We acknowledge partial support by the DTO-funded U.S. Army Research Office Contract No.~W911NF-05-0397. \end{acknowledgments}
\end{document} | arXiv | {
"id": "0701090.tex",
"language_detection_score": 0.8583791255950928,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{{\bf An Algorithmic Analysis of the\\Honey-Bee Game}\thanks{RF acknowledges support by the National Natural Science Foundation of China (No. 60973026), the Shanghai Leading Academic Discipline Project (project number B114), the Shanghai Committee of Science and Technology of China (09DZ2272800), and the Robert Bosch Foundation (Science Bridge China 32.5.8003.0040.0). GJW acknowledges support by the Netherlands Organisation for Scientific Research (NWO grant 639.033.403), and by BSIK grant 03018 (BRICKS: Basic Research in Informatics for Creating the Know\-ledge Society).}}
\author{\sc Rudolf Fleischer
\thanks{ School of Computer Science, IIPL, Fudan University,
Shanghai 200433, China. Email: {\tt rudolf@fudan.edu.cn}.}
\and
\sc Gerhard J.\ Woeginger
\thanks{ {\tt gwoegi@win.tue.nl}.
Department of Mathematics and Computer Science,
TU Eindhoven, P.O.\ Box 513, 5600 MB Eindhoven, Netherlands.}
} \date{} \maketitle
\begin{abstract} The {{\sc Honey-Bee}} game is a two-player board game that is played on a connected hexagonal colored grid or (in a generalized setting) on a connected graph with colored nodes. In a single move, a player calls a color and thereby conquers all the nodes of that color that are adjacent to his own current territory. Both players want to conquer the majority of the nodes. We show that winning the game is PSPACE-hard in general, NP-hard on series-parallel graphs, but easy on outerplanar graphs.
In the solitaire version, the goal of the single player is to conquer the entire graph with the minimum number of moves. The solitaire version is NP-hard on trees and split graphs, but can be solved in polynomial time on co-comparability graphs.
\noindent \emph{Keywords:} combinatorial game; computational complexity; graph problem. \end{abstract}
\section{Introduction} \label{s_intro}
The {{\sc Honey-Bee}} game is a popular two-player board game that shows up in many different variants and at many different places on the web (the game is best played on a computer). For a playable version we refer the reader for instance to Axel Born's web-page \cite{Bor09}; see Fig.~\ref{fig_born} for a screenshot. The playing field in {{\sc Honey-Bee}} is a grid of hexagonal honey-comb cells that come in various colors; the coloring changes from game to game. The playing field may be arbitrarily shaped and may contain holes, but must always be connected. In the beginning of the game, each player controls a single cell in some corner of the playing field. Usually, the playing area is symmetric and the two players face each other from symmetrically opposing starting cells. In every move a player may call a color $c$, and thereby gains control over all connected regions of color $c$ that have a common border with the area already under his control. The only restriction on $c$ is that it cannot be one of the two colors used by the two players in their last move before the current move, respectively. A player wins when he controls the majority of all cells. On Born's web-page \cite{Bor09} one can play against a computer, choosing from four different layouts for the playing field. The computer uses a simple greedy strategy: ``Always call the color $c$ that maximizes the immediate gain.'' This strategy is short-sighted and not very strong, and an alert human player usually beats the computer after a few practice matches.
\begin{figure}
\caption{Born's ``Biene''. The human player (starting from the top-left corner) is on the edge of losing against the computer (starting from the bottom-right corner).}
\label{fig_born}
\end{figure}
In this paper we perform a complexity study of the {{\sc Honey-Bee}} game when played by two players on some arbitrary connected graph instead of the hex-grid of the original game. We will show in Section~\ref{s_two} that {{\sc Honey-Bee-2-Players}} is NP-hard even on series-parallel graphs, and that it is PSPACE-complete in general. On outerplanar graphs, however, it is quite easy to compute a winning strategy.
In the \emph{solitaire} (single-player) version of {{\sc Honey-Bee}} the goal is to conquer the entire playing field as quickly as possible. Intuitively, a good strategy for the solitaire game will be close to a strong heuristic for the two-player game. For the solitaire version, our results draw a sharp separation line between easy and difficult cases. In particular, we show in Section~\ref{s_one} that {{\sc Honey-Bee-Solitaire}} is NP-hard for split graphs and for trees, but polynomial-time solvable on co-comparability graphs (which include interval graphs and permutation graphs). Thus, the complexity of the game is well-characterized for the class and subclasses of perfect graphs; see Fig.~\ref{fig_results} for a summary of our results.
\begin{figure}
\caption{Summary of the complexity results for {\sc Honey-Bee-Solitaire}. NP-complete problems have a solid frame, polynomial-time solvable problems have a dashed frame. The results for the graph classes in the three colored boxes imply all other results.}
\label{fig_results}
\end{figure}
\section{Definitions} \label{s_def}
We model {{\sc Honey-Bee}} in the following graph-theoretic setting. The playing field is a connected, simple, loopless, undirected graph $G=(V,E)$. There is a set $C$ of $k$ colors, and every node $v\in V$ is colored by some color $col(v)\in C$; we stress that this coloring does not need to be proper, that is, there may be edges $[u,v]\in E$ with $col(u)=col(v)$. For a color $c\in C$, the subset $V_c\subseteq V$ contains the nodes of color $c$. For a node $v\in V$ and a color $c\in C$, we define the \emph{color-$c$-neighborhood} $\Gamma(v,c)$ as the set of nodes in $V_c$ either adjacent to $v$ or connected to $v$ by a path of nodes of color $c$. Similarly, we denote by $\Gamma(W,c)=\bigcup_{w\in W}\Gamma(w,c)$ the color-$c$-neighborhood of a subset $W\subseteq V$. For a subset $W\subseteq V$ and a sequence $\gamma=\seq{\gamma_1,\ldots,\gamma_b}$ of colors in $C$, we define a corresponding sequence of node sets $W_1=W$ and $W_{i+1}=W_i\cup \Gamma(W_i,\gamma_i)$, for $1\le i\le b$. We say that sequence $\gamma$ started on $W$ \emph{conquers} the final node set $W_{b+1}$ in $b$ moves, and we denote this situation by $W\to_{\gamma}W_{b+1}$. The nodes in $V-W_{b+1}$ are called \emph{free} nodes.
In the \emph{solitaire} version of {\sc Honey-Bee}, the goal is to conquer the entire playing field with the smallest possible number of moves. Note that {{\sc Honey-Bee-Solitaire}} is trivial in the case of only two colors. But as we will see in Section~\ref{s_one}, the case of three colors can already be difficult.
\probl{{\sc Honey-Bee-Solitaire}} {A graph $G=(V,E)$; a set $C$ of $k$ colors and a coloring $col:V\to C$; a start node $v_0\in V$; and a bound $b$.} {Does there exist a color sequence $\gamma=\seq{\gamma_1,\ldots,\gamma_b}$ of length $b$ such that $\{v_0\}\to_{\gamma}V$?}
In the \emph{two-player} version of {{\sc Honey-Bee}}, the two players $A$ and $B$ start from two distinct nodes $a_0$ and $b_0$ and then extend their regions step by step by alternately calling colors. Player $A$ makes the first move. One round of the game consists of a move of $A$ followed by a move of $B$. Consider a round, where at the beginning the two players control node sets $W_A$ and $W_B$, respectively. If player $A$ calls color $c$, then he extends his region $W_A$ to $W^\prime_A=W_A\cup(\Gamma(W_A,c)-W_B)$. If afterwards player $B$ calls color $d$, then he extends his region $W_B$ to $W^\prime_B=W_B\cup(\Gamma(W_B,d)-W^\prime_A)$. Note that once a player controls a node, he can never lose it again.
The game terminates as soon as one player controls more than half of all nodes. This player wins the game. To avoid draws, we require that the number of nodes is odd. There are three important rules that constrain the colors that a player is allowed to call.
\begin{enumerate} \item[R1.] A player must never call the color that has just been called by the other player. \item[R2.] A player must never call the color that he has called in his previous move. \item[R3.] A player must always call a color that strictly enlarges his territory, unless rules R1 and R2 prevent him from doing so. \end{enumerate}
\begin{figure}
\caption{Player A (circled nodes) is leading with four captured nodes over player B (squared nodes) with only two captured nodes. Player B would next like to play black to capture all the white nodes in the next move. Without rule~R2, player A could prevent this by repeatedly playing black. }
\label{fig_rule_R2}
\end{figure}
What is the motivation for these three rules? Rule~R1 is a technical condition that arises from the graphical implementation~\cite{Bor09} of the game: Whenever a player calls a color $c$, his current territory is entirely recolored to color $c$. This makes it visually easier to recognize the territories controlled by both players. Rule~R2 prevents the players from permanently blocking some color for the opponent. Fig.~\ref{fig_rule_R2} shows a situation where rule~R2 actually prevents the game from stalling. Rule~R3 is quite delicate, and is justified by situations as depicted in Fig.~\ref{fig_rule_R3}. Rule~R3 guarantees that every game must terminate with either a win for player~A or a win for player~B. Note that rule~R2 is redundant except in the case when a player has no move to gain territory (see Fig.~\ref{fig_rule_R2}).
\begin{figure}
\caption{ Player A who controls the black node at the left end of the path loses if he calls dark-gray (and hence prefers to call white, light-gray, and black). Player B who controls the white node at the other end of the path loses if he calls light-gray (and hence prefers to call colors white, dark-gray, and black). Rule~R3 forces the players to move into the unoccupied territory. }
\label{fig_rule_R3}
\end{figure}
\probl{{\sc Honey-Bee-2-Players}} {A graph $G=(V,E)$ with an odd number of nodes; a set $C$ of colors and a coloring $col:V\to C$; two start nodes $a_0,b_0\in V$.} {Can player $A$ enforce a win when the game is played according to the above rules?}
Note that {{\sc Honey-Bee-2-Players}} is trivial in the case of only three colors: The players do not have the slightest freedom in choosing their next color, and always must call the unique color allowed by rules~R1 and~R2. However we will see in Section~\ref{s_two} that the case of four colors can already be difficult.
Finally we observe that calling a color $c$ always conquers all connected components induced by $V_c$ that are adjacent to the current territory. Hence an equivalent definition of the game could use a graph with node weights (that specify the size of the corresponding connected component) and a \emph{proper} coloring of the nodes. Any instance under the original definition can be transformed into an equivalent instance under the new definition by contracting each connected component of $V_c$,
for some $c$, into a single node whose weight equals the number of nodes in that component. However, we are interested in restrictions of the game to particular graph classes, some of which are not closed under edge contractions (as for instance the hex-grid graph of the original {{\sc Honey-Bee}} game).
\section{The Solitaire Game} \label{s_one}
In this section we study the complexity of finding optimally short color sequences for {{\sc Honey-Bee-Solitaire}}. We will show that this is easy for co-comparability graphs, while it is NP-hard for trees and split graphs. Since the family of co-comparability graphs contains interval graphs, permutation graphs, and co-graphs as sub-families, our positive result for co-comparability graphs implies all other positive results in Fig.~\ref{fig_results}.
A first straightforward observation is that {{\sc Honey-Bee-Solitaire}} lies in NP:
Any connected graph $G=(V,E)$ can be conquered in at most $|V|$ moves, and hence such a sequence of polynomially many moves can serve as an NP-certificate.
\subsection{The Solitaire Game on Co-Comparability Graphs} \label{sec1:cocomparability}
A \emph{co-comparability graph} $G=(V,E)$ is an undirected graph whose nodes $V$ correspond to the elements of some partial order $<$ and whose edges $E$ connect any two elements that are incomparable in that partial order, i.e., $[u,v]\in E$ if neither $u<v$ nor $v<u$ holds. For simplicity, we identify the nodes with the elements of the partial order. Golumbic {{et al.}}~\cite{GoRoUr83} showed that co-comparability graphs are exactly the intersection graphs of continuous real-valued functions over some interval $I$. If two function curves intersect, the corresponding elements are incomparable in the partial order; otherwise, the curve that lies completely above the other one corresponds to the larger element in the partial order. The function graph representation readily implies that the class of co-comparability graphs is closed under edge contractions. Therefore, we may w.l.o.g.~restrict our analysis of {{\sc Honey-Bee-Solitaire}} to co-comparability graphs with a proper node coloring, i.e., adjacent nodes have distinct colors (in the solitaire game we do not care about the weight of a node after an edge contraction). In this case, every color class is totally ordered because incomparable node pairs have been contracted.
Consider an instance of {{\sc Honey-Bee-Solitaire}} with a minimal start node $v_0$ (in the partial order on $V$); a maximal start node could be handled similarly. The function graph representation implies the following observation.
\begin{observation} \label{obs_smaller} Conquering a node will simultaneously conquer all smaller nodes of the same color.
$\Box$ \bigbreak \end{observation}
For any color $c$, let $Max(c)$ denote the largest node of color $c$. By Obs.~\ref{obs_smaller}, it suffices to find the shortest color sequence conquering all nodes $Max(c)$, for all colors $c$. We can do that by a simple shortest path computation. We assign every node $Max(c)$ weight $0$, and all other nodes weight $1$. Then we compute a shortest path (with respect to the node-weights) from $v_0$ to every node $Max(c)$ that is a \emph{maximal element} in the partial order (which is actually exactly the set of all maximal elements). Let $OPT$ denote the smallest cost over all such paths.
For a color sequence $\gamma=\seq{\gamma_1,\ldots,\gamma_b}$, we define the \emph{length} of $\gamma$ as $|\gamma|=b$. We also define the \emph{essential length} $ess(\gamma)$ of $\gamma$ as
$|\gamma|$ minus the number of steps where $\gamma$
conquers a maximal node $Max(c)$ of some color class $c$. Obviously, $|\gamma|=ess(\gamma)+k$. Note that $OPT$ is the minimal essential cost of any color sequence conquering one of the maximal nodes.
\begin{lemma} \label{thm_opt} The optimal solution for {{\sc Honey-Bee-Solitaire}} has cost $OPT+k$. \end{lemma}
\begin{proof} Let $\gamma$ be a shortest color sequence conquering the entire graph starting at $v_0$. After conquering $v$, $\gamma$ only needs to conquer all free nodes $Max(c)$
to conquer the entire graph. Thus, $|\gamma| = ess(\gamma) + k \ge OPT+k$.
$\Box$ \bigbreak \end{proof}
\begin{theorem} \label{th_cocomp} {{\sc Honey-Bee-Solitaire}} starting at an extremal node $v_0$ can be solved in polynomial time on co-comparability graphs. \end{theorem}
\begin{proof} Given the co-comparability graph $G$, we can compute the underlying partial order $<$ in polynomial time \cite{GoRoUr83}. Assigning the weights and solving one single source shortest path problem starting at $v_0$ also takes polynomial time.
$\Box$ \bigbreak \end{proof}
We can also formulate this algorithm as a dynamic program. For any node $v$, let $D(v)$ denote the essential length of the shortest color sequence $\gamma$ that can conquer $v$ when starting at $v_0$. For any color $c$, let $min_v(c)$ denote the smallest node of color $c$ connected to $v$, if such nodes exist. Then we can compute $D(v)$ recursively as follows: $$D(v_0) = 0$$ and $$D(v) = \min_{c} (D(min_v(c)) + \delta_v) \>,$$ where $D(min_v(c))=\infty$ if $min_v(c)$ is undefined, and $\delta_v=0$ ($1$) if $v$ is (not) a maximal node for some color class.
Clearly, this dynamic program simulates the shortest path computation of our first algorithm and we have $OPT = \min_{v}(D(v)+k)$, where we minimize over all maximal nodes $v$. We now extend the dynamic program to the case that $v_0$ is not an extremal element. The problem is that we now must extend our territory in two directions. If we choose a move that makes good progress upwards it may make little progress downwards, or vice versa. In particular, the optimal strategy cannot be decomposed into two independent optimal strategies, one conquering upwards and one conquering downwards. Analogously to the algorithm above, for a color $c$ define $Min(c)$ as the smallest node of color $c$, and $max_v(c)$ as the largest node of color $c$ connected to a node $v$.
Unfortunately, we must now redefine the essential length of a color sequence $\gamma$. In our original definition, we did not count coloring steps that conquered maximal elements of some color class. This is intuitively justified by the fact that these steps must be done by any color sequence conquering the entire graph at some time, therefore it is advantageous to do them as early as possible (which is guaranteed by giving these moves cost 0). But now we must also consider the minimal nodes of each color class. An optimal sequence conquering the entire graph will at some time have conquered a minimal node and a maximal node. Afterwards, it will only call extremal nodes for some color class. If both extremal nodes of a color class are still free, we only need \emph{one} move to conquer both simultaneously. If one of them had been captured earlier, we still need to conquer the other one. This indicates that we should charge 1 for the first extremal node conquered while the second one should be charged 0, as before. If both nodes are conquered in the same move, we should also charge 0. Therefore, we now define the \emph{essential length} $ess(\gamma)$ of $\gamma$ as
$|\gamma|$ minus the number of steps where $\gamma$ conquers the second extremal node of some color class.
For a node $v$ below $v_0$ or incomparable to $v_0$ and a node $w$ above $v_0$ or incomparable to $v_0$ let $D(v,w)$ denote the essential length of the shortest color sequence $\gamma$ that can conquer $v$ and $w$ when starting at $v_0$. Note that we do not need to keep track of which first extremal nodes of a color class have been conquered because we can deduce this from the two nodes $v$ and $w$ currently under consideration. In particular, we can compute $D(v,w)$ recursively as follows: $$D(v_0,v_0) = 0$$ and $$D(v,w) = \min_{c} (D(v,min_w(c)) + \delta_w(v), D(max_v(c),w) + \delta_v(w)) \>,$$ where $\delta_v(w)=0$ if and only if $w$ is an extremal node of some color class $c$ and the other extremal node of color class $c$ is either between $v$ and $w$, or incomparable to either $v$ or $w$, or both
(it was either conquered earlier, or it will be conquered in this step); otherwise, $\delta_v(w)=1$. Obviously, $|\gamma|=ess(\gamma)+k$.
\begin{lemma} \label{thm_opt_general} The optimal solution for {{\sc Honey-Bee-Solitaire}} has cost $\min_{v,w}(D(v,w)+k)$, where we minimize over all minimal nodes $v$ and all maximal nodes $w$. \end{lemma}
\begin{proof} Let $\gamma$ be a shortest color sequence conquering the entire graph starting at $v_0$. Let $v$ be the first minimal node conquered by $\gamma$ and $w$ the first maximal node. After conquering $v$ and $w$, $\gamma$ only needs to conquer all free nodes $Max(c)$
to conquer the entire graph. Thus, $|\gamma| \ge D(v,w) + k$.
$\Box$ \bigbreak \end{proof}
\begin{theorem} \label{th_cocomp_general} {{\sc Honey-Bee-Solitaire}} can be solved in polynomial time on co-comparability graphs.
$\Box$ \bigbreak \end{theorem}
\subsection{The Solitaire Game on Split Graphs} \label{ss_split}
A \emph{split graph} is a graph whose node set can be partitioned into an induced clique and into an induced independent set. We will show that {{\sc Honey-Bee-Solitaire}} is NP-hard on split graphs. Our reduction is from the NP-hard {\tt Feedback Vertex Set} ({\sc FVS}) problem in directed graphs; see for instance Garey and Johnson \cite{GaJo79}.
\probl{{\sc FVS}}
{A directed graph $(X,A)$; a bound $t<|X|$.}
{Does there exist a subset $X^\prime\subseteq X$ with $|X^\prime|=t$ such that the directed graph induced by $X-X^\prime$ is acyclic?}
\begin{theorem} \label{thm_split} {{\sc Honey-Bee-Solitaire}} on split graphs is NP-hard. \end{theorem}
\begin{proof} Consider an instance $(X,A,t)$ of {\sc FVS}. To construct an instance $(V,E,b)$ of {\sc Honey-Bee-Solitaire}, we first build a clique from the nodes in $X$ together with a new node $v_0$, the start node of {\sc Honey-Bee-Solitaire}, where each node $x\in X+v_0$ has a different color $c_x$. Next, we build the independent set. For every arc $(x,y)\in A$, we introduce a corresponding node $v(x,y)$ of color
$c_y$ which is only connected to node $x$ in the clique, i.e., it has degree one. Finally, we set $b=|X|+t$. We claim that the constructed instance of {{\sc Honey-Bee-Solitaire}} has answer YES, if and only if the instance of {{\sc FVS}} has answer YES.
Assume that the {{\sc FVS}} instance has answer YES. Let $X^\prime$ be a smallest feedback set whose removal makes $(X,A)$ acyclic. Let $\pi$ be a topological order of the nodes in $X-X^\prime$, and let $\tau$ be an arbitrary ordering of the nodes in $X^\prime$. Consider the color sequence $\gamma$ of length $|X|+t$ that starts with $\tau$, followed by $\pi$, and followed by $\tau$ again. We claim that $\{v_0\}\to_{\gamma}V$. Indeed, $\gamma$ first runs through $\tau$ and $\pi$ and thereby conquers all clique nodes. Every independent set node $v(x,y)$ with $y\in X^\prime$ is conquered during the second transversal of $\tau$. Every independent set node $v(x,y)$ with $y\in X-X^\prime$ is conquered during the transversal of $\pi$, since $\pi$ first conquers $x$ with color $c_x$, and afterwards $v(x,y)$ with color $y$.
Next assume that the instance of {{\sc Honey-Bee-Solitaire}} has answer YES. Let $\gamma$ be a color sequence of length $b=|X|+t$ conquering $V$. Define $X^\prime$ as the set of nodes $x$ such that color $c_x$ occurs at least twice in $\gamma$; clearly, $|X^\prime|\le t$. Consider an arc $(x,y)\in A$ with $x,y\in X-X^\prime$. Since $\gamma$ contains color $c_y$ only once, it must conquer node $v(x,y)$ of color $c_y$ after node $x$ of color $c_x$. Hence, $\gamma$ induces a topological order of $X-X^\prime$.
$\Box$ \bigbreak \end{proof}
The construction in the proof above uses linearly many colors. What about the case of few colors? On split graphs, {{\sc Honey-Bee-Solitaire}} can always be solved by traversing the color set $C$ twice; the first traversal conquers all clique nodes, and the second traversal conquers all remaining free independent set nodes. Thus, every split graph can be completely conquered in at most $2|C|$ steps. If there are only few colors, we can simply check all color sequences of this
length $2|C|$.
\begin{theorem} \label{thm_split_const} If the number of colors is bounded by a fixed constant, {{\sc Honey-Bee-Solitaire}} on split graphs is polynomial-time solvable.
$\Box$ \bigbreak \end{theorem}
\subsection{The Solitaire Game on Trees} \label{ss_tree}
In this section we will show that {{\sc Honey-Bee-Solitaire}} is NP-hard on trees, even if there are only three colors. We reduce {{\sc Honey-Bee-Solitaire}} from a variant of the {\tt Shortest Common Supersequence} ({\sc SCS}) problem which is known to be NP-complete (see Middendorf~\cite{Mid94}).
\probl{{\sc SCS}} {A positive integer $t$; finite sequences $\sigma_1,\ldots,\sigma_s$ with elements from $\{0,1\}$ with the following properties: (i) All sequences have the same length. (ii) Every sequence contains exactly two 1s, and these two 1s are separated by at least one 0.} {Does there exist a sequence $\sigma$ of length $t$ that contains $\sigma_1,\ldots,\sigma_s$ as subsequences?}
Middendorf's hardness result also implies the hardness of the following variant of {\sc SCS}:
\probl{{\tt Modified SCS} ({\sc MSCS})} {A positive integer $t$; finite sequences $\sigma_1,\ldots,\sigma_s$ with elements from $\{0,1,2\}$ with the following property: In every sequence any two consecutive elements are distinct, and no sequence starts with 2.} {Does there exist a sequence $\sigma$ of length $t$ that contains $\sigma_1,\ldots,\sigma_s$ as subsequences?}
\begin{theorem} \label{thm_mscs} {{\sc MSCS}} is NP-complete. \end{theorem}
\begin{proof} Here is a reduction from {{\sc SCS}} to {{\sc MSCS}}. Consider an arbitrary sequence $\tau$ with elements from $\{0,1\}$. We define $f(\tau)$ as the sequence we obtain from replacing every occurrence of the element 0 in $\tau$ by two consecutive elements 0 and 2. Now consider an instance $(\sigma_1,\ldots,\sigma_s,t)$ of {\sc SCS}. We construct an instance $(\sigma_1^\prime,\ldots,\sigma_s^\prime,t^\prime)$ of {{\sc MSCS}} by setting $\sigma^\prime_i=f(\sigma_i)$, for $1\le i\le s$. Then, for any sequence $\sigma$ with elements from $\{0,1\}$, $\sigma$ is a common supersequence of $\sigma_1,\ldots,\sigma_s$ if and only if $f(\sigma)$ is a common supersequence of $\sigma^\prime_1,\ldots,\sigma^\prime_s$. This implies the NP-hardness of {\sc MSCS}.
$\Box$ \bigbreak \end{proof}
\begin{theorem} \label{thm_tree} {{\sc Honey-Bee-Solitaire}} is NP-hard on trees, even in case of only three colors. \end{theorem}
\begin{proof} We reduce {{\sc MSCS}} to {{\sc Honey-Bee-Solitaire}} on trees.
Consider an instance $(\sigma_1,\ldots,\sigma_s,t)$ of {\sc MSCS}. We use color set $C=\{0,1,2\}$. We first construct a root $v_0$ of color $2$. Then we attach a path of length $|\sigma_i|$ to $v_0$ for each sequence $\sigma_i$, where an element $j$ is colored $j$. See the left half of Fig.~\ref{fig_sp} for an example. Finally, we set $b=t$. It is straightforward to see that the constructed instance of {{\sc Honey-Bee-Solitaire}} has answer YES if and only if the instance of {{\sc MSCS}} has answer YES.
$\Box$ \bigbreak \end{proof}
\section{The Two-Player Game} \label{s_two}
In this section we study the complexity of the two-player game. While on outerplanar graphs the players can compute their winning strategies in polynomial time, this problem is NP-hard for series-parallel graphs with four colors, and PSPACE-complete with four colors on arbitrary graphs. Our positive result for outerplanar graphs works for an arbitrary number of colors. Our negative results work for four colors, which is the strongest possible type of result (recall that instances with three colors are trivial to solve).
\subsection{The Two-Player Game on Outer-Planar Graphs} \label{ss_outerplanar}
A graph is \emph{outer-planar} if it contains neither $K_4$ nor $K_{2,3}$ as a minor. Outer-planar graphs have a planar embedding in which every node lies on the boundary of the so-called \emph{outer face}. For example, every tree is an outer-planar graph.
Consider an outer-planar graph $G=(V,E)$ as an instance of {{\sc Honey-Bee-2-Players}} with starting nodes $a_0$ and $b_0$ in $V$, respectively. The starting nodes divide the nodes on the boundary of the outer face $F$ into an upper chain $u_1,\ldots,u_s$ and a lower chain $\ell_1,\ldots,\ell_t$, where $u_1$ and $\ell_1$ are the two neighbors of $a_0$ on $F$, while $u_s$ and $\ell_t$ are the two neighbors of $b_0$ on $F$. We stress that this upper and lower chain are not necessarily disjoint (for instance, articulation nodes will occur in both chains).
Now consider an arbitrary situation in the middle of the game. Let $U$ (respectively $L$) denote the largest index $k$ such that player $A$ has conquered node $u_k$ (respectively node $\ell_k$). See Fig.~\ref{fig_outerplanar} to illustrate these definitions and the following lemma.
\begin{lemma} \label{thm_outerplanar_conquer} Let $X$ denote the set of nodes among $u_1,\ldots,u_U$ and $\ell_1,\ldots,\ell_L$ that currently do neither belong to $A$ nor to $B$. Then no node in $X$ can have a neighbor among $u_{U+1},\ldots,u_s,b_0,\ell_t,\ldots,\ell_{L+1}$. \end{lemma}
\begin{proof} The existence of such a node in $X$ would lead to a $K_4$-minor in the outer-planar graph.
$\Box$ \bigbreak \end{proof}
\begin{figure}
\caption{An outerplanar graph with start nodes $a_0$ and $b_0$. Player $A$ (circled nodes) has conquered the light-gray colored nodes, i.e., $U=2$ and $L=2$. Eventually, $A$ will also conquer $\ell_1$, since Player $B$ cannot reach it.}
\label{fig_outerplanar}
\end{figure}
\begin{theorem} \label{thm_outerplanar} {{\sc Honey-Bee-2-Players}} on outer-planar graphs is polynomial-time solvable. \end{theorem}
\begin{proof} The two indices $U$ and $L$ encode all necessary information on the future behavior of player $A$. Eventually, he will own all nodes $u_1,\ldots,u_U$ and $\ell_1,\ldots,\ell_L$, and the possible future expansions of his area beyond $u_U$ and $\ell_L$ only depend on $U$ and $L$. Symmetric observations hold true for player $B$.
As every game situation can be concisely described by just four indices, there is only a polynomial number $O(|V|^4)$ of relevant game situations. The rest is routine work in combinatorial game theory: We first determine the winner for every end-situation, and then by working backwards in time we can determine the winners for the remaining game situations.
$\Box$ \bigbreak \end{proof}
\subsection{The Two-Player Game on Series-Parallel Graphs} \label{ss_sp}
A graph is \emph{series-parallel} if it does not contain $K_4$ as a minor. Equivalently, a series-parallel graph can be constructed from a single edge by repeatedly doubling edges, or removing edges, or replacing edges by a path of two edges with a new node in the middle of the path. We stress that we do not know whether the two-player game on series-parallel graphs is contained in the class NP (and we actually see no reason why it should lie in NP); therefore the following theorem only states NP-hardness.
\begin{theorem} \label{thm_sp} For four (or more) colors, problem {{\sc Honey-Bee-2-Players}} on series-parallel graphs is NP-hard. \end{theorem}
\begin{proof} We use the color set $C=\{0,1,2,3\}$. A central feature of our construction is that player $B$ will have no real decision power, but will only follow the moves of player $A$: If player $A$ starts a round by calling color $0$ or $1$, then player $B$ must follow by calling the other color in $\{0,1\}$ (or waste his move). And if player $A$ starts a round by calling color $2$ or $3$, then player $B$ must call the other color in $\{2,3\}$ (or waste his move). In the even rounds the players will call the colors in $\{0,1\}$ and in the odd rounds they will call the colors in $\{2,3\}$. Both players are competing for a set of honey pots in the middle of the battlefield, and need to get there as quickly as possible. If a player deviates from the even-odd pattern indicated above, he might perhaps waste his move and delay the game by one round (in which neither player comes closer to the honey pots), but this remains without further impact on the outcome of the game.
The proof is by reduction from the supersequence problem {{\sc SCS}} with binary sequences; see Section~\ref{ss_tree}. Consider an instance $(\sigma_1,\ldots,\sigma_s,t)$ of {{\sc SCS}}, and let $n$ denote the common length of all sequences $\sigma_i$. We first construct two start nodes $a_0$ and $b_0$ of colors $2$ and $3$, respectively. For each sequence $\sigma_i$ with $1\le i\le s$ we do the following:
\begin{itemize} \item We construct a path $P_i$ that consists of $2n-1$ nodes and that is attached to $a_0$: The $n$ nodes with odd numbers mimic sequence $\sigma_i$, while the $n-1$ nodes with even numbers along the path all receive color $2$. The first node of $P_i$ is adjacent to $a_0$, and its last node is connected to a so-called honey pot $H_i$.
\item The \emph{honey pot} $H_i$ is a long path consisting of $4st$ nodes of color $3$. Intuitively, we may think of a honey pot as a single node of large weight, because conquering one of the nodes will simultaneously conquer the entire path.
\item Every honey pot $H_i$ can also be reached from $b_0$ by another path $Q_i$ that consists of $2t-1$ nodes. Nodes with odd numbers get color $0$, and nodes with even numbers get color $3$. The first node of $Q_i$ is adjacent to $b_0$, and its last node is connected to $H_i$. Furthermore, we create for each odd-numbered node (of color $0$) a new twin node of color $1$ that has the same two neighbors as the color $0$ node. Note that for every path $Q_i$ there are $t$ twin pairs. \end{itemize}
Finally we create a private honey pot $H_B$ for player $B$, that is connected to node $b_0$ and that consists of $4s(s-1)t+(2n-1)s$ nodes of color $2$. This completes the construction; see Fig.~\ref{fig_sp} for an example.
Assume that the {{\sc SCS}} instance has answer YES. During his first $2t-1$ steps, player $B$ can only conquer the paths $Q_i$ and his private honey pot $H_B$. At the same time, player $A$ can conquer all paths $P_i$ by calling color $2$ in his even moves and by following a shortest 0-1 supersequence in his odd moves. Then, in round $2t$ player $A$ will simultaneously conquer all the honey pots $H_i$ with $1\le i\le s$. This gives $A$ a territory of at least $1+(2n-1)s+4s^2t$ nodes, and $B$ a smaller territory of at most $1+(3t-1)s+4s(s-1)t+(2n-1)s$ nodes. Hence $A$ can enforce a win.
Next assume that player $A$ has a winning strategy. Player $B$ can always conquer his starting node $b_0$ and his private honey pot $H_B$. If $B$ also manages to conquer one of the pots $H_i$, then he gets a territory of at least $1+4s(s-1)t+(2n-1)s+4st$ nodes and surely wins the game. Hence player $A$ can only win if he conquers all $s$ honey pots $H_i$. To reach them before player $B$ does, player $A$ must conquer them within his first $2t$ moves. In every odd round, player $A$ will call a color $0$ or $1$ and player $B$ will call the other color in $\{0,1\}$. Hence, in the even rounds, colors $0$ and $1$ are forbidden for player $A$, and the only reasonable move is to call color $2$. Note that the slightest deviation of these forced moves would give player $B$ a deadly advantage. In order to win, the odd moves of player $A$ must induce a supersequence of length at most $t$ for all sequences $\sigma_i$. Therefore, the {{\sc SCS}} instance has answer YES.
$\Box$ \bigbreak \end{proof}
\begin{figure}\label{fig_sp}
\end{figure}
\subsection{The Two-Player Game on Arbitrary Graphs} \label{ss_pspace}
In this section we will show that problem {{\sc Honey-Bee-2-Players}} is PSPACE-complete on arbitrary graphs. Our reduction is from the PSPACE-complete {\tt Quantified Boolean Formula} ({\sc QBF}) problem; see for instance Garey \& Johnson \cite{GaJo79}.
\probl{{\sc QBF}} {A quantified Boolean formula with $2n$ variables in conjunctive normal form: $\exists x_1\forall x_2\cdots\exists x_{2n-1}\forall x_{2n} \wedge_j C_j$, where the $C_j$ are clauses of the form $\vee_k l_{jk}$, where the $l_{jk}$ are literals.} {Is the formula true?}
\begin{theorem} \label{thm_pspace} For four (or more) colors, problem {{\sc Honey-Bee-2-Players}} on arbitrary graphs is PSPACE-complete. \end{theorem}
\begin{proof} We reduce from {{\sc QBF}}. Let $F=\exists x_1\forall x_2\cdots\exists x_{2n-1}\forall x_{2n} \bigwedge_j C_j$ be an instance of {\sc QBF}. We construct a bee graph $G_F=(V,E)$ with four colors (white, light-gray, dark-gray, and black) such that player $A$ has a winning strategy if and only if $F$ is true. Let $a_0$ (colored light-gray) and $b_0$ (colored dark-gray) denote the start nodes of players $A$ and $B$, respectively.
Each player controls a \emph{pseudo-path}, that is, a path where some nodes may be duplicated as parallel nodes in a diamond-shaped structure; see Fig.~\ref{fig_var}. A so-called \emph{choice pair} consists of a node on a pseudo-path together with some duplicated node in parallel. The start nodes are at one end of the respective pseudo-paths, and the players can conquer the nodes on their own path without interference from the other player. However, they must do so in a timely manner because either path ends at a humongous \emph{honey pot}, denoted respectively by $H_A$ and $H_B$. A honey pot is a large clique of identically-colored nodes (we may think of it as a single node of large weight, because conquering one node will simultaneously conquer the entire clique). Both honey pots have the same size but different colors, namely black ($H_A$) and white ($H_B$), and they are connected to each other by an edge. Consequently, both players must rush along their pseudo-paths as quickly as possible to reach their honey pot before the opponent can reach it and to prevent the opponent from winning by conquering both honey pots. The last nodes before the honey pots are denoted by $a_f$ and $b_f$, respectively. They separate the last variable gadgets (described below) from the honey pots.
\begin{figure}
\caption{The variable gadget in the proof of Thm.~\ref{thm_pspace}.}
\label{fig_var}
\end{figure}
Fig.~\ref{fig_var} shows an overview of the pseudo-paths and one \emph{variable gadget} in detail. A variable gadget is a part of the two pseudo-paths corresponding to a pair of variables $\exists x_{2i-1} \forall x_{2i}$, for some $i\ge1$. For player $A$, the gadget starts at node $a_{i-1}$ with a choice pair $a_{2i-1}^F$ and $a_{2i-1}^T$, colored white and black, respectively. The first node conquered by $A$ will determine the truth value for variable $x_{2i-1}$. In the same round, player $B$ has a choice on his pseudo-path $P_B$ between nodes $b_{2i-1}^F$ and $b_{2i-1}^T$. Since these nodes have the same color as $A$'s choices in the same round, $B$ actually does not have a choice but must select the other color not chosen by $A$.
Three rounds later, player $B$ has a choice pair $b_{2i}^F$ and $b_{2i}^T$, assigning a truth value to variable $x_{2i}$. In the next step (which is in the next round), player $A$ has a choice pair $a_{2i}^F$ and $a_{2i}^T$ with the same colors as $B$'s choice pair for $x_{2i}$. Again, this means that $A$ does not really have a choice but must select the color not chosen by $B$ in the previous step. Since we want $A$ to conquer those clauses containing a literal set to true by player $B$, the colors in $B$'s choice pair have been switched, i.e., $b_{2i}^F$ is black and $b_{2i}^T$ is white.
Note that all the nodes $a_0,a_1,\ldots,a_n$ are light-gray and all the nodes $b_0,b_1,\ldots,b_n$ are dark-gray. This allows us to concatenate as many variable gadgets as needed. Further note that $a_f$ is white, while $b_f$ is light-gray.
The \emph{clause} gadgets are very simple. Each clause $C_j$ corresponds to a small honey pot $H_j$ of color white. The size of the small honey pots is smaller than the size of the large honey pots $H_A$ and $H_B$, but large enough such that player $A$ loses if he misses one of them. Player $A$ should conquer $H_j$ if and only if $C_j$ is true in the assignment chosen by the players while conquering their respective pseudo-paths. We could connect $a_{2i-1}^T$ directly with $H_j$ if $C_j$ contains literal $x_{2i-1}$, however then player $A$ could in subsequent rounds shortcut his pseudo-path by entering variable gadgets for the other variables in $C_j$ from $H_j$. To prevent this from happening, we place waiting gadgets between the variable gadgets and the clauses.
Let $a_{k}^\star$ denote the node on $P_A$ right after the choice pair $a_k^F$ and $a_k^T$, for $k=1,\ldots,2n$; similarly, $b_k^\star$ are the nodes on $P_B$ right after $B$'s choice pairs. A \emph{waiting gadget} $W_k$ consists of two copies $W_k^F$ and $W_k^T$ of the sub-path of $P_A$ starting at $a_k^\star$ and ending at $a_n$, see Fig.~\ref{fig_wait}. If clause $C_j$ contains literal $x_k$, $H_j$ is connected to the node $w_n^T$ corresponding to $a_n$ in $W_k^T$; if $C_j$ contains literal $\overline{x_k}$, $H_j$ is connected to the node $w_n^F$ corresponding to $a_n$ in $W_k^F$. If $k=2i-1$ (i.e., we have an existential variable $x_{2i-1}$ whose value is assigned by player $A$), then $a_{2i-1}^F$ and $b_{2i-1}^F$ are connected to $w_{2i-1}^{\star F}$, and $a_{2i-1}^T$ and $b_{2i-1}^T$ are connected to $w_{2i-1}^{\star T}$. If $k=2i$ (i.e., we have a universal variable $x_{2i}$ whose value is assigned by player $B$), then $a_{2i}^F$ and $b_{2i}^\star$ are connected to $w_{2i}^{\star F}$, and $a_{2i}^T$ and $b_{2i-1}^\star$ are connected to $w_{2i}^{\star T}$.
\begin{figure}
\caption{The waiting gadgets for existential variables ($W_{2i-1}^F$ and $W_{2i-1}^T$, the two top paths) and universal variables ($W_{2i}^F$ and $W_{2i}^T$, the two bottom paths) in the proof of Thm.~\ref{thm_pspace}. Note that usually only one of the two waiting paths $W_k^F$ or $W_k^T$ would be connected to $H_j$ because we may assume that a clause does not contain $x_k$ and $\overline{x_k}$ at the same time.}
\label{fig_wait}
\end{figure}
Finally, we connect $b_f$ with all clause honey pots $H_j$ to give player $B$ the opportunity to conquer all those clauses that contain no true literal. This completes the construction of $G_F$. Fig.~\ref{fig_example} shows the complete graph $G_F$ for a small example formula $F$.
We claim that player $A$ has a winning strategy on $G_F$ if and only if formula $F$ is true. It is easy to verify that player $A$ can indeed win if $F$ is true. All he has to do is to conquer those nodes in his existential choice pairs corresponding to the variable values in a satisfying assignment for $F$. For the existential variables, he has full control to select any value, and for the universal variables he must pick the opposite color as selected by player $B$ in the previous step, which corresponds to setting the variable to exactly the value that player $B$ has selected. Hence player $B$ can block a move of player $A$ by appropriately selecting a value for a universal variable. Note that no other blocking moves of player $B$ are advantageous: If $B$ blocks $A$'s next move by choosing a color that does not make progress on his own pseudo-path, then $A$ will simply make an arbitrary waiting move and then in the next round $B$ cannot block $A$ again. When player $A$ conquers node $a_n$, he will simultaneously conquer the last nodes in all waiting gadgets corresponding to true literals. Since every clause contains a true literal for a satisfying assignment, player $A$ can then in the next round conquer $a_f$ together with all clause honey pots (which all have color white). Player $B$ will respond by conquering $b_f$, and the game ends with both players conquering their own large honey pots $H_A$ and $H_B$, respectively. Since player $A$ got all clause honey pots, he wins.
To make this argument work, we must carefully choose the sizes of the honey pots. Each pseudo-path contains $9n+1$ nodes, of which at most $n$ can be conquered by the other player. The waiting gadgets contain two paths of length $9k+6$ for existential variables and $9k+1$ for universal variables. At the end, player $A$ will have conquered one of the two paths completely and maybe some parts of the sibling path, that is, we do not know exactly the final owner of less than $n^2$ nodes. The clause honey pots should be large enough to absorb this fuzziness, which means it is sufficient to give them $2n^2$ nodes. The honey pots $H_A$ and $H_B$ should be large enough to punish any foul play by the players, that is, when they do not strictly follow their pseudo-paths. It is sufficient to give them $2n^3$ nodes.
To see that $F$ is true if player $A$ has a winning strategy, note that player $A$ must strictly follow his pseudo-path, as otherwise player $B$ could beat him by reaching the large honey pots first. Thus player $A$'s strategy induces a truth assignment for the existential variables. Similarly, player $B$'s strategy induces a truth assignment for the universal variables. Player $A$ can only win if he also conquers all clause honey pots, and hence the players must have chosen truth values that make at least one literal per clause true. This means that formula $F$ is satisfiable.
$\Box$ \bigbreak \end{proof}
\section{Conclusions} \label{s_conclusion}
We have modeled the Honey Bee game as a combinatorial game on colored graphs. For the solitaire version, we have analyzed the complexity on many classes of perfect graphs. For the two player version, we have shown that even the highly restricted case of series-parallel graphs is hard to tackle. Our results draw a clear separating line between easy and hard variants of these problems.
\begin{figure}
\caption{The reduction in the proof of Thm.~\ref{thm_pspace} would produce this graph for the formula $F=(x_1\vee x_2\vee x_3) \wedge (x_1\vee \overline{x_2}\vee x_4) \wedge (\overline{x_2}\vee x_3\vee\overline{x_4})$.}
\label{fig_example}
\end{figure}
\end{document} | arXiv | {
"id": "1102.3025.tex",
"language_detection_score": 0.8836326599121094,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Quantum advantage in training binary neural networks}
\author{Yidong Liao} \affiliation{Institute for Quantum Science and Engineering, Department of Physics, Southern University of Science and Technology (SUSTech), Shenzhen, China}
\author{Daniel Ebler } \affiliation{Institute for Quantum Science and Engineering, Department of Physics, Southern University of Science and Technology (SUSTech), Shenzhen, China} \affiliation{Wolfson College, University of Oxford, Linton Road, Oxford OX2 6UD, UK}
\author{Feiyang Liu} \affiliation{Institute for Quantum Science and Engineering, Department of Physics, Southern University of Science and Technology (SUSTech), Shenzhen, China}
\author{Oscar Dahlsten} \thanks{Correspondence: Oscar Dahlsten \\(dahlsten@sustech.edu.cn)} \affiliation{Institute for Quantum Science and Engineering, Department of Physics, Southern University of Science and Technology (SUSTech), Shenzhen, China} \affiliation{Center for Quantum Computing, Peng Cheng Laboratory, Shenzhen, 518000, China} \affiliation{London Institute for Mathematical Sciences, 35a South Street Mayfair, London W1K 2XF, UK} \affiliation{Wolfson College, University of Oxford, Linton Road, Oxford OX2 6UD, UK}
\begin{abstract} The performance of a neural network for a given task is largely determined by the initial calibration of the network parameters. Yet, it has been shown that the calibration, also referred to as training, is generally NP-complete. This includes networks with binary weights, an important class of networks due to their practical hardware implementations. We therefore suggest an alternative approach to training binary neural networks. It utilizes a quantum superposition of weight configurations. We show that the quantum training guarantees with high probability convergence towards the globally optimal set of network parameters. This resolves two prominent issues of classical training: (1) the vanishing gradient problem and (2) common convergence to suboptimal network parameters. Moreover we achieve a provable polynomial---sometimes exponential---speedup over classical training for certain classes of tasks. We design an explicit training algorithm and implement it in numerical simulations.
\end{abstract}
\pacs{}
\maketitle
\section*{Introduction} Artificial Neural Networks (NNs) ~\cite{grossberg1988neural,haykin2009neural,russell2016artificial}, have proven immensely successful for a large variety of tasks, notably including pattern recognition~\cite{bishop1995neural, nasrabadi2007pattern}, language processing \cite{collobert2008unified,mikolov2010recurrent}, and simulation of molecular dynamics \cite{kozuch2018combined,degiacomi2019coupling}, leading to changes in practice in fields such as medicine, pharmaceutical research \cite{baxt1991use,karabatak2009expert,zupan1999neural,agatonovic2000basic}, and finance \cite{trippi1992neural}. While computers were originally built to process information according to a pre-defined algorithm, NNs can learn how to process data themselves. Usually, this is achieved by training the network: data, for which the desired output is already known, is input and the network parameters are adjusted until the outputs of the NN coincide with the desired ones. However, training NNs was shown to be NP-complete \cite{rojas2013neural}, leading to long training times \cite{blum1989training} and large consumption of memory \cite{rumelhart1985learning,werbos1990backpropagation}.
Recently, simplified models such as binary neural networks (BNNs) \cite{hubara2016binarized}, for which the network parameters can only assume bit values, were introduced. Such simplifications drastically reduce the memory size and run time during the execution of the network \cite{Rastegari:tn}. However, during the calibration of parameters, the parameters and outputs of the BNN are kept real and the binarization happens \textit{after} each training cycle \cite{hubara2016binarized}. Hence, even for strongly simplified models improved training methods remain to be found.
Furthermore, common training methods, such as gradient descent, heavily rely on the shape of the optimisation landscape of the network parameters. Since the landscape is usually not convex, common training methods find suboptimal choices of parameters, corresponding to local extrema rather than global ones \cite{choromanska2015open}. It is known that there are scenarios where local extrema lead to performances which are far from globally optimal \cite{SwirszczCP16,Alizadeh:2018ul}.
A novel approach to NNs is based on quantum technology, which has been shown to achieve performances beyond what is possible with current classical implementations for a variety of tasks \cite{deutsch1992rapid,grover1996fast, shor1999polynomial,bravyi2017quantum}. These so-called quantum neural networks (QNNs) strive not only for an improved learning capacity \cite{schuld2014quest}, but also for learning \cite{wan2018learning, morales2018variationally} and identifying new \textit{quantum} protocols \cite{wan2017quantum, beer2019efficient}. An appealing possibility is that quantum effects, such as superposition and entanglement, can also improve the efficiency of training methods. Proposals of quantum neural networks are commonly accompanied by a fully classical \cite{wan2017quantum, bergholm2018pennylane,daskin2018simple,zhao2018forecast} or semi-classical training method assisted with quantum effects \cite{wan2018learning,Farhi:wv,beer2019efficient}, leading to the performance issues discussed above \cite{McClean:2018um}. There have been recent proposals for showing a quantum speed-up for training \cite{verdon2018universal}, taking advantage of a quantum speed-up for gradient descent~\cite{GilyenASW17, Jordan05}. Such results suggest quantum neural networks constitute a well-defined and potentially advantageous route. However, a key challenge with gradient descent based training methods is the risk of arriving at local extrema of the optimization landscape. In \cite{ricks2004training} the high-level idea to apply quantum search, which yields optimal solutions, to NN training was suggested, though without a concrete training scheme. There are certain hurdles to directly applying the standard quantum search techniques to neural nets, in particular how to relate the training to a superposition of weight strings that takes into account the cost function and is amenable to a quantum search.
In this work we explicitly propose a method for training binary neural networks using quantum search techniques. The protocol initialises the system parameters coherently and maintains the quantum superposition throughout the full training. By reformulating the training as a quantum search problem, we (1) are guaranteed (with high probability) to find the globally optimal set of network parameters and (2) achieve a provable speed-up in the number of possible choices of parameters, as detailed later in the paper.
We proceed as follows. After a brief introduction to BNNs and Grover's search algorithm, we design a quantum extension of a binary neuron. Then, the fully quantum training protocol is presented. Finally, an analysis of the performance in comparison to classical binary neurons is given and a generalisation from single neurons to feedforward quantum BNNs suggested. The Appendix includes simulations of the quantum training for concrete examples of networks.
\section{Preliminaries} We commence by introducing classical NNs and their basic training method. Afterwards, we give an overview of Grover's quantum search algorithm and quantum phase estimation.
\subsection{Classical feedforward binary neural networks} Artificial neural networks (NNs) constitute a computational framework used to process data without pre-defining an algorithm. The task-specific algorithm is constructed from the input data alone, which allows NNs to operate universally, up to restrictions due to the architecture of the network. The basic building block of an NN, the neuron, acts as a computational cell by receiving a set of inputs and processing them according to a pre-set rule. The output is then forwarded to the connected neurons. Modelling the structure as a graph, the edge between two neurons carries a weight $w$, which describes the importance of the connection, see e.~g.~Figure \ref{fig:classical_FNN}.
We focus in this Letter on feedforward NNs (FNNs), for which the neurons are grouped into sequential layers and data is fed in one direction only. Every neuron in layer $r$ shares an edge to every neuron in layer $r-1$ and $r+1$, while the neurons within a layer or across neighbouring layers do not interact (Figure \ref{fig:classical_FNN} depicts a simple instance of an FNN). The first layer obtains a set of inputs, processes it and forwards it to the next layer. The cascade of computations terminates when the last layer yields the final outputs. Note, that the amount of neurons can be different in each layer.
\begin{figure}
\caption{\textit{Simple instance of an FNN:} the inputs $a_1$, $a_2$ are forwarded to the neurons $\map N_1$ and $\map N_2$, which process them and forward their outputs to the next layer, consisting of $\map N_3$, $\map N_4$ and $\map N_5$. Afterwards, the outputs of the second layer is forwarded to $\map N_6$ and $\map N_7$, which output $a'_1$, $a'_2$. Each of the three layers is connected with edges carrying weights $w \in \{ w_i \}_{i=1}^{12}$.}
\label{fig:classical_FNN}
\end{figure}
The ability of NNs to learn certain tasks like pattern recognition \cite{bishop1995neural,nasrabadi2007pattern} and classification \cite{kotsiantis2006machine} comes with a high computational cost. One of the reasons for this is that the inputs to a neuron and weights $w$ can be arbitrary real numbers, which generally require large amounts of memory and arithmetic operations. Only recently, Bengio \textit{et al} \cite{hubara2016binarized} proposed a simplified version of NNs, which limits the outputs of the neurons and the weights to be binary. As a consequence, the memory size is drastically reduced and most arithmetic operations are replaced with bit-wise operations. The so-called binary neural networks (BNNs) were shown to process data faster and with a smaller amount of memory compared to NNs with continuous parameters. Remarkably, despite the strong nature of the simplifications the accuracy of BNNs has been shown to be similar to NNs \cite{hubara2016binarized}. \subsubsection*{Training an NN: gradient descent} During the training phase of an NN, pairs of training data $(a,a^*)$ consisting of inputs $a$ and corresponding desired outputs $a^*$ (also called labels) are used to calibrate the weights $w$. This happens by sending the inputs through the NN, and comparing the corresponding outputs $a'$ to the correct outputs $a^*$. The weights $w$ in the NN are refined according to the deviation of the output from the desired ones. To quantify the deviation, a task-specific cost function $C(\{a',a^*\})$ is defined. Note, that the outputs $a'$ implicitly depend on the choice of weights. A widely used approach in training classical neural networks is to approach the minimum cost through the method of gradient descent, which updates the weight $w_{ij}$ between neuron $i$ and neuron $j$ from the $t$-th iteration of the NN to the next one as \begin{align} w_{ij}(t+1)=w_{ij}(t) - \eta \frac{\partial C}{\partial w_{ij}} \ . 
\label{gradientdesc} \end{align} Here, $\eta$ is a positive number, usually referred to as the learning rate.
The direct evaluation of the term $\partial C / \partial w_{ij}$ is computationally expensive, and requires looping over all inputs twice. In addition, even if the values of the network parameters and inputs are restricted (e.g. for binary neural networks, see Section \ref{bnnsec}), commonly the weights are kept real-valued and only discretised after the evaluation of the partial derivatives \cite{hubara2016binarized,courbariaux2015binaryconnect}.
\iffalse {\color{green}{[I am tempted to delete the next paragraph -- do you think we should speak about backpropagation in the way below, or simply mention in the performance
section that this is a commonly used technique?]}} A frequently used method is backpropagation, which approximates the derivative by evaluating a sequence of local computations. Informally, the chain rule for derivatives allows to rewrite \begin{align} \frac{\partial C}{\partial w_{ij}}=\frac{\partial C}{\partial o^{(n)}}\frac{\partial o^{(n)}}{\partial i^{(n)}}\frac{\partial i^{(n)}}{\partial o^{(n-1)}}\dots \frac{\partial i^{(m)}}{\partial w_{ij}} \ , \end{align} where where we defined $o^{(n)}=\{ o^{(n)}_j \}$ as the set of all outputs of the neurons in the $n$-th layer, and $i^{(n)}=\{ i^{(n)}_i \}$ as the corresponding set of inputs. The partial derivative $\partial C / \partial o^{(n)}$ is then to be understood as the change in the cost function with respect to all outputs from the $n$-th layer of neurons (and analogously for the other partial derivatives). Hence, term is evaluated locally. The multiplication finally yields an approximation of the partial derivative in Eq.~(\ref{gradientdesc}). This concept can be applied to arbitrary NNs, as long as the processing $f$ of each neuron is differentiable. \fi
A technique known as backpropagation removes the need to evaluate Eq.~(\ref{gradientdesc}) for all weights: given the structure of the NN one can infer how to change weights in earlier layers given Eq.~(\ref{gradientdesc}) for later layers.
Gradient descent, whether with or without back-propagation, has the disadvantage of finding minima that may be local, i.e. sub-optimal \cite{choromanska2015open}. To find a global minimum is in general NP-hard, consistent with the exponential---in the number of neurons---number of weight configurations \cite{rojas2013neural,livni2014computational}. \subsection{Grover's search algorithm} For an unsorted list of length $N$, identifying the index of a given element in the list requires $O(N)$ computational steps classically. If quantum effects are allowed for, only $O(\sqrt{N})$ steps are needed. This is the result of the famous search algorithm proposed by Grover \cite{grover1996fast}, in which a speedup is achieved by evolving a superposition of list elements. In the following, we give a brief overview of the algorithm.
Let us encode the $N$ items of a list into quantum states in the computational basis $|0\rangle,|1\rangle,\dots,|N-1\rangle$. Furthermore, let $\{|\omega_i\rangle\}_{i=1}^M \subset \{ |0\rangle,|1\rangle,\dots,|N-1\rangle \}$ be the set of states corresponding to the solutions to the search problem. The algorithm then proceeds as follows:
\begin{itemize} \item Initialize the registers in the superposition \begin{align*}
\{ |0\rangle,|1\rangle,\dots,|N-1\rangle \} \mapsto |X\rangle = \frac{1}{\sqrt{N}}\sum_{x=0}^{N-1} |x\rangle \end{align*} \item For $O(\sqrt{N/M})$ times, repeat: \begin{enumerate}
\item Apply the quantum oracle $\Lambda_{\omega}=-2 \sum_{i=1}^M |\omega_i\rangle\langle\omega_i| + I$
	\item Apply the diffusion transform $D=H^{\otimes n} \Lambda_0 H^{\otimes n}=H^{\otimes n} \left(2 (|0\rangle\langle 0|)^{\otimes n} - I \right) H^{\otimes n} $ \end{enumerate} \item Apply a measurement in the computational basis to the register \end{itemize}
The quantum oracle $\Lambda_{\omega}$ marks the state $|x\rangle$ by flipping its sign if it coincides with one of the solutions from the set $\{ \omega_i \}_{i=1}^M$, and does nothing otherwise. The diffusion operator amplifies the amplitude of the marked elements, such that the concluding measurement yields the outcome $x^* \in \{ \omega_i \}_{i=1}^M$ with high probability.
\iffalse Altogether, Grover search can be defined by the sequence \begin{align*} \ldots ( H \Lambda_{0} H ) (\Lambda_{\omega})( H \Lambda_{0} H ) (\Lambda_{\omega}) H \end{align*} or, alternatively by applying the the operator $Q=- \Lambda_{0} H \Lambda_{\omega} H$ \fi
The optimal number of iterations of the oracle and the diffusion before the concluding measurement is given by $k^*\approx \sqrt{\frac{N}{M}} \frac{\pi}{4}$ \cite{grover1996fast}. It is, hence, crucial to have knowledge about $M$, as stopping at the wrong time may result in a random output rather than a valid solution.
\subsection{Phase Estimation \label{phaseest}}
Suppose $|u\rangle$ is an eigenvector of a unitary operator $K$, with eigenvalue $e^{2 \pi i \phi}$. To estimate $\phi$ up to some error $\epsilon$, we initiate $t$ qubits in the state $|0\rangle$. The number $t$ is determined by the precision of the estimation of $\phi$, as well as the probability of success of the estimation. The phase estimation algorithm proceeds in two steps and is depicted in Figure \ref{fig:PE}.
\begin{figure}
\caption{Circuit for the quantum Phase estimation: the $t$ ancillary qubits act as control systems for the unitary $K$. The system input $|u\rangle$ is the corresponding eigenvector to the eigenvalue $e^{2 \pi i \phi}$ of $K$. The $l$-th ancilla applies $K$ for $2^{l-1}$ times on the system. After the in total $2^t-1$ controlled applications of $K$, the control systems are transformed by an inverse quantum Fourier transform $F_n^{-1}$, which yields a binary encoding of the number $\phi$.}
\label{fig:PE}
\end{figure}
\textit{Step 1.} Apply a Hadamard gate to each of the $t$ qubits in the first register, yielding the states $|+\rangle^{\otimes t}$. Then, the first ancilla acts as a control for $2^0=1$ use of the unitary operator $K$, which is applied to $|u\rangle$. The second ancilla acts as a control for $2^1=2$ uses of $K$, and so on. In general, the $l$-th ancilla acts as a coherent control for $2^{l-1}$ uses of $K$. In the end, the collective output state $|T\rangle$ of the $t$ ancillas yields \cite{nielsen2002quantum} \begin{align*}
|T\rangle=\frac{1}{\sqrt{2^t}}\sum_{k=0}^{2^t-1} e^{2 \pi i \phi k} |k\rangle \ . \end{align*}
\textit{Step 2.} Apply the inverse quantum Fourier Transform $F_n^{-1}$ in order to convert $\phi$ of the state $|T\rangle$ into a $t$-qubit register. More precisely, the inverse QFT acts as \begin{align}
F_n^{-1} |T\rangle = |\phi_1 \phi_2 \dots \phi_t\rangle \ , \label{PEout} \end{align}
where the phase to be estimated is assumed to have the binary form $\phi=0. \phi_1 \phi_2 \dots$, i.e.\ $\phi_j$ is the $j$-th binary digit of the number $\phi$. It is important to remark that with the phase estimation (PE) algorithm being unitary, it acts linearly on superpositions $\sum_i |u_i\rangle$ of eigenvectors $|u_i\rangle$ \cite{nielsen2002quantum}.
\section{Quantum binary neurons \label{bnnsec}}
A classical neuron is the basic computational unit of a classical NN. It implements a function, which takes multiple inputs and produces a single output. Hence, this many-to-one mapping makes the classical neuron function irreversible, which is a priori not compatible with reversible dynamics of quantum computing. However, this mapping can be generalised to a quantum neuron in the same way that quantum computing generalises classical computing, along the paradigm of \cite{wan2017quantum}. To define the quantum generalisation, first, classical gates are extended to reversible gates, which are a subset of quantum unitaries. Afterwards, general unitaries are allowed for.
\subsection{Finding a reversible extension} Let us consider the elementary instance of a classical binary neuron (CBN) depicted in Figure \ref{fig:CBN}.
\begin{figure}
\caption{\textit{Functioning of a CBN:} an $\tt XNOR$ operation multiplies the inputs $a_1$, $a_2$ with the corresponding weights $w_1$, $w_2$. The resulting values $s_1=w_1 a_1$, $s_2=w_2 a_2$ are forwarded to the $\tt bitcount$, which outputs $s=s_1+s_2$. Finally, the Sign function determines the activation of the neuron.}
\label{fig:CBN}
\end{figure}
The neuron takes the input $\underline{a}=(a_1,a_2)$ with the two components $a_1$ and $a_2$, and has weights $w_1,w_2 \in \{1,-1 \}$ on the edges. An $\tt XNOR$ gate multiplies the inputs with the weights, yielding $s_i=w_i a_i$, $i=1,2$. Afterwards, the operation $\tt bitcount$ sums the results to the value $s=w_1 a_1 + w_2 a_2$. Finally, the Sign function $f(s)$ determines the activation value $a'$ of the neuron. Now, firstly we introduce ancillary inputs and outputs for each operation in the CBN, such that the number of inputs coincides with the number of outputs. These ancillas carry the output values of the operations, while the inputs are preserved. This yields a reversible embedding $U_{\tt XNOR}$ of $\tt XNOR$ and $U_{\tt bit+}$ of $\tt bitcount$, see Figure \ref{revCBN}. Here, $U_{\tt bit+}$ includes both the $\tt bitcount$ operation and the activation through the activation function $f$.
\begin{figure}
\caption{\textit{Reversible embedding of CBN:} the gate $U_{\tt XNOR}$ takes component $i$ of the input vector $\underline{a}$ and multiplies it with the corresponding edge weight $w_i$, yielding the value $s_i=a_i w_i$, with $i\in \{ 1,2 \}$. To achieve reversibility, $w_i$ is forwarded on the first output. The gate $U_{\tt bit+}$ takes $s_1$, $s_2$ and an ancilla $0$, and encodes the activation $f(s)$ in the output $a'$, while preserving $s_1$ and $s_2$.}
\label{revCBN}
\end{figure} \subsection{Unitary embedding of the reversible CBN \label{basicopqbn}}
The reversible logical gates in Figure~\ref{revCBN}, with inputs $x_i$ and outputs $y_i$, can be implemented as quantum unitaries $U=\sum_i |y_i\rangle\langle x_i|$. This defines a quantum binary neuron (QBN). In order to find a quantum circuit implementation of a QBN, we decompose the operations $U_{\tt XNOR}$ and $U_{\tt bit+}$ into elementary quantum gates.
The multiplication of the input and weight can be achieved by a controlled NOT (CNOT) gate on each input $|a_i \rangle$, controlled by the state of the corresponding weight $|w_i\rangle$ (see Figure \ref{fig:QBN}).
\begin{figure}
\caption{\textit{Functioning of QBN:} the input data is encoded into quantum states. The multiplication succeeds by the CNOT gate and outputs the states $|s_1\rangle$, $|s_2\rangle$. Finally, the Toffoli gate, which has known decompositions into elementary gates, executes $U_{\tt bit+}$, leading to the output state $|a'\rangle$. }
\label{fig:QBN}
\end{figure}
It can be easily seen from the truth table of the $\tt XNOR$ and CNOT gate that the operations indeed coincide. The gate $U_{\tt bit+}$ is realized by the Toffoli gate \cite{barenco1995elementary} on the ancilla, where the states $|s_1\rangle$ and $|s_2\rangle$ act as control systems.
\subsection{Generalization to quantum FBNNs \label{networkgeneralisation}} So far, we proposed a unitary extension of a classical neuron. A neural network consists of multiple interlinked neurons. Hence, by extending the neurons unitarily, the resulting quantum neural network acts, overall, unitarily on inputs as well. Generalising Figure \ref{fig:QBN}, a quantum feedforward binary neural network (QFBNN) takes a set of $N$ weights $\{ w_1, w_2, \dots, w_N\}$ as input, together with a set of data $\underline{a}=(a_1,a_2,\dots,a_p)$. The set $\{ w_1, w_2, \dots, w_N\}$ denotes all weights in the networks -- including input layer, hidden layers and output layer. The set of data $\underline{a}=(a_1,a_2,\dots,a_p)$ assumes that there are $p$ neurons in the input layer (with assigned weights $w_1,w_2,\dots,w_p$). Each of the $r$ neurons in the last layer of the QFBNN outputs one value, leading to the overall output $\underline{a}'=(a_1',a_2',\dots a_r')$. Then, by depicting the action of the QBFNN as a single unitary $U$, the fully quantum training method introduced in the next Section generalises straightforwardly from a single neuron to general QFBNNs.
One point requires a bit more attention: when generalising from a single neuron to a feedforward neural network, the output values of the neurons are to be copied and distributed to each neuron in the next layer. While classically this is trivial to do, quantum mechanics prohibits exact copying of data -- a phenomenon called no-cloning theorem \cite{wootters1982single}. For classical binary neural networks, this issue is resolved by applying CNOT gates to the output of each neuron, one for each copy operation. The CNOT acts on an ancillary qubit initialized in $|0\rangle$, controlled by the output qubit $|a'\rangle$ of the neuron. Hence, for bit valued $a'$ the CNOT gate acts as \begin{align}
\text{CNOT} (|a'\rangle |0\rangle) = |a'\rangle|a'\rangle \ . \end{align} In total from one layer with $\ell_1$ neurons to the next with $\ell_2$ neurons, we need $\ell_1 \times \ell_2$ ancillas. When the overall state has coherence, the CNOT gates in general do not produce perfect copies but rather entanglement between the states and the ancillas carrying the output values.
\iffalse So far, we restricted the discussion to the training of a single QBN. Here, an extension to quantum feedforward BNNs (FBNNs) is suggested. Instead of identifying the optimal weights for a single neuron the goal is to find the optimal set of weights for each link among two neurons in the network. For a classical FBNN, there are $2^{N_{\text{tot}}}$ different choices, for a total amount of $N_{\text{tot}}$ parameters. Hence, in the classical case a brute-force search becomes infeasible very quickly. Using the proposed quantum training, it is sufficient to initialize a single superposition of all weight states, as $|W\rangle=1/\sqrt{\widetilde{N}_{\text{tot}}} \sum_{\underline{w}} |\underline{w} \rangle$, where $\underline{w}= w_1,w_2, \dots,w_{N_{\text{tot}}}$ and $\widetilde{N}_{\text{tot}}=2^{N_{\text{tot}}}$.
For the phase accumulation subroutine in the quantum training with PE, the oracle $\Lambda$ in the accumulation of matches subroutine then acts as \begin{align} \Lambda(\underline{a}', \underline{a}^*) = \begin{cases} e^{i \pi / n_{\text{tot}}} \ \ \text{if} \ \ \underline{a}' = \underline{a}^* \\ 1 \ \ \text{else} \ , \end{cases} \label{oracleqbnn} \end{align} where $\underline{a}'$ is the collection of all training inputs produced by the network (for corresponding inputs $\underline{a}$) and $\underline{a}^*$ the desired outputs the oracle uses for comparison. Hence, the quantum training method extends straightforwardly to feedforward binary neural networks, by replacing the action of the single neuron $U$ with the action of the whole network.
It is important to note that this generalisation works because the network itself is classical, and only the training is fully quantum. This way, architectural subleties of quantum networks discussed in \cite{wan2017quantum,wan2018learning}, such as replacing the copying of neuron outputs by imperfect distribution to the next neuron layer via fan-out unitaries, do not need to be considered. {\color{green}{[anything more here?]}}
The same holds for the quantum training with counter registers. \fi
\section{Quantum training with phase estimation \label{petrain}} Here, we propose a fully quantum training protocol for QBNs, meaning that we initialise the weights of the network in a quantum superposition and do not collapse the state throughout the training. For presentational clarity we restrict the training to a single neuron. This readily generalises to networks using the recipe described in section~\ref{networkgeneralisation}. The general strategy proceeds in the following three steps
\begin{enumerate} \item Each weight string in superposition is multiplied by a phase factor which depends on how many times it leads to the correct output, for the input data in the training set. This is achieved by looping over training data coherently, making use of a technique known as uncomputation. \item Binarisation of the marking: the phase factors from step 1 are binarised to the values $\{1, -1\}$ using quantum phase estimation. \item Steps 1 and 2 together yield an oracle compatible with Grover search, which is applied as a final step. \end{enumerate} This yields with high probability the globally optimal set of weights, quadratically---in the number of weight strings---faster than a brute-force classical search. While the main text discusses the subroutines in detail, Appendix~\ref{app:qbnnexp} illustrates the functioning of the training method with concrete examples. \subsection{Marking the target weights \label{markingtheweights}}
Let us assume we have in total $N$ binary weights $\{ w_1, w_2, \dots , w_N \}$, with values encoded as $|0\rangle$ and $|1\rangle$. Consequently, there are in total $\tilde{N}=2^N$ different combinations of values for the weights. Each of these combinations will be denoted by a string $\underline{w}$ of length $N$, for which the corresponding quantum state reads $|\underline{w}\rangle$. Furthermore, let $n$ be the number of data pairs $(\underline{a_1},a^*_1),(\underline{a_2},a^*_2),\dots,(\underline{a_n},a^*_n)$ used for the training. Here, the value $a^*_i$ (sometimes called label) notes the desired output of the given input $\underline{a_i}$.
The first subroutine works as a phase accumulation approach to identify the goodness of a string, see Figure \ref{fig:marking}. The resulting phases will be used later to mark the good strings for a quantum search.
\begin{figure}\label{fig:marking}
\end{figure}
\textit{Step 1:} initialize all $\tilde{N}=2^N$ possible weight strings in a coherent superposition $|W\rangle=\frac{1}{\sqrt{\tilde{N}}} \sum_i |\underline{w_i}\rangle$. This yields the overall initial state \begin{align}
|\text{In}_1\rangle= |W\rangle |\underline{a}\rangle |a^*\rangle |0\rangle \ , \end{align}
where $|0\rangle$ is an ancillary qubit and $(\underline{a},a^*)\in \{(\underline{a_1},a^*_1), (\underline{a_2},a^*_2),\dots,(\underline{a_n},a^*_n) \}$ is one pair of training data. Due to the superposition of weight states, all weight states are simultaneously combined with the input $|\underline{a}\rangle$. Next, the unitary action $U$ (see Figure \ref{fig:QBN}) of the QBN acts on $|W\rangle$, the input $|\underline{a}\rangle$ and an ancilla $|0\rangle$, encoding the output $|a'\rangle$ on the ancillary system. The overall state is then given by \begin{align}
|\text{Out}_1\rangle=U |\text{In}_1\rangle= \frac{1}{\sqrt{\tilde{N}}} \sum_i |\underline{w_i}, \underline{\widetilde{a_i}} ,a_i'\rangle |a^*\rangle \ , \label{markingout1} \end{align}
where the QBN transforms the input $|\underline{a}\rangle$ and ancilla into $|\underline{\widetilde{a_i}}\rangle$ and $|a_i'\rangle$ respectively, if the control state is $|\underline{w_i}\rangle$. Note that except for the desired output $|a^*\rangle$, the systems in Eq.~(\ref{markingout1}) are entangled in general. \\
\textit{Step 2:} call the oracle $\Lambda$ to compare the output $|a_i'\rangle$ with the desired output $|a^*\rangle$. If the two states coincide $\Lambda$ adds a phase $e^{i \pi/n}$ to $|a_i'\rangle$. This leads to
\begin{align} \label{eq:step2}
|\text{Out}_2\rangle= \frac{1}{\sqrt{\tilde{N}}} \sum_i |\underline{w_i}, \underline{\widetilde{a_i}}, a_i'\rangle e^{i \Delta \theta_{\underline{w_i}} } |a^*\rangle \ , \end{align}
where $\Delta \theta_{\underline{w_i}} = \pi/n$ if the oracle comparison between $a_i'$ and $a^*$ was successful, and $\Delta \theta_{\underline{w_i}}=0$ otherwise. \\
\textit{Step 3:} decouple the weights by uncomputation. By inverting the unitary action $U$ of the QBN, the weights get decoupled and assume the state \begin{align}
|W'\rangle=\frac{1}{\sqrt{\tilde{N}}} \sum_i e^{i \Delta \theta_{\underline{w_i}}} |\underline{w_i}\rangle \ . \end{align}
The full output state is given by $|\text{Out}_3\rangle = |W'\rangle |\underline{a}\rangle |a^*\rangle |0\rangle$. \newline
\textit{Step 4:} accumulation of phases. The state $|W'\rangle$ is used as the initial weight state for the marking with a new training pair. By repeating Steps 1--3 for all $n$ pairs of training data, we achieve a coherent phase accumulation for all weight strings, resulting in the output weight state \begin{align}
|\widetilde{W}\rangle=\frac{1}{\sqrt{\tilde{N}}} \sum_i e^{i \pi N_i /n} |\underline{w_i}\rangle \ . \label{phaseaccweights} \end{align} Here, $N_i \leq n$ denotes the number of times the oracle comparison was successful for weight string $\underline{w_i}$ during the $n$ rounds. The most frequently marked weights are closest to a phase of $-1$, whereas bad weight strings maintain a phase of $+1$. In Appendix~\ref{app:qbnnexp} we discuss the quantum circuit implementations of several basic examples of the above marking scheme and their simulation results. The simulations have been done via Huawei's quantum computing cloud platform HiQ (\url{https://hiq.huaweicloud.com}), as numerical evidence of the proposed quantum training algorithm. \\
Now, let $\tilde{U}$ be the unitary describing the full accumulation process, namely \begin{align*}
&\tilde{U} \left(|W \rangle \otimes \left( \bigotimes_{j=1}^n |\underline{a_j}\rangle \otimes |a^*_j\rangle \right) \otimes |\underline{0}\rangle \right) \nonumber \\
=& |\widetilde{W} \rangle \otimes \left( \bigotimes_{j=1}^n |\underline{a_j}\rangle \otimes |a^*_j\rangle \right) \otimes |\underline{0}\rangle \ , \end{align*}
where $|\underline{0}\rangle= |0\rangle^{\otimes n}$. It can be seen easily that the states $|\underline{w_i}\rangle \otimes ( \bigotimes_{j=1}^n |\underline{a_j}\rangle \otimes |a^*_j\rangle) \otimes |\underline{0}\rangle$, $i=1,2,\dots,\tilde{N}$, are eigenvectors of $\tilde{U}$ with corresponding eigenvalues $e^{i \pi N_i /n}$. This is an important insight for the next step, in which the phases will be converted to a binarized marking via phase estimation.
\subsection{Binarizing the marking} The output state of Eq.~(\ref{phaseaccweights}) suggests using a generalisation of the standard marking of Grover search to oracles which add certain phases rather than $+1$ or $-1$ to the elements subject to the search. It was shown in \cite{grover1998quantum} that this is indeed possible, maintaining the scaling $O(\sqrt{N/M})$ of the number of iterations. However, the optimal number $k^*$ of iterations of the algorithm is in general unknown, which makes such a generalisation impractical.
With the aim of making the output amenable to Grover search, we thus propose a subroutine to binarize the phases in the coherent weight state $|\tilde{W}\rangle$. To this purpose, we define a threshold count $N_t$, which denotes the minimum required number of successful oracle calls for a weight string to get marked. If $N_i \geq N_t$, the corresponding weight state will get a phase of $-1$. If $N_i < N_t$, the phase is set to 1. This conversion can be done via phase estimation as shown in the following Section.
\subsubsection{Binarised marking via phase estimation} The complete process of converting the phases in Eq.~(\ref{phaseaccweights}) to a binary marking is visualized in Figure~\ref{fig:PEfull}.
\begin{figure}
\caption{\textit{Binarised marking via phase estimation:} In the first step, the PE is applied to the coherent superposition $|W\rangle$ of weight states, together with all other inputs and ancillas included in the training. The unitary action $K$ is the full marking scheme $\tilde{U}$ described in Section \ref{markingtheweights}. This yields the binary encoding $|\phi^{(i)}_1 \phi^{(i)}_2 \dots \phi^{(i)}_t\rangle$ of the phases $\phi^{(i)}=N_i/(2n)$ subject to the estimation. Then, the oracle $O_{\pm 1}$ is called, which will add a marker to the state if the corresponding match count $N_i$ reaches a certain threshold $N_t$. Finally, inverting the PE achieves a decoupling of the marked weight state from the other systems.}
\label{fig:PEfull}
\end{figure}
For our training algorithm, the eigenvectors are given by $|u_i\rangle=|\underline{w_i}\rangle \otimes \bigotimes_{j=1}^n (|\underline{a_j}\rangle \otimes |a^*_j\rangle) \otimes |\underline{0}\rangle$. Then, we identify the controlled unitary operation $K$ acting on the superposition (we omit normalisation for readability) $|u\rangle = \sum_{i=1}^{\tilde{N}} |u_i\rangle$ as the full phase accumulation in Section \ref{markingtheweights}, i.e. $K=\tilde{U}$, and the numbers to be estimated $\phi^{(i)} = N_i / (2 n)$. We now define a new oracle $O_{\pm 1}$, acting as \begin{align*}
O_{\pm 1} |\phi_1 \phi_2 \dots \phi_t \rangle = \pm 1 |\phi_1 \phi_2 \dots \phi_t \rangle \ , \end{align*}
where $O_{\pm 1}$ adds $-1$ if $N_i \geq N_t$. In general, we can make certain choices for the threshold $N_t$, such that $N_t/(2n)$ corresponds to a binary fraction of length $\ell$. Then, for $t$ large enough it can be determined from the estimate $|\phi_1 \phi_2 \dots \phi_t \rangle$ whether $N_i \geq N_t$ is met.
For illustration purposes, let us analyse some simple examples. Assume first we have $t=1$, such that PE gives the first binary digit $\phi_1$. Then, using $\phi = N_i / (2 n)$, we have $\phi_1=1$ if $N_i=n$, and $\phi_1=0$ if $N_i< n$. If $t=2$, PE gives $\phi_1 \phi_2$, which is $\phi_1 \phi_2= 0 1$ if $N_i \geq n/2$ and $\phi_1 \phi_2= 00$ if $N_i < n/2$. For $t=3$, PE yields $\phi_1 \phi_2 \phi_3$, which assumes the value $011$ if $N_i \geq 3 n/4$, and $010$ if $n/2 \leq N_i < 3 n/4 $. Thus, even though the number of uses of $\tilde{U}$ grows exponentially in $t$, a small value of $t$ is sufficient for a good precision in singling out the best weight strings. Note that $t$ only depends on our required precision, not on $N$.
We comment in Section \ref{bsearch} in greater detail how to choose $t$ and in Section \ref{perfPE} its impact on the performance of the marking.
Finally, after PE and the oracle $O_{\pm 1}$, the weight state together with the phase register state reads \begin{align}
|\tilde{\tilde{W}}\rangle=\frac{1}{\sqrt{\tilde{N}}} \sum_i (-1)^{N_i \geq N_t} |\underline{w_i}\rangle |\phi^{(i)}_1 \phi^{(i)}_2 \dots \phi^{(i)}_t \rangle \ , \label{weightunc} \end{align} where the exponent $N_i \geq N_t$ is to be understood as a Boolean variable which is 1 in case the condition is true and 0 else.
The state in Eq. (\ref{weightunc}) is entangled between the weight strings and the binary encoding of phases from the PE. In order to disentangle the two systems while maintaining coherence of the weight state, we uncompute the PE, which transforms (\ref{weightunc}) to \begin{align}
PE^{-1}: |\tilde{\tilde{W}}\rangle \mapsto |\hat{W}\rangle= \frac{1}{\sqrt{\tilde{N}}} \sum_i (-1)^{N_i \geq N_t} |\underline{w_i}\rangle \ . \label{weightfin} \end{align}
In the next step, the amplitudes of the marked strings in the state $|\hat{W}\rangle$ are amplified such that the concluding measurement detects one of the marked strings with high probability.
\subsection{Full training cycle \label{fulltrain}}
It can be seen from Eq.~(\ref{weightfin}) that the marked state $|\hat{W}\rangle$ is equivalent to the marked state of standard Grover search. Hence, in order to amplify the amplitudes of the marked elements the standard diffusion operator $D=H^{\otimes n} \Lambda_{0} H^{\otimes n}$ can be used. The quantum training for the binary neuron then succeeds by applying $k^*\approx \sqrt{\frac{\tilde{N}}{M}} \frac{\pi}{4}$ iterations of the binary marking via phase estimation and the subsequent diffusion, if we assume that we know the number $M$ of target weight strings. If $M$ is not known one can find an optimal weight string in an expected time that scales as $O(\sqrt{\tilde{N}/M})$ with a subroutine based on Boyer \textit{et al.} \cite{boyer1998tight}. Figure \ref{fig:cycle} summarizes the full training cycle with phase estimation. A circuit implementation of the full training is given in Appendix \ref{app:fulltrain}.
\begin{figure}
\caption{\textit{Full training cycle with phase estimation:} After the binary marking process depicted in Figure \ref{fig:PEfull}, the diffusion operator $D=H^{\otimes n} \Lambda_{0} H^{\otimes n}$ is applied to the weight register. Amplification of the marked elements is achieved by looping the full marking subroutine and the subsequent diffusion for $k^*$ times. A final measurement in the computational basis yields the optimal weight string $\underline{w^*}$.}
\label{fig:cycle}
\end{figure}
\subsubsection*{Binary search for globally optimal solution \label{bsearch}} For a fixed precision $t$ of the phase estimation subroutine, the number of solutions $M$ depends on the training set and is generally unknown. Indeed, there might be multiple marked weight strings, or even none which is above the threshold $N_t$. To resolve this problem, we propose a binary search on the threshold intervals as a subroutine to ensure, up to the precision of the search and up to imprecisions in the phase estimation algorithm, that the optimal solution is marked and amplified.
For a solution $w^*$, the possible range of the number of matches $N^*$ in the accumulative marking scheme is initially an integer from the interval $[0,n]$. At every step of the search the possible range is cut evenly into two parts. Thus the size of the possible range goes as $n2^{-i}$ for the $i$-th step of the binary search. The search stops when the size of the possible range reaches a small enough number which quantifies our desired precision, and which we call $\delta < n$, i.e.\ the search runs until $i$ is large enough so that $n2^{-i}\leq \delta$. To determine whether the updated possible range is the lower or upper half, we mark the weight strings in the upper half by $-1$ using the procedures described above. If after the search the measured weight string is in the upper half, this becomes the new possible range. Else, if it is not in the upper half, the possible range becomes the lower half. Thus eventually this binary search identifies with a high probability an $N^*$ which is within $\delta$ of the global optimum. This takes $\lceil \log (\frac{n}{\delta})\rceil$ steps, which scales efficiently in $n$. \\
The binary search then proceeds as follows:
\begin{itemize} \item Initialize $N_t = n/2$, $i=0$. \\Set $m=1$ and pick $\mu \in (1,4/3)$. \item For $i \leq \lceil \log(n/\delta) \rceil$, loop the following steps: \begin{enumerate} \item Choose an integer $s\in[1,m-1]$ uniformly at random. \item With the current $N_t$, apply $s$ iterations of the full training cycle from Figure~\ref{fig:cycle}. Measure the weight state and obtain a candidate solution $\underline{w}^*$. \item Run the classical BNN on $\underline{w}^*$ only to obtain $N^*$, the number of times $\underline{w}^*$ gave correct outputs.
\item \textit{If} $N^* \geq N_t $, this means the observed $\underline{w}^*$ is indeed a solution for the current $N_t$, and there might be better solutions for larger $N_t$, so update $N_t$ as: $N_t \rightarrow N_t + \Delta N_i /2$, in which $\Delta N_i=n2^{-i}$, and go to the next step to update $i$. \\
\textit{else} (i.e.\ $N^* < N_t $), set $m$ to $\min(\mu m, \sqrt{\tilde{N}})$, and go back to step 1. \\ \textit{Time-out} rule for this inner loop on $m$: when the loop of steps 1--4 continues to the point that the full training cycle has been executed $O(\sqrt{\tilde{N}})$ iterations in total, but still $N^* < N_t $, this means there is no solution for the current $N_t$; then update $N_t$ as: $N_t \rightarrow N_t - \Delta N_i /2$, in which $\Delta N_i=n2^{-i}$, and go to the next step to update $i$.
\item Update $i$ as: $i \rightarrow i+1$. \end{enumerate} \item Output the last value of $\underline{w}^*$ that has $N^* \geq N_t $ and end training. This $\underline{w}^*$ is the optimal weight string found by the algorithm.
\end{itemize}
The above search homes in on a solution that is globally optimal up to the $\delta$ precision. The number of steps in the loop (on $i$) of the binary search can by inspection be seen to scale as $\log(\frac{n}{\delta})$.
\section{Quantum training with counter registers} Quantum training via phase estimation includes an overhead of neuron calls that is exponential in the precision $t$ of the phase estimation. There is an alternative approach which is sensitive to the size of the training set, but avoids that overhead: rather than accumulating phases, the number of matches between the QBN output and the desired output can be encoded directly into a multi-qubit register.
The algorithm is similar to the training cycle presented in Section \ref{petrain}, up to some changes: in Figure \ref{fig:marking} the oracle $\Lambda$ adding incremental phases to good weight strings is replaced by a new oracle $O_{\tt count}$, acting on a $\lceil \log(n+1) \rceil$-qubit register as \begin{align}
O_{\tt count} |h\rangle = \begin{cases} |h+1\rangle \ \text{if} \ a'=a^* \\
|h\rangle \ \text{else} \end{cases} \ . \end{align}
The number $h$ of matches is then encoded in binary into the qubits. Initially the counter is set to $|0\rangle$ and increased coherently during the marking. This way, after the full set of training data, the output weight state reads \begin{align}
|\tilde{W}\rangle=\frac{1}{\sqrt{\tilde{N}}} \sum_i |\underline{w_i}\rangle |N_i\rangle \ , \end{align} where $N_i$ is the number of matches for the weight string $\underline{w_i}$.
Next, we again define a threshold count $N_t$ and a new oracle $\widetilde{O}_{\pm 1}$. If the condition $N_i \geq N_t$ holds, the oracle marks the corresponding weight string $\underline{w_i}$ by adding a factor of $-1$. Otherwise, the string is left invariant. The resulting state reads \begin{align}
|\tilde{\tilde{W}}\rangle=\frac{1}{\sqrt{\tilde{N}}} \sum_i (-1)^{N_i \geq N_t} |\underline{w_i} \rangle |N_i\rangle \ . \end{align} In order to retrieve the same marked state as in standard Grover search, the counter needs to be reset. To maintain coherence, such a reset needs to be unitary. This can be achieved by uncomputing the $n$ rounds of counting at the beginning of the marking.
The weight state then becomes \begin{align}
|\hat{W}\rangle=\frac{1}{\sqrt{\tilde{N}}} \sum_i (-1)^{N_i \geq N_t} |\underline{w_i} \rangle \ , \end{align} and the same binary search procedure can be applied.
\section{Performance \label{perfPE}} One way to quantify the time requirement for the training is to count (1) the number of calls to the neural network and (2) the number of calls to the oracles. Recall there are two more elementary oracles, one for comparing the output with the desired output, and one for checking if the phase is above a threshold. These, together with the phase accumulation routine, act as a Grover oracle. Counting the calls to the oracles as a way of quantifying performance is reasonable since the probability of a system being lost, or the amount of noise more generally, will often scale with the number of calls. Moreover, the cost to experimenters is often the amount of time the experiment takes, which may also scale with the number of calls.
In the quantum training presented above there are two separate contributions to the number of calls: (i) for each training input $(\underline{a_i},a_i^*)$ there is one call to the accumulation subroutine (consisting of one call to the comparatory oracle and two neural network calls: one call and one inverse call for the uncomputation), and there are $n$ accumulation cycles in total (recall $n$ is the number of training inputs), (ii) the quantum search protocol on the weight states is expected to take a number of calls scaling as $O \left(\sqrt{\tilde{N}/M} \right)$, with $M$ being the estimated number of solutions and $\tilde{N}=2^N$ being the total number of weight states.
Hence, in terms of the number of training data pairs, $n$, and the total number of weight states $\tilde{N}$ , we call the combined Grover oracle \begin{align} \sum_{i=1}^{\lceil \log{n/ \delta} \rceil} \sqrt{\frac{\tilde{N}}{M_i}}\leq \lceil \log\left(\frac{n}{\delta}\right) \rceil \sqrt{\tilde{N}} \equiv N_G \label{groveroraclecalls} \end{align} times, where $M_i$ is the number of optimal weights for the $i$-th loop in the binary search and $M_i \geq 1$. The sum in Equation \ref{groveroraclecalls} comes from the binary search with precision $\delta$ described in Section \ref{fulltrain}. The comparatory oracle is then called \begin{align} N_C=N_G \left(2 n \left(\sum_{j=0}^{t-1} 2^j \right) \right) = N_G \left(2 n \left(2^t - 1 \right)\right) \approx 4 N_G n^2 \label{groveroraclecalls2} \end{align} times (the factor of 2 comes from the uncomputation of the phase estimation), and the QBNN is called $2 N_C$ times. The last approximation follows from the fact that the estimated phases are given by $N_i/2n$ and the smallest possible precision of $N_i$ is $\delta=1$. In order to be able to give an estimate up to $\delta=1$, we require the $t$-th binary digit in $0.\phi^{(i)}_1 \phi^{(i)}_2 \dots \phi^{(i)}_t$ to be such that $2^{-t}=1/2n$. This gives $2^t - 1 \approx 2n$.\\
The number of qubits needed consists of (i) the number of qubits needed for the phase estimation and (ii) the number of qubits needed for the QBNN.
For (i), the phase estimation requires $\log (2n/\delta)$ ancillary qubits to estimate with precision $\delta$ of the binary search, regardless of the imprecisions in the phase estimation algorithm itself. For (ii), the number of qubits needed for the QBNN consists of the number of qubits needed for the training inputs, the desired outputs, the $N$ weight qubits, and the ancilla qubits. There are two types of ancillas: the ones to achieve the unitary embedding of the classical binary neural network and the ones needed for the fan-out operation. We show in Appendix~\ref{app:noofqubits} that the following simple equality holds \begin{align} Q_{\text{input}} + Q_{\text{ancilla}} = Q_{\text{weight}} + Q_{\text{output}} \ . \end{align} Here, $Q_{\text{input}}$ is the number of input qubits, $Q_{\text{ancilla}}$ the number of ancilla qubits, $Q_{\text{weight}}$ the number of weight qubits and $Q_{\text{output}}$ the number of output qubits. It can be easily seen that the number of qubits needed for the desired outputs is the same as $Q_{\text{output}}$. Therefore, the total number of qubits needed for the QBNN is equal to
2 (Q_{\text{weight}} + Q_{\text{output}} ) \leq 2 (Q_{\text{weight}} + Q_{\text{input}}) \end{align}
\iffalse
The number of qubits needed is a sum of several terms:(i) $\log(n/\delta)$ for phase estimation, (ii) $N$ for the weights, (iii)up to $\log n$ for inputs, (iv) $N/k$ for ancillas for neurons with $k$ inputs, (v) up to $\log n$ for desired output, (vi) up to $N$ fan-out ancilla qubits.
\fi
\iffalse Thus, the total number of calls is expected to scale as $2 n\times\sqrt{\tilde{N}}$. {\color{blue}{[from above, it seems like we count the neuron calls and the oracle calls. And we treat them equally: for one phase accumulation step, we call $U$, then the oracle, then $U^{-1}$ -- so in total we have 2 calls. Is this a good way to do it?]}} \fi
\iffalse
The oracle calls are a bit more subtle:
{\color{blue}{[how expensive is it to implement the oracles? Can we ignore those costs or is it actually difficult to implement the phase accumulation oracle (with good precision!) and the threshold oracle ?]}}
{\color{red}{[1. The cost of the phase oracle in NN computation: it consist of a series of successive multi-controlled phase gates, the number of the gates equals to the number of cases that the output and desired output coincide. {\color{green}{[Does this mean we must know the number of solutions before we actually run the NN? That would not make sense.]}} when the output and desired output both have m digits, the number of cases that the output and desired output coincide $=2^m$ {\color{green}{[I dont understand the second sentence. There are $2^m$ possible stings with $m$ digits, but I don't see how this is connected to the number of matches between the output of the NN and the desired output.]}}]}}
{\color{red}{[2. The cost of the threshold oracle: it also consist of a series of successive multi-controlled NOT gates, the number of the gates equals to the number of cases that $\phi=0. \phi_1 \phi_2 \dots$, $\phi_j$ is between $N_t/2n$ and $1/2$. {\color{green}{[Also this we never know beforehand right? So if we want a universal NN, we need to assume that in the worst case there are $n$ elements in this interval, so we must say that one oracle call scales as $n$..? ]}} For example , when $N_t=n$, there is not one multi-controlled NOT gates is needed for the oracle. the number of the gates needed depends on how close $N_t$ is to $n$ and the precision of the phase estimation (the number of the phase storing qubits]}}
Hence, overall we have a basic $2n \times$? calls for both training approaches. In the following, we discuss each approach separately and compare their time requirements.
Each iteration of the Grover-like search consists of three parts: (1) the phase estimation algorithm with the phase accumulation as a subroutine, (2) the binarisation of the marking through the oracle call $O_{\pm 1}$, followed by the uncomputation of (1), and (3) the amplitude amplification of the marked items.
For each full iteration in the training, a precision $t$ is fixed. This precision determines the number of calls of the neuron. For a given $t$, the PE subroutine calls the full marking routine $\sum_{i=0}^{t-1} 2^i = 2^t - 1$ times. Additionally, the inverse quantum Fourier transform on the ancillary registers requires $\Theta (t^2)$ steps, which can be neglected in the overall time analysis.
Once the training outputs a solution $\underline{w^*}$, the binary search described in Section \ref{bsearch} is executed until the unique solution corresponding to the global optimum is found. The binary search changes the precision $t$ in each step, and, consequently, the constant exponential overhead of the neuron calls. More precisely, for the first round in the binary search, the integer search interval $[0,N]$ is cut in half, which requires the precision (up to some subtleties we discuss below) to be $t=2$. With increasing threshold, $t$ increases until a stopping criterion for the binary search is met. Hence, altogether, the number of calls to the neuron is \begin{align} \# \text{Calls} = &\sum_{t=0}^{j^*} (2^{t+2} -1) \times 2n \times \sqrt{\tilde{N}} \nonumber \\ \leq &(2^{j^* + 3} - j^* - 5) \times 2n \times \sqrt{\tilde{N}} \ , \label{performPE} \end{align} \iffalse \begin{align} \# \text{Calls} = &\sum_{t=0}^{j^*} (2^{t+2} -1) \times 2n \times \text{?} \times \sqrt{\tilde{N}} \nonumber \\ \leq &(2^{j^* + 3} - j^* - 5) \times 2n \times \text{?} \times \sqrt{\tilde{N}} \ , \label{performPE} \end{align} \fi where $j^*$ is the number of bisection steps in the binary search algorithm on the interval of the threshold $N_t$, and is determined by the stopping criteria of the binary search (see Section \ref{bsearch}).
It can be shown \cite{nielsen2002quantum} that for a PE with probability of error $\epsilon$, in order to obtain the phase $N_i/(2n)$ accurate to $m$ bits, we need a precision (and number of ancillary control systems) \begin{align*} t=m+ \left( \log \left(2 + \frac{1}{2 \epsilon} \right) \right) \ . \end{align*} For instance, from the previous analysis in Section \ref{phaseest}, a relatively small interval inside which the output solutions of the quantum training lie can be achieved for $m\approx 5$. Allowing for an error $\epsilon$ of about $8\%$ then yields $j^* = 8$, which is small but requires 255 executions of the neuron during the PEs. {\color{blue}{[Is this example ok or should we just remove it?]}}
In the Appendix we present several implementations of the training methods and discuss their performances.
\subsection{Performance of Quantum training with a counter register} The quantum training with a qubit counter register performs differently from the PE approach: \begin{align} \# \text{Calls} = \ell^* \times 2n \times \sqrt{\tilde{N}} \ , \label{performCO} \end{align} \iffalse \begin{align} \# \text{Calls} = \ell^* \times 2n \times \text{?} \times \sqrt{\tilde{N}} \ , \label{performCO} \end{align} \fi where $\ell^*$ accounts for the number of steps in the binary search.
From a neuron call perspective, the counter approach is significantly more economic, even for low precision. However, the qubit register encoding the number of successful oracle counts scales as $\sqrt{n}$. Often, the number of training data is large, such that this logarithmic overhead in the number of ancillary qubits is challenging. In this case, the quantum training with PE may be favourable, as the constant exponential overhead of neuron uses during the PE is independent of the size $n$ of training data and the amount of weight strings $\tilde{N}$. The reason for this is that PE only estimates the \textit{fraction} $N_i /2n$, while the counter register stores the \textit{absolute value} of counts $N_i$.
For smaller sets of training data, however, the counter register is more efficient than the PE. These could occur in practice for shallow networks with a smaller amount of neurons. \iffalse \subsubsection{Choosing the right amount of error} In both approaches to quantum training, we have a probability of error emerging from the fact that it is unknown how many good weight strings exist. Since the stopping time of Grover search is only known for a fixed number of solutions, a binary search was used as a subroutine to ensure, up to an error $\delta$, that a unique solution to the training exists.
Here, we argue that in practise a lower precision is sufficient, and can even be favourable for the over-all performance of the neuron (or, generally, neural net). A known issue for classical neural networks is overfitting: often, using the optimal set of network parameters identified through training leads to poor performances when applied to real data (after the training, such that the solutions are not known) \cite{caruana2001overfitting}. To circumvent this issue, several techniques have been established, from deliberately using suboptimal set of weights (corresponding to a local minima of the cost landscape, rather than a global one), or reducing the size of the network such that the amount of parameter shrinks \cite{srivastava2014dropout}. It has been shown that both methods can improve the performance of the network. Yet, it is still an open problem how to select a set of parameters which guarantees a good performance of the network.
For the quantum training with fixed architecture this indicates that errors can actually be favourable. Grover search is known to yield the globally optimal solution, which is expected to be suboptimal for the overall performance of the network. Hence, imprecisions in the subroutines of PE and binary search can yield good, but suboptimal solutions. This also limits the number of calls of the neuron in the training.
However, how large a good scale of error is remains an open problem and will depend on both on the training data and the corresponding task. \fi \fi \subsection{Classical vs. quantum training} The training of classical neural networks is known to have three drawbacks: (1) there is no efficient method, and indeed training has been shown to be NP-complete even for small networks \cite{blum1989training,rojas2013neural}. (2) Commonly, the solutions found in training methods based on gradient descent are only locally optimal and can be globally very suboptimal \cite{SwirszczCP16,Alizadeh:2018ul}. (3) The gradient of the cost function with respect to a specific weight can be vanishingly small, preventing convergence to a solution, or explodingly large. This issue is often referred to as the problem of vanishing or exploding gradients.
In the following we elaborate on how the quantum training method in Section \ref{fulltrain} addresses these drawbacks.
\subsubsection{Quantum training is faster} The outcome of the quantum training methods discussed above is guaranteed with high probability to yield the globally optimal set of weights. For sufficiently unstructured cost landscapes, training methods based on gradient descent commonly output weight configurations corresponding to local extrema of the cost function. As a result, the performance on fresh data after the training stage risks being poor.
To ensure classically that the globally optimal parameters are found would require a brute-force search over a list of size $n \times \tilde{N}$, where $\tilde{N} = 2^N$, $N$ is the number of weights in the network, and $n$ is the number of training samples in the training set. This list consists of all weight strings and input pairs with assignment of a cost value. As each call of that list requires comparing desired outputs with outputs, such a brute force search calls the comparatory oracle ${N_C}^{classical}=n\times\tilde{N}$ times. On the other hand, from Eq.~\ref{groveroraclecalls} and Eq.~\ref{groveroraclecalls2}, our quantum training calls the comparatory oracle ${N_C}^{quantum} \approx 4n^2\log\left(\frac{n}{\delta}\right)\sqrt{\tilde{N}} $ times, where $\delta$ represents the precision of the threshold in the binary search for globally optimal weights (see Section \ref{bsearch}). Therefore, the ratio of ${N_C}^{classical}$ to ${N_C}^{quantum}$ is: \begin{align} \frac{{N_C}^{classical} }{{N_C}^{quantum} } \approx \frac{ \sqrt{\tilde{N}}}{4n\log\left(\frac{n}{\delta}\right)} = \frac{2^{N/2}}{4n\log\left(\frac{n}{\delta}\right)}, \label{ratio} \end{align} Note that, for training to be practical, $n$ should scale reasonably with the number of weights, and is thus expected to be bounded by a polynomial of $N$. Thus Eq.~\ref{ratio} can be termed an exponential advantage in $N$ for such cases. For example, within the architecture of a two-hidden-layer feedforward network that is sufficient to learn $n$ training samples \cite{huang2003learning}, we have the relation $n < N$ (see Appendix \ref{2hidden} for details). Therefore for this example we have: \begin{equation} \frac{{N_C}^{classical} }{{N_C}^{quantum} } >\frac{ 2^{N/2}}{4N\log\left(\frac{N}{\delta}\right) } \label{advantage} \end{equation}
Eq.~\ref{advantage} shows an instance of the quantum advantage of our training protocol over classical brute force search. The value of $N$ is often large, in the order of thousands to millions\cite{mocanu2018scalable}.
\subsubsection{Overcoming vanishing and exploding gradients} During the classical training, the weights are updated with respect to their caused change in cost. Binary activation functions are usually replaced by continuous approximations such as the sigmoid function. Yet, it can be shown that the gradient of the sigmoid function is bound to the interval $(0,1)$. Hence, in a network with $L$ layers, approximating the derivative $\partial C / \partial w$ of the cost function $C$ with respect to a weight $w$ in layer $\ell$ by backpropagation leads to a multiplication of $\gamma=L-\ell$ terms, all smaller than one -- the gradient vanishes exponentially in the number $\gamma$. Conversely, in certain cases the derivatives can be very large and the derivative explodes. Both situations lead to a failure in training, as the changes in the weights are either vanishingly small or too large to control.
A common approach to address this issue is to use specific activation functions, such as the ReLU function, which alleviates the issue but does not resolve it in general \cite{glorot2011deep}. Other solutions were proposed, such as classical global search methods \cite{bengio1994learning,Shang485892} and special architectures for neural networks \cite{watrous1992induction,graves2008novel,sutskever2011generating}, but these approaches, too, were shown to suffer from significant drawbacks \cite{hochreiter1998vanishing} or fail to resolve the exploding gradient problem \cite{pascanu2013difficulty}.
The quantum training method proposed in this work is a global search. It, therefore, finds globally optimal solutions while at the same time avoiding gradient related issues. While classically it is still an active field of research to improve on this problem, the quantum search completely resolves it. This makes the quantum search a powerful option to train classical networks.
\section{Summary and Outlook} In this work we presented a fully quantum protocol for the training of classical feedforward binary neural networks. It resolves two prominent issues of gradient descent training: suboptimal choices of weights and training failure due to vanishing or exploding gradients. The protocol is guaranteed to find the globally optimal set of weights, while yielding a quadratic advantage in the number of queries needed to achieve the same with a classical algorithm. A key contribution of the protocol is to find an explicit way of turning the neural net training problem into a quantum search problem. The training protocol is fully specified, and numerical simulations of small but non-trivial quantum binary neural nets were made.
An advantage of making this quantum generalisation of binary neural nets is that they can be implemented with physical qubits and are thus suitable for realisation with quantum computing experiments. Whilst implementing large networks is currently not feasible, training a single binary neuron--an elementary module of the network--with the method described above requires approximately 8 logical qubits (see the performance section), which is within range of current state-of-the-art experiments.
The ability to find the globally optimal weights may also increase the risk of overfitting. There are several methods for avoiding overfitting classically, including dropping out certain layers at times during the training, restricting the number of weights and weight regularisation techniques~\cite{srivastava2014dropout,shrestha2019review}. It thus seems well-motivated to build on this quantum training framework to explore analogous quantum techniques. For example, quantum versions of drop-out could involve dynamic quantum network architectures e.g.\ using quantum control systems or teleportation between non-successive layers. \\
\appendix \onecolumngrid
\section{Circuit implementation of the full phase estimation training cycle \label{app:fulltrain}} In the main text, Section \ref{fulltrain} presented a schematic overview of the complete cycle of the quantum training via phase estimation for a single QBN. Here, we provide the circuit implementation for the full training, which consists of $k^*$ training cycles (cf.\ Figure \ref{fig:cycle}).
\begin{figure}
\caption{\textit{Circuit implementation for the full training.}}
\label{fig:cycle2}
\end{figure}
As depicted in Figure \ref{fig:cycle2}, from top to bottom, we first identify the circuit's input qubits (all initialized in state $|0\rangle$). The very first qubit $\ket{0}_1$ plays the role of a control for the binary marking oracle $O_{\pm 1}$. As described in the main text, the oracle decides whether the quality of a weight string is above a certain threshold $N_t$, and if so, it changes the sign of the corresponding string. To do so, the control qubit of the oracle is to be transformed into state $|-\rangle$ to perform the threshold comparison correctly (we explain the functioning of the oracle below). The next three input qubits $\ket{0}_2-\ket{0}_4$ are the ancillary control systems for the phase estimation. In the schematic circuit discussed here this means that the phase estimation subroutine estimates $t=3$ digits in binary of the number $\phi$ in the phase $e^{i \pi 2 \phi}$ (up to some error described in Section \ref{perfPE}). The remaining qubits play the following role: the multi-qubit register $|0\rangle_5$ represents the weight register, which is transformed into the coherent superposition $|W\rangle=\frac{1}{\sqrt{\tilde{N}}} \sum |\underline{w}\rangle$ of all weight strings. The last qubits with index 6 are the remaining inputs (training data and ancillas for saving the output of the neural network). The training inputs and desired outputs are all initialized in zero. As we will see later based on concrete examples, the unitary action $\tilde{U}$ includes changing the training input qubits into the intended training pairs.\\
The first subroutine in the blue box executes the phase estimation. The PE algorithm first initializes the $t=3$ control qubits in the $|+\rangle$ state. Each qubit controls $2^{j-1}$ uses of the full marking unitary $\tilde{U}$ (see Section \ref{markingtheweights} for more details) on the inputs of $\tilde{U}$. The circuit implementation of $\tilde{U}$ is further discussed in Appendix \ref{app:qbnnexp}. Then, the inverse quantum Fourier transform (QFT) converts the control ancillas into the state $|\phi_1 \phi_2 \phi_3 \rangle$, which represents a truncated binary encoding of the factor $N_i / 2n$ into the qubit registers. \\
Next, the first red box implements the oracle $O_{\pm 1}$. The function of the oracle is to add a minus sign to the states $|\phi_1 \phi_2 \phi_3 \rangle$, which encode the quality $N_i$ of the weight string (in binary). In case $N_i$ exceeds the threshold $N_t$, the state is changed as $|\phi_1 \phi_2 \phi_3 \rangle \mapsto -|\phi_1 \phi_2 \phi_3 \rangle$. In this specific example depicted in Figure \ref{fig:cycle2}, the threshold is set to be $N_t=n$ (namely the optimal weight should be good for all the inputs). When converting to binary digits this condition translates to $\phi_1=1, \phi_2=0, \phi_3=0 $. Therefore we apply a multi-controlled gate to realise the sign flip: only when $\phi_1=1, \phi_2=0, \phi_3=0$ is the controlled-NOT gate executed on the ancilla (which has been pre-set to $|-\rangle$ by applying X and a Hadamard gate on it). The full routine is thereafter to be uncomputed, with the purpose of decoupling the marked weight state $|\hat{W}\rangle=\frac{1}{\sqrt{\tilde{N}}}\sum (-1)^{N_i\geq N_t} |\underline{w}\rangle$ from the other systems. The uncomputation of PE is depicted by the second blue box.\\
After the decoupling of the weight state, the marked superposition $|\hat{W}\rangle$ undergoes amplitude amplification: the diffusion operator $D=-H \Lambda_0 H$, depicted by the second red box, amplifies the amplitude of the marked states and dampens the unmarked elements. The full Grover cycle - marking and amplifying - is then collated into one box, called $Q$, which is repeated as described in \ref{bsearch}. This yields an optimal weight string, up to some errors induced by the precision $t$ of the phase estimation algorithm and $\delta$ of the binary search.
\section{Implementations of QBNN examples}\label{app:qbnnexp} Section \ref{app:fulltrain} gave a schematic overview of the full training cycle. We now describe in detail how the phase accumulation $\tilde{U}$ is implemented as a quantum circuit. We illustrate this with concrete examples below.
\subsection{Single neuron with 2 weights and 2 inputs \label{app:exp22}} We begin with studying an elementary instance of a network, namely a single QBN with two inputs and two weights. Figure \ref{fig:QCFON6}
illustrates the example, together with a circuit implementation of the phase accumulation subroutine--see figure \ref{fig:QCFON62} .
\begin{figure}
\caption{Single neuron with 2 inputs and 2 weights.}
\label{fig:QCFON6}
\end{figure}
\begin{figure}
\caption{Quantum circuit implementation of the phase accumulation sub-routine for the single neuron with 2 inputs and 2 weights.}
\label{fig:QCFON62}
\end{figure}
In this circuit implementation, the 6 input qubits are (from top to bottom): the two weights $|w_1\rangle$ and $|w_2\rangle$, the two training inputs $|a_1\rangle$ and $|a_2\rangle$, the ancilla storing the output of the neuron computation, and the desired output $|a^*\rangle$. The initial training datapoint is $(a_1,a_2,a^*)=(0,0,0)$. In the first step, the Hadamard transformations create the coherent superposition of all possible weight strings, $|W\rangle=\frac{1}{2} (|00\rangle+|01\rangle+|10\rangle+|11\rangle)$ (the weight strings are simply $|w\rangle=|w_1 w_2\rangle$). Next, the circuit in the first dashed blue box depicts the unitary actions of the neurons: the CNOT gates implement the multiplication between weights and inputs, the following Toffoli gate implements the addition (bitcount operation) of the weighted inputs and the activation via the Sign function. The output is then saved in the ancillary qubit on wire 5. After the oracle $\Lambda$ is called, the uncomputation of the neuron is performed (the circuit in the second blue box). A single phase accumulation cycle is then collectively denoted by the gate $N$. \\
The full phase accumulation subroutine proceeds by repeating $N$ a total of $n=4$ times, where $n$ is the number of different training inputs. New input data is initialized in the circuit by applying X gates on $\ket{a_1}$, $\ket{a_2}$, $\ket{a^*}$. In this example, the training set we adopt is: $\{(a_1 , a_2, a^* )\}=\{(0,0,0),(1,0,0),(0,1,1),(1,1,1)\} $ (We exhaust all possible data points as training data, to reasonably make use of the phase accumulation process, for such a small example with few inputs. This is also adopted for the other examples in this appendix).
\subsection{Single neuron with 3 weights and 3 inputs \label{singleneur3in}} Next, we consider a single neuron with 3 weights and 3 inputs, as in figure \ref{fig:QCFON8}. As we will see below, this slight increase in inputs manifests itself in a significantly more complex gate implementation of the neuron action. Figure \ref{fig:QCFON82} shows the quantum circuit for the phase accumulation subroutine of this example. \begin{figure}
\caption{Single neuron with 3 inputs and 3 weights.}
\label{fig:QCFON8}
\end{figure}
\begin{figure}
\caption{Quantum circuit implementation of phase accumulation sub-routine for the example of a single neuron with 3 inputs and 3 weights.}
\label{fig:QCFON82}
\end{figure}
As depicted in Figure \ref{fig:QCFON82}: qubits with index 1-3 take the role of the weights and qubits with index 4-6 are the three training inputs. Qubit 7 is an ancilla storing the output of the QBN. Qubit 8 carries the desired outcome. As before, the dashed blue box depicts the action of the QBN consisting of weighing the inputs with the weights, adding up the weighted input together with the subsequent activation function (the gate implementation of the addition and activation is included in the dashed red box). After applying the oracle $\Lambda$, the action of the QBN is uncomputed (the circuit in the second dashed blue box). Finally, the full phase addition is repeated $n$ times, where $n$ denotes again the amount of training input data. In this graph we only depict two data points $\{(a_1 , a_2, a_3, a^* )\}=\{(0,0,0,0),(1,0,0,0)\} $ of the training set and use an ellipsis to indicate that the loop proceeds over all remaining training pairs. The two independent sets of full training data considered for this architecture are specified in Figure \ref{fig:qbnndata} below. \\
\begin{figure}
\caption{\textit{Training data used for the example of a single neuron with 3 weights and 3 inputs:} each training tuple consists of three inputs $a_1$,$a_2$,$a_3$ and one desired output $a^*$. The aim of the training is to find weight configurations that can generate the desired output of the corresponding inputs. We consider two independent tasks (enumerated by 1 and 2) and study the training according to the data.}
\label{fig:qbnndata}
\end{figure}
During the loops of the full training (see Figure \ref{fig:PEfull} in the main text), the probabilities of the weight strings change. Here we present the probability evolution of the weight strings, in the quantum training with the highest $N_t$ for which there exists at least one optimal weight string: For task 1, Figure \ref{fig:qbnn31data} shows the probabilities of the weight strings evolving over the training cycles, and correspondingly figure \ref{fig:qbnn31data2} illustrates the evolution for task 2.
\begin{figure}
\caption{\textit{Task 1, the probabilities of weight strings, evolving over the training cycles.} For Task 1 there exists only one optimal weight configuration, namely (000). The vertical bars present the probabilities of the weight configurations, for each iteration of the full training cycle during the training. The probability of the optimal weight is amplified during the iterations and reaches its maximum after two iterations, in accordance with the optimal stopping time $k^*=\sqrt{8}\pi/4 \approx 2.22$ of standard Grover search.}
\label{fig:qbnn31data}
\end{figure}
\begin{figure}
\caption{\textit{Task 2, the probabilities of weight strings, evolving over the training cycles.} For Task 2 there are two optimal weight configurations, namely (000) and (101). The probabilities of the optimal weights are amplified during the iterations and reach their maximum after the first iteration, in accordance with the optimal stopping time $k^*=\sqrt{8/2}\pi/4 \approx 1.57$ of Grover search with two possible solutions.}
\label{fig:qbnn31data2}
\end{figure}
\subsection{3-layer 5-neuron network with 6 weights, 2 inputs and 1 output}
Analogously to before, we study the circuit implementation of the phase accumulation subroutine for a network with five neurons and layer configuration 2-2-1. This is depicted in figure \ref{fig:qbnn221} and \ref{fig:qbnn2212}. In total, there are two inputs and six weight states $|w_1\rangle$, $|w_2\rangle$, $\dots$, $|w_6\rangle$, leading to $2^6=64$ possible weight strings $|w_1 w_2 \dots w_6\rangle$. The two dashed blue boxes show the circuit for the computation and uncomputation of the neural network respectively. \begin{figure}
\caption{Five-neuron network with layer configuration 2-2-1. The network takes two inputs and has in total 6 weights.}
\label{fig:qbnn221}
\end{figure} \begin{figure}
\caption{\textit{Circuit implementation for a network with layer configuration 2-2-1, with 2 inputs and 6 weights.} The qubits indexed 1-4 are the weights for the first layer and qubits with index 5-6 are the weights for the second layer. Qubits with index 7-10 are 4 training inputs duplicated from 2 inputs in the first layer, for the fanout to the second layer. (To save space in the diagram, the fan-outs in the first layer take place to the left, outside the diagram. Thus the inputs are already copied when they enter the depicted circuit.) Qubits 11-12 are ancillas storing the outputs within the QBNN. Qubit 13 encodes the desired outcome. The dashed blue box depicts the action of the QBNN. After applying the oracle $\Lambda$, the action of the QBNN is uncomputed. The circuit depicts one round of marking for one data point in the training set, for the phase accumulation process.}
\label{fig:qbnn2212}
\end{figure}
For the QBNN in this example, we use the training set $\{(a_1 , a_2, a^* )\}=\{(0,0,0),(1,0,0),(0,1,0),(1,1,1)\} $. In figure \ref{fig:qbnn221data}, we present the probability evolution of the weight strings, in the quantum training with the highest $N_t$ for which there exists at least one optimal weight string.
\begin{figure}
\caption{\textit{The probability evolution of the weight strings in the 5-neuron QBNN with layer configuration 2-2-1, with 2 inputs and 6 weights.} For the training set $\{(a_1 , a_2, a^* )\}=\{(0,0,0),(1,0,0),(0,1,0),(1,1,1)\} $ there are 7 optimal weight configurations among the total $2^6=64$ configurations in the QBNN. In the Figure, the horizontal axis presents all the 64 weight configurations. The configurations which have the same weights in the first layer are grouped together, with different colours representing the different weights $(00,01,10,11)$ in the second layer. The vertical bars present the probabilities of the weight configurations, for each iteration during the training. The probabilities of the optimal weights are amplified over the iterations and reach their largest amplification after two iterations. This result agrees with the expected stopping point given by $\sqrt{N/M} \pi/4$ to achieve the largest amplification, which is $\sqrt{64/7} \pi/4 \approx 2.37$ for this instance. }
\label{fig:qbnn221data}
\end{figure}
\subsection{3-layer 6-neuron network with 8 weights, 3 inputs and 1 output} We conclude the set of examples with a six-neuron network with layer configuration 3-2-1, with 3 inputs and 8 weights (see Figure \ref{fig:classical321}). This is one of the largest networks that can be implemented on quantum simulation platforms such as Huawei's computing cloud HiQ. The circuit implementation of a single cycle of phase accumulation is shown in figure \ref{fig:quantum321}. \\
\begin{figure}
\caption{Neural network with 6 neurons in layer configuration 3-2-1.}
\label{fig:classical321}
\end{figure}
\begin{figure}
\caption{\textit{Circuit implementation for a network with layer configuration 3-2-1.} Qubits 1-6 are the weights for the first layer and qubits 7-8 are the weights for the second layer. Qubits 9-14 are the 6 training inputs, duplicated from 3 inputs in the first layer, for the fanout to the second layer. (To save space in the diagram, the fan-outs in the first layer take place to the left, outside the diagram. Thus the inputs are already copied when they enter the depicted circuit.) Qubits with index 15-16 are ancillas storing the outputs of the neurons within the QBNN and qubit 17 encodes the output of the QBNN. Qubit 18 encodes the desired output. The dashed blue box contains the action of the QBNN, consisting of weighing the inputs with the weights, adding up the weighted input together with the subsequent activation function (the gate implementation of the addition and activation is included in the dashed red box). After applying the oracle $\Lambda$, the action of the QBNN is uncomputed (the circuit in the second dashed blue box). The entire circuit depicts one round of marking for one data point in the training set, for the phase accumulation process.}
\label{fig:quantum321}
\end{figure}
We trained the QBNN in this example with the same two tasks presented in figure \ref{fig:qbnndata}. For instance of task 2, in figure \ref{fig:qbnn321data}, we present the probability evolution of the weight strings, in the quantum training with the highest $N_t$ for which there exists at least one optimal weight string.
\begin{figure}
\caption{\textit{Probability evolution of the weight strings in the 6-neuron QBNN with configuration 3-2-1, task 2.} }
\label{fig:qbnn321data}
\end{figure}
As shown in figure \ref{fig:qbnn321data}, for this example, there are 8 optimal weight configurations among the total $2^8=256$ configurations of the 8 binary weights in the QBNN. The horizontal plane presents all the 256 weight configurations. Each point in the grid represents one weight configuration, with two axes indicating the weights in the two layers respectively. The vertical bars present the probabilities of all the weight configurations, for each iteration during the training. The probabilities of the optimal weights are amplified over the iterations and reach their maximum at iteration 4, in agreement with the expected stopping point given by $\sqrt{256/8} \, \pi/4 \approx 4.44$. \\
Figure \ref{fig:qbnn321data2} below shows the probability of obtaining an optimal weight string, as a function of the training cycle for both tasks.
\begin{figure}
\caption{\textit{Probability of obtaining an optimal weight string, for a 6-neuron QBNN with layer configuration 3-2-1.} The graphs show the optimal stopping time of the training, for both tasks 1 and 2. We see that the probability of success is close to unity after 6 iterations for task 1 and 4 iterations for task 2. }
\label{fig:qbnn321data2}
\end{figure}
Recall the comparison between our quantum training and classical training stated in section~\ref{perfPE}: classical brute-force search calls the comparator oracle ${N_C}^{\mathrm{classical}}=n\times\tilde{N}$ times, whereas our quantum training algorithm only calls it approximately $ N_C=N_G\times 2n\times (2n-1)$ times. Inserting the values $n=8$ and $\tilde{N}=256$ for this example, one can see that even for this small network there is a quantum advantage in $N_C$: ${N_C}^{\mathrm{classical}}=8\times256$, whereas our quantum $N_C=8\times180$ (Task 1). Note that for large networks the quadratic advantage in $\tilde{N}$ will be much more significant, as discussed in the performance section. \\
\textit{*The implementations in this appendix are done via Huawei's Quantum Computing Cloud Platform HiQ (\url{https://hiq.huaweicloud.com}) and the open-source version code is available here: \url{https://github.com/phylyd/QBNN}}
\section{Equality for the number of qubits needed in the QBNN \label{app:noofqubits}} In Section \ref{perfPE} we made use of the equality \begin{align} Q_{\text{input}} + Q_{\text{ancilla}} = Q_{\text{weight}} + Q_{\text{output}} \ , \label{app:bilance} \end{align} where $Q_{\text{input}}$ was the amount of input qubits, $Q_{\text{ancilla}}$ the amount of ancilla qubits, $Q_{\text{weight}}$ the amount of weight qubits and $Q_{\text{output}}$ the number of output qubits. Here, we derive this equality illustrated by two examples. \\
Let us begin with the elementary case of a single neuron, depicted in Figure~\ref{qcircuit1} below. It is straightforward to count and verify the equality in Eq.~\eqref{app:bilance}, by noting that $Q_{\text{ancilla}}=Q_{\text{output}}=1$ and that every input comes on an edge with an assigned weight, i.e. $Q_{\text{input}}=Q_{\text{weight}}$. Thus Eq.~\eqref{app:bilance} holds for the case of a single neuron.\\
\begin{figure}
\caption{\textit{The case of a single neuron.} }
\label{qcircuit1}
\end{figure}
Next, we consider a more involved network with three neurons in two layers, see Figure \ref{qcircuit2}.\\
\begin{figure}
\caption{\textit{The case of 3 neurons in 2 layers.} }
\label{qcircuit2}
\end{figure}
Direct counting verifies that Equation \ref{app:bilance} also holds here. The reason in general is that in the first layer the input qubits and weight qubits always come in pairs, i.e. \begin{align} Q_{\text{input}}= Q_{\text{weight}}^{(in)} \ , \end{align} where the superscript denotes that the count is for the weights in the first layer. \\
In the first layer and the hidden layers, the ancilla qubits from the previous layer, which carry the outputs of the neuron computations, together with the fan-out ancilla qubits, serve as inputs for the next layer, and each of them is paired with a weight qubit in the hidden layers. Mathematically, this condition reads
\begin{align} Q_{\text{ancilla}}^{(in)}+Q_{\text{ancilla}}^{(hidden)} = Q_{\text{weight}}^{(hidden)} \ , \end{align} where $hidden$ denotes that the equality holds for any hidden layer between the input and output layer of the network. For the output layer each output qubit corresponds to an ancilla qubit in this layer. Hence,
\begin{align} Q_{\text{ancilla}}^{(out)} = Q_{\text{output}} \ . \end{align} Altogether, we see that the equality \begin{align} Q_{\text{input}} + Q_{\text{ancilla}} = Q_{\text{weight}} + Q_{\text{output}} \end{align} follows, for general QBNN according to our design.
\section{Correlation between $n$ and $N$ for a two-hidden-layer feedforward network \label{2hidden}}
In \cite{huang2003learning} the author proved that for a two-hidden-layer feedforward network with $m$ output neurons, the number of hidden nodes that is sufficient to learn $N$ samples with negligibly small error is given by:
\begin{equation}
2\sqrt{(m+2)N}
\end{equation}
Specifically, the sufficient number of hidden nodes in the first layer is suggested to be \begin{equation} L_1 = \sqrt{(m+2)N}+2 \sqrt{N/(m+2)}
\end{equation}
and in the second layer is suggested to be \begin{equation} L_2 = m\sqrt{N/(m+2)}
\end{equation}
In this architecture, the number of weights between the two hidden layers is the product of the number of nodes in the two hidden layers:
\begin{align}
L_1 \times L_2 &= \left(\sqrt{(m+2)N}+2 \sqrt{N/(m+2)}\right)\times\left( m\sqrt{N/(m+2)}\right) \nonumber\\
&= mN+2Nm/(m+2)
\end{align}
The total number of weights in this optimal architecture $N_{total}$ is larger than $ L_1 \times L_2 $ (Since we also have weights between input layer and first hidden layer, weights between second hidden layer and output layer). Thus:
\begin{equation}
N_{total} > L_1 \times L_2 > mN
\end{equation}
Using the notation we use in our paper, i.e.\ $ N_{total} \mapsto N, N \mapsto n $, we have:
\begin{equation}
N > m \times n
\end{equation}
where $N$ is the number of weight qubits, $n$ is the number of training samples, and $m$ is the number of neurons in the output layer.
Since $m$ is an integer larger than 1, we have $ N > mn > n $. We have thus derived the correlation $ n < N$ used in the main text.
\end{document} | arXiv | {
"id": "1810.12948.tex",
"language_detection_score": 0.8177220821380615,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Fast truncation of mode ranks for bilinear tensor operations} \author{D. V. Savostyanov, E. E. Tyrtyshnikov, N. L. Zamarashkin} \address{Institute of Numerical Mathematics, Russian Academy of Sciences, \\
Russia, 119333 Moscow, Gubkina 8 \\
\email{dmitry.savostyanov@gmail.com, [tee,kolya]@bach.inm.ras.ru}
} \grant{This work was supported by
RFBR grants 08-01-00115, 09-01-12058, 10-01-00757, 10-01-09201, RFBR/DFG grant 09-01-91332,
Russian Federation Gov. contract $\Pi940$
and Priority research program of Dep. Math. RAS.
} \date{\today}
\begin{abstract} We propose a fast algorithm for mode rank truncation of the result of a bilinear operation on 3-tensors given in the Tucker or canonical form. If the arguments and the result have mode sizes $n$ and mode ranks $r,$ the computation costs~$\mathcal{O}(nr^3 + r^4).$ The algorithm is based on the cross approximation of Gram matrices, and the accuracy of the resulting Tucker approximation is limited by the square root of machine precision. \keywords{\ppkeywords} \par \noindent \emph{AMS classification:} 15A21, 15A69, 65F99 \end{abstract}
\section{Introduction} Data sparse representations of tensors and efficient operations in the corresponding formats play increasingly important role in many applications. In the paper we consider a 3-\emph{tensor} $\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C} = \mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}[i,j,k]$ that is an array with three indices. The number of allowed values of each index is called \emph{mode size}. To specify tensor indices explicitly, we use \emph{square brackets}. This notation allows to easily specify different \emph{index transformations}. For instance, \emph{unfoldings} of ${n_1 \times n_2 \times n_3}$ tensor $\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}[i,j,k],$ are \emph{matricizations} of sizes $n_1 \mathbin{\times} n_2n_3,$ $n_2 \mathbin{\times} n_1n_3 $ and $n_3 \mathbin{\times} n_1n_2$ that consist of columns, rows and tube fibers of $\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C},$ \begin{equation}\label{eq:un} A^{(1)} = A[i, jk], \qquad A^{(2)} = A[j, ki], \qquad A^{(3)} = A[k, ij]. \end{equation} Here we set row/column/fiber index of the tensor $\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}[i,j,k]$ as row index and join the two others in one multiindex for columns of the unfolding. The result is considered as a two-index object (matrix), with row and column indices separated by comma. The difference between matrices and tensors is additionally stressed by use of uppercase letter instead of bold uppercase.
The \emph{reshape} of tensor elements assumes as well a change of the index ordering. For example, transposition of matrix reads $(A[i,j])^\t = A[j,i],$ vectorization reads $\mathbf{a}} \def\b{\mathbf{b}} \def\c{\mathbf{c}} \def\d{\mathbf{d}[ij] = A[i,j].$ We see that the square bracket notation is rather self-explaining and suits for description of algorithms working with multidimensional data. We also will use the MATLAB-style \emph{round bracket} notation $a(i,j,k)$ to point to individual element of $\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}[i,j,k]$ and $\mathbf{a}} \def\b{\mathbf{b}} \def\c{\mathbf{c}} \def\d{\mathbf{d}(i,:,k)$ to select a mode vector (i.e. row) from tensor $\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}.$ Scalars and vectors are denoted by lowercase and bold lowercase letters.
In numerical work with tensors of large \emph{mode size} it is crucial to use data sparse formats. For 3-tensors, the most useful are the following.
The \emph{canonical decomposition} \cite{hitchcock-sum-1927,cc-parafac-1970,harshman-parafac-1970} (or \emph{canonical approximation} to some other tensor) reads \begin{equation}\label{eq:c} \mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}[i,j,k] = \sum\limits_{s=1}^R \mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}_s[i] \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} \v_s[j] \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} \w_s[k], \qquad a(i,j,k) = \sum\limits_{s=1}^R u(i,s) v(j,s) w(k,s). \end{equation} The minimal possible number of summands is called \emph{tensor rank} or \emph{canonical rank} of the given tensor $\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}.$ However, canonical decomposition/approximation of a tensor with minimal value of $R$ is a rather ill-posed and computationally unstable problem \cite{desilva-2008}. This explains why among many algorithms of canonical approximation (cf.~\cite{comon-2000,lathauwer-schur-2004,esgras-bb-2009,ost-sorto-2009}) none is known as absolutely reliable, and no robust tools for linear algebra operations maintaining the canonical format (linear combinations, etc.) are proposed.
The (truncated) \emph{Tucker decomposition/approximation} \cite{Tucker} reads \begin{equation}\label{eq:t}
\begin{split}
\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}[i,j,k] & = \G[p,q,s] \mathbin{\times}_1 U[i,p] \mathbin{\times}_2 V[j,q] \mathbin{\times}_3 W[k,s], \\
a(i,j,k) & = \sum\limits_{p=1}^{r_1}\sum\limits_{q=1}^{r_2} \sum\limits_{s=1}^{r_3} g(p,q,s) u(i,p) v(j,q) w(k,s).
\end{split} \end{equation} The quantities $r_1,r_2,r_3$ are referred to as \emph{Tucker ranks} or \emph{mode ranks}, the tensor $\G=\G[p,q,s]$ of size $r_1 \mathbin{\times} r_2 \mathbin{\times} r_3$ is called the \emph{Tucker core}, the symbol $\mathbin{\times}_l$ designates the multiplication of a tensor by a matrix along the $l$-th mode, the \emph{mode factors} $U, V, W$ have orthonormal columns. In $d$ dimensions, the memory to store $r\mathbin{\times} r\mathbin{\times} \ldots \mathbin{\times} r$ core is $r^d,$ that is usually beyond affordable for large $d$ and even for very small $r$ (so-called \emph{curse of dimensionality}). For $d=3, \: r\sim 100$ the storage is small and Tucker decomposition can be used efficiently.
In~\cite{ost-latensor-2009} the efficient operations with 3-tensors in canonical and Tucker formats are discussed, with approximation of the result in the Tucker format. Simple operations like linear combination of small number of structured tensors can be done using \emph{multilinear SVD} \cite{lathauwer-svd-2000} (or high-order SVD, HOSVD), with quasi-optimal ranks and guaranteed accuracy. Linear combination of many tensors, convolution, Hadamard (pointwise) product of tensors and many other bilinear operations reduce to recompression of the following structured tensor \begin{equation}\label{eq:f}
\begin{split}
\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k] = & \mathop{\mathbf{Kron}}\nolimits(\G, \H)[ap,bq,cs] \mathbin{\times}_1 U[i,ap] \mathbin{\times}_2 V[j,bq] \mathbin{\times}_3 W[k,cs], \\
f(i,j,k) = & \sum_{pqs} \sum_{abc} g(p,q,s) h(a,b,c) u(i,ap) v(j,bq) w(k,cs),
\end{split} \end{equation} with ${r_1 \times r_2 \times r_3}$ core $\G[p,q,s],$ ${p_1 \times p_2 \times p_3}$ core $\H[a,b,c]$ and non-orthogonal factors $U, V$ and $W.$ Formally~\eqref{eq:f} is a Tucker-like format with larger mode ranks $p_1r_1, p_2r_2, p_3r_3,$ that should be reduced (truncated) maintaining the desired accuracy. Due to memory limitations, $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k]$ can not be assembled for mode sizes $n \gtrsim 10^3$ and auxiliary $p_1r_1 \mathbin{\times} p_2r_2 \mathbin{\times} p_3r_3$ core can not be assembled for ranks $r \gtrsim 30$ (see Tab.~\ref{tab:mem}).\footnote{We always assume $n_1=n_2=n_3=n$ and $r_1=r_2=r_3=p_1=p_2=p_3=r$ in complexity estimates} The structure of $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}$ should be exploited without explicit evaluation of large temporary arrays.
\begin{table}[t] \caption{Memory for $r^d$ elements, MB} \label{tab:mem} \begin{center}
\begin{tabular}{c|cccc}
& $d=3$ & $d=4$ & $d=5$ & $d=6$ \\ \hline $r=15$ & $0.026$ & $0.4$ & $5.8$ & $87$ \\ $r=30$ & $0.2$ & $6.2$ & $185$ & $5560$ \\ $r=50$ & $0.95$ & $47$ & $2384$ & $119210$ \\ $r=100$& $7.7$ & $763$ & $76300$ & $\approx 8$ TB \\ \end{tabular} \end{center} \end{table}
A practical rank-reduction algorithm proposed in \cite{ost-latensor-2009} is a rank revealing version of iterative Tucker-ALS~\cite{tuckerals-1980,lathauwer-rank1-2000} requiring $\mathcal{O}(nr^4 + r^6)$ operations. However, the number of iterations in Tucker-ALS depends on the initial guess, and fast approximate evaluation of Tucker factors of~\eqref{eq:f} is important.
In Sec.~2 we propose to approximate dominant mode subspaces of $\mathbf{F}[i,j,k]$ by the ones of simpler tensors. In Sec.~3 we compute dominant mode subspaces by a cross approximation of Gram matrices of the unfoldings. The resulting algorithm requires $\mathcal{O}(nr^3 + r^4)$ operations in the three-dimensional case and can be easily generalized to higher dimensions using $\mathcal{O}(dnr^3 + dr^{d+1})$ operations. Since it uses a decomposition of Gram matrices, the accuracy is limited by the square root of machine precision. In Sec.~4 we apply the proposed method to the Hadamard product of electron densities of simple molecules and show that, using the result as an initial guess, Tucker-ALS converges to almost machine precision in one iteration.
In the paper we use Frobenius norm of tensors, that is defined as follows $$
\|\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C} \|_F^2 \mathrel{\stackrel{\mathrm{def}}{=}} \f{\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}}{\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}}, \qquad \f{\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}}{\B} \mathrel{\stackrel{\mathrm{def}}{=}} \sum\limits_{i=1}^{n_1} \sum\limits_{j=1}^{n_2} \sum\limits_{k=1}^{n_3} a_{ijk} b_{ijk} $$ and spectral norm of tensor (cf.~\cite{defant-1993}) $$
\|\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}\|_\mathbf{2} \mathrel{\stackrel{\mathrm{def}}{=}} \max_{\|\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}\|=\|\v\|=\|\w\|=1} \mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C} \mathbin{\times}_1 \mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}^\t \mathbin{\times}_2 \v^\t \mathbin{\times}_3 \w^\t = \max_{\|\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}\|=\|\v\|=\|\w\|=1} \f{\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}}{\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w} \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} \v \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} \w}, $$
induced by standard vector norm $\|\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}\|^2 \mathrel{\stackrel{\mathrm{def}}{=}} \|\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}\|_2^2 = (\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w},\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}) = \sum_{i=1}^n |u_i|^2.$
\section{Approximation of dominant subspaces} Our goal is to find approximate dominant subspaces of an ${n_1 \times n_2 \times n_3}$ tensor~\eqref{eq:f} producing an approximation in the Tucker format \begin{equation}\label{eq:appr}
\tilde\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k] = \mathbf{T}[\alpha,\beta,\gamma] \mathbin{\times}_1 X[i,\alpha] \mathbin{\times}_2 Y[j,\beta] \mathbin{\times}_3 Z[k,\gamma], \qquad \|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H} - \tilde \mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_F \leq \varepsilon} \def\phi{\varphi \|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_F \end{equation} with a desired (not very high) accuracy and values of mode ranks for $\tilde\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H},$ close to optimal.
Tucker factors $X[i,\alpha], Y[j,\beta]$ and $Z[k,\gamma]$ approximate dominant subspaces of rows, columns and fibers of $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k],$ respectively. They can be computed by SVD of the unfoldings of $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H},$ as proposed in~\cite{lathauwer-svd-2000}, but this method requires evaluation of all elements of tensor and is not feasible for large mode sizes. We can compute~\eqref{eq:appr} interpolating a given tensor on carefully selected set of elements. This is done in Cross3D algorithm~\cite{ost-tucker-2008}, that requires evaluation of $\mathcal{O}(nr + r^3)$ tensor elements and uses $\mathcal{O}(nr^2 + r^4)$ additional operations. For a structured tensor~\eqref{eq:f} this summarizes to $\mathcal{O}(nr^3 + r^6)$ operations, i.e. the complexity is~\emph{linear} in mode size. However, pivoting and error checking involves heuristics and in certain cases is slower than the approximation itself. For example, computation of residual $(\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H} - \tilde\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H})[i,j,k]$ on $\mathcal{O}(n)$ randomly picked elements uses $\mathcal{O}(nr^4)$ operations.
To avoid heuristic approaches, we can evaluate dominant subspaces by proper decomposition of Gram matrices of the unfoldings. In~\cite{sav-rr-2009} this idea was used for fast mode rank truncation of tensor given in the canonical form~\eqref{eq:c} with large number of terms. The proposed in~\cite{sav-rr-2009} cross approximation algorithm is equivalent to an unfinished Cholesky decomposition and computes rank-$r$ dominant basis using the diagonal and certain $r$ columns of the Gram matrix. However, for the unfolding $F[i,jk]$ of tensor $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k]$ the Gram matrix $(F F^\t)[i,i']$ reads \begin{equation}\label{eq:ff}
\begin{split}
(F F^\t)(i,i') = \sum_{pqs} \sum_{abc} \sum_{p'q's'} \sum_{a'b'c'} & g(p,q,s) h(a,b,c) g(p',q',s') h(a',b',c') \\
& (V^\t V)(bq,b'q') (W^\t W)(cs,c's') u(i,ap) u(i',a'p'),
\end{split} \end{equation} and it is easy to check, that evaluation of any element of~\eqref{eq:ff} requires $\mathcal{O}(r^6)$ operations. Therefore, the algorithm from~\cite{sav-rr-2009} applied to~\eqref{eq:ff} has $\mathcal{O}(nr^6)$ complexity, which is not promising even for moderate $r.$ To perform faster, we propose to change the computational objective and look for dominant subspaces of tensors with a simpler structure.
Rewrite the tensor~\eqref{eq:f} as follows \begin{equation}\label{eq:split}
\begin{split}
\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k] = & {} \mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'[i,bq,cs] \mathbin{\times}_2 V[j,bq] \mathbin{\times}_3 W[k,cs], \\
& {} \mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'[i,bq,cs] = \mathop{\mathbf{Kron}}\nolimits(\G,\H)[ap,bq,cs] \mathbin{\times}_1 U(i,ap).
\end{split} \end{equation} It is clear that the Tucker approximation of $\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'[i,bq,cs]$ gives a Tucker approximation of $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k]$ with the same mode-$1$ rank. Therefore, we can approximate dominant mode-$1$ subspace of $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}$ by the one of $\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'.$ The accuracy of resulted approximation is estimated by the following theorem.
\begin{theorem}\label{thm1} For tensor $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k]$ given by~\eqref{eq:split} it holds \begin{equation}\nonumber
\|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_F \leq \|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_F \|V\|_2 \|W\|_2, \quad
\|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_\mathbf{2} \leq \|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_\mathbf{2} \|V\|_2 \|W\|_2, \end{equation} and for mode-$1$ unfoldings $F=F[i,jk]$ and $U'=U'[i,pqcs]$ it holds \begin{equation}\nonumber
\|F\|_2 \leq \|U'\|_2 \|V\|_2 \|W\|_2. \end{equation} \begin{proof} The first and last parts follow directly from matrix inequalities \begin{equation}\nonumber
\begin{split}
\|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k]\|_F & = \|F[i, jk]\|_F \leq \|U'[i,bqcs]\|_F \| W[k,cs] \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} V[j,bq] \|_2 = \|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_F \|V\|_2 \|W\|_2, \\
\|F[i,jk]\|_2 & \leq \|U'[i,bqcs]\|_2 \| W[k,cs] \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} V[j,bq] \|_2 = \|U'[i,bqcs]\|_2 \|V\|_2 \|W\|_2.
\end{split} \end{equation} Second part reads \begin{equation}\nonumber
\begin{split}
\|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}[i,j,k]\|_\mathbf{2} & = \max_{\|\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}\|=\|\v\|=\|\w\|=1} \f{\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'[i,pq,cs] \mathbin{\times}_2 V[j,pq] \mathbin{\times}_3 W[k,cs]}{\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}[i] \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} \v[j] \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} \w[k]} = \\
{} & = \max_{\|\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}\|=\|\v\|=\|\w\|=1} (\v^\t V)[bq] (\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}' \mathbin{\times}_1 \mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}^\t)[bq,cs] (W^\t \w)[cs] \leq \\
{} & \leq \max_{\|\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}\|=\|\v\|=\|\w\|=1} \|V^\t \v\| \|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}' \mathbin{\times}_1 \mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}^\t\|_2 \|W^\t \w\| = \\
{} & = \left(\max_{\|\mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}\|=1} \|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}' \mathbin{\times}_1 \mathbf{u}} \def\v{\mathbf{v}} \def\w{\mathbf{w}^\t\|_2\right) \left(\max_{\|\v\|=1} \|V^\t \v\|\right) \left(\max_{\|\w\|=1} \|W^\t \w\|\right)
= \|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_\mathbf{2} \|V\|_2 \|W\|_2.
\end{split} \end{equation} \end{proof} \end{theorem} \begin{corollary} For certain perturbation $\Delta\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'$ of tensor $\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}',$ the corresponding perturbation $\Delta\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}$ can be estimated as follows \begin{equation}\label{eq:acc}
\begin{split}
\frac{\| \Delta\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_F }{\|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_F} \leq c_F \frac{\| \Delta\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_F }{\|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_F}, & \qquad c_F =\frac{\|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_F \|V\|_2 \|W\|_2 }{\|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_F}; \\
\frac{\| \Delta F\|_2 }{\| F\|_2} \leq c_2 \frac{\| \Delta U'\|_2 }{\| U'\|_2}, & \qquad c_2 =\frac{\| U'\|_2 \|V\|_2 \|W\|_2 }{\| F\|_2}; \\
\frac{\| \Delta\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_\mathbf{2}}{\|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_\mathbf{2}} \leq c_\mathbf{2} \frac{\| \Delta\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_\mathbf{2}}{\|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_\mathbf{2}},& \qquad c_\mathbf{2}=\frac{\|\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'\|_\mathbf{2} \|V\|_2 \|W\|_2 }{\|\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H}\|_\mathbf{2}}.
\end{split} \end{equation} \end{corollary} \begin{remark}
For any tensor, $\|\mathbf{A}} \def\B{\mathbf{B}} \def\C{\mathbf{C}[i,j,k]\|_\mathbf{2} \leq \|A[i,jk]\|_2 \leq \|A[i,j,k]\|_F.$ \end{remark}
To find a dominant mode-$1$ subspace of $\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'[i,bq,cs],$ we can use proper decomposition of Gram matrix of the unfolding $U'[i,bqcs],$ that reads \begin{equation}\label{eq:gram}
\begin{split}
A[i,i'] = (U' {U'}^\t)[i,i'] = U[i,ap] \left( \mathaccent"705E G[p,p'] \mathbin{\raise1pt\hbox{$\scriptscriptstyle\mathord\otimes$}} \mathaccent"705E H[a,a'] \right) U[a'p',i'], \\
\mathaccent"705E G[p,p'] = G[p,qs] G[qs,p'], \quad \mathaccent"705E H[a,a'] = H[a,bc] H[bc,a'].
\end{split} \end{equation} Tensor $\mathbf{U}} \def\V{\mathbf{V}} \def\W{\mathbf{W}'$ has a simpler structure than $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H},$ and computation of the Gram matrix~\eqref{eq:gram} is faster than~\eqref{eq:ff}. However, evaluation of $A[i,i']$ as full $n_1 \times n_1$ array leads to $\mathcal{O}(n^2r^3)$ complexity. Looking for the methods with linear in mode size complexity, we are to use the cross approximation algorithms.
\section{Cross approximation of Gram matrices} Truncated singular/proper decomposition is used in cases where a low-rank approximation is required. This problem can be solved by faster methods, for example, those based on \emph{cross approximation} $A[i,j] \approx \tilde A[i,j] = A[i,J](A[I,J])^{-1}A[I,j],$ where $I$ and $J$ contain indices of certain rows and columns of $A.$ This approximation is exact on the \emph{cross} formed by rows $I$ and columns $J,$ but the overall accuracy depends heavily on the properties of $A[I,J].$ In~\cite{gt-psa-1995,gtz-psa-1997,gt-maxvol-2001} it is shown that a good choice for $A[I,J]$ is the \emph{maximum volume} (modulus of determinant) submatrix. The search for this submatrix in the general case is an NP-hard problem~\cite{bartholdi-1982}, and alternatives should be used, see~\cite{tee-cross-2000,gostz-maxvol-2010}. If the supported cross is iteratively widened at each step by one row and column that intersect on the element where the residual is maximal in modulus, the cross approximation method is equivalent to Gaussian elimination with complete pivoting. For a Gram matrix the pivot is always on the diagonal and cross approximation is equivalent to an unfinished Cholesky decomposition. The resulting algorithm exploiting the structure of~\eqref{eq:gram} is summarized in Alg.~\ref{alg}.
\begin{algorithm}[ht] \caption{Cross approximation for Gram matrix~\eqref{eq:gram}} \label{alg} \begin{algorithmic}[1] \REQUIRE Structured tensor $\mathbf{F}} \def\G{\mathbf{G}} \def\H{\mathbf{H} = \mathop{\mathbf{Kron}}\nolimits(\G, \H) \mathbin{\times}_1 U \mathbin{\times}_2 V \mathbin{\times}_3 W,$ see~\eqref{eq:f}
\ENSURE Approximation $\tilde A = X \Lambda X^\t$ for Gram matrix~\eqref{eq:gram}, such that $\|A - \tilde A\|_F \lesssim \varepsilon} \def\phi{\varphi\|A\|_F$ \item[\textbf{Initialization:}] $p=0, \quad \tilde A = 0$ \STATE $\mathaccent"705E G[p,p'] = G[p,qs] G[qs,p'], \quad \mathaccent"705E H[a,a'] = H[a,bc] H[bc,a']$
{$\mathcal{O}(r^{4})$} \FOR[Compute diagonal of matrix]{$i=1,\ldots,n$}
\STATE $U_i[a,p] = U[i,ap], \quad d(i) = \f{U_i[a,p] G[p,p']}{H[a,a']U_i[a',p']}$
{$\mathcal{O}(r^3)$} \ENDFOR
\STATE $\mathop{\mathtt{nrm}}\nolimits := \|\d\|_1$ \REPEAT
\STATE $i_\star := \arg \max_i |d(i)| $ \COMMENT{Find new pivot}
{$\mathcal{O}(n)$} \STATE $\mathbf{a}} \def\b{\mathbf{b}} \def\c{\mathbf{c}} \def\d{\mathbf{d}(:,i_\star) := U[:,ap] (H[a,a'] U_{i_\star}[a',p'] G[p',p])[ap]$
{$\mathcal{O}(nr^2 + r^3)$} \STATE $\tilde\mathbf{a}} \def\b{\mathbf{b}} \def\c{\mathbf{c}} \def\d{\mathbf{d}(:,i_\star) = X \Lambda (\mathbf{x}}\def\y{\mathbf{y}} \def\z{\mathbf{z}(i_\star, :))^\t$
{$\mathcal{O}(np)$} \STATE $\mathbf{x}}\def\y{\mathbf{y}} \def\z{\mathbf{z}_\star := (\mathbf{a}} \def\b{\mathbf{b}} \def\c{\mathbf{c}} \def\d{\mathbf{d}-\tilde\mathbf{a}} \def\b{\mathbf{b}} \def\c{\mathbf{c}} \def\d{\mathbf{d}) / \sqrt{(a - \tilde a)(i_\star,i_\star)}$
{$\mathcal{O}(n)$}
\STATE $\d[i] := \d[i] - |\mathbf{x}}\def\y{\mathbf{y}} \def\z{\mathbf{z}_\star[i]|^2$ \COMMENT{Update diagonal of residual}
{$\mathcal{O}(n)$} \STATE $\mathbf{x}}\def\y{\mathbf{y}} \def\z{\mathbf{z}_\star =: [X \: \mathbf{x}}\def\y{\mathbf{y}} \def\z{\mathbf{z}'] \b$ \COMMENT{Orthogonalize $\mathbf{x}}\def\y{\mathbf{y}} \def\z{\mathbf{z}_\star$ to $\mathop{\mathrm{span}}\nolimits X$}
$\mathcal{O}(np)$ \STATE $\Lambda + \b^\t\b =: V D V^\t$ \COMMENT{Re-diagonalize decomposition}
$\mathcal{O}(p^3)$
\STATE $X := [X \: \mathbf{x}}\def\y{\mathbf{y}} \def\z{\mathbf{z}'] V, \quad \Lambda := D, \quad \tilde A = X \Lambda X^\t, \quad \mathop{\mathtt{err}}\nolimits := \|\d\|_1$
$\mathcal{O}(np^2)$ \UNTIL{$\mathop{\mathtt{err}}\nolimits \leq \varepsilon} \def\phi{\varphi\mathop{\mathtt{nrm}}\nolimits$ \textbf{or} $r = r_\mathrm{max}$ } \end{algorithmic} \end{algorithm}
It is easy to see that evaluation of $\mathaccent"705E G$ and $\mathaccent"705E H,$ i.e. Gram matrices of the unfoldings $G[p,qs]$ and $H[a,bc],$ requires $\mathcal{O}(r^4)$ operations in three-dimensional case and $\mathcal{O}(r^{d+1})$ in $d$-dimension case. With precomputed $\mathaccent"705E G$ and $\mathaccent"705E H$ every element $a(i,i')$ is computed in $\mathcal{O}(r^3)$ operations and a column $\mathbf{a}} \def\b{\mathbf{b}} \def\c{\mathbf{c}} \def\d{\mathbf{d}(:,i')$ is computed in $\mathcal{O}(nr^2 + r^3)$ operations for three and $d$-dimensional case. For the rediagonalization of $\Lambda + \b^\t\b$ matrix we can use algorithm proposed by Demmel (see~\cite{demmel}, Alg.~5.3) that is implemented by the LAPACK procedure \texttt{slaed3} and has complexity~$\mathcal{O}(p^3).$ We conclude that approximation of rank-$r$ dominant mode subspace of Gram matrix~\eqref{eq:gram} in $d$-dimensional case requires $\mathcal{O}(nr^3 + r^{d+1})$ operations.
The relation between accuracy of cross approximation of Gram matrices and corresponding low-rank approximation of initial matrices is given by the following theorem. \begin{theorem}\label{thm2} Consider a matrix $U = \left[\begin{array}{cc} U_1 & U_2 \end{array}\right].$ If the corresponding Gram matrix $$ A = U^\t U = \left[ \begin{array}{cc} A_{11} & A_{12} \\ A_{21} & A_{22} \end{array} \right] $$ allows the cross approximation \begin{equation}\nonumber
\left\| A - \left[ \begin{array}{c} A_{11} \\ A_{21} \end{array} \right] A_{11}^{-1} \left[\begin{array}{cc} A_{11} & A_{12} \end{array} \right] \right\|_2 \leq \varepsilon} \def\phi{\varphi \|A\|_2, \end{equation} then there exists a matrix $B$ such that \begin{equation}\label{eq:cu}
\|U - U_1 B^\t \|_2 \leq \sqrt{\varepsilon} \def\phi{\varphi} \|U\|_2. \end{equation} \begin{proof} Consider $V = - U_1 A_{11}^{-1} A_{12} + U_2$ and write $$ V^\t V = A_{21} A_{11}^{-1} A_{11} A_{11}^{-1} A_{12} - A_{21} A_{11}^{-1} A_{12} - A_{21} A_{11}^{-1} A_{12} + A_{22} = A_{22} - A_{21} A_{11}^{-1} A_{12}. $$ Cross approximation is exact on the selected rows and columns \begin{equation}\label{eq:res} A - \left[ \begin{array}{c} A_{11} \\ A_{21} \end{array} \right] A_{11}^{-1} \left[\begin{array}{cc} A_{11} & A_{12} \end{array} \right] =
\left[ \begin{array}{cc} 0 & 0 \\ 0 & A_{22} - A_{21} A_{11}^{-1} A_{12} \end{array} \right] =
\left[ \begin{array}{cc} 0 & 0 \\ 0 & V^\t V \end{array} \right], \end{equation}
and it follows that $\|V^\t V\|_2 \leq \varepsilon \|U^\t U\|_2$ and $\|V\|_2 \leq \sqrt{\varepsilon} \|U\|_2.$ We conclude that $B^\t = \left[\begin{array}{cc}I & A_{11}^{-1} A_{12}\end{array}\right]$ provides~\eqref{eq:cu}. \end{proof} \end{theorem} \begin{remark} For $U$ with $U_1^\t U_1 = I, \: U_2^\t U_2 = \varepsilon I, \: U_1^\t U_2 = 0,$ inequality~\eqref{eq:cu} is sharp. \end{remark} \begin{remark} For fixed $U_1,$ the matrix $B^\t = \left[\begin{array}{cc}I & A_{11}^{-1} A_{12}\end{array}\right] = (U_1^\t U_1)^{-1} U_1^\t U$ provides the minimal residual $U - U_1 B^\t$ in the Frobenius and spectral norms. See~\cite{gor-cross-2008}, where nice estimates for the accuracy of cross approximation of matrices and tensors are also given. \end{remark} \begin{remark} $\mathop{\mathrm{span}}\nolimits B = \mathop{\mathrm{span}}\nolimits X$ is the subspace of columns of the Gram matrix that support the cross approximation in Alg.~\ref{alg}. \end{remark}
Since the spectral norm of the residual is not easy to evaluate, the stopping criterion in a practical algorithm is based on the Frobenius norm. On each step of Alg.~\ref{alg} the vector $\mathbf{d}$ contains the diagonal of the residual~\eqref{eq:res} and $$
\|\mathbf{d}\|_1 = \sum_{i=1}^n |d(i)| = \sum_{i} (V^\t V)(i,i) = \|V[i,j]\|_F^2. $$ We can also implement a stopping criterion based on the eigenvalues stored in $\Lambda.$ To do this, we can split them into `dominant' and `smaller' parts based on the desired tolerance $\varepsilon,$ and stop the process if during several iterations new eigenvalues fall into the smaller part. This criterion will approximate the spectral norm more precisely, but as we see in the numerical experiments, it generally does not differ from the Frobenius-based one.
Obviously, Alg.~\ref{alg} can be applied in the same way to estimate the other Tucker factors of~\eqref{eq:f}. Due to roundoff errors, the accuracy $\varepsilon$ of Alg.~\ref{alg} is limited by the machine precision $\mathtt{tol},$ and for $\varepsilon=\mathtt{tol}$, the accuracy of~\eqref{eq:appr} can be estimated by Thm.~\ref{thm2} as $$
\|\mathbf{F} - \tilde{\mathbf{F}}\|_2 \leq \sqrt{\mathtt{tol}} \sqrt{c_2^2(U) + c_2^2(V) + c_2^2(W)} \|\mathbf{F}\|_2, $$ where $c_2(U)$ is defined in~\eqref{eq:acc} and a similar definition applies to $V, W.$
\section{Numerical examples} Multidimensional data often appear in modern modelling programs. For example, in chemical packages, e.g. PC GAMESS, MOLPRO, the \emph{electron density function} is given in canonical form~\eqref{eq:c} as a sum of tensor product of Gaussians, but with number of terms, that may be too large for practically feasible computations even for moderate molecules. In order to make computations efficient, further approximation (recompression) to the Tucker format can be performed. This problem was approached in~\cite{mpi-chem3d-2009} using Tucker-ALS algorithm, in \cite{khor-ml-2009} by Tucker-ALS with initial guess obtained from the coarser grids, in~\cite{fkst-chem-2008} by Cross3D algorithm, in~\cite{ost-chem-2009} by individual cross approximation of canonical factors, in~\cite{sav-rr-2009} by cross approximation of Gram matrices of unfoldings and in~\cite{gos-kryl-2010} by algorithms based on Wedderburn rank reduction.
As an example, we apply the discussed algorithm to the Hadamard multiplication of the electron density, given in the Tucker format, with itself. This operation can be a building block for an algorithm that computes the pointwise cube root of the density, which is used in the Kohn--Sham model. A good initial guess for such methods can be evaluated by the mimic algorithm~\cite{ost-chem-2009}.
The results of the experiments are collected in Tab.~\ref{tab}. They were performed on an Intel Xeon Quad-Core E5504 CPU running at $2.00$~GHz using the Intel Fortran compiler version 11.1 and BLAS/LAPACK routines provided by the MKL library. For each molecule, we show the time in seconds $T(\mbox{Alg.~\ref{alg}})$ for the evaluation of the three dominant subspaces $X[i,\alpha], Y[j,\beta]$ and $Z[k,\gamma]$ by Alg.~\ref{alg} with the accuracy of approximation of the Gram matrices set to $\varepsilon=10^{-12}.$ Then we compute the best core by the convolution \begin{equation}\nonumber
\mathbf{T}[\alpha,\beta,\gamma] = \mathbf{F}[i,j,k] \mathbin{\times}_1 X[\alpha,i] \mathbin{\times}_2 Y[\beta,j] \mathbin{\times}_3 Z[\gamma,k] \end{equation} and check the relative accuracy $\varepsilon(\mbox{Alg.~\ref{alg}})$ of the approximation~\eqref{eq:appr} in the Frobenius norm.
The direct computation of all elements of the residual requires a lot of computational time, and the accuracy $\|\mathbf{F}-\tilde{\mathbf{F}}\|_F$ was verified by comparing the result with the Tucker approximation computed by the Cross3D algorithm~\cite{ost-tucker-2008} with the accuracy set to $\varepsilon=10^{-12}.$ The Cross3D algorithm was verified in~\cite{ost-tucker-2008,fkst-chem-2008} by exhaustive checks on parallel memory platforms, and can be considered to give a reliable answer. The residual between two Tucker formats is computed as proposed in~\cite{ost-latensor-2009}.
Then we compute an approximation of the same accuracy~$\varepsilon(\mbox{Alg.~\ref{alg}})$ by the Cross3D~\cite{ost-tucker-2008} and WsvdR~\cite{gos-kryl-2010} algorithms and show the corresponding timings as $T(\mbox{c3d})$ and $T(\mbox{wsvdr})$. We also show the time $T(\mbox{tals})$ for one iteration of Tucker-ALS~\cite{tuckerals-1980,lathauwer-rank1-2000} with the ranks fixed equal to the ranks of the bases $X, Y, Z,$ returned by Alg.~\ref{alg}. Then we apply one iteration of the rank-revealing Tucker-ALS~\cite{ost-chem-2009} with the accuracy parameter set to $\varepsilon=10^{-12}$ using the bases $X, Y, Z$ as the initial guess, and show the accuracy of the improved approximation by~$\varepsilon(\mbox{tals}).$
\begin{table}[t] \caption{Hadamard square of electron density, $n_1=n_2=n_3=5121$} \label{tab}
\begin{center}
\begin{tabular}{cc|cc|ccc|c}
molecule & $r_1, r_2, r_3$ & $T(\mbox{Alg.~\ref{alg}})$ & $\varepsilon(\mbox{Alg.~\ref{alg}})$ & $T(\mbox{c3d})$ & $T(\mbox{wsvdr})$ & $T(\mbox{tals})$ & $\varepsilon(\mbox{tals})$ \\ \hline
methane & $(74,74,74)$ & $4.0$ & $3\eee{-7}$ & $78.6$ & $12.4$ & $37$ & $7\eee{-13}$ \\
ethane & $(67,94,83)$ & $5.3$ & $6\eee{-7}$ & $76.8$ & $15.1$ & $42$ & $8\eee{-13}$ \\
ethanol & $(128,127,134)$ & $20$ & $5\eee{-7}$ & $1050$ & $210$ & $473$ & $9\eee{-13}$ \\
glycine & $(62,176,186)$ & $38$ & $8\eee{-7}$ & $1260$ & $237$ & $442$ & $9\eee{-13}$ \\
\end{tabular}
\end{center} \end{table}
We conclude that the proposed algorithm is faster than other methods for this purpose and returns an approximation of the dominant subspaces that allows one to construct an approximation with accuracy of about the square root of machine precision. Using the subspaces computed by Alg.~\ref{alg} as an initial guess, the rank-revealing Tucker-ALS converges to almost machine precision in one iteration.
\end{document} | arXiv | {
"id": "1004.4919.tex",
"language_detection_score": 0.5632232427597046,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Multipliers for Continuous Frames in Hilbert spaces] {Multipliers for continuous frames in Hilbert spaces} \author[P. Balazs, D. Bayer and A. Rahimi]{P. Balazs$^\dagger$, D. Bayer$^\dagger$ and A. Rahimi$^*$,} \address{$^\dagger$ Acoustics Research Institute, Austrian Academy of Sciences, Wohllebengasse 12-14, 1040 Wien, Austria.} \email{peter.balazs@oeaw.ac.at} \email{bayerd@kfs.oeaw.ac.at} \address{$^*$ Department of Mathematics, University of Maragheh, P. O. Box 55181-83111, Maragheh, Iran.} \email{asgharrahimi@yahoo.com}
\subjclass[2000]{Primary 42C40; Secondary 41A58, 47A58.} \keywords{Frame, Continuous frame, Measure space, Riesz type, Riesz basis, Riesz frame, Wavelet frame, Short-time Fourier transform, Gabor frame, Controlled frames, Compact operators, Trace-class operators, Hilbert-Schmidt operators.}
\begin{abstract} In this paper we examine the general theory of continuous frame multipliers in Hilbert space. These operators are a generalization of the widely used notion of (discrete) frame multipliers. Well-known examples include Anti-Wick operators, STFT multipliers or Calder\'on-Toeplitz operators. Due to the possible peculiarities of the underlying measure spaces, continuous frames do not behave quite as well as their discrete counterparts. Nonetheless, many results similar to the discrete case are proven for continuous frame multipliers as well, for instance compactness and Schatten class properties. Furthermore, the concepts of controlled and weighted frames are transferred to the continuous setting. \end{abstract}
\maketitle
\section{Introduction}
A discrete frame is a countable family of elements in a separable Hilbert space which allows stable but not necessarily unique decomposition of arbitrary elements into expansion of the frame elements. The concept of generalization of frames was proposed by G. Kaiser \cite{Gk} and independently by Ali, Antoine and Gazeau \cite{Ali2} to a family indexed by some locally compact space endowed with a Radon measure. These frames are known as continuous frames. Gabardo and Han in \cite{Gb} called these frames \textit{Frames associated with measurable spaces}, Askari-Hemmat, Dehghan and Radjabalipour in \cite{Ra} called these frames \textit{generalized frames} and they are linked to \textit{coherent states} in mathematical physics \cite{Ali2}. For more studies, the interested reader can also refer to \cite{Ali1, jpaxxl09, jpaxxl11, chui, For}. \par Bessel and frame multipliers were introduced by one of the authors \cite{xxlmult1,peter2,peter3} for Hilbert spaces. For Bessel sequences, the investigation of the operator $\mathbf{M}=\sum m_k \langle f,\psi_{k}\rangle\varphi_{k}$, where the analysis coefficients $\langle f,\psi_{k}\rangle$ are multiplied by a fixed symbol $(m_k)$ before resynthesis (with $\varphi_{k}$), is very natural. There are numerous applications of this kind of operators. As a particular way to implement time-variant filters Gabor frame multipliers \cite{feic} are used, also known as Gabor filters \cite{matz}. Such operators find application in psychoacoustics \cite{xxllabmask1}, denoising \cite{majxxl10}, computational auditory scene analysis \cite{wanbro06}, virtual acoustics \cite{majxxl1} and seismic data analysis \cite{gigrheillama05}. On a more theoretical level
Bessel multipliers of $p$-Bessel sequences in Banach spaces are introduced in \cite{peterasghar}. \par Wavelet and Gabor frames are used very often in signal processing algorithms. Both systems are derived from a continuous frame transform. For these two special systems continuous frame multipliers have been investigated as STFT multipliers \cite{feic} or Anti-Wick operators \cite{coro05}, respectively Calder\'on--Toeplitz operators \cite{now93, roch90}. In this paper we investigate multipliers for continuous frames in the general setting, with some comments on the mentioned special cases in Section~\ref{sec:STFTwavmult0}.
The paper is organized as follows. In Section 2, we collect a number of notions and preliminaries on continuous Bessel mappings and frames and their most basic properties and present some well-known examples. In Section 3, we define continuous Bessel and frame multipliers as generalizations of discrete Bessel and frame multipliers, develop their theory and prove a number of statements on the compactness of multipliers as well as on mapping properties with respect to Schatten classes. We also investigate perturbation results and the continuous dependence of the multiplier on the symbol and on the analysis and synthesis frames. We also look at the particular instances of STFT and Wavelet multipliers, the latter are known as Calder\'on-Toeplitz operators, and compare our results to existing ones. Section 4 generalizes the concepts of controlled and weighted frames to the continuous setting.
\section{Preliminaries}
\subsection{Operator theory and functional analysis}
Throughout this paper, $\mathcal{H}$ (respectively $\mathcal{H}_1, \mathcal{H}_2$) will be complex Hilbert spaces, with inner product $\langle x, y\rangle$, linear in the first and conjugate linear in the second coordinate and norm $\|x\| = \sqrt{\langle x, x \rangle}$ for $x,y \in \mathcal{H}$.
Let $\mathcal{B}(\mathcal{H}_1,\mathcal{H}_2)$ be the set of all bounded linear operators from $\mathcal{H}_1$ to $\mathcal{H}_2$.
This set is a Banach space with norm $\|T\|=\sup_{\|x\|=1}\|Tx\|$. We define $GL(\mathcal{H}_1,\mathcal{H}_2)$ as the set of all bounded linear operators with bounded inverse. If $\mathcal{H}_1 = \mathcal{H}_2 = \mathcal{H}$, we simply write $\mathcal{B}(\mathcal{H})$ and $GL(\mathcal{H})$. By $(e_n)$ we always denote an orthonormal basis for a Hilbert space. A map $\Psi:\mathcal{H} \times \mathcal{H}\rightarrow \mathbb{C}$ is a sesquilinear form if it is linear in the first variable and conjugate-linear in the second. For such a map, we have the following assertion. \begin{thm}\cite{Mu}\label{murphy}
Let $\Psi$ be a bounded sesquilinear form on a Hilbert space $\mathcal{H}$. Then there is a unique operator $u$ on $\mathcal{H}$ such that \[ \Psi(x,y)=\langle u(x),y\rangle \quad (x,y\in \mathcal{H}). \]
Moreover,
$\|u\|=\|\Psi\|$. \end{thm} A bounded operator $T$ is called positive (respectively non-negative), if $\langle Tf,f\rangle>0$ for all $f\neq0$ ( respectively $\langle Tf,f\rangle\geq0$ for all $f\in\mathcal{H}$). We say $S> T$ if $S-T>0$ (respectively $S\geq T$, if $S-T\geq0$). For a non-negative operator $T$, there exists a unique non-negative operator $S$ on $\mathcal{H}$ such that $S^2=T$ and $S$ commutes with every operator that commutes with $T$. See e.g. \cite{conw1} or \cite{gogoka03} for good accounts of elementary operator theory.
A linear operator $T$ from the Banach space $X$ into the Banach space $Y$ is called compact if the image under $T$ of the closed unit ball in $X$ is a relatively compact subset of $Y$, or, equivalently, if the image of any bounded sequence contains a convergent subsequence. A well-known characterization of compact operators is the following: \begin{lem} \label{weakly}\cite{conw1} Let $X,Y$ be Banach spaces. A bounded operator $T:X\rightarrow Y$
is compact if and only if $\|Tx_{n}\|\longrightarrow 0$ whenever $x_{n}\longrightarrow 0$ weakly in $X$. \end{lem}
For any compact operator $T:\mathcal{H} \to \mathcal{K}$, the operator $T^{\ast}T:\mathcal{H} \to \mathcal{H}$ is compact and non-negative. The unique non-negative operator $S$ such that $S^2 = T^{\ast}T$ is also compact. The eigenvalues of $S$ are called the singular values of $T$. They form a non-increasing sequence of non-negative numbers that either consists of only finitely many non-zero terms or converges to zero. If the sequence of singular values $(s_n)$ is in $\ell^p$, $1 \leq p < \infty$, then $T$ belongs to the Schatten p-class $\mathcal{S}_p(\mathcal{H})$. In particular, if $\sum |s_n | < \infty$, then $T$ is a trace class operator; if $\sum | s_n |^2 < \infty$, then $T$ is a Hilbert-Schmidt operator. A good source for information on Schatten class operators is e.g. \cite{zh07}. \\
We recall the definition of a discrete frame. \begin{defn} A family $(f_n)\subseteq\mathcal{H}$ is a \emph{frame} for $\mathcal{H}$ if there exist constants $A>0$ and $B<\infty$ such that \[
A\|f\|^2\leq\sum_n|\langle f,f_n\rangle|^2\leq B\|f\|^2 \] for all $f\in\mathcal{H}$. If $A=B$, then it is called a tight frame.
\end{defn}
\subsection{Continuous frames}
\begin{defn} Let $\mathcal{H} $ be a complex Hilbert space and $(\Omega ,\mu)$ be a measure space with positive measure $\mu.$ The mapping $F:\Omega\to\mathcal{H}$ is called a \emph{continuous frame} with respect to $(\Omega ,\mu)$, if\\ \begin{enumerate} \item $F$ is weakly-measurable, i.e., for all $f\in \mathcal{H}$, $\omega\to\langle f,F({\omega})\rangle$ is a measurable function on $\Omega$; \\ \item there exist constants $A, B> 0$ such that \[ \label{deframe}
A\|f\|^{2}\leq \int_{\Omega}|\langle f,F({\omega})\rangle|^{2}\,d\mu(\omega) \leq B\|f\|^{2}, \quad ( f\in \mathcal{H}). \] \end{enumerate} The constants $A$ and $B$ are called \emph{continuous frame bounds}. If $A=B$, then $F$ is called a \emph{tight} continuous frame, if $A = B = 1$ a {\em Parseval} frame. The mapping $F$ is called a {\em Bessel mapping} or, shorter, \emph{Bessel} if only the right-hand inequality in (\ref{deframe}) holds. In this case, $B$ is called the \emph{Bessel constant} or \emph{Bessel bound}. \end{defn} If $\Omega=\mathbb{N}$ and $\mu$ is the counting measure, then $F$ is a discrete frame. In this sense continuous frames are the more general setting.
The first inequality in (\ref{deframe}), shows that $F$ is complete, i.e., $$\overline{\textrm{span}}\{F({\omega})\}_{\omega\in\Omega}=\mathcal{H}.$$
It is well-known that discrete Bessel sequences in a Hilbert space are norm bounded above: if \[
\sum_{n} | \langle f, f_n \rangle |^2 \leq B \| f \|^2 \] for all $f\in \mathcal{H}$, then \[
\| f_n \| \leq \sqrt{B} \] for all $n$. For continuous Bessel mappings, however, this is not necessary. Consider the following example. \begin{exa}\label{example_1} Take an (essentially) unbounded (Lebesgue) measurable function $a:\mathbb R \to \mathbb C$ such that $a \in L^2(\mathbb R) \setminus L^{\infty}(\mathbb R)$. It is easy to see that such functions indeed exist; consider for example the function \[ b(x) := \begin{cases}
\frac{1}{\sqrt{|x|}}, & \mbox{ if } 0 < |x| < 1, \\
\frac{1}{|x|^2}, & \mbox{ if } |x| \geq 1, \\ 0, & \mbox{ if } x=0. \end{cases} \] This function is clearly in $L^1(\mathbb R) \setminus L^{\infty}(\mathbb R)$ and, furthermore, $b(x) \geq 0$ for all $x \in \mathbb R$. Now take $a(x) := \sqrt{b(x)}$. Choose a fixed vector $h \in\mathcal{H}$, $h \neq 0$. Then the mapping \[ F: \mathbb R \to\mathcal{H}, \quad \quad \omega \mapsto F(\omega) := a(\omega)\cdot h \] is weakly (Lebesgue) measurable and a continuous Bessel mapping, since \begin{align*}
\int_{\mathbb R} | \langle f, F(\omega) \rangle |^2\,d\mu(\omega) & = \int_{\mathbb R} |a(\omega)|^2 | \langle f, h \rangle |^2\,d\mu(\omega) \\
& = | \langle f, h \rangle |^2 \int_{\mathbb R} |a(\omega)|^2\,d\mu(\omega) \\
& \leq \| h \|^2 \|a\|_{L^2(\mathbb R)}^2 \| f \|^2 \end{align*} for all $f\in \mathcal{H}$, but \[
\| F(\omega) \| = \| a(\omega) h \| = |a(\omega)| \| h \| \] is unbounded, since $a$ is unbounded.
$\triangle$ \end{exa}
Even continuous frames need not necessarily be norm bounded.
\begin{exa}\label{example_2}
Let $F:\mathbb R \to \mathcal{H}$ be a norm unbounded continuous Bessel mapping with Bessel constant $B_F$, as in the previous example. Let $G:\mathbb R \to\mathcal{H}$ be a norm bounded continuous frame (for example a continuous wavelet or Gabor frame, cf. Section \ref{gaborandwavelet}) with continuous frame bounds $0 < A_G \leq B_G$ and norm bound $M > 0$, i.e. $\| G(\omega) \|\leq M$ for a.e. $\omega \in\mathbb R$.\\ Then $G + \varepsilon F$ is a norm unbounded continuous frame, for all sufficiently small $\varepsilon > 0$.\\ To see this, first note that it is obvious that the mapping $G + \varepsilon F:\mathbb R \to\mathcal{H}$ is weakly measurable for any choice of $\varepsilon > 0$. It satisfies the upper frame bound, since \begin{align*}
\int_{\mathbb R} | \langle f, G(\omega) & + \varepsilon F(\omega) \rangle |^2\,d\mu(\omega) \\
& \leq \int_{\mathbb R} \Big( | \langle f, G(\omega) \rangle | + \varepsilon | \langle f, F(\omega) \rangle | \Big)^2 \,d\mu(\omega) \\
& \leq 2\cdot \int_{\mathbb R} \Big( | \langle f, G(\omega) \rangle |^2 + \varepsilon^2 | \langle f, F(\omega) \rangle |^2 \Big) \,d\mu(\omega) \\
& \leq 2\cdot (B_G + \varepsilon^2 B_F)\cdot \| f \|^2. \end{align*}
For the lower frame bound, observe that \begin{align*}
\Big( \int_{\mathbb R} | \langle f, G(\omega) & + \varepsilon F(\omega) \rangle |^2\,d\mu(\omega) \Big)^{1/2} \\
& \geq \Big( \int_{\mathbb R} | \langle f, G(\omega) \rangle |^2\,d\mu(\omega) \Big)^{1/2} - \Big( \int_{\mathbb R} \varepsilon^2 | \langle f, F(\omega) \rangle |^2\,d\mu(\omega) \Big)^{1/2} \\
& \geq \sqrt{A_G} \|f\| - \varepsilon \sqrt{B_F}\|f\| \\
& = (\sqrt{A_G} - \varepsilon\sqrt{B_F})\|f\|. \end{align*} Now choose $\varepsilon < \sqrt{\frac{A_G}{B_F}}$, then $\sqrt{A_G} - \varepsilon\sqrt{B_F} > 0$, and the lower frame bound is established.
This continuous frame is, however, not norm bounded, since \begin{align*}
\| G(\omega) + \varepsilon F(\omega) \| \geq \varepsilon \| F(\omega) \| - \| G(\omega) \| \geq \varepsilon \| F(\omega) \| - M; \end{align*} by $F$ being unbounded, this is unbounded as well.
$\triangle$ \end{exa}
The construction in the last two examples depends crucially on the existence of an unbounded square-integrable function or, equivalently, on the existence of an unbounded integrable function. It can be generalized to the following theorem:
\begin{thm} Let $(\Omega, \mu)$ be a measure space such that $L^1(\Omega, \mu) \nsubseteq L^{\infty}(\Omega, \mu)$, i.e. there exist unbounded integrable functions. Then the following holds:\\ If there exist any continuous frames at all with respect to $(\Omega, \mu)$, then there are also norm-unbounded ones. \end{thm} \begin{proof}
Fix a vector $h \in \mathcal{H}$, $h \neq 0$. Pick a function $b:\Omega \to \mathbb{C}$, $b \in L^1(\Omega, \mu) \setminus L^{\infty}(\Omega, \mu)$. Then $a := \sqrt{|b|}$ is a function in $L^2(\Omega,\mu) \setminus L^{\infty}(\Omega, \mu)$, i.e. an unbounded square-integrable function. As in Example~\ref{example_1}, the mapping $F:\Omega \to \mathcal{H}$, $F(\omega) = a(\omega)\cdot h$, is a norm-unbounded continuous Bessel mapping. If there exists a norm-bounded continuous frame $G:\Omega \to \mathcal{H}$, then one can show as in Example~\ref{example_2} that the mapping $G + \varepsilon F$ is a norm-unbounded continuous frame, for sufficiently small $\varepsilon$. \end{proof}
Concerning the existence of continuous frames, we have the following result:
\begin{thm} Let $(\Omega, \mu)$ be a $\sigma$-finite measure space. Then there exists a continuous tight frame $F:\Omega \to \mathcal{H}$ with respect to $(\Omega, \mu)$. \end{thm} \begin{proof} Since $\Omega$ is $\sigma$-finite, it can be written as a disjoint union $\Omega = \bigcup \Omega_k$ of countably many subsets $\Omega_k \subseteq \Omega$ such that $\mu(\Omega_k) < \infty$ for all $k$. Without loss of generality assume that $\mu(\Omega_k) > 0$ for all $k$. If there are infinitely many such subsets $\Omega_k$, $k \in \mathbb{N}$, then let $(e_k)_{k\in\mathbb{N}}$ be an orthonormal basis of an infinite-dimensional separable Hilbert space $\mathcal{H}$. Define the function $F:\Omega \to \mathcal{H}$ by \[ \omega \mapsto F(\omega) := \frac{1}{\sqrt{\mu(\Omega_k)}} e_k, \quad \mbox{ for }\omega \in \Omega_k. \] Then, for all $f \in \mathcal{H}$, \begin{align*}
\int_{\Omega} \left| \langle f, F(\omega) \rangle \right|^2\,d\mu(\omega) & = \sum_{k} \int_{\Omega_k}\left| \langle f, F(\omega) \rangle \right|^2\,d\mu(\omega) \\
& = \sum_k \left| \langle f, e_k \rangle \right|^2 \frac{1}{\mu(\Omega_k)} \mu(\Omega_k) \\
& = \| f \|^2, \end{align*} thus $F$ is a continuous tight frame with frame bound $1$. If there are only finitely many $\Omega_k$, $k = 1, \ldots, N$, then take for $\mathcal{H}$ an $N$-dimensional Hilbert space instead and proceed analogously. \end{proof}
For the convenience of the reader, we shortly repeat some basic facts and notions on continuous frames. Details may be found for example in \cite{Ali2} or \cite{RaNaDe}.
Let $F$ be a continuous frame with respect to $(\Omega ,\mu)$, then the mapping $$\Psi : \mathcal{H}\times\mathcal{H} \to \mathbb{C}$$ defined by $$\Psi(f,g)= \int_{\Omega}\langle f,F({\omega})\rangle\langle F({\omega}),g\rangle \,d\mu(\omega)$$ is well defined, sesquilinear and bounded. By Cauchy-Schwarz's inequality we get
\begin{eqnarray*}|\Psi(f,g)|&\leq&
\int_{\Omega}|\langle f,F(\omega)\rangle\langle F(\omega),g\rangle|\, d\mu(\omega)\\
&\leq& \left(\int_{\Omega}|\langle f,F(\omega)\rangle|^{2}
\,d\mu(\omega)\right)^{\frac{1}{2}}\left(\int_{\Omega}|\langle F(\omega), g \rangle|^{2}\, d\mu(\omega)\right)^{\frac{1}{2}}\\
&\leq& B\| f\|\| g\|. \end{eqnarray*}
Hence $\|\Psi\|\leq B.$ By Theorem \ref{murphy} there exists a unique operator $S_{F}: \mathcal{H}\to\mathcal{H}$ such that $$\Psi(f,g)=\langle S_{F}f, g\rangle,\quad ( f,g\in\mathcal{H} )$$ and moreover
$\|\Psi\|=\|S_{F}\|.$\\
Since $\langle S_{F}f,f\rangle=\int_{\Omega}|\langle f,F(\omega)\rangle |^{2}\,d\mu(\omega)$, $S_{F}$ is positive and $AI\leq S_{F}\leq BI$. Hence $S_{F}$ is invertible, positive and $\frac{1}{B} I\leq S^{-1}_{F}\leq \frac{1}{A} I$. We call $S_{F}$ the continuous frame operator of $F$ and we use the notation $S_{F}f=\int_{\Omega}\langle f,F(\omega)\rangle F(\omega) \,d\mu(\omega)$, which is valid in the weak sense. Thus, every $f\in\mathcal{H}$ has the (weak) representations $$f=S_{F}^{-1}S_{F}f=\int_{\Omega}\langle f, F(\omega)\rangle S_{F}^{-1}F(\omega)\,d\mu(\omega)$$$$f=S_{F}S_{F}^{-1}f=\int_{\Omega}\langle f, S_{F}^{-1}F(\omega)\rangle F(\omega)\,d\mu(\omega).$$ \begin{thm}\label{TF}\cite{RaNaDe} Let $(\Omega, \mu)$ be a measure space and let $F$ be a Bessel mapping from $\Omega$ to $\mathcal{H}.$ Then the operator $T_{F}:L^{2}(\Omega, \mu)\to\mathcal{H}$ weakly defined by $$\langle T_{F}\varphi, h\rangle=\int_{\Omega}\varphi(\omega)\langle F(\omega),h \rangle \,d\mu(\omega),\quad ( h\in\mathcal{H} )$$ is well defined, linear, bounded and its adjoint is given by $$ T_{F}^{*}: \mathcal{H}\to L^{2}(\Omega, \mu),\quad (T_{F}^{*}h)(\omega)=\langle h, F(\omega)\rangle,\quad ( \omega\in\Omega ).$$ The operator $T_{F}$ is called the \textit{synthesis operator} and $T_{F}^{*}$ is called the \textit{analysis operator} of $F$. \end{thm}
As in the discrete case, we have the following proposition. \begin{prop}\cite{RaNaDe} Let $F:\Omega\to\mathcal{H}$ be a Bessel mapping with respect to $(\Omega,\mu)$. With the above notation, $S_{F}=T_{F}T_{F}^{*}.$ \end{prop}
Using an analogous statement as in \cite{RaNaDe} for the synthesis operator, it is easy to prove a characterization of continuous frames in terms of the frame operator. \begin{thm} Let $(\Omega, \mu)$ be a $\sigma$-finite measure space.
The mapping $F:\Omega\to\mathcal{H}$ is a continuous frame with respect to $(\Omega, \mu)$ for $\mathcal{H}$ if and only if the operator $S_{F}$ is a bounded and invertible operator. \end{thm}
\begin{defn} Let $F$ and $G$ be continuous frames with respect to $(\Omega ,\mu)$ for $\mathcal{H}$. We call $G$ a \textit{dual} of $F$ if the following holds true: \[ \label{dual} \langle f,g\rangle=\int_{\Omega}\langle f,F(\omega)\rangle\langle G(\omega),g\rangle d\mu \quad ( f, g\in\mathcal{H} ). \] In this case $(F,G)$ is called a \textit{dual pair}. It is clear that (\ref{dual}) is equivalent with $T_G T^*_F=I$. \end{defn}
It is certainly possible for a continuous frame $F$ to have only one dual. In this case we call $F$ a \textit{Riesz-type} frame.
\begin{prop}\cite{Gb} Let $F$ be a continuous frame with respect to $(\Omega ,\mu)$ for $\mathcal{H}$. Then $F$ is a Riesz-type frame if and only if $\mathcal{R}(T_{F}^* ) = L^2(\Omega , \mu)$.
\end{prop}
\subsection{Gabor and wavelet systems}\label{gaborandwavelet}
Well known examples for frames are wavelet and Gabor systems. The corresponding continuous wavelet and STFT transforms give rise to continuous frames. We make use of the following unitary operators on $L^2(\mathbb{R})$: \begin{itemize} \item Translation: $T_x f(t) := f(t - x)$, for $f \in L^2(\mathbb{R})$ and $x \in \mathbb{R}$ \item Modulation: $M_y f(t) := e^{2\pi i y\cdot t} f(t)$, for $f \in L^2(\mathbb{R})$ and $y \in \mathbb{R}$
\item Dilation: $D_z f(t) := \frac{1}{|z|^{\frac{1}{2}}} f(\frac{t}{z})$, for $f \in L^2(\mathbb{R})$ and $z > 0$ \end{itemize} \begin{defn}\label{D:Def_Wavelet} Let $ \psi \in L^{2}(\mathbb{R})$, and let
$$C_{\psi}:=\int_{-\infty}^{+\infty}\frac{|\hat{\psi}(\gamma)|^{2}}
{|\gamma|}\,d\gamma,$$ where $\hat{\psi}$ denotes the Fourier transform of $\psi$. The function $\psi$ is called admissible if $0 < C_{\psi} < +\infty$. For $a,b\in\mathbb{R}$ with $a\neq0$, let
$$\psi^{a,b}(x):= (T_{b}D_{a}\psi)(x)= \frac{1}{|
a|^{\frac{1}{2}}}\psi(\frac{x-b}{a}), \quad ( x\in\mathbb{R}).$$ Then the \textit{continuous wavelet transform} $W_{\psi} $ is
defined by
$$W_{\psi}(f)(a,b):=\langle f,\psi^{a,b}\rangle=\int_{-\infty}^{+\infty}f(x)\frac{1}{|
a|^{\frac{1}{2}}}\overline{\psi(\frac{x-b}{a})}\,dx, \quad f \in L^2(\mathbb{R}). $$ \end{defn}
For an admissible function $\psi$ in $L^2$, the system $\{\psi^{a,b}\}_{a\neq0, b\in\mathbb{R}}$ is a continuous tight frame for $ L^{2}(\mathbb{R})$ with respect to $ \Omega = \mathbb{R}\setminus\{0\}\times\mathbb{R} $ equipped with the measure $\frac{dadb}{a^{2}}$ and for all $f\in L^{2}(\mathbb{R})$ $$f=\frac{1}{C_{\psi}}\int_{-\infty}^{+\infty}\int_{-\infty}^{+\infty}W_{\psi}(f)(a,b) \psi^{a,b}\,\frac{dadb}{a^{2}},$$ where the integral is understood in weak sense (this formula is known as the \textit{Calder\'on Reproducing Formula}, cf. \cite{da92}). This system constitutes a continuous tight frame with frame bound $\frac{1}{C_{\psi}}$. If $\psi$ is suitably normed so that $C_{\psi} = 1$, then the frame bound is $1$, i.e. we have a continuous Parseval frame. For details, see the Proposition 11.1.1 and Corollary 11.1.2 of \cite{C4}.
\begin{defn}\label{D:Def_STFT} Fix a function $ g\in L^{2}(\mathbb{R})\setminus\{0\}$. The \textit{short-time Fourier transform} (STFT) of a function $f\in L^{2}(\mathbb{R})$ with respect to the window function $g $ is given by $$ \Psi_{g}(f)(y,\gamma)=\int_{-\infty}^{+\infty}f(x)\overline{g(x-y)}e^{-2\pi i x\gamma}dx, \quad\quad( y, \gamma \in\mathbb{R}).$$ \end{defn} Note that in terms of modulation operators and translation operators, $\Psi_{g}(f)(y,\gamma)=\langle f, M_{\gamma} T_{y}g\rangle$.
Let $ g\in L^{2}(\mathbb{R})\setminus\{0\}$. Then $\{M_{b}T_{a}g\}_{a,b\in\mathbb{R}}$ is a continuous frame for $L^{2}(\mathbb{R})$ with respect to $ \Omega =\mathbb{R}^{2}$ equipped with the Lebesgue measure. Let $f_{1}, f_{2}, g_{1}, g_{2}\in L^{2}(\mathbb{R})$. Then $$\int_{-\infty}^{+\infty}\int_{-\infty}^{+\infty}\Psi_{g_{1}}(f_{1})(a,b)\overline{\Psi_{g_{2}}(f_{2})(a,b)}dbda =\langle f_{1}, f_{2}\rangle\langle g_{2},g_{1}\rangle.$$
So this system represents a continuous tight frame with bound $\|g\|^2$. For details see Proposition 8.1.2 of \cite{C4}.
\section{Continuous Frame Multipliers}
Gabor multipliers \cite{feic} led to the introduction of Bessel and frame multipliers for abstract Hilbert spaces $\mathcal{H}_{1}$ and $\mathcal{H}_{2}$. These operators are defined by a fixed multiplication pattern (the symbol) which is inserted between the analysis and synthesis operators \cite{xxlmult1,peter2,peter3}.
\begin{defn} Let $\mathcal{H}_{1}$ and $\mathcal{H}_{2}$ be Hilbert spaces, let $(\psi_{k})\subseteq\mathcal{H}_{1}$ and $(\phi_{k})\subseteq\mathcal{H}_{2}$ be Bessel sequences. Fix $m = (m_k)\in l^{\infty}$. The operator ${\bf M}_{(m_k), (\phi_k), (\psi_k)} : \mathcal{H}_{1} \rightarrow \mathcal{H}_{2}$ defined by $$ {\bf M}_{(m_k), (\phi_k), ( \psi_k )} (f) = \sum \limits_k m_k \langle f,\psi_k\rangle \phi_k,\quad \quad (f \in \mathcal{H}_1) $$ is called \emph{Bessel multiplier} for the Bessel sequences $\{\psi_{k}\}$ and $\{\phi_{k}\}$. The sequence $m$ is called the \emph{symbol} of {\bf M}. For frames the resulting Bessel multiplier is called a \emph{frame multiplier}, for Riesz sequence a \emph{Riesz multiplier}. \end{defn}
This motivates the following definition in the continuous case.
\begin{defn}\label{definitioncontframemult} Let $F$ and $G$ be Bessel mappings for $\mathcal{H}$ with respect to $(\Omega,\mu)$ and $m:\Omega\rightarrow \mathbb{C}$ be a measurable function. The operator $\mathbf{M}_{m,F,G}:\mathcal{H}\rightarrow\mathcal{H}$ weakly defined by \[ \langle \mathbf{M}_{m,F,G}f,g\rangle=\int_{\Omega}m(\omega)\langle f, F(\omega)\rangle\langle G(\omega),g\rangle d\mu(\omega) \] for all $f,g\in\mathcal{H}$, is called the \emph{continuous Bessel multiplier} of $F$ and $G$ with respect to the mapping $m$, called the \emph{symbol}. \end{defn} We use the following notation to be understood in weak sense as above: \[\mathbf{M}_{m,F,G}f:=\int_{\Omega}m(\omega)\langle f, F(\omega)\rangle G(\omega)d\mu(\omega).\]
\begin{lem}\label{tar} Let $F$ and $G$ be Bessel mappings for $\mathcal{H}$ with respect to $(\Omega,\mu)$ with bounds $B_F$ and $B_G$. Let $m\in L^{\infty}(\Omega,\mu)$. The operator $\mathbf{M}_{m,F,G}:\mathcal{H}\rightarrow\mathcal{H}$ weakly defined by \[ \langle \mathbf{M}_{m,F,G}f,g\rangle=\int_{\Omega}m(\omega)\langle f, F(\omega)\rangle\langle G(\omega),g\rangle d\mu(\omega) \] for all $f,g\in\mathcal{H}$, is well defined and bounded with \[
\|\mathbf{M}_{m,F,G}\|\leq \|m\|_\infty \sqrt{B_F B_G}. \]
\end{lem} \begin{proof} It is clear that for each $f,g\in\mathcal{H}$, \begin{eqnarray*}
|\langle\mathbf{M}_{m,F,G}f,g\rangle|&\leq&\|m\|_\infty
\int_{\Omega}|\langle f,F(\omega)\rangle\langle G(\omega),g\rangle|\, d\mu(\omega)\\
&\leq&\|m\|_\infty \left(\int_{\Omega}|\langle f,F(\omega)\rangle|^{2}
\,d\mu(\omega)\right)^{\frac{1}{2}}\left(\int_{\Omega}|\langle G(\omega), g \rangle|^{2}\, d\mu(\omega)\right)^{\frac{1}{2}}\\
&\leq& \|m\|_\infty \sqrt{B_F B_G}\| f\|\| g\|. \end{eqnarray*} Thus $\mathbf{M}_{m,F,G}$ is well defined and bounded. \end{proof}
It is easy to prove that if $m(\omega)>0$ a.e., then for any Bessel function $F$ the multiplier $\mathbf{M}_{m,F,F}$ is a positive operator, and if $m(\omega)\geq \delta > 0$ for some positive constant $\delta$ and
$\|m\|_{\infty}<\infty$, then $\mathbf{M}_{m,F,F}$ is just the frame operator of $\sqrt{m}F$ and thus is positive, self-adjoint and invertible. \par By using synthesis and analysis operators, one easily shows that \begin{equation}\label{rep1}\mathbf{M}_{m,F,G}=T_G D_m T^*_F \end{equation}
where $D_m:L^{2}(\Omega,\mu)\rightarrow L^{2}(\Omega,\mu) $ and $(D_m \varphi)(\omega)=m(\omega)\varphi(\omega)$. It is proved that if $m\in L^{\infty}(\Omega,\mu)$, then $D_m$ is bounded and
$\|D_m\|=\|m\|_{\infty}$, \cite{conw1}. \par
\begin{prop} Let $F$ and $G$ be Bessel mappings for $\mathcal{H}$ with respect to $(\Omega,\mu)$ and $m:\Omega\rightarrow \mathbb{C}$ be a measurable function, then
$(\mathbf{M}_{m,F,G})^*=\mathbf{M}_{\overline{m},G,F}.$ \end{prop} \begin{proof}
For $f,g\in\mathcal{H}$ \begin{eqnarray*} \langle f,\mathbf{M}^*_{m,F,G}g\rangle &=&\langle\mathbf{M}_{m,F,G}f,g\rangle\\ &=&\int_{\Omega}m(\omega)\langle f, F(\omega)\rangle\langle G(\omega),g\rangle d\mu(\omega)\\ &=&\int_{\Omega}\langle f,\overline{m(\omega)}\langle g,G(\omega)\rangle F(\omega)\rangle d\mu(\omega)\\ &=&\langle f,\mathbf{M}_{\overline{m},G,F}g\rangle. \end{eqnarray*} \end{proof}
\subsection{Multiplication operators on $L^2$}
Motivated by the discrete case one might expect that $m\in L^p$ implies $D_m\in \mathcal{S}_p$, where $\mathcal{S}_p(\mathcal{H})$ denotes the family of Schatten $p$-class operators on $\mathcal{H}$. For $p=1$, we have trace class operators and for $p=2$ we have Hilbert-Schmidt operators. If this were true, we could easily, using the representation (\ref{rep1}), get results like in \cite{peter2}, since $ \mathcal{S}_p(\mathcal{H}_1,\mathcal{H}_2) $ is a two sided $*$-ideal of $\mathcal{B}(\mathcal{H}_1, \mathcal{H}_2)$. Unfortunately, the following proposition shows that at least for multiplication operators on $L^2(\mathbb R^d) = L^2(\mathbb R^d, dx)$ (with $dx$ denoting Lebesgue measure) the above considerations are never true, which constitutes a major difference between the discrete and the continuous case. The result seems to be mathematical folklore; we give a full proof for completeness. We will use the following lemma, which is of independent interest. \begin{lem} Let $A \subseteq \mathbb R^d$ be a measurable set of positive Lebesgue measure, $\lambda(A) > 0$. Then there exists a partition of $A$ into countably infinitely many mutually disjoint measurable sets $A_n$, $n\in\mathbb N$, of positive measure, i.e. such that \begin{enumerate} \item $A = \bigcup_{n=1}^{\infty} A_n$, \item $A_n \cap A_m = \emptyset$ for $n\not=m$, \item $\lambda(A_n) > 0$ for all $n\in \mathbb N$. \end{enumerate} \end{lem} \begin{proof} It suffices to show that any $A\subseteq \mathbb R^d$ with $\lambda(A) > 0$ can be decomposed in \emph{two} disjoint measurable sets $B$ and $C$ such that $A = B \cup C$, $B \cap C = \emptyset$ and $\lambda(B) > 0$, $\lambda(C) > 0$, since the claim follows from this by induction. Without loss of generality assume further that $\lambda(A) =: L < \infty$. 
The whole space $\mathbb R^d$ can be covered by mutually disjoint d-dimensional half-open cubes $I_n$, $n\in\mathbb N$, sufficiently small such that $\lambda(I_n) < \frac{L}{2}$ for all $n\in\mathbb N$. Then $A = \bigcup_{n\in\mathbb N} (A\cap I_n)$. Since $(A\cap I_n) \cap (A\cap I_m) = \emptyset$, we have $L = \lambda(A) = \sum_{n\in\mathbb N} \lambda(A\cap I_n)$. Now set \[ N := \{ n\in\mathbb N\,:\, \lambda(A\cap I_n) > 0 \}. \] Since $\lambda(A\cap I_n) \leq \lambda(I_n) \leq \frac{L}{2}$ for all $n\in\mathbb N$, $N$ must clearly contain \emph{at least two elements}, say $n_1$ and $n_2$. Now set \[ B := A\cap(I_{n_1}) \] and \[ C := (A\cap I_{n_2}) \cup \bigcup_{n\not\in N} (A\cap I_n). \] Then $B$ and $C$ have the stated properties. \end{proof} Now we can prove the following \begin{prop} Let $a\in L^{\infty}(\mathbb R^d)$. Denote by $D_a:L^2(\mathbb R^d) \to L^2(\mathbb R^d)$, $f \mapsto a\cdot f$, the bounded multiplication operator with symbol $a$.\\ Then $D_a$ is a compact operator if and only if $a = 0$. \end{prop} \begin{proof}
Assume $a \not= 0$. Let $\| a \|_{\infty} =: c > 0$. Define \[
A := \{ x\in\mathbb R^d\, : \, |a(x)| > \frac{c}{2} \}. \] Then $A$ is a set of positive Lebesgue measure, $\lambda(A) > 0$. Find a partition of $A$ as in the preceding lemma, i.e. into countably infinitely many measurable subsets $A_n$, $n\in\mathbb N$, such that (1) $A = \bigcup_{n\in\mathbb N} A_n$, (2) $A_n \cap A_m = \emptyset$ for $n\not= m$, i.e. the sets $A_n$ are mutually disjoint, and (3) $\lambda(A_n) > 0$ for all $n\in\mathbb N$, i.e. all the sets $A_n$ have strictly positive Lebesgue measure. Then set \[ f_n := \chi_{A_n}\cdot \frac{1}{\sqrt{\lambda(A_n)}}, \] with $\chi_{A_n}$ the characteristic function of $A_n$. Since \begin{align*} \langle f_n, f_m \rangle & = \int_{\mathbb R^d} \chi_{A_n}(x)\cdot \frac{1}{\sqrt{\lambda(A_n)}} \cdot \overline{\chi_{A_m}(x)\cdot \frac{1}{\sqrt{\lambda(A_m)}}}\,dx \\ & = \int_{A_n\cap A_m} \frac{1}{\sqrt{\lambda(A_n)\lambda(A_m)}}\,dx \\ & = \begin{cases} \int_{A_n} \frac{1}{\lambda(A_n)}\,dx = 1, & \text{ if $n = m$,} \\ 0, & \text{ if $n\not= m$,} \end{cases} \end{align*} the sequence of functions $(f_n)$ constitutes an orthonormal system in $L^2(\mathbb R^d)$. As such, it satisfies $f_n \to 0$ weakly by Bessel's Inequality. But $D_af_n(x) = a(x)\cdot \chi_{A_n}(x)\frac{1}{\sqrt{\lambda(A_n)}}$ and \begin{align*}
\| D_af_n \|^2 & = \int_{\mathbb R^d} |a(x)|^2\cdot |\chi_{A_n}(x)\frac{1}{\sqrt{\lambda(A_n)}}|^2\,dx \\
& = \int_{A_n} |a(x)|^2 \frac{1}{\lambda(A_n)}\,dx \\ & \geq \int_{A_n} \big(\frac{c}{2}\big)^2 \frac{1}{\lambda(A_n)}\,dx \\ & \geq \big(\frac{c}{2}\big)^2, \end{align*}
thus $\| D_af_n \| \nrightarrow 0$. Hence $D_a$ cannot be compact by Lemma \ref{weakly}. \end{proof}
In order to prove sufficient conditions for compactness of continuous frame multipliers, we thus have to choose a different approach than in the discrete setting. This will be addressed in the next section.
\subsection{Compact Multipliers}
\begin{thm}
Let $F$ and $G$ be Bessel mappings for $\mathcal{H}$ with respect to $(\Omega, \mu)$ and let either $F$ or $G$ be norm bounded, i.e. there is a constant $M > 0$ such that $\| F(\omega) \| \le M$ resp. $\| G(\omega) \| \le M$ for almost every $\omega \in \Omega$. Let $m:\Omega\rightarrow \mathbb{C}$ be a (essentially) bounded measurable function with support of finite measure, i.e. there exists a subset $K \subseteq \Omega$ with $\mu(K) < \infty$ such that $m(\omega) = 0$ for almost every $\omega \in \Omega \setminus K$.\\ Then $\mathbf{M}_{m,F,G}$ is a compact operator. \end{thm} \begin{proof} We have \[ \mathbf{M}_{m,F,G} = T_G \circ D_m \circ T^{\ast}_F \]
with $T^{\ast}_F$ the analysis operator for $F$, $D_m$ the multiplication operator with symbol $m$ and $T_G$ the synthesis operator for $G$. Assume first that $F$ is bounded, $\|F(\omega)\| \le M$ for almost all $\omega \in \Omega$. We will show that $D_m \circ T^{\ast}_F: \mathcal{H} \to L^2(\Omega, \mu)$ is compact. To this end, let $f_n \to 0$ weakly. Then \begin{align*}
\| D_mT^{\ast}_Ff_n \|^2 & = \int_{\Omega} |m(\omega)|^2\cdot | \left< f_n, F(\omega) \right> |^2\,d\mu(\omega) \\
& = \int_{K} |m(\omega)|^2\cdot | \left< f_n, F(\omega) \right> |^2\,d\mu(\omega). \end{align*} For the integrand, \[
|m(\omega)|^2\cdot | \left< f_n, F(\omega) \right> |^2 \to 0 \]
for $n\to\infty$ pointwise for every fixed $\omega \in K$, since the weak convergence of $(f_n)$ implies $\left< f_n, F(\omega) \right> \to 0$ for every $\omega\in\Omega$ fixed. Furthermore, weakly convergent sequences are bounded, thus there is a constant $C>0$ such that $\|f_n \| \le C$ for all $n\in\mathbb{N}$, and \begin{align*}
|m(\omega)|^2\cdot | \left< f_n, F(\omega) \right> |^2 & \le \|m\|_{\infty}^2\cdot \|f_n\|^2\cdot \|F(\omega)\|^2 \\
& \le \|m\|_{\infty}^2 \cdot C^2 \cdot M^2 \end{align*} for all $n\in\mathbb{N}$. This constant is an integrable majorant on $K$, so by Lebesgue's Dominated Convergence Theorem \[
\int_{K} |m(\omega)|^2\cdot | \left< f_n, F(\omega) \right> |^2\,d\mu(\omega) \to 0 \] for $n\to \infty$. Hence the operator $D_m \circ T^{\ast}_F$ maps weakly convergent sequences to norm convergent ones and is compact by Lemma \ref{weakly}. So $\mathbf{M}_{m,F,G} = T_G \circ (D_m \circ T^{\ast}_F)$ is compact as well.\\ If $G$ is bounded instead of $F$, consider the adjoint operator \[ \mathbf{M}_{m,F,G}^{\ast} = \mathbf{M}_{\overline{m},G,F} = T_{F}\circ D_{\overline{m}} \circ T^{\ast}_G; \] by what we have already shown, $\mathbf{M}_{\overline{m},G,F}$ is compact, hence also $\mathbf{M}_{m,F,G}$. \end{proof}
\begin{cor} \label{sec:compact2} Let $F$ and $G$ be Bessel mappings for $\mathcal{H}$ with respect to $(\Omega, \mu)$ and let either $F$ or $G$ be norm bounded. Let $m:\Omega \to \mathbb{C}$ be an (essentially) bounded measurable function that vanishes at infinity, i.e. for every $\varepsilon > 0$ there is a set of finite measure $K = K(\varepsilon) \subseteq \Omega$, $\mu(K) < \infty$, such that $|m(\omega)|\le \varepsilon$ for almost every $\omega \in \Omega \setminus K$. Then $\mathbf{M}_{m,F,G}$ is compact. \end{cor} \begin{proof}
For every $n\in\mathbb{N}$, choose a set $K_n \subseteq \Omega$ such that $\mu(K_n) < \infty$ and $|m(\omega)| \le \frac{1}{n}$ for all $\omega \not\in K_n$. Set \[ m_n(\omega) := m(\omega)\cdot \chi_{K_n}(\omega) \] where $\chi_{K_n}$ denotes the characteristic function of the set $K_n$. Then obviously \[
\| m_n - m \|_{\infty} \le \frac{1}{n} \to 0 \] for $n\to \infty$, thus \[
\| \mathbf{M}_{{m_n},F,G} - \mathbf{M}_{m,F,G}\| \le \| m_n - m \|_{\infty} \sqrt{B_FB_G} \to 0 \] by Lemma \ref{tar}. The functions $m_n$ are bounded and of finite support, so $\mathbf{M}_{{m_n},F,G}$ is compact for every $n\in\mathbb{N}$ by the preceding theorem, hence $\mathbf{M}_{m,F,G}$ is also compact. \end{proof}
Now assume that \emph{both} $F$ and $G$ are norm bounded. Then we can prove a trace class result. We use the following criterion:
\begin{lem}\label{trace_class_criterion}
\cite{pietsch} Let $\mathcal{H}$ be a Hilbert space. A bounded operator $T:\mathcal{H} \to \mathcal{H}$ is trace class if and only if $\sum_n | \left<Te_n, e_n \right> | < \infty$ for every orthonormal basis $(e_n)$ of $\mathcal{H}$. Moreover, \[
\| T \|_{\mathcal{S}^1} = \sup \{\sum_n | \left<Te_n, e_n \right> |\,:\, (e_n) \mbox{ orthonormal basis } \}. \] \end{lem}
\begin{thm}\label{ubs} Let $F$ and $G$ be norm bounded Bessel mappings with norm bounds $L_F$ and $L_G$, respectively. Let $m \in L^1(\Omega,\mu)$.\\
Then $\mathbf{M}_{m,F,G}$ is a well defined bounded operator and a trace class operator with $\| \mathbf{M}_{m,F,G} \|_{\mathcal{S}_1} \le \|m\|_{1}L_F L_G$. \end{thm} \begin{proof} For arbitrary $f, g \in \mathcal{H}$, we have \begin{align*}
\int_{\Omega} |m(\omega)|& | \left< f, F(\omega) \right> || \left< G(\omega), g \right> |\,d\mu(\omega) \\
& \leq \int_{\Omega} |m(\omega)| \|f\| \|F(\omega)\| \|g\| \|G(\omega)\|\,d\mu(\omega) \\
& \leq \|f\| \|g\| L_F L_G \int_{\Omega} |m(\omega)|\,d\mu(\omega) \\
& = \|f\| \|g\| L_F L_G \|m\|_1, \end{align*} thus $\mathbf{M}_{m,F,G}$ is a well defined bounded linear operator by Theorem \ref{murphy}. \\ Take an arbitrary orthonormal basis $(e_n)$ of $\mathcal{H}$. Then \begin{align*}
\sum_n & |\left< \mathbf{M}_{m,F,G}e_n, e_n \right>| \\
& = \sum_n |\int_{\Omega} m(\omega) \left< e_n, F(\omega) \right>\left< G(\omega), e_n \right>\,d\mu(\omega) | \\
& \leq \sum_n \int_{\Omega} | m(\omega)|\cdot | \left< e_n, F(\omega) \right>| \cdot |\left< G(\omega), e_n \right> |\,d\mu(\omega) \\
& \stackrel{\mbox{\tiny Fub.}}{=} \int_{\Omega} |m(\omega)| \sum_n | \left< e_n, F(\omega) \right>| \cdot |\left< G(\omega), e_n \right> |\,d\mu(\omega) \\
& \stackrel{\mbox{\tiny C.-S.}}{\leq} \int_{\Omega} |m(\omega)| \left( \sum_n | \left< e_n, F(\omega) \right>|^2 \right)^{1/2}\left( \sum_n |\left< G(\omega), e_n \right> |^2 \right)^{1/2}\,d\mu(\omega) \\
& = \int_{\Omega} |m(\omega)| \|F(\omega) \|\|G(\omega) \|\,d\mu(\omega) \\
& \leq \|m\|_{1}L_F L_G, \end{align*}
where we have used Fubini's Theorem and the Cauchy--Schwarz Inequality at the indicated places. Hence $\mathbf{M}_{m,F,G}$ is trace class with norm estimate $\| \mathbf{M}_{m,F,G} \|_{\mathcal{S}_1} \le \|m\|_{1}L_F L_G$, by the previous Lemma \ref{trace_class_criterion}. \end{proof}
Having established the trace class case, we are now able to extend the result to the whole family of Schatten p-classes by complex interpolation, see e.g. \cite{belo}.
\begin{thm} \label{sec:schatten1} Let $F$ and $G$ be norm bounded Bessel mappings with norm bounds $L_F$ and $L_G$, respectively. Let $m \in L^p(\Omega,\mu)$, $1 < p < \infty$.\\ Then $\mathbf{M}_{m,F,G}$ is a well defined bounded operator that belongs to the Schatten p-class $\mathcal{S}_p(\mathcal{H})$, with norm estimate \[
\| \mathbf{M}_{m,F,G} \|_{\mathcal{S}_p} \leq \| m \|_p \left( L_F L_G \right)^{1/p}\left(B_F B_G \right)^{1/2q}. \] \end{thm} \begin{proof}
We first show that the operator is well defined by the weak formula in Definition~\ref{definitioncontframemult}. To this end, let $f,g \in \mathcal{H}$ be fixed. Observe that the functions $\omega \mapsto \left< f, F(\omega) \right>$ resp. $\omega \mapsto \left< G(\omega), g \right>$ are bounded (by $L_F \|f\|$ resp. $L_G \|g\|$) and belong to $L^2(\Omega, \mu)$ (because $F$ and $G$ are Bessel mappings), hence their product $\omega \mapsto \left< f, F(\omega) \right>\left< G(\omega), g \right>$ is in $L^1(\Omega, \mu) \cap L^{\infty}(\Omega, \mu)$. But $L^1(\Omega, \mu) \cap L^{\infty}(\Omega, \mu) \subseteq L^q(\Omega, \mu)$ for all $1<q<\infty$.
Thus, for all $f,g \in\mathcal{H}$, \begin{align*}
| \left< \mathbf{M}_{m,F,G}f,g \right> | & \leq \int_{\Omega} |m(\omega)| | \left< f, F(\omega) \right>\left< G(\omega), g \right> |\,d\mu(\omega) \\
& \leq \| m \|_{p} \| \left< f, F(\cdot) \right>\left< G(\cdot), g \right> \|_{q} \end{align*} by H\"older's Inequality, with $\frac{1}{p} + \frac{1}{q} = 1$. The second term can be estimated as \begin{align*}
\| \left< f, F(\cdot) \right>&\left< G(\cdot), g \right> \|_{q} \\
& \leq \|\left< f, F(\cdot) \right>\left< G(\cdot), g \right> \|^{(q-1)/q}_{\infty} \| \left< f, F(\cdot) \right>\left< G(\cdot), g \right> \|^{1/q}_{1} \\
& \leq \left( L_F\|f\|\, L_G\|g\| \right)^{(q-1)/q} \left( \int_{\Omega} | \left< f, F(\omega) \right>\left< G(\omega), g \right> | \,d\mu(\omega) \right)^{1/q} \\
& \stackrel{\mbox{\tiny C.-S.}}{\leq} \left( L_F L_G \right)^{(q-1)/q} \left( \|f\|\,\|g\| \right)^{(q-1)/q} \left( \sqrt{B_F}\, \|f\| \sqrt{B_G}\, \|g\| \right)^{1/q} \\
& = \left( L_F L_G \right)^{1/p} \left( B_F B_G \right)^{1/2q} \|f\|\, \|g\|, \end{align*}
where we have used the interpolation inequality $\|h\|_{q} \leq \|h\|_{\infty}^{(q-1)/q}\|h\|_{1}^{1/q}$, the bound $|\left< f, F(\omega) \right>\left< G(\omega), g \right>| \leq L_F L_G \|f\|\,\|g\|$ for almost every $\omega$, the Cauchy--Schwarz Inequality at the indicated step, and the relation $(q-1)/q = 1/p$. Hence, for arbitrary $f,g \in\mathcal{H}$, \[
| \left< \mathbf{M}_{m,F,G}f,g \right> | \leq \| m \|_{p}(L_F L_G)^{1/p}(B_F B_G)^{1/2q}\,\|f\| \|g\|. \] This proves that $\mathbf{M}_{m,F,G}$ is a well defined bounded operator.\\ Now Lemma \ref{tar} shows that the mapping $L^{\infty}(\Omega,\mu) \to \mathcal{B}(\mathcal{H})$, $m \mapsto \mathbf{M}_{m,F,G}$, is a bounded linear operator. The same is true for the mapping $L^1(\Omega, \mu) \to \mathcal{S}_1(\mathcal{H})$, $m \mapsto \mathbf{M}_{m,F,G}$, by Theorem \ref{ubs}. Now let $\theta = 1 - \frac{1}{p} = \frac{1}{q}$ (i.e. such that $\frac{1}{p} = \frac{1-\theta}{1} + \frac{\theta}{\infty}$). A standard complex interpolation (\cite{belo}), between the Banach spaces $[L^1(\Omega,\mu), L^{\infty}(\Omega,\mu) ]_{\theta} = L^p(\Omega,\mu)$ on the one hand and $[ \mathcal{S}_1(\mathcal{H}), \mathcal{B}(\mathcal{H})]_{\theta} = [\mathcal{S}_1(\mathcal{H}), \mathcal{S}_{\infty}(\mathcal{H}) ]_{\theta} = \mathcal{S}_p(\mathcal{H})$ on the other, proves that the mapping $m \mapsto \mathbf{M}_{m,F,G}$ gives also a bounded linear operator from $L^p(\Omega,\mu)$ to the Schatten $p$-class $\mathcal{S}_p(\mathcal{H})$ with norm estimate \begin{align*}
\| \mathbf{M}_{m,F,G} \|_{\mathcal{S}_p} & \leq \| m \|_p \left( L_F L_G \right)^{1 - \theta}\left( \sqrt{B_F B_G} \right)^{\theta} \\
& = \| m \|_p \left( L_F L_G \right)^{1/p}\left(B_F B_G \right)^{1/2q}. \end{align*} \end{proof}
\subsection{Changing the Ingredients}
Like discrete Bessel multipliers \cite{xxlmult1}, a continuous Bessel multiplier clearly depends on the chosen symbol, analysis and synthesis functions. A natural question arises: What happens if these items are changed? Are the frame multipliers similar to each other if the symbol or the frames are similar to each other (in the right similarity sense)? Do the multipliers depend continuously on the input data? \par Let $m,m'\in L^\infty$ and $F,F',G,G'$ be Bessel functions. The representation (\ref{rep1}) and linearity of the operators $T_F, T_F', T_G, T_G', D_m$ and $D_m' $ yield \begin{equation}\label{1} \mathbf{M}_{m,F,G}-\mathbf{M}_{m',F,G}=T_G D_{m-m'} T^*_F=\mathbf{M}_{m-m',F,G}, \end{equation} \begin{equation}\label{2} \mathbf{M}_{m,F,G}-\mathbf{M}_{m,F',G}=T_G D_{m} T^*_{F-F'}=\mathbf{M}_{m,F-F',G}, \end{equation} \begin{equation}\label{3} \mathbf{M}_{m,F,G}-\mathbf{M}_{m,F,G'}=T_{G-G'} D_{m} T^*_{F}=\mathbf{M}_{m,F,G-G'}. \end{equation}
By adapting the methods in \cite{xxlmult1} and using the above identities, we can prove the following theorem about continuous Bessel multipliers.
\begin{thm} \label{sec:frammulprop1} Let $1 \leq p \leq \infty$. Let $F$ and $G$ be Bessel mappings for $\mathcal{H}$ with respect to $(\Omega,\mu)$ and, if $p < \infty$, assume in addition that $F$ and $G$ are norm bounded. Let $m:\Omega\rightarrow \mathbb{C}$ be a measurable function and let $m^{(n)}$, $n\in\mathbb{N}$, be measurable functions with $m^{(n)} \to m$
in $L^{p}(\Omega, \mu)$. Then $\mathbf{M}_{m^{(n)},F,G}$ converges to $\mathbf{M}_{m,F,G}$
in the Schatten-$p$-norm, i.e.
$ \| \mathbf{M}_{m^{(n)},F,G} - \mathbf{M}_{m,F,G} \|_{\mathcal{S}_p} \rightarrow 0 $, as $n\rightarrow\infty$. \end{thm} \begin{proof} The proof follows immediately from (\ref{1}) and the norm estimate in Lemma \ref{tar} and Theorem \ref{sec:schatten1}. \end{proof} In particular this is also valid for trace class ($p = 1$) operators and bounded operators ($p = \infty$).
\begin{thm} Let $m \in L^2(\Omega, \mu)$. Let $F$ and $G$ be Bessel mappings for $\mathcal{H}$. Let $F^{(n)}$ be a sequence of Bessel mappings such that $F^{(n)}(\omega) \rightarrow F(\omega)$ in a uniform strong sense. Then $\mathbf{M}_{m,F^{(n)},G}$ converges to $\mathbf{M}_{m,F,G}$ in operator norm. \end{thm} \begin{proof}
Let $f,g \in \mathcal{H}$. For given $\epsilon > 0$, choose $N$ such that $\| F^{(n)}(\omega) - F(\omega) \| \leq \epsilon$ for all $n \geq N$, for all $\omega \in \Omega$. Then \begin{align*}
| \langle (\mathbf{M}_{m,F^{(n)},G} & - \mathbf{M}_{m,F,G})f, g \rangle | \\
& \leq \int_{\Omega} | m(\omega) | |\langle f, F^{(n)}(\omega) - F(\omega) \rangle | | \langle G(\omega), g \rangle |\,d\mu(\omega) \\
& \leq \left( \int_{\Omega} | m(\omega) |^2 |\langle f, F^{(n)}(\omega) - F(\omega) \rangle |^2 \,d\mu(\omega) \right)^{1/2} \left( B_G \right)^{1/2} \| g \| \\
& \leq \epsilon \| m \|_{2} \|f \| \left( B_G \right)^{1/2} \| g \|. \end{align*} Thus by Theorem \ref{murphy} \[
\| \mathbf{M}_{m,F^{(n)},G} - \mathbf{M}_{m,F,G} \| \leq \epsilon \| m \|_2 \left( B_G \right)^{1/2}, \] so $\mathbf{M}_{m,F^{(n)},G}$ converges to $\mathbf{M}_{m,F,G}$ in operator norm. \end{proof}
For symbols $m \in L^1(\Omega, \mu)$, we have the following theorem. \begin{thm} Let $m \in L^1(\Omega, \mu)$. Let $F$ and $G$ be Bessel mappings for $\mathcal{H}$, and let $G$ be norm-bounded. Let $F^{(n)}$ be a sequence of Bessel mappings such that $F^{(n)}(\omega) \rightarrow F(\omega)$ in a uniform strong sense. Then $\mathbf{M}_{m,F^{(n)},G}$ converges to $\mathbf{M}_{m,F,G}$ in operator norm. \end{thm}
\begin{proof} For given $\epsilon > 0$, choose $N$ such that $\| F^{(n)}(\omega) - F(\omega) \| \leq \epsilon$ for all $n \geq N$, for all $\omega \in \Omega$. Then \begin{align*}
\| (\mathbf{M}_{m,F^{(n)},G} - \mathbf{M}_{m,F,G})f \| & \le \int_{\Omega} | m(\omega) | \underbrace{| \langle f, F^{(n)}(\omega) - F(\omega) \rangle |}_{\le \epsilon} \underbrace{\| G(\omega) \|}_{\le L_G} \,d\mu(\omega) \\
& \le \epsilon \| m \|_{1} L_G \| f\|. \end{align*} \end{proof} In the last two results the roles of $F$ and $G$ can be switched.
\subsection{Examples: Continuous STFT and wavelet multipliers} \label{sec:STFTwavmult0}
Particular cases of continuous frame multipliers, that means multipliers for certain continuous frames, have already been studied and used before. In this section we briefly summarize some earlier results on STFT multipliers and Calder\'on-Toeplitz operators.
\subsubsection{STFT multipliers}
Continuous frame multipliers have been discussed and extensively used earlier for the continuous frame of Definition \ref{D:Def_STFT}, i.e. the short-time Fourier transform. An operator of the form \[ \mathbf{M}_{m, \phi, \psi} f = \int_{-\infty}^{+\infty}\int_{-\infty}^{+\infty} m (a,b) \Psi_{\phi}(f)(a,b) M_{b}T_{a}\psi\,dadb, \] is called an STFT multiplier. In this context, the associated continuous frame multipliers are also known as time-frequency localization operators. They were first introduced and studied by Daubechies and Paul, \cite{da88}, \cite{dapa88}, where they are used as a mathematical tool to extract specific features of interest of a signal on phase space from its time-frequency representation. The Wigner distribution constitutes a continuous frame that is essentially identical to the STFT, cf. \cite{wi32} or \cite{Fol}. It is closely related to the so-called Weyl calculus of quantum mechanics. In physics, multipliers for the Wigner distribution have been around for quite a long time in connection with questions of quantization, under the name ``Anti-Wick operators'' in the work of Berezin, \cite{be71}. They had also appeared earlier in the theory of pseudodifferential operators, cf. \cite{cofe78}. In these early works, the symbol is usually taken to be the characteristic function of some portion of the time-frequency plane. In \cite{rato93}, results on decay properties of the eigenvalues as well as smoothness of the eigenfunctions of Wigner multipliers with characteristic functions as symbols are derived. A first result on Schatten class properties is contained in \cite{po66}, where it is shown for the Weyl correspondence that symbols in $L^2$ lead to Hilbert-Schmidt operators. 
Boundedness and mapping properties with respect to other Schatten classes of the correspondence between the symbol of a multiplier and the resulting operator are considered extensively in \cite{boco02}, \cite{bocogr04} (for Anti-Wick operators) and in \cite{cogr03}, \cite{cogr05} (for STFT multipliers). In these works, the operators are often interpreted as pseudodifferential operators. In \cite{boco02}, it is shown that symbols in $L^p$ generate Anti-Wick operators in the Schatten $p$-class. This result is a special case of our Theorem \ref{sec:schatten1}. In \cite{bocogr04}, the theory of Anti-Wick operators is extended to symbols in distributional Sobolev spaces. The paper \cite{cogr03} can very well serve as a comprehensive first survey on localization operators, i.e. STFT multipliers. The theory is developed in the framework of time-frequency analysis, see also \cite{coro05}. As symbol classes so-called modulation spaces are considered. This requires that the window functions for the STFTs that form the continuous analysis and synthesis frames also belong to modulation spaces, usually to the Feichtinger algebra $\mathcal{S}_0$. In this case it is shown that symbols in the modulation space $M^{p, \infty}$ are sufficient for localization operators in the Schatten $p$-class, $1 \leq p < \infty$. Since $L^p$ spaces are continuously embedded in the modulation spaces $M^{p, \infty}$, this extends our results in the considered special case. In \cite{cogr05} the authors also present necessary conditions for Schatten classes. Symbolic calculus and Fredholm properties for localization operators are discussed in \cite{cogr06}. The PhD thesis \cite{ba10} is concerned with questions of approximation of operators by localization operators and density properties of the set of all localization operators with symbols in certain symbol classes in spaces of operators, equipped with different topologies.
\subsubsection{Calder\'on--Toeplitz operators}
An operator defined by \[ \mathbf{M}_{m,\psi} f = \int_{-\infty}^{+\infty}\int_{-\infty}^{+\infty} m (a,b) W_{\psi}(f)(a,b) \psi^{a,b}\, \frac{dadb}{a^{2}}, \] (in the notation of Definition \ref{D:Def_Wavelet}) is called a Calder\'on--Toeplitz operator. This is a multiplier for the continuous frame given by the continuous wavelet transform. In this case, the function $m$ is referred to as the upper symbol of the operator, whereas the so-called lower symbol is given by $\tilde{m}(a,b) = \langle \mathbf{M}_{m, \psi} \psi^{a,b}, \psi^{a,b} \rangle$. The concept was first introduced in \cite{roch90} in 1990 as an analogue in terms of the wavelet transform to Toeplitz operators on spaces of analytic functions, for example Bergman spaces. The lower symbol corresponds analogously to the Berezin transform of a Toeplitz operator. Some interesting results on the spectral theory of these operators are shown in \cite{roch92}, for example the so-called correspondence principle, a statement on the dimensions of the spectral projections for certain bounded symbols. A number of mapping properties for Calder\'on--Toeplitz operators (with sufficiently smooth window function $\psi$) depending on the lower symbol are contained in \cite{now93}, for example the boundedness of the operator if and only if the lower symbol is bounded, or the compactness of the operator if and only if the lower symbol vanishes at infinity. These are stronger versions of Lemma \ref{tar} and Corollary \ref{sec:compact2} in this specialized setting. Some Schatten class properties for lower symbols in $L^p$, see Theorem \ref{sec:schatten1} for the general case, as well as for positive upper symbols are proven. Eigenvalue estimates are given in \cite{roch92_2} and \cite{now93_2}. Calder\'on--Toeplitz operators are (along with STFT multipliers) proposed as a tool for time-frequency localization in \cite{da92}. 
A unified treatment of the elementary theory of STFT multipliers and wavelet transform multipliers (based on the underlying group structures) is given in the textbook \cite{wong}.
\section{Controlled and weighted continuous frames }
The notion of controlled and weighted frames as introduced in \cite{petant} for discrete frames are closely linked to multipliers.
So here we look at the corresponding properties for continuous frames.
\subsection{Controlled continuous frames}
\begin{defn} Let $C\in GL(\mathcal{H})$. A $C$-controlled continuous frame is a map $F:\Omega\rightarrow\mathcal{H}$ such that there exist $m_{CL}>0$ and $M_{CL}<\infty$ such that \[
m_{CL}\|f\|^2\leq\int_{\Omega}\langle f,F(\omega)\rangle\langle CF(\omega),f\rangle d\mu\leq M_{CL}\|f\|^2 \quad ( f\in\mathcal{H} ). \]
\end{defn} We call $L_C f=\int_{\Omega}\langle f,F(\omega)\rangle CF(\omega) d\mu$ (in weak sense) the controlled continuous frame operator. Analogously to Proposition 2.4 of \cite{petant}, one can show that $L_C\in GL(\mathcal{H})$. \begin{prop} Let $F:\Omega\rightarrow\mathcal{H}$ be a $C$-controlled continuous frame for some $C\in GL(\mathcal{H})$. Then $F$ is a continuous frame for $\mathcal{H}$. \end{prop} \begin{proof} Since $C$ is linear we have \[ S_F f=\int_{\Omega}\langle f,F(\omega)\rangle F(\omega) d\mu=C^{-1}\int_{\Omega}\langle f,F(\omega)\rangle CF(\omega) d\mu=C^{-1} L_C f. \] Therefore $S_F$ is a bounded, positive and invertible operator and so $F$ is a continuous frame. \end{proof} By definition $L_C$ is positive and $L_C=CS_F=S_F C^*$. Therefore it is easy to show that, if $C\in GL(\mathcal{H})$ is a self-adjoint operator, then the mapping $F$ is a $C$-controlled frame if and only if it is a continuous frame for $\mathcal{H}$, $C$ is positive and commutes with $S_F$.
The following proposition shows that we can retrieve a continuous frame multiplier from a multiplier of controlled frames. Actually, the role played by the controlling operators is that of preconditioning matrices. \begin{prop} Let $C,D\in GL(\mathcal{H})$ be self-adjoint operators. If $F$ and $G$ are $C$- respectively $D$-controlled frames and $\mathbb{M}$ is their multiplier operator with respect to $m$, then $D^{-1}\mathbb{M}C^{-1}= \mathbf{M}_{m,F,G}$. \end{prop} \begin{proof} It is easy to see that for the $C$ and $D$ controlled frames $F$ and
$G$, we have $T_C=CT$ and $T^*_D=T^* D$. Now the representation (\ref{rep1}) yields $D^{-1}\mathbb{M}C^{-1}= \mathbf{M}_{m,F,G}$. \end{proof}
\subsection{Weighted continuous frames}
\begin{defn} Let $\mathcal{H} $ be a complex Hilbert space and $(\Omega ,\mu)$ be a measure space with positive measure $\mu$ and $m:\Omega\rightarrow \mathbb{R}^+$. The mapping $F:\Omega\to\mathcal{H}$ is called a weighted continuous frame with respect to $(\Omega ,\mu)$ and $m$, if\\ \begin{enumerate} \item $F$ is weakly-measurable and $m$ is measurable; \\ \item there exist constants $A, B> 0$ such that \begin{equation}\label{wecof}
A\|f\|^{2}\leq \int_{\Omega}m(\omega)|\langle f,F({\omega})\rangle|^{2}\,d\mu(\omega) \leq B\|f\|^{2}, \quad ( f\in \mathcal{H}). \end{equation} \end{enumerate}
The mapping $F$ is called weighted \emph{Bessel} if the second inequality in (\ref{wecof}) holds. \end{defn}
By using some ideas of \cite{StBa}, we have the following result. \begin{thm} Let $\mathbf{M}_{m,F,G}$ be invertible. Then: \begin{enumerate} \item If $G$ is a Bessel map, then $\overline{m}F$ satisfies the lower frame condition.\\ \item If $F$ is a Bessel map, then $mG$ satisfies the lower frame condition.\\ \item If $F$ and $mG$ (respectively $G$ and $\overline{m}F$) are Bessel maps, then they are continuous frames.\\ \item If $G$ is a Bessel map and $m\in L^\infty$, $m \neq 0$, then $F$ has a lower bound.\\ \item If $F$ and $G$ are Bessel maps and $m\in L^\infty$, $m\neq 0$, then both of $F$ and $G$ are continuous frames. \end{enumerate} \end{thm} \begin{proof} \begin{enumerate} \item For $f,g\in\mathcal{H}$, we have \[
|\langle\mathbf{M}_{m,F,G}f,g\rangle|\leq\left(\int_{\Omega}|\langle f,(\overline{m}F)(\omega)\rangle|^{2}
\,d\mu(\omega)\right)^{\frac{1}{2}}\left(\int_{\Omega}|\langle G(\omega), g \rangle|^{2}\, d\mu(\omega)\right)^{\frac{1}{2}} \] without loss of generality, we can assume $f\neq 0$ and \[
\int_{\Omega}|\langle f,(\overline{m}F)(\omega)\rangle|^{2} \,d\mu(\omega)<\infty. \] So \[
|\langle\mathbf{M}_{m,F,G}f,g\rangle|\leq\sqrt{B_G}\,\|g\|\left(\int_{\Omega}|\langle f,(\overline{m}F)(\omega)\rangle|^{2} \,d\mu(\omega)\right)^{\frac{1}{2}}. \] By letting $g=(\mathbf{M}^*_{m,F,G})^{-1}f$ we have \begin{eqnarray*}
\|f\|^2 & \leq&\sqrt{B_G}\,\|(\mathbf{M}^*_{m,F,G})^{-1}
\|\|f\|\left(\int_{\Omega}|\langle f,(\overline{m}F(\omega))\rangle|^{2} \,d\mu(\omega)\right)^{\frac{1}{2}}. \end{eqnarray*} So \[
\frac{1}{\sqrt{B_G}\,\|(\mathbf{M}^*_{m,F,G})^{-1}
\|}\|f\|\leq\left(\int_{\Omega}|\langle f,(\overline{m}F)(\omega)\rangle|^{2} \,d\mu(\omega)\right)^{\frac{1}{2}}. \]\\ \item Similar to (1). \item Let $F$ be a Bessel map, then by part (2), $mG$ has a lower bound and so it is a frame. If $mG$ is a Bessel map then by (1) $1 \cdot F = F$ satisfies the lower frame inequality and therefore is a frame. \item By (1) $\overline{m} F$ satisfies the lower frame inequality. Therefore
$$A \| f \|^2 \le \int_{\Omega}|\langle f,(\overline{m}F)(\omega)\rangle|^{2}
\,d\mu(\omega) \le \| m \|_\infty^2 \int_{\Omega}|\langle f, F(\omega)\rangle|^{2} \,d\mu(\omega) . $$ And so
$$\frac{A}{\| m \|_\infty^2}\, \| f \|^2 \le \int_{\Omega}|\langle f, F(\omega)\rangle|^{2} \,d\mu(\omega) . $$ \item Follows from (1), (2) and (3). \end{enumerate} \end{proof}
The following theorem finds a dual of a continuous frame in the case that the multiplier operator is invertible. (Analogous to the discrete results in \cite{stobalrep11}). \begin{thm} Let $\mathbf{M}_{m,F,G}$ be invertible and $G$ be a continuous frame. Then $(\mathbf{M}_{m,F,G}^{-1})^{*}\overline{m}F$ is a dual of $G$. \end{thm} \begin{proof} By replacing $f$ with $\mathbf{M}_{m,F,G}^{-1}f$ in \[ \langle \mathbf{M}_{m,F,G}f,g\rangle=\int_{\Omega}m(\omega)\langle f, F(\omega)\rangle\langle G(\omega),g\rangle d\mu \] we get \begin{eqnarray*} \langle f,g\rangle&=&\int_{\Omega}m(\omega)\langle \mathbf{M}_{m,F,G}^{-1}f, F(\omega)\rangle\langle G(\omega),g\rangle d\mu \\&=&\int_{\Omega}\langle f,(\mathbf{M}_{m,F,G}^{-1})^* \overline{m(\omega)} F(\omega)\rangle\langle G(\omega),g\rangle d\mu. \end{eqnarray*} \end{proof}
\textbf{Acknowledgment}: Some of the results in this paper were obtained when A. Rahimi visited the Acoustics Research Institute, Austrian Academy of Sciences, Austria. He thanks this institute for their hospitality. \\This work was supported by the WWTF project MULAC ('Frame Multipliers: Theory and Application in Acoustics; MA07-025).
\end{document} | arXiv | {
"id": "1111.2440.tex",
"language_detection_score": 0.6622018814086914,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Specifying Gaussian Markov Random Fields with Incomplete Orthogonal Factorization using Givens Rotations}
\begin{abstract} In this paper an approach for finding a sparse incomplete Cholesky factor through an incomplete orthogonal factorization with Givens rotations is discussed and applied to Gaussian Markov random fields (GMRFs). The incomplete Cholesky factor obtained from the incomplete orthogonal factorization is usually sparser than the commonly used Cholesky factor obtained through the standard Cholesky factorization. On the computational side, this approach can provide a sparser Cholesky factor, which gives a computationally more efficient representation of GMRFs. On the theoretical side, this approach is stable and robust and always returns a sparse Cholesky factor. Since this approach applies both to square matrices and to rectangle matrices, it works well not only on precision matrices for GMRFs but also when the GMRFs are conditioned on a subset of the variables or on observed data. Some common structures for precision matrices are tested in order to illustrate the usefulness of the approach. One drawback to this approach is that the incomplete orthogonal factorization is usually slower than the standard Cholesky factorization implemented in standard libraries and currently it can be slower to build the sparse Cholesky factor. \newline \noindent \textbf{Keywords}: {Gaussian Markov random field; Incomplete orthogonal factorization; Upper triangular matrix, Givens Rotation; Sparse matrix; Precision matrix} \end{abstract}
\section{Introduction} \label{sec: cholesky_introduction} Gaussian Markov random fields (GMRFs) are useful models in spatial statistics due to the Gaussian properties together with Markovian structures. They can also be formulated as conditional auto-regression (CAR) models \citep{rue2005gaussian}. GMRFs have applications in many areas, such as spatial statistics, time-series models, analysis of longitudinal survival data, image analysis and geostatistics. See~\citet[Chapter $1$]{rue2005gaussian} for more information and literature on how the {GMRFs} can be applied in different areas. From an analytical point of view GMRFs have good properties and can be specified through mean values $\boldsymbol{\mu}$ and covariance matrices $\boldsymbol{\Sigma}$. From a computational point of view, {GMRFs} can be conveniently specified through precision matrices $\boldsymbol{Q}$ (the inverse of the covariance matrices $\boldsymbol{\Sigma}$), which are usually sparse matrices. The numerical algorithms for sparse matrices can be exploited for calculations with the sparse precision matrices and hence fast statistical inference is possible~\citep{rue2001fast}. The numerical algorithms for sparse matrices can be applied to achieve fast simulation of the fields and evaluation of the densities (mostly, log-densities) of GMRFs and GMRFs with conditioning on a subset of variables or linear constraints. See~\citet[Chapter 2]{rue2005gaussian} for further details. These algorithms can also be used to calculate the marginal variances~\citep{rue2005marginal}, and they can be extended to non-Gaussian cases~\citep{rue2004approximating}.
Precision matrices $\boldsymbol{Q}$ are commonly used to specify GMRFs. This approach is natural due to the sparsity patterns of the precision matrices in Markovian models. In many situations the Cholesky factors are required and are crucial for simulation and inferences with GMRFs, and the Cholesky factors are normally obtained with Cholesky factorization routines in standard libraries. See~\citet[Chapter $2$]{rue2005gaussian} for different simulation algorithms for GMRFs using Cholesky factors. In order to get an even sparser Cholesky factor, with the purposes of saving computational resources, \citet{wist2006specifying} showed that the Cholesky factor from an incomplete Cholesky factorization can be much sparser than the Cholesky factor from the regular Cholesky factorization. However, they provided theoretical and empirical evidence showing that the representation of sparser Cholesky factor was fragile when conditioning the GMRF on a subset of the variables or on observed data. It means that the sparsity patterns of the sparser Cholesky factors are destroyed when some constraints or observed data are introduced and the computational cost increases. Additionally, the sparser Cholesky factor from the incomplete Cholesky factorization is only valid for a specific precision matrix. Their approach is illustrated in Figure \ref{fig: cholesky_diag} with Routine $1$.
In this paper a different approach is chosen to solve the problem presented by \citet{wist2006specifying}. The main idea is given in Figure \ref{fig: cholesky_diag} with Routine $2$. In this approach one rectangular matrix $\boldsymbol{A}$ is formulated,
\begin{equation} \label{eq: rectanglar_matrix_A} \boldsymbol{A} = \begin{pmatrix} \boldsymbol{L}_1^{\mbox{T}}\\\boldsymbol{L}_2^{\mbox{T}}\end{pmatrix}. \end{equation}
\noindent It consists of the Cholesky factor $\boldsymbol{L}^{\mbox{T}}_1$ from the precision matrix $\boldsymbol{Q}_1$ of a given GMRF and the Cholesky factor $\boldsymbol{L}^{\mbox{T}}_2$ of the matrix $\boldsymbol{Q}_2$. The matrix $\boldsymbol{Q}_2$ can be the additional effect when the GMRF is conditioned on observed data or on a subset of the variables. Both $\boldsymbol{L}_1$ and $\boldsymbol{L}_2$ are lower triangular matrices.
An incomplete orthogonal factorization is then used to factorize the matrix $\boldsymbol{A}$ in Equation \eqref{eq: rectanglar_matrix_A} to find the sparse Cholesky factor
for specifying the GMRF. It is shown that by using this approach an upper triangular matrix $\boldsymbol{R}$ which is sparser than the standard Cholesky factor is obtained. Furthermore, this approach is applicable when the GMRF is conditioned on a subset of the variables or on observed data. Since the upper triangular matrix $\boldsymbol{R}$ is sparser in structure than the common Cholesky factor, it is better for applications.
\begin{figure}
\caption{Diagram for the algorithm for finding sparser Cholesky factor by incomplete Cholesky factorization used by \citet{wist2006specifying} (Routine $1$) and the algorithm used in this paper (Routine $2$).}
\label{fig: cholesky_diag}
\end{figure}
The rest of this paper is organized as follows. In Section \ref{sec: cholesky_background} some basic theory on GMRFs, sparsity patterns for precision matrices and Cholesky factors of GMRFs are presented. Some basic theories on the orthogonal factorization and the incomplete orthogonal factorization are also introduced in this section. In Section \ref{sec: cholesky_algorithm} the algorithm for obtaining the sparse Cholesky factor from the incomplete orthogonal factorization is introduced. A small example is given in order to illustrate how the algorithm works when the GMRFs are conditioned on a subset of the variables or on observed data. Results for different structures on the precision matrices are given in Section \ref{sec: cholesky_results}. Conclusion and general discussion in Section \ref{sec: cholesky_conclusion} ends the paper.
\section{Background and Preliminaries} \label{sec: cholesky_background}
\subsection{Basic theory on GMRFs} \label{sec: cholesky_GMRFs} A random vector $\boldsymbol{x}=(x_1,x_2,\ldots,x_n)^{\mbox{T}} \in \mathbb{R}^n$ is called a GMRF if it is Gaussian distributed and possesses a Markov property. The structure of a GMRF is usually presented by a labeled graph $\mathcal{G} = (\mathcal{V}, \mathcal{E})$, where $\mathcal{V}$ is the set of vertices $\{ 1,2,\ldots, n \}$ and $\mathcal{E}$ is the set of edges. The graph $\mathcal{G}$ satisfies the property that there is no edge between node $i$ and node $j$ if and only if
$x_i \perp x_j | \boldsymbol{x}_{-ij}$ \citep{rue2005gaussian}, where $\{ \boldsymbol{x}_{-ij}; i, j = 1, 2, \dots, n\}$ denotes $\boldsymbol{x}_{-\{i,j\}}$. If the random vector $\boldsymbol{x}$ has a mean $\boldsymbol{\mu}$ and a precision matrix $\boldsymbol{Q}_1>0$, the probability density of the vector $\boldsymbol{x}$ is
\begin{equation}\label{eq: cholesky_density_function}
\pi(\boldsymbol{x}|\boldsymbol{\theta}) =
\left(\frac{1}{2\pi} \right)^{n/2}|\boldsymbol{Q}_1(\boldsymbol{\theta})|^{1/2}\exp\left(-\frac{1}{2}(\boldsymbol{x}-\boldsymbol{\mu})^{\mbox{T}}\boldsymbol{Q}_1(\boldsymbol{\theta})(\boldsymbol{x}-\boldsymbol{\mu})\right), \end{equation} with the property $$ Q_{ij}\neq 0 \Longleftrightarrow \{i,j\} \in \mathcal{E} \text{ for all } i \neq j. $$ The notation $\boldsymbol{Q}_1>0$ means that $\boldsymbol{Q}_1$ is a symmetric positive definite matrix. $\boldsymbol{\theta}$ denotes the parameters in the precision matrix. This implies that any vector with a Gaussian distribution and a symmetric positive definite covariance matrix is a Gaussian random field (GRF), and GMRFs are GRFs with Markov properties. The graph $\mathcal{G}$ determines the nonzero pattern of $\boldsymbol{Q}_1$. If $\mathcal{G}$ is fully connected, then $\boldsymbol{Q}_1$ is a complete dense matrix. A useful property of GMRF is that we can know whether $x_i$ and $x_j$ are conditionally independently or not directly from the precision matrix $\boldsymbol{Q}_1$ and the graph $\mathcal{G}$. Values of mean $\boldsymbol{\mu}$ do not have any influence on the pairwise conditional independence properties of the GMRFs, and hence we set $ \boldsymbol{\mu} = \boldsymbol{0}$ in the following sections unless otherwise specified. The diagonal elements in the precision matrix $Q_{ii} \, (i = 1, 2, \dots, n)$ are the conditional precisions of $x_{i}$ given all the other nodes $\boldsymbol{x}_{-i}$. The off-diagonal elements $Q_{ij} \, (i,j = 1, 2, \dots, n, i \neq j)$ can provide information about the correlations between $x_i$ and $x_j$ given on the nodes $\boldsymbol{x}_{-ij}$. These are the main differences in the interpretation between the precision matrix $\boldsymbol{Q}_1$ and the covariance matrix $\boldsymbol{\Sigma}_1$. The covariance matrix $\boldsymbol{\Sigma}_1$ contains the marginal variance of $x_i$ and the marginal correlation between $x_i$ and $x_j$. 
However, with the precision matrix the marginal properties are not directly available \citep{rue2005gaussian}.
Since $\boldsymbol{Q}_1$ is symmetric positive definite, there is a unique Cholesky factor $\boldsymbol{L}_1$ where $\boldsymbol{L}_1$ is a lower triangular matrix satisfying $ \boldsymbol{Q}_1 = \boldsymbol{L}_1\boldsymbol{L}_1^{\mbox{T}} $. If we want to sample from the GMRF $\boldsymbol{x} \thicksim \mathcal{N}(\boldsymbol{\mu}, \boldsymbol{Q}_1^{-1})$, the Cholesky factor $\boldsymbol{L}_1$ is commonly used. One algorithm for sampling GMRFs is given in Section \ref{sec: cholesky_sampling}. More algorithms for sampling GMRFs with different specifications are also available. See \citet[Chapter $2$]{rue2005gaussian} for a detailed discussion on these algorithms. \citet{rue2005gaussian} showed how to check the sparsity pattern of the Cholesky factor of a GMRF. Define
\begin{equation}\label{eq: cholesky_F} F(i,j) = \{i+1, i+2, \ldots, j-1, j+1, \ldots, n\}, \end{equation} which is the future of $i$ except $j$. Then \begin{equation}\label{eq: cholesky_conditional_indpendent}
x_i \bot x_j\ | \ \boldsymbol{x}_{F(i,j)} \Longleftrightarrow L_{ji} = 0, \end{equation}
and $F(i,j)$ is called a separating subset of $i$ and $j$.
If $i \sim j$ denotes that $i$ and $j$ are neighbors, then $F(i,j)$ cannot be a separating subset for $i$ and $j$ whenever $i \sim j$. Further, the Cholesky factor of the precision matrix of a GMRF is always equally dense or denser than the lower triangular part of $\boldsymbol{Q}_1$.
In many situations there are more nonzero elements in $\boldsymbol{L}_1$ than in the lower triangular part of $\boldsymbol{Q}_1$. Denote $n_{\boldsymbol{L}_1}$ and $n_{\boldsymbol{Q}_1}$ the numbers of nonzero elements in the Cholesky factor $\boldsymbol{L}_1$ and the lower triangular part of precision matrix $\boldsymbol{Q}_1$, respectively. The difference $n_f = n_{\boldsymbol{L}_1} - n_{\boldsymbol{Q}_1}$ is called the fill-in. The ideal case is $n_f = 0$ or $n_{\boldsymbol{L}_1} = n_{\boldsymbol{Q}_1}$, but commonly $n_{\boldsymbol{L}_1} > n_{\boldsymbol{Q}_1}$ or even $n_{\boldsymbol{L}_1} \gg n_{\boldsymbol{Q}_1}$. It is known that the fill-in $n_f$ not only depends on the graph, but also on the order of the nodes in the graph \citep{rue2005gaussian}. Thus a re-ordering is usually needed before doing a Cholesky factorization. It is desirable to find an optimal or approximately optimal ordering of the graph in order to make the Cholesky factor of $\boldsymbol{Q}_1$ sparser and to save computational resources, but this is not the focus in this paper. We refer to \cite{rue2005gaussian} for more information on why it is desirable to do re-ordering of the graph of a GMRF.
\subsection{Orthogonal factorization} \label{sec: cholesky_IGO}
With an $m \times n$ matrix $\boldsymbol{A}$, the orthogonal factorization of $\boldsymbol{A}$ is
\begin{equation}\label{eq: cholesky_Orthogonal_factorization}
\boldsymbol{A} = \boldsymbol{S} \cdot \boldsymbol{R},
\end{equation}
where $\boldsymbol{S} \in \mathbb{R}^{m \times m}$ is an orthogonal matrix and $\boldsymbol{R} \in \mathbb{R}^{m \times n}$ is an
upper triangular matrix. We assume without loss of generality that $m \geq n$. There exist many algorithms for orthogonal factorization,
such as the standard Gram--Schmidt algorithm or the modified Gram--Schmidt (MGS) algorithm and the Householder orthogonal factorization. We refer to
\citet{saad2003iterative} and \citet{bjorck1996numerical} for more algorithms. If $\boldsymbol{A}$ has full column rank, then the first $n$ columns of $\boldsymbol{S}$ form an
orthonormal basis of ran($\boldsymbol{A}$), where ran($\boldsymbol{A}$) denotes the range of $\boldsymbol{A}$
$$ \ran(\boldsymbol{A}) = \left\{ \boldsymbol{y} \in \mathbb{R}^m: \boldsymbol{y} = \boldsymbol{A} \boldsymbol{x} \ \textrm{for} \ \textrm{some} \
\boldsymbol{x} \in \mathbb{R}^n \right\}.$$ The orthogonal factorization is usually used to find an orthonormal basis for a matrix. The orthogonal factorization has many advantages and some of them are given in what follows.
\begin{enumerate}
\item It is numerically stable and robust both with a Householder orthogonal factorization and with an orthogonal factorization using Givens rotations. If the matrix $\boldsymbol{A}$ is non-singular,
it always produces an orthogonal matrix $\boldsymbol{S}$ and an upper triangular matrix $\boldsymbol{R}$ which satisfy Equation \eqref{eq: cholesky_Orthogonal_factorization};
\item It is easy to solve the linear system of equations $\boldsymbol{A}\boldsymbol{x} = \boldsymbol{b} $ using the upper triangular matrix $\boldsymbol{R}$ since $\boldsymbol{S}$ is an orthogonal matrix;
\item The normal equation has the form $\boldsymbol{A}^{\mbox{T}}\boldsymbol{A} \boldsymbol{x} = \boldsymbol{A}^{\mbox{T}}\boldsymbol{b}$
and the normal equation matrix is $\boldsymbol{A}^{\mbox{T}}\boldsymbol{A}$, where $\boldsymbol{A}^{\mbox{T}}$ denotes the transpose of $\boldsymbol{A}$.
Then the triangular matrix $\boldsymbol{R}$ is the Cholesky factor of the normal equations matrix.
\end{enumerate}
\subsection{Givens rotations} \label{sec: cholesky_givens}
A Givens rotation $G(i,j,\theta) \in \mathbb{R}^{m \times m}$ is an identity matrix $\boldsymbol{I}$ except that
\begin{displaymath}
\begin{split}
G_{ii} & = c,\hspace{6.5mm} G_{ij} = s, \\
G_{ji} &= -s, \hspace{3mm} G_{jj} = c.
\end{split}
\end{displaymath}
If $c = \cos(\theta)$ and $s = \sin(\theta)$, then $\boldsymbol{y} = G(i,j,\theta) \cdot \boldsymbol{x}$ rotates $\boldsymbol{x}$ clockwise in the $(i,j)$-plane with
$\theta$ radians, which gives
\begin{equation}\label{eq: eq6}
y_l = \left\{ \begin{array} {ll}
x_l, & \textrm{ when } l \neq i,j, \\
cx_i + sx_j, & \textrm{ when } l =i, \hspace{10mm} (1 \leq l \leq m), \\
-sx_i + cx_j, & \textrm{ when } l=j.
\end{array} \right.
\end{equation} If we want to rotate $\boldsymbol{x}$ counterclockwise in the $(i,j)$-plane with $\theta$ radians, then we can set $c = \cos(\theta)$ and $s = -\sin(\theta)$. It is obvious from Equation \eqref{eq: eq6} that if
\begin{displaymath}
s = \frac{x_j}{\sqrt{x_i^2 + x_j^2}} \ \textrm{and} \ c = \frac{x_i}{\sqrt{x_i^2 + x_j^2}}
\end{displaymath}
then $y_j = 0$. So the Givens rotations can set the elements in $\boldsymbol{A}$ to zeros one at a time. This is useful when dealing with sparse matrices. At
the same time, $c$ and $s$ are the only two values which we need for this algorithm. Givens rotations are suitable for structured least squares
problems such as the problems at the heart of GMRFs.
\subsection{Incomplete factorization algorithms} There are many algorithms for incomplete factorizations of matrices, such as the incomplete triangular factorization and the incomplete orthogonal factorization. These algorithms are commonly used in practical applications \citep{axelsson1996iterative, meijerink1981guidelines, saad1988preconditioning}. The incomplete factorizations usually have the form \begin{equation}\label{eq: cholesky:imcimplete_factorizations}
\boldsymbol{A} = \boldsymbol{M}_1 \cdot \boldsymbol{M}_2 + \boldsymbol{E},
\end{equation} where $\boldsymbol{E}$ is the error matrix, and $\boldsymbol{M}_1$ and $\boldsymbol{M}_2$ are some well-structured matrices. The incomplete factorization algorithms are usually associated with dropping strategies. A dropping strategy for an incomplete factorization specifies rules for when elements of the factors should be dropped. We return to a detailed discussion on the dropping strategies in Section \ref{sec: cholesky_sparse_factor}.
One of the commonly used incomplete factorization algorithms is the incomplete triangular factorization, and it is also called incomplete LU (ILU) factorization since $\boldsymbol{M}_1$ is a \emph{lower} triangular matrix and $\boldsymbol{M}_2$ is an \emph{upper} triangular matrix. This algorithm
is usually applied to the square matrices, and it uses Gaussian elimination together with a predefined dropping strategy. Many incomplete orthogonal factorizations can be used both for square matrices and for rectangular matrices, and these algorithms usually use the modified Gram-Schmidt procedure together with some dropping strategies in order to return a sparse and generally non-orthogonal matrix $\boldsymbol{S}$ and a sparse upper triangular matrix $\boldsymbol{R}$. \citet{wang1997cimgs} proved the existence and stability of the associated incomplete orthogonal factorization. Incomplete orthogonal factorization using Givens rotations was proposed by \citet{bai2001class}. The main idea of the incomplete orthogonal factorization is to use the Givens rotations to zero-out the elements in the matrix one at a time. Some predefined dropping strategies are needed in order to achieve the sparsity pattern for the upper triangular matrix $\boldsymbol{R}$. This algorithm computes a sparse matrix $\boldsymbol{S}$, which is always an orthogonal matrix, together with a sparse upper triangular matrix $\boldsymbol{R}$. Since the matrix $\boldsymbol{S}$ is the product of the Givens rotations matrices, it is always an orthogonal matrix. The incomplete orthogonal factorization has the form
\begin{equation}\label{eq: cholesky:imcimplete_orthogonal_factorizations}
\boldsymbol{A} = \boldsymbol{S} \cdot \boldsymbol{R} + \boldsymbol{E}.
\end{equation}
This method was originally described and implemented by \citet{jennings1984incomplete}.
\citet{saad1988preconditioning} described this incomplete orthogonal factorization with the modified Gram--Schmidt process using some numerical dropping
strategy. Another version of the incomplete orthogonal factorization is given by \citet{bai2001class} with Givens rotations. \citet{bai2001class} claimed that this incomplete algorithm inherited the good properties of the orthogonal factorization.
\begin{enumerate}
\item $\boldsymbol{R}$ is a sparse triangular matrix and $\boldsymbol{S}$ is an orthogonal matrix.
\citet{bai2009numerical} pointed out that the sparsity pattern of the upper-triangular part of $\boldsymbol{A}$ is inherited by the incomplete upper triangular matrix $\boldsymbol{R}$.
They also pointed out that the number of nonzero elements in the upper triangular matrix $\boldsymbol{R}$ is less than the number of nonzero elements in the upper-triangular part of $\boldsymbol{A}$.
\item The error matrix $\boldsymbol{E} = \boldsymbol{A} - \boldsymbol{S} \cdot \boldsymbol{R}$ is ``small'' in some sense and the size of the errors can be controlled by the pre-defined threshold.
\item The triangular matrix $\boldsymbol{R}$ is non-singular whenever $\boldsymbol{A}$ is not singular. We can always obtain this triangular matrix in the same way as the orthogonal factorization and
$\boldsymbol{R}$ will always be an incomplete Cholesky factor for the normal equation matrix $\boldsymbol{A}^{\mbox{T}} \boldsymbol{A}$.
\item Another merit of the incomplete orthogonal factorization with Givens rotations is that we do not need to form the corresponding orthogonal matrix $\boldsymbol{S}$ since only the $(c, s)$-pair is needed in
order to find the upper triangular matrix $\boldsymbol{R}$. More information about the Givens rotations and the $(c, s)$-pairs are given in Section \ref{sec: cholesky_givens}
\end{enumerate}
\citet{papadopoulos2005class} implemented different versions of the algorithm proposed by \citet{bai2001class}. There are two main differences between these versions. The first one is the order in which elements in the matrix $\boldsymbol{A}$ are zeroed out, and the second one is the rules for dropping strategies. We refer to \citet{bai2001class} and \citet{papadopoulos2005class} for more information about this algorithm and implementations. There are also more variations for incomplete orthogonal factorization using Givens rotations, such as \citet{bai2009modified} and \citet{bai2009numerical}. \citet{bai2009modified} proposed some modified incomplete orthogonal factorization methods and these algorithms have special storage and sparsity-preserving techniques. \citet{bai2009modified} showed a way to adopt a diagonal compensation strategy by reusing the dropped elements. These dropped elements are added to the main diagonal elements of the same rows in the incomplete upper-triangular matrix $\boldsymbol{R}$. \citet{bai2009numerical} proposed practical incomplete Givens orthogonalization (IGO) methods for solving large sparse systems of linear equations. They claimed that these incomplete IGO methods took the storage requirements, the accuracy of the solutions and the coding of the pre-conditioners into consideration.
In this report, we have chosen the column-wise threshold incomplete Givens orthogonal (cTIGO) factorization
algorithm for finding the sparse upper triangular matrix $\boldsymbol{R}$.
This sparse upper triangular matrix $\boldsymbol{R}$ has sparse structure and can be used for specifying the GMRFs.
The matrix $\boldsymbol{S}$ does not need to be stored in our setting since
we only need the upper-triangular matrix $\boldsymbol{R}$. The matrix $\boldsymbol{S}$ only needs to be computed whenever it is explicitly needed.
\section{Specifying GMRFs using sparse Cholesky factors} \label{sec: cholesky_algorithm} In this section we begin by introducing the background of GMRFs conditioned on a subset of the variables or on observed data. A small example is used to illustrate how the cTIGO algorithm works when applied to GMRFs.
\subsection{GMRFs conditioned on a subset of the variables} \label{sec: cholesky_conditioningsubsets} \textbf{I. GMRFs with soft constraint} \newline Let $\boldsymbol{x}$ be a GMRF and assume that we have observed some linear transformation $\boldsymbol{Ax}$ with additional Gaussian distributed noise \begin{displaymath}
\boldsymbol{e}|\boldsymbol{x} \sim \mathcal{N}(\boldsymbol{Ax}, \boldsymbol{\boldsymbol{Q}}_{\epsilon}^{-1}), \end{displaymath} where $k$ is the dimension of the vector $\boldsymbol{e}$, $\boldsymbol{A}$ is a $k \times n$ matrix with rank $k$ and $k < n$, and $ \boldsymbol{Q_{\epsilon}} > 0$ is the precision matrix of $\boldsymbol{e}$. This is called ``soft constraint" by \citet{rue2005gaussian} and the log-density for the model is
\begin{equation}\label{eq: logdensity(x|e)}
\log{\pi(\boldsymbol{x}|\boldsymbol{e})} = -\frac{1}{2}(\boldsymbol{x} - \boldsymbol{\mu})^{\mbox{T}} \boldsymbol{Q}_1 (\boldsymbol{x} - \boldsymbol{\mu})
-\frac{1}{2}(\boldsymbol{e} - \boldsymbol{Ax})^{\mbox{T}} \boldsymbol{Q_{\epsilon}} (\boldsymbol{e}-\boldsymbol{Ax}) + \text{const}, \end{equation} where $\boldsymbol{\mu}$ and $\boldsymbol{Q}_1$ are the mean and the precision matrix of the GRMF, respectively, and ``const'' is constant. If $\boldsymbol{x}$ has mean $\boldsymbol{\mu} = \boldsymbol{0}$ then \begin{equation}
\boldsymbol{x}|\boldsymbol{e} \sim \mathcal{N}_c (\boldsymbol{A}^{\mbox{T}}\boldsymbol{Q}_{\boldsymbol{\epsilon}}\boldsymbol{e}, \boldsymbol{Q}_1+\boldsymbol{A}^{\mbox{T}}\boldsymbol{Q}_{\boldsymbol{\epsilon}}\boldsymbol{A}). \end{equation}
Here we use the canonical form $\mathcal{N}_c (\cdot, \cdot)$ for $\boldsymbol{x}|\boldsymbol{e}$. We refer to \citet[Chapter 2.3.2]{rue2005gaussian} for more information about the canonical form for GMRF. We can notice that for specifying the GMRFs with ``soft constraint", the Routine $(2)$ as shown in Figure \ref{fig: cholesky_diag} can be applied since $\boldsymbol{Q} = \boldsymbol{Q}_1 + \boldsymbol{A}^{\mbox{T}}\boldsymbol{Q}_{\boldsymbol{\epsilon}}\boldsymbol{A}$ with $\boldsymbol{Q_2} = \boldsymbol{A}^{\mbox{T}}\boldsymbol{Q}_{\boldsymbol{\epsilon}}\boldsymbol{A}$. \newline \newline \textbf{II. Models with auxiliary variables} \newline Auxiliary variables are crucial in some models to retrieve GMRF full conditionals. We look at binary regression models with auxiliary variables.
Assume that we have Bernoulli observational model for binary responses. The binary responses have latent parameters which is a GMRF $\boldsymbol{x}$, and the GMRF usually depends on some hyperparameters $\boldsymbol{\theta}$. We usually choose the logit or probit models in this case, where \begin{equation}
y_i \sim \mathcal{B} \left( \eta^{-1}(z_i^{\mbox{T}}\boldsymbol{x}) \right), \hspace{3mm} i = 1,2,\dots, m \end{equation}
where $\mathcal{B}(p)$ denotes a Bernoulli distribution with probability $p$ for $1$ and $1-p$ for $0$. $\boldsymbol{z}_i$ is a vector of covariates and we assume it is fixed. $\eta(\cdot)$ is a link function \begin{equation}
\eta(p) = \begin{cases}
\log\left(p/(1-p)\right) & \text{ for logit link} \\
\Phi(p) & \text{ for probit link}
\end{cases} \end{equation} where $\Phi(\cdot)$ denotes the cumulative distribution function (CDF) for standard Gaussian distribution. We can use models with auxiliary variables $\boldsymbol{\omega} = (\omega_1, \omega_2, \dots, \omega_m)$ to represent these models, \begin{displaymath}
\begin{split}
\epsilon_i & \overset{iid} \sim G(\epsilon_i), \\
\omega_i & = \boldsymbol{z}_i^{\mbox{T}}\boldsymbol{x} +\epsilon_i, \\
y_i & = \begin{cases}
1, & \text{ if } \omega_i > 0, \\
0, & \text{ otherwise},
\end{cases}
\end{split} \end{displaymath} where $G(\cdot)$ is the CDF of standard logistic distribution in the logit case and $G(\cdot) = \Phi(\cdot)$ in the probit case. We refer to \citet[Chapter $28$]{forbes2011statistical} for more information about the standard logistic distribution and its CDF.
Let $\boldsymbol{x}|\boldsymbol{\theta}$ be a GMRF of dimension $n$ with mean $\boldsymbol{\mu} = \boldsymbol{0}$, and assume that we have $\boldsymbol{z}_i^{\mbox{T}}\boldsymbol{x} = x_i$ and $m = n$. With the probit link the posterior distribution is \begin{equation}
\pi(\boldsymbol{x},\boldsymbol{\omega},\boldsymbol{\theta} |\boldsymbol{y}) \propto \pi(\boldsymbol{\theta})\pi(\boldsymbol{x}|\boldsymbol{\theta})\pi(\boldsymbol{\omega}|\boldsymbol{x})\pi(\boldsymbol{y}|\boldsymbol{\omega}). \end{equation} The conditional distribution of $\boldsymbol{x}$ given the auxiliary variables can then be obtained \begin{displaymath}
\pi(\boldsymbol{x}|\boldsymbol{\theta},\boldsymbol{\omega}) \propto \exp \left( -\frac{1}{2}\boldsymbol{x}^{\mbox{T}}\boldsymbol{Q}_1(\boldsymbol{\theta})\boldsymbol{x}-\frac{1}{2}\sum_i(x_i-\omega_i)^2 \right), \end{displaymath} and this can be written in the canonical form \begin{displaymath}
\boldsymbol{x}|\boldsymbol{\theta},\boldsymbol{\omega} \sim \mathcal{N}_c(\omega, \boldsymbol{Q}_1(\boldsymbol{\theta}) + \boldsymbol{I}). \end{displaymath} A general form for the conditional distribution of $\boldsymbol{x}$ given the auxiliary variables, for this binomial model with a probit link function, is given as \begin{displaymath}
\boldsymbol{x}|\boldsymbol{\theta},\boldsymbol{\omega} \sim \mathcal{N}_c(\boldsymbol{Z}^{\mbox{T}} \omega, \boldsymbol{Q}_1(\boldsymbol{\theta}) + \boldsymbol{Z}^{\mbox{T}}\boldsymbol{Z}), \end{displaymath} where $\boldsymbol{Z}$ is an $m \times n$ matrix. Similarly, the conditional distribution of $\boldsymbol{x}$ given the auxiliary variables for the logistic regression model can be written as \begin{displaymath}
\boldsymbol{x}|\boldsymbol{\theta},\boldsymbol{\omega} \sim \mathcal{N}_c(\boldsymbol{Z}^{\mbox{T}} \boldsymbol{\Lambda} \omega, \boldsymbol{Q}_1(\boldsymbol{\theta}) + \boldsymbol{Z}^{\mbox{T}} \boldsymbol{\Lambda} \boldsymbol{Z}), \end{displaymath} where $\boldsymbol{\Lambda} = \diag(\boldsymbol{\lambda})$, and $\lambda_i$ is from the model specification. See more discussions on these models in \citet[Chapter $4.3$]{rue2005gaussian}.
In all the examples in this section, the models are suitable for use with Routine (2) in Figure \ref{fig: cholesky_diag} to find the sparse Cholesky factors of the precision matrices of GMRFs.
\subsection{GMRFs conditioned on data} \label{sec: cholesky_GMRFs_conditioning_data} As mentioned in Section ~\ref{sec: cholesky_GMRFs}, if a vector $\boldsymbol{x}$ is a GMRF with precision matrix $\boldsymbol{Q}_1$ and mean vector $\boldsymbol{\mu}$, then the density of the vector is given by Equation \eqref{eq: cholesky_density_function}. In practical applications it is common to set $\boldsymbol{\mu} = \boldsymbol{0}$ \citep{rue2005gaussian,gneitingmatern}, which gives the probability density function \begin{equation} \label{eq: cholesky_GMRF_zeromean}
\pi(\boldsymbol{x}|\boldsymbol{\theta})
= \left(\frac{1}{2\pi} \right)^{n/2}|\boldsymbol{Q}_1(\boldsymbol{\theta})|^{1/2}\exp\left(-\frac{1}{2}\boldsymbol{x}^{\mbox{T}}\boldsymbol{Q}_1(\boldsymbol{\theta})\boldsymbol{x}\right). \end{equation} Assume that the data are of dimension $k$ and defined as a $k$-dimensional random vector
\begin{displaymath}
\boldsymbol{y}|\boldsymbol{x}, \boldsymbol{\theta} \sim \mathcal{N} \left( \boldsymbol{Ax}, \boldsymbol{Q}_{\boldsymbol{\epsilon}}^{-1} \right) \end{displaymath} and has the probability density function
\begin{equation} \label{eq: cholesky_density_data}
\pi(\boldsymbol{y}|\boldsymbol{x},\boldsymbol{\theta}) = \left(\frac{1}{2\pi} \right)^{k/2}|\boldsymbol{Q}_{\boldsymbol{\epsilon}}|^{1/2}
\exp \left(-\frac{1}{2} (\boldsymbol{y}-\boldsymbol{Ax})^{\mbox{T}} \boldsymbol{Q}_{\boldsymbol{\epsilon}} (\boldsymbol{y}-\boldsymbol{Ax}) \right), \end{equation} where $\boldsymbol{A}$ is a $k \times n$ matrix used to select the data location. The precision matrix $\boldsymbol{Q}_{\boldsymbol{\epsilon}}$ for the noise process is a positive definite matrix with dimension $k \times k$.
Notice that the density function $\pi(\boldsymbol{y}|\boldsymbol{x},\boldsymbol{\theta})$ is not dependent on $\boldsymbol{\theta}$, and hence the probability density function $\pi(\boldsymbol{y}|\boldsymbol{x},\boldsymbol{\theta})$
can be written as $\pi(\boldsymbol{y}|\boldsymbol{x})$. The probability density function of ${\boldsymbol{x}|\boldsymbol{y}, \boldsymbol{\theta}}$ can be found from Equations \eqref{eq: cholesky_GMRF_zeromean} and \eqref{eq: cholesky_density_data} through
\begin{equation} \label{eq: cholesky_x|y,theta} \begin{split}
\pi(\boldsymbol{x}|\boldsymbol{y}, \boldsymbol{\theta}) & \propto \pi({\boldsymbol{x}, \boldsymbol{y} | \boldsymbol{\theta}}) \\
& = \pi(\boldsymbol{x}|\boldsymbol{\theta}) \pi(\boldsymbol{y}|\boldsymbol{x}, \boldsymbol{\theta}) \\
& \propto \exp \left( -\frac{1}{2} \left[ \boldsymbol{x}^{\mbox{T}} (\boldsymbol{Q}_1(\boldsymbol{\theta})
+ \boldsymbol{A}^{\mbox{T}} \boldsymbol{Q}_{\boldsymbol{\epsilon}} \boldsymbol{A}) \boldsymbol{x}
- 2\boldsymbol{x}^{\mbox{T}} \boldsymbol{A}^{\mbox{T}}\boldsymbol{Q}_{\boldsymbol{\epsilon}} \boldsymbol{y} \right] \right).
\end{split} \end{equation}
Similarly, the density function \eqref{eq: cholesky_x|y,theta} can be written in the canonical form as
\begin{equation} \label{eq: cholesky_canonical_form}
{\boldsymbol{x}| \boldsymbol{y}, \boldsymbol{\theta}} \sim \mathcal{N} \left( \boldsymbol{\mu}_c (\boldsymbol{\theta}), \boldsymbol{Q}_c (\boldsymbol{\theta}) \right), \end{equation} where $\boldsymbol{\mu}_c (\boldsymbol{\theta}) = \boldsymbol{Q}_c(\boldsymbol{\theta})^{-1} \boldsymbol{A}^{\mbox{T}} \boldsymbol{Q}_{\boldsymbol{\epsilon}} \boldsymbol{y} $, and $\boldsymbol{Q}_c (\boldsymbol{\theta}) = \boldsymbol{Q}_1(\boldsymbol{\theta}) + \boldsymbol{A}^{\mbox{T}} \boldsymbol{Q}_{\boldsymbol{\epsilon}} \boldsymbol{A}$. Now we can notice that the precision matrix for the GMRF conditional on data has the form $\boldsymbol{Q} = \boldsymbol{Q}_1(\boldsymbol{\theta}) + \boldsymbol{Q}_2 $ with $\boldsymbol{Q}_2 = \boldsymbol{A}^{\mbox{T}} \boldsymbol{Q}_{\boldsymbol{\epsilon}} \boldsymbol{A}$, where $\boldsymbol{Q}_2 $ does not depend on $\boldsymbol{\theta}$. Since $\boldsymbol{Q}$ has the same form as given in Routine ($2$) in Figure \ref{fig: cholesky_diag}, it is possible to use the proposed routine to find the sparse Cholesky factor of the precision matrix of the GMRF conditioned on data.
Even though it is not the focus of this paper, it might be useful to point out that using Equations \eqref{eq: cholesky_GMRF_zeromean} - \eqref{eq: cholesky_x|y,theta}, we can find the analytical formula for the posterior density function of $(\boldsymbol{\theta} | \boldsymbol{y})$ through Bayes' formula. It is given by
\begin{equation} \label{eq: cholesky_log(theta|y)} \begin{split}
\log(\pi(\boldsymbol{\theta}|\boldsymbol{y})) = & \text{ const.} + \log(\pi(\boldsymbol{\theta})) + \frac{1}{2}\log(|\boldsymbol{Q}_1(\boldsymbol{\theta})|) \\
& - \frac{1}{2}\log(|\boldsymbol{Q}_c(\boldsymbol{\theta})|) + \frac{1}{2} \boldsymbol{\mu}_c(\boldsymbol{\theta})^{\mbox{T}} \boldsymbol{Q}_c(\boldsymbol{\theta}) \boldsymbol{\mu}_c(\boldsymbol{\theta}). \end{split} \end{equation}
We refer to \citet{hu2012multivariate} for detailed information about this log-posterior density function. The log-posterior density function $\log(\pi(\boldsymbol{\theta} | \boldsymbol{y}))$ is crucial when doing statistical inference in Bayesian statistics.
The sparse structure of $\boldsymbol{Q}_2$ depends both on the structures of $\boldsymbol{A}$ and of $\boldsymbol{Q}_{\boldsymbol{\epsilon}}$. In most cases, $\boldsymbol{Q}_{\boldsymbol{\epsilon}}$ is a diagonal matrix and the matrix $\boldsymbol{A}$ has sparse structure. Therefore $\boldsymbol{Q}_2$ should also have a sparse structure. When the observations are conditionally independent, but have a non-Gaussian distribution, then we can use a GMRF approximation to obtain a sparse structure of $\boldsymbol{Q}_2$ as presented in Section \ref{sec: cholesky_GMRF_approximation}.
\subsection{GMRF approximation} \label{sec: cholesky_GMRF_approximation}
Suppose there are $n$ conditionally independent observations $y_1,y_2, \ldots, y_n$ from a non-Gaussian distribution and that $y_i$ is an indirect observation of $x_i$. $\boldsymbol{x}$ is a GMRF with mean $\boldsymbol{\mu} =\boldsymbol{0}$ and precision matrix $\boldsymbol{Q}_1$. The full conditional $\pi(\boldsymbol{x}|\boldsymbol{y}, \boldsymbol{\theta})$ then has the form
\begin{equation}\label{eq: cholesky_nongaussian_data}
\pi \left(\boldsymbol{x}|\boldsymbol{y}, \boldsymbol{\theta} \right) \propto \exp\left(-\frac{1}{2}\boldsymbol{x}^{\mbox{T}} {\boldsymbol{Q}_1} \boldsymbol{x} + \sum_{i=1}^{n}\log{\pi(y_i|x_i)} \right). \end{equation}
Apply a second-order Taylor expansion of $\sum_{i=1}^{n} \log{\pi(y_i|x_i)}$ around $\boldsymbol{\mu}_0$. In other words, construct a suitable GMRF proposal density
$\widetilde{\pi}(\boldsymbol{x}|\boldsymbol{y}, \boldsymbol{\theta})$
\begin{equation} \label{eq: cholesky_GMRF_proposal_density}
\begin{split}
\widetilde{\pi}(\boldsymbol{x}|\boldsymbol{y}, \boldsymbol{\theta}) & \propto \exp\left( -\frac{1}{2}{\boldsymbol{x}}^{\mbox{T}} {\boldsymbol{Q}_1(\boldsymbol{\theta})}{\boldsymbol{x}} + \sum_{i=1}^{n}(a_i + b_i x_i -\frac{1}{2}c_ix_i^2) \right) \\
& \propto \exp\left( -\frac{1}{2}\boldsymbol{x}^{\mbox{T}} \left({\boldsymbol{Q}_1(\boldsymbol{\theta}) + \diag(\boldsymbol{c})}\right) \boldsymbol{x} + \boldsymbol{b}^{\mbox{T}} {\boldsymbol{x}} \right). \end{split} \end{equation}
\noindent $c_i$ should be set to zero when $c_i < 0$. $\boldsymbol{b}$ and $\boldsymbol{c}$ depend on $\boldsymbol{\mu}_0$.
The canonical parametrization of $\widetilde{\pi}(\boldsymbol{x}|\boldsymbol{y}, \boldsymbol{\theta})$ has the form
\begin{displaymath} \mathcal{N}_{c}\left(\boldsymbol{b}, \boldsymbol{Q}_1(\boldsymbol{\theta}) + \diag(\boldsymbol{c})\right). \end{displaymath} In this case $\boldsymbol{Q}_2$ has a diagonal structure. An important feature of \eqref{eq: cholesky_GMRF_proposal_density} is that it inherits the \emph{Markov} property of the prior on $\boldsymbol{x}$, which is useful for sampling GMRF. When $\boldsymbol{\mu} \neq \boldsymbol{0}$, the canonical parametrization of the
$(\boldsymbol{x}|\boldsymbol{y},\boldsymbol{\theta})$ is changed to \begin{displaymath} \mathcal{N}_{c}\left(\boldsymbol{Q} \boldsymbol{\mu} + \boldsymbol{b}, \boldsymbol{Q}_1(\boldsymbol{\theta}) + \diag(\boldsymbol{c})\right), \end{displaymath} and does not change the matrix $\boldsymbol{Q} = \boldsymbol{Q}_1 +\boldsymbol{Q}_2$.
As was pointed out in Section \ref{sec: cholesky_GMRFs}, the Cholesky factor $\boldsymbol{L}$ is one of the most important quantities for sampling from GMRFs. In order to save computational resources, a sparse Cholesky factor is preferable if the approximated precision matrix is ``close'' to the original precision matrix, where ``close'' means both in structure and in the elements.
\subsection{Theoretical background} \label{sec: cholesky_algorithm_orthogonal} It has been mentioned in Section \ref{sec: cholesky_GMRFs} that the sparsity pattern of the Cholesky factor is determined by the graph $\mathcal{G}$, and it is unnecessary to calculate the zero elements in the Cholesky factor. In this section, we are going to introduce the theoretical background for finding the Cholesky factor from the orthogonal factorization when the GMRF is conditioned on observed data or a subset of the variables.
Let $\boldsymbol{y}$ be the observed data and assume $ \boldsymbol{y} = (y_1, y_2, \ldots, y_n)$ has the Gaussian distribution, then the density of $\boldsymbol{x}$ conditioned on $\boldsymbol{y}$ has the form in
\eqref{eq: cholesky_x|y,theta}. In the discussed situations in Section \ref{sec: cholesky_conditioningsubsets} - Section \ref{sec: cholesky_GMRF_approximation}, the precision matrix $\boldsymbol{Q}$ can be split into two parts, the precision matrix $\boldsymbol{Q}_1$ of the GMRF $(\boldsymbol{x}|\boldsymbol{\theta})$ and the matrix $\boldsymbol{Q}_2$ which is the additional effect. The matrix $\boldsymbol{Q}_2$ is usually a diagonal matrix or another type of sparse matrix. If the data is not Gaussian distributed, then we can apply the GMRFs approximation given in \eqref{eq: cholesky_GMRF_proposal_density} and it returns the precision matrix $\boldsymbol{Q}_1$ with a diagonal matrix $\boldsymbol{Q}_2$ added. This structure satisfies the Routine (2) in Figure \ref{fig: cholesky_diag}.
Let $\boldsymbol{Q} = \boldsymbol{Q}_1 + \boldsymbol{Q}_2$ and assume that the Cholesky factors for $\boldsymbol{Q}_1$, $\boldsymbol{Q}_2$ and $\boldsymbol{Q}$ are $\boldsymbol{L}_1$, $\boldsymbol{L}_2$ and $\boldsymbol{L}$, respectively. The Cholesky factors $\boldsymbol{L}_1$ and $\boldsymbol{L}_2$ are assumed to be known. We have the following results.
\begin{obs} \label{thm_Cholesky}
Let $\boldsymbol{x} \in \mathbb{R}^n$ be a zero mean GMRF with precision matrix $\boldsymbol{Q}_1$. Assume that
the precision matrix has the form $\boldsymbol{Q} = \boldsymbol{Q}_1 + \boldsymbol{Q}_2 \in \mathbb{R}^{n\times n}$
when conditioned on observed data or a subset of the variables.
Let the Cholesky factors for $\boldsymbol{Q}_1$ and $\boldsymbol{Q}_2$ be $\boldsymbol{L}_1$ and $\boldsymbol{L}_2$, respectively. Form
$$\boldsymbol{A} = \begin{pmatrix} \boldsymbol{L}_1^{\mbox{T}}\\\boldsymbol{L}_2^{\mbox{T}}\end{pmatrix}.$$
Then ${\boldsymbol{A}^{\mbox{T}}} {\boldsymbol{A}}$ is the precision matrix $\boldsymbol{Q}$.
\end{obs}
\begin{proof}
${\boldsymbol{A}^{\mbox{T}}} {\boldsymbol{A}} =
{\begin{pmatrix} \boldsymbol{L}_1 \ \boldsymbol{L}_2 \end{pmatrix}} {\begin{pmatrix}
\boldsymbol{L}_1^{\mbox{T}} \\ \boldsymbol{L}_2^{\mbox{T}} \end{pmatrix}} =\boldsymbol{L}_1 \boldsymbol{L}_1^{\mbox{T}} + \boldsymbol{L}_2 \boldsymbol{L}_2^{\mbox{T}} = \boldsymbol{Q}_1 +
\boldsymbol{Q}_2 = \boldsymbol{Q}$.
\end{proof}
From Observation \ref{thm_Cholesky} the following corollaries are established. Sketched proofs for these corollaries are given. We refer to \citet{simpson2008krylov} for numerical examples with Corollary \ref{cor_sample}.
\begin{cor} \label{cor_sample}
Let $\boldsymbol{X}$ be a zero mean GMRF with precision matrix $\boldsymbol{Q} = \boldsymbol{Q}_1 + \boldsymbol{Q}_2 \in \mathbb{R}^{n\times n}$, and
let $\boldsymbol{A}$ have the form given in Observation \ref{thm_Cholesky}.
Let $\boldsymbol{z} \in \mathbb{R}^{2n}$ be a vector of independent and identically distributed (i.i.d.) standard Gaussian random variables.
Then the solution of the least squares problem
\begin{equation} \label{eq10}
\boldsymbol{x} = \mathop{\arg \min}_{\boldsymbol{y}} \norm{ \boldsymbol{A} \boldsymbol{y} - \boldsymbol{z}}_2
\end{equation}
\noindent is a sample from the GMRF $\boldsymbol{X}$.
\end{cor}
\begin{proof}
$\boldsymbol{Q} = \boldsymbol{A}^{\mbox{T}}\boldsymbol{A}$ from Observation \ref{thm_Cholesky} is the starting
point to prove this Corollary. Denote $\boldsymbol{A}^\dag$ the Moore-Penrose pseudo-inverse of $\boldsymbol{A}$, and then the solution to the
least squares problem is $\boldsymbol{x} = \boldsymbol{A}^\dag \boldsymbol{z}$ \citep{bjorck1996numerical}.
From the definition of the pseudo-inverse, $\boldsymbol{x} = \boldsymbol{W}\boldsymbol{S}^\dag \boldsymbol{U}^{\mbox{T}}\boldsymbol{z}$,
where $\boldsymbol{A} = \boldsymbol{U}\boldsymbol{S}\boldsymbol{W}^{\mbox{T}}$ is a singular value decomposition of $\boldsymbol{A}$ and $\boldsymbol{S}^\dag\in \mathbb{R}^{n\times 2n}$ is the matrix with the
reciprocals of the non-zero singular values on the diagonal. We can verify that $\boldsymbol{x}$ has the required distribution, and it is
sufficient to check the first two moments since $\boldsymbol{x}$ has a Gaussian distribution, being linear in $\boldsymbol{z}$. It is clear that $\mathbb{E}(\boldsymbol{x}) = 0$. Furthermore,
\begin{align*}
\mathbb{E}(\boldsymbol{x}\boldsymbol{x}^{\mbox{T}}) &= \boldsymbol{A}^\dag \mathbb{E}(\boldsymbol{z}\boldsymbol{z}^{\mbox{T}}) (\boldsymbol{A}^\dag)^{\mbox{T}}\\
&=\boldsymbol{W}\boldsymbol{S}^\dag \boldsymbol{U}^{\mbox{T}}\boldsymbol{U}(\boldsymbol{S}^\dag)^{\mbox{T}} \boldsymbol{W}^{\mbox{T}} \\
&=\boldsymbol{W}\boldsymbol{S}^\dag(\boldsymbol{S}^\dag)^{\mbox{T}} \boldsymbol{W}^{\mbox{T}} \\
&= \boldsymbol{W} (\boldsymbol{S}^{\mbox{T}}\boldsymbol{S})^\dag \boldsymbol{W}^{\mbox{T}}.
\end{align*} Calculations yield $\boldsymbol{Q} = \boldsymbol{W}\boldsymbol{S}^{\mbox{T}}\boldsymbol{S}\boldsymbol{W}^{\mbox{T}}$ and, hence,
$\boldsymbol{x}\sim MVN (0,\boldsymbol{Q}^\dag)$.
\end{proof}
Therefore, it is possible to sample from a GMRF by solving the sparse least squares problem given in \eqref{eq10} with some conditions on GMRFs.
\begin{cor} \label{cholesky_rectangle}
The upper triangular matrix $\boldsymbol{R}$ from the orthogonal factorization of the rectangular matrix
\begin{equation} \label{cholesky_A_matrix}
\boldsymbol{A} = \begin{pmatrix} \boldsymbol{L}_1^{\mbox{T}}\\\boldsymbol{L}_2^{\mbox{T}}\end{pmatrix}
\end{equation}
is the Cholesky factor of the precision matrix $\boldsymbol{Q} = \boldsymbol{Q}_1 + \boldsymbol{Q}_2$.
\end{cor}
\begin{proof}
Since the upper triangular matrix from the orthogonal factorization is the Cholesky factor for the normal equations matrix,
this is obvious from Observation \ref{thm_Cholesky}.
\end{proof}
By using the orthogonal factorization of the rectangular matrix $\boldsymbol{A}$, it is possible to get samples from the GMRFs when they are conditioned on data or a subset of the variables by using
Corollary \ref{cor_sample} or the Cholesky factor from Observation \ref{thm_Cholesky} together with the sampling algorithms discussed in \citet[Chapter 2]{rue2005gaussian}.
\subsection{Dropping strategies} \label{sec: cholesky_sparse_factor} In this section the dropping strategy for the incomplete orthogonal factorization is introduced in order to find the incomplete Cholesky factor for matrix $\boldsymbol{A}^{\mbox{T}}\boldsymbol{A}$. Together with some dropping strategy for the incomplete orthogonal factorization of the rectangular matrix $\boldsymbol{A}$, a sparse upper triangular matrix $\boldsymbol{R}$ can be obtained. From Corollary \ref{cholesky_rectangle} and the discussion in Section \ref{sec: cholesky_IGO}, we know that $\boldsymbol{R}$ is an incomplete Cholesky factor or sparse Cholesky factor for the precision matrix $\boldsymbol{Q}$. This sparse Cholesky factor can then be used to specify the GMRF. The dropping strategies are important when doing the incomplete orthogonal factorization. Generally speaking, there are two kinds of dropping strategies. \begin{enumerate}
\item Drop fill-ins based on sparsity patterns. Before doing the incomplete orthogonal factorization, the sparsity pattern of the
upper triangular matrix is predefined and fixed. If the factorization is based only on the sparsity pattern of the original matrix, we
drop all the elements which are pre-defined to be zeros. The algorithm does not consider the actual numerical values of the elements during the factorizations.
\item Drop fill-ins by using a numerical threshold. This strategy only includes the elements in $\boldsymbol{R}$ if they are bigger than a predefined
threshold value. \citet{munksgaard1980solving} presented one way to select the value of the threshold parameter. His strategy drops
the elements which are smaller than the diagonal elements of their rows and columns, multiplied by some predefined small value (called dropping tolerance). In this report,
a slightly different dropping strategy is chosen. During the incomplete orthogonal factorization using Givens rotations,
or the column-wise threshold incomplete Givens orthogonal (cTIGO) factorization \citep{papadopoulos2005class},
we drop the elements according to their magnitudes with some predefined dropping tolerance.
The nonzero pattern of $\boldsymbol{R}$ is determined dynamically.
\end{enumerate}
Both the fixed sparsity pattern strategy and the dynamic strategy are useful in applications. The fixed sparsity pattern strategy is the candidate when the computation resources are low.
It is usually faster but sometimes returns unsatisfactory results. The dynamic strategy will in most cases return satisfactory results by choosing proper dropping tolerances
but it is usually more expensive both in time and computations.
There are different versions of orthogonal factorizations. We refer to \citet{saad2003iterative}, \citet{golub1996matrix} and \citet{trefethen1997numerical} for more information.
Based on the research of \citet{bai2001class}, \citet{papadopoulos2005class} and \citet{bai2009numerical}, we choose the incomplete orthogonal factorization using Givens rotations
to find the sparse Cholesky factor. This algorithm is stable and robust and always returns a sparse matrix. This algorithm inherits the advantages of orthogonal
factorization. \citet{bai2001class} commented that there is little attention given to incomplete orthogonal factorization with Givens rotations, which is actually useful in many numerical problems.
In order to use Givens rotations for incomplete orthogonal factorization, the following nonzero patterns need to be defined,
\begin{equation*}
\begin{aligned}
N_{\boldsymbol{Q}} &= \{(i,j)~| ~Q_{ij} \neq 0, 1 \leq {i,j} \leq n\}, \\
N_{\boldsymbol{Q},l} &= \{(i,j)~| ~Q_{ij} \neq 0, i \geq j , 1 \leq {i,j} \leq n\}, \\
N_{\boldsymbol{Q},u} &= \{(i,j)~| ~Q_{ij} \neq 0, i \leq j , 1 \leq {i,j} \leq n\}, \\
N_{\boldsymbol{L}_1} &= \{(i,j)~| ~L_{1_{ij}} \neq 0, 1 \leq {i,j} \leq n\}, \\
N_{\boldsymbol{L}_2} &= \{(i,j)~| ~L_{2_{ij}} \neq 0, 1 \leq {i,j} \leq n\}, \\
N_{\boldsymbol{L}} &= \{(i,j)~| ~L_{ij} \neq 0, 1 \leq {i,j} \leq n\}, \\
N_{\boldsymbol{A}} &= \{(i,j)~| ~A_{ij} \neq 0, 1 \leq {i,j} \leq n\}, \\
N_{\boldsymbol{R}} &= \{(i,j)~| ~R_{ij} \neq 0, 1 \leq {i,j} \leq n\}, \\
\end{aligned}
\end{equation*}
\noindent where $N_{\boldsymbol{Q}}$ is the nonzero pattern of the matrix $\boldsymbol{Q}$, and $N_{\boldsymbol{Q},u}$, $N_{\boldsymbol{Q},l}$ are the nonzero patterns of the
upper and lower triangular parts of the matrix $\boldsymbol{Q}$, respectively. $N_{\boldsymbol{L}_1}$, $N_{\boldsymbol{L}_2}$, $N_{\boldsymbol{L}}$, $N_{\boldsymbol{A}}$ and $N_{\boldsymbol{R}}$
are the nonzero patterns of the lower triangular matrix $\boldsymbol{L}_1$, the lower triangular matrix $\boldsymbol{L}_2$, the lower triangular matrix $\boldsymbol{L}$, the matrix $\boldsymbol{A}$ and the matrix $\boldsymbol{R}$, respectively.
These matrices are already formulated in previous sections.
In order to use the cTIGO algorithm, the rectangular matrix $\boldsymbol{A}$ in \eqref{cholesky_A_matrix} is formed. The sparsity pattern of matrix $\boldsymbol{A}$
is already known beforehand. However, since the dynamic strategy is chosen, there will be some fill-in during Givens rotations process, and the sparsity pattern of the
sparse Cholesky factor $\boldsymbol{R}$ will depend on the dropping tolerance and usually $N_{\boldsymbol{R}} < N_{\boldsymbol{A}}$.
For more information about cTIGO algorithm, see \citet{bai2001class} for theoretical issues and \citet{papadopoulos2005class} for implementations.
\subsection{A small example} \label{sec: cholesky_example} A small example is explored in this section to illustrate how to use the cTIGO algorithm to find the sparse Cholesky factor $\boldsymbol{R}$. For simplicity and without loss of generality, we assume that $\boldsymbol{Q}_1$ is the precision matrix for a zero mean GMRF $\boldsymbol{x} \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{Q}_1)$, and that the data are normally distributed, i.e., $\boldsymbol{y} \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I})$ and hence $\boldsymbol{Q}_2 = \boldsymbol{I}$. Assume that these matrices are given as follows
\begin{displaymath}
\boldsymbol{Q_1} =
\left( \begin{array}{cccccc}
5 & -1 & 0 & \ldots & 0 &-1 \\
-1 & 5 & -1 & 0 & \ldots & 0 \\
0 & -1 & 5 & -1 & \ldots & 0 \\
\vdots & & \ddots & \ddots & & \vdots \\
-1 & 0 & \ldots & & -1 & 5
\end{array} \right) _{9 \times 9}
\end{displaymath}
\noindent and
\begin{displaymath}
\boldsymbol{Q}_2 = \boldsymbol{I}_{9 \times 9} =
\left( \begin{array}{ccccc}
1 & 0 & 0 & \ldots & 0 \\
0 & 1 & 0 & \ldots & 0 \\
\vdots & \ddots & \ddots & \ddots\\
0 & \ldots\ & & & 1
\end{array} \right) _{9 \times 9}.
\end{displaymath}
\noindent Let $\boldsymbol{L}_1$ and $\boldsymbol{L}_2$ denote the Cholesky factor of the two matrices $\boldsymbol{Q}_1$ and $\boldsymbol{Q}_2$, respectively,
with the sparsity patterns given in Figure \ref{fig: cholesky_small_L1} and Figure \ref{fig: cholesky_small_L2}.
The rectangular matrix $\boldsymbol{A}$ can then be formed as given in \eqref{cholesky_A_matrix} with the sparsity pattern given in Figure \ref{fig: cholesky_small_A}.
Apply the cTIGO algorithm to the rectangular matrix $\boldsymbol{A}$ with a dropping tolerance of $0.0001$ to find the sparse incomplete Cholesky factor $\boldsymbol{R}$.
The sparsity pattern of $\boldsymbol{R}$ is given in Figure \ref{fig: cholesky_small_R}. The sparsity pattern of the Cholesky factor $\boldsymbol{L}$ from the standard Cholesky factorization
of the precision matrix $\boldsymbol{Q}$ is given in Figure \ref{fig: cholesky_small_L}.
\begin{figure}
\caption{The sparsity patterns of $\boldsymbol{L}_1$ (a), $\boldsymbol{L}_2$ (b), $\boldsymbol{L}$ (c), $\boldsymbol{A}$ (d) and $\boldsymbol{R}$ (e)}
\label{fig: cholesky_small_L1}
\label{fig: cholesky_small_L2}
\label{fig: cholesky_small_L}
\label{fig: cholesky_small_A}
\label{fig: cholesky_small_R}
\label{fig: SmallExample}
\end{figure}
We notice that the precision matrix $\boldsymbol{Q}_1$ is quite similar to the tridiagonal matrix except the values at
two of the corners. However, there is a lot of fill-in in the Cholesky factor $\boldsymbol{L}_1$. This is a common structure for the precision matrix of a GMRF,
for instance, a GMRF on a torus. The same comments can be given for $\boldsymbol{Q}$ and $\boldsymbol{L}$. Note that the upper triangular matrix $\boldsymbol{R}$ has fewer nonzero elements than
$\boldsymbol{L}_1$, $\boldsymbol{L}$ and $\boldsymbol{A}$, $N_{\boldsymbol{R}} < N_{\boldsymbol{L}_1}, N_{\boldsymbol{R}} < N_{\boldsymbol{L}} \text{ and } N_{\boldsymbol{R}} < N_{\boldsymbol{A}}$.
The sparsity pattern of $\boldsymbol{R}$ depends on the dropping tolerance and also the elements of the matrices $\boldsymbol{Q}_1$ and $\boldsymbol{Q}_2$, but we are not going deeper here.
As discussed in Section \ref{sec: cholesky_IGO} and Section \ref{sec: cholesky_sparse_factor}, the sparse upper triangular matrix $\boldsymbol{R}$ is an
incomplete Cholesky factor for the precision matrix $\boldsymbol{Q}$ of the GMRF when it is conditioned on data. The error matrix $\boldsymbol{E}$
between the true precision matrix $\boldsymbol{Q} = \boldsymbol{Q}_1 + \boldsymbol{Q}_2$ and the approximated precision matrix $\boldsymbol{\widetilde{Q}} = \boldsymbol{R}^{\mbox{T}} \boldsymbol{R}$ is given by
\begin{equation}
\boldsymbol{E} = \left(\boldsymbol{Q}_1 + \boldsymbol{Q}_2\right) - \boldsymbol{\widetilde{Q}}.
\end{equation}
\noindent The sparsity patterns of the precision matrix $\boldsymbol{Q}$ and its approximation $\boldsymbol{\widetilde{Q}}$
are shown in Figure \ref{fig: cholesky_small_Q} and Figure \ref{fig: cholesky_small_Qq}, respectively. In order to compare the difference between the approximated covariance matrices (inverse
of the approximated precision matrix) $\widetilde{\boldsymbol{\Sigma}} = \widetilde{\boldsymbol{Q}}^{-1}$
and the true covariance matrix (inverse of the true precision matrix) $\boldsymbol{\Sigma} = \boldsymbol{Q}^{-1}$, we calculate the error matrix $\widetilde{\boldsymbol{E}}$,
\begin{equation}
\widetilde{\boldsymbol{E}} = \boldsymbol{\Sigma} - \widetilde{\boldsymbol{\Sigma}}.
\end{equation}
The images of $\boldsymbol{\Sigma}$, $\boldsymbol{\widetilde{\Sigma}}$ and $\widetilde{\boldsymbol{E}}$ are given in Figure \ref{fig: ImageSQ_Qq_Err}, and they
show that the difference between $\boldsymbol{\Sigma}$ and $\widetilde{\boldsymbol{\Sigma}}$ is quite small.
By choosing a different dropping tolerance, the error can be made smaller and become negligible.
\begin{figure}\label{fig: cholesky_small_Q}
\label{fig: cholesky_small_Qq}
\label{fig: cholesky_SparseQ_Qq}
\end{figure}
\begin{figure}\label{fig: ImageSQ_Qq_Err}
\end{figure}
\section{Simulation Results with cTIGO algorithm} \label{sec: cholesky_results}
Using the incomplete orthogonal factorization with Givens rotations, it leads to a sparse upper triangular matrix $\boldsymbol{R}$, which is a sparse incomplete Cholesky factor for the precision matrix $\boldsymbol{Q}$ and
can be used to specify the GMRF. Hence it has the potential to reduce the computational cost.
We first apply the cTIGO algorithm to some commonly used structures of the precision matrices in Section \ref{sec: cholesky_result_band_matrices}.
In Section \ref{sec: cholesky_result_spde}, we apply the cTIGO algorithm to
precision matrices which are generated from the stochastic partial differential equations (SPDEs) discussed in \citet{lindgren2011explicit} and \citet{fuglstad2011spatial}.
\subsection{Simulation results for precision matrices with commonly used structures} \label{sec: cholesky_result_band_matrices}
It is known that if the precision matrix $\boldsymbol{Q} > 0$ is a band matrix with bandwidth $p$, then its Cholesky factor $\boldsymbol{L}$ (lower triangular matrix)
has the same bandwidth $p$. See \citet{golub1996matrix} (Theorem 4.3.1) for a direct proof and \citet[Chapter 2.4.1]{rue2005gaussian} for more information on
how to find the Cholesky factor efficiently in this case with Algorithm $2.9$. \citet{wist2006specifying} pointed out that if the original precision
matrix $\boldsymbol{Q}$ is a band matrix, then the incomplete Cholesky factor $\boldsymbol{\widetilde{L}}$ from the incomplete Cholesky factorization will also be a band
matrix with the same bandwidth $p$.
In this section we consider some commonly used structures for the precision matrices. The first two examples are band matrices with different bandwidths.
Let $x$ be Gaussian auto-regressive processes of order $1$ or $2$,
and then the precision matrix for the process will be a band matrix with bandwidth $p = 2$ or $p = 3$, respectively. The precision matrices for the first-order Random Walk (RW$1$) and the second-order Random
Walk (RW$2$) models have bandwidths $p = 2$ and $p = 3$. Since these models are intrinsic GMRFs, the precision matrices are not of full rank.
We fix this by slightly modifying the elements in the precision matrices for the RW$1$ and RW$2$ models, but we still call them the precision matrices for
the RW$1$ and the RW$2$ models. For more information about intrinsic GMRFs and
the RW$1$ and RW$2$ models, see, for example, \citet[Chapter 3]{rue2005gaussian}.
Assume that the data are Gaussian distributed. Then from Section \ref{sec: cholesky_GMRFs_conditioning_data} the matrix $\boldsymbol{Q}_2$ is a diagonal matrix when $\boldsymbol{A} = \boldsymbol{I}$. For simplicity and without loss of generality,
assume the data $\boldsymbol{y}$ $\sim$ $\mathcal{N}(\boldsymbol{0},\boldsymbol{I})$, then the matrix $\boldsymbol{Q}_2$ and its Cholesky factor $\boldsymbol{L}_2$ are identity matrices.
Since we know exactly what the sparsity patterns of the precision matrices $\boldsymbol{Q}_1$ and $\boldsymbol{Q}_2$ and the Cholesky factors $\boldsymbol{L}_1$ and $\boldsymbol{L}_2$ are,
the sparsity pattern of $\boldsymbol{A}$ is known beforehand and can be taken advantage of in the implementation.
By applying the cTIGO algorithm to the matrix $\boldsymbol{A}$ with dropping tolerance $\tau = 0.0001$, the sparse upper triangular matrix $\boldsymbol{R}$ can be obtained.
The sparsity patterns of the matrices $\boldsymbol{L}_1$, $\boldsymbol{L}_2$, $\boldsymbol{L}$, $\boldsymbol{A}$ and $\boldsymbol{R}$ are given in Figure \ref{fig: cholesky_RW1}.
The sparsity patterns of the true precision matrix $\boldsymbol{Q}$ and the approximated precision matrix $\boldsymbol{\widetilde{Q}}$ are given in Figure \ref{fig:cholesky_RW1_QandQq}.
The image of the true covariance matrices $\boldsymbol{\Sigma}$,
the approximated covariance matrix $\boldsymbol{\widetilde{\Sigma}}$ and the error matrix $\widetilde{\boldsymbol{E}}$
for the RW$1$ model are shown in Figure \ref{fig: cholesky_RW1ImQ12QqR}. Note that the order of the numerical values in the error matrix $\widetilde{\boldsymbol{E}}$
is $10^{-8}$, which is essentially zero in practical applications.
Similarly for the RW$2$ model we apply the cTIGO algorithm to the matrix $\boldsymbol{A}$ with the dropping tolerance $\tau = 0.0001$.
The results in this case are quite similar to the results for the RW$1$ model. We only show the
images of the true covariance matrix $\boldsymbol{\Sigma}$, the approximated covariance matrix $\boldsymbol{\widetilde{\Sigma}}$, and the error matrix $\boldsymbol{\widetilde{E}}$.
The results are given in Figure \ref{fig: cholesky_RW2_ImQ12QqR}. Note that the order of the numerical values in the error matrix $\widetilde{\boldsymbol{E}}$ is $10^{-8}$ as for the RW$1$ model.
See Section \ref{sec: cholesky_result_comparsion} for more simulation results for the RW$1$ and RW$2$ models and discussions. We can notice that the sparseness of $\boldsymbol{R}$ is the same as $\boldsymbol{L}$. Hence in
these two cases, we do not save computational resources. However, this approach is still have the potential to be used in applications since it is robust.
\begin{figure}
\caption{Sparsity patterns of $\boldsymbol{L}_1$ (a), $\boldsymbol{L}_2$ (b), $\boldsymbol{L}$ (c), $\boldsymbol{A}$ (d) and $\boldsymbol{R}$ (e) for the RW$1$ model}
\label{fig: cholesky_RW1_L1}
\label{fig: cholesky_RW1_L2}
\label{fig: cholesky_RW1_L}
\label{fig: cholesky_RW1_A}
\label{fig: cholesky_RW1_R}
\label{fig: cholesky_RW1}
\end{figure}
\begin{figure}\label{fig: cholesky_RW1_Q}
\label{fig: cholesky_RW1_Qq}
\label{fig:cholesky_RW1_QandQq}
\end{figure}
\begin{figure}\label{fig: cholesky_RW1_InverseQ}
\label{fig: cholesky_RW1_InverseQq}
\label{fig: cholesky_RW1_Err}
\label{fig: cholesky_RW1ImQ12QqR}
\end{figure}
\begin{figure}\label{fig: cholesky_RW2_InverseQ}
\label{fig: cholesky_RW2_InverseQq}
\label{fig: cholesky_RW2_Err}
\label{fig: cholesky_RW2_ImQ12QqR}
\end{figure}
The next example we have chosen is a block tridiagonal matrix of order $n^2$ resulting from
discretizing Poisson's equation with the $5$-point operator on an $n$-by-$n$ mesh. Thus it is called Poisson matrix in this paper. The sparsity pattern of this matrix
is given in Figure \ref{fig: cholesky_Possion_Q}. With the Poisson matrix and $\boldsymbol{Q}_2$ as before, we find the Cholesky factors $\boldsymbol{L}_1$ and $\boldsymbol{L}_2$ and form the rectangular matrix $\boldsymbol{A}$.
We apply the cTIGO algorithm to the matrix $\boldsymbol{A}$ with dropping tolerance $\tau = 0.0001$ to find the sparse upper triangular matrix $\boldsymbol{R}$.
The sparsity patterns of the matrices $\boldsymbol{L}_1$, $\boldsymbol{L}_2$, $\boldsymbol{L}$, $\boldsymbol{A}$ and $\boldsymbol{R}$ are given in Figure \ref{fig: cholesky_Possion_L1} - Figure \ref{fig: cholesky_Possion_R}, respectively.
The sparsity patterns of the true precision matrix $\boldsymbol{Q}$ and the approximated precision matrix $\boldsymbol{\widetilde{Q}}$ are given in Figure \ref{fig: Cholsky_Possion_QandQq}.
We can notice that the upper triangular matrix $\boldsymbol{R}$ is sparser than the Cholesky factor $\boldsymbol{L}$ from the original precision matrix $\boldsymbol{Q}$.
It can be shown that the sparseness depends on the dropping tolerance $\tau$.
The images of the true covariance matrix $\boldsymbol{\Sigma}$, the approximated covariance matrix $\boldsymbol{\widetilde{\Sigma}}$, and the error
matrix $\boldsymbol{\widetilde{E}}$ in this case are shown in Figure \ref{fig: cholesky_Possion_ImQ12QqR}. Note that
the order of the numerical values in the error matrix $\boldsymbol{\widetilde{E}}$ is $10^{-5}$. This is small for practical use. More results for this band matrix are given in Section \ref{sec: cholesky_result_comparsion}.
\begin{figure}
\caption{ Sparsity patterns of $\boldsymbol{L}_1$ (a), $\boldsymbol{L}_2$ (b), $\boldsymbol{L}$ (c), $\boldsymbol{A}$ (d) and $\boldsymbol{R}$ (e) with Poisson matrix }
\label{fig: cholesky_Possion_L1}
\label{fig: cholesky_Possion_L2}
\label{fig: cholesky_Possion_L}
\label{fig: cholesky_Possion_A}
\label{fig: cholesky_Possion_R}
\label{fig: cholesky_Possion}
\end{figure}
\begin{figure}\label{fig: cholesky_Possion_Q}
\label{fig: cholesky_Possion_Qq}
\label{fig: Cholsky_Possion_QandQq}
\end{figure}
\begin{figure}\label{fig: cholesky_Possion_InverseQ}
\label{fig: cholesky_Possion_InverseQq}
\label{fig: cholesky_Possion_Err}
\label{fig: cholesky_Possion_ImQ12QqR}
\end{figure}
The next example is a precision matrix which is nearly a band matrix.
Assume that $\boldsymbol{Q}_1$ is a banded matrix except for the additional nonzero values $\boldsymbol{Q}_1(1,n) = 1$ and $\boldsymbol{Q}_1(n,1) = 1$. We call this matrix the Toeplitz matrix in this paper.
The sparsity pattern of this matrix is given in Figure \ref{fig: cholesky_Toeplitz_Q}.
With the dropping tolerance $\tau = 0.0001$, we apply the cTIGO algorithm to the rectangular matrix $\boldsymbol{A}$.
The sparsity patterns of $\boldsymbol{L}_1$, $\boldsymbol{L}_2$, $\boldsymbol{L}$, $\boldsymbol{A}$ and $\boldsymbol{R}$ are given in Figure \ref{fig: cholesky_Toeplitz_L1} - Figure \ref{fig: cholesky_Toeplitz_R}, respectively.
We notice that the upper triangular matrix $\boldsymbol{R}$ is sparser than the matrix $\boldsymbol{L}$.
We can also notice that the sparsity of $\boldsymbol{R}$ depends on the tolerance $\tau$. The sparsity pattern of the approximated precision matrix $\boldsymbol{\widetilde{Q}}$ is given in Figure \ref{fig: cholesky_Toeplitz_Qq}.
The images of the true covariance matrix $\boldsymbol{\Sigma}$, the approximated covariance matrix $\boldsymbol{\widetilde{\Sigma}}$, and the error
matrix $\boldsymbol{\widetilde{E}}$ are shown in Figure \ref{fig: cholesky_Toeplitz_ImQ12QqR}. Note that the order of the numerical values in the error matrix is $10^{-5}$ with
the given tolerance. More simulation results for this matrix are found in Section \ref{sec: cholesky_result_comparsion}.
\begin{figure}
\caption{ Sparsity patterns of $\boldsymbol{L}_1$ (a), $\boldsymbol{L}_2$ (b), $\boldsymbol{L}$ (c), $\boldsymbol{A}$ (d) and $\boldsymbol{R}$ (e) for the Toeplitz matrix.}
\label{fig: cholesky_Toeplitz_L1}
\label{fig: cholesky_Toeplitz_L2}
\label{fig: cholesky_Toeplitz_L}
\label{fig: cholesky_Toeplitz_A}
\label{fig: cholesky_Toeplitz_R}
\label{fig: cholesky_Toeplitz}
\end{figure}
\begin{figure}
\caption{Sparsity patterns of $\boldsymbol{Q}$ (a) and $\boldsymbol{\widetilde{Q}}$ (b) for the Toeplitz matrix.}
\label{fig: cholesky_Toeplitz_Q}
\label{fig: cholesky_Toeplitz_Qq}
\label{fig: cholesky_Toeplitz_QandQq}
\end{figure}
\begin{figure}\label{fig: cholesky_Toeplitz_InverseQ}
\label{fig: cholesky_Toeplitz_InverseQq}
\label{fig: cholesky_Toeplitz_Err}
\label{fig: cholesky_Toeplitz_ImQ12QqR}
\end{figure}
\subsection{Simulation results for particular precision matrices} \label{sec: cholesky_result_spde}
In this section we focus on some particular precision matrices, namely the precision matrices from the stochastic partial differential equations (SPDEs) approach discussed by
\citet{lindgren2011explicit} and \citet{fuglstad2011spatial}. As pointed out by \citet{lindgren2011explicit} there is an explicit link between GRFs and GMRFs through SPDEs.
The important relationship, which was initially used by \citet{lindgren2011explicit}, is that the solution $x(\boldsymbol{s})$ to the following SPDE
is a GRF with a Mat\'ern covariance function,
\begin{equation} \label{eq: cholesky_spde1}
(\kappa^2 - \Delta)^{\alpha/2} x(\boldsymbol{s}) = \mathcal{W}(\boldsymbol{s}), \hspace{2mm} \boldsymbol{s} \in \mathbb{R}^{d}, \hspace{2mm} \alpha = \nu +d/2, \hspace{2mm} \kappa > 0, \hspace{2mm} \nu > 0,
\end{equation}
where $\Delta = \sum_{i=1}^d \frac{\partial^2}{\partial x_i^2}$ is the Laplacian, $(\kappa^2 - \Delta)^{\alpha/2}$ is a differential operator and $d$ is the dimension of the field $x(\boldsymbol{s})$.
\citet{fuglstad2011spatial} extended this approach to construct anisotropic and inhomogeneous fields with the SPDE
\begin{equation} \label{eq: cholesky_spde2}
\kappa^2(\boldsymbol{u}) x(\boldsymbol{u}) - \nabla \cdot \boldsymbol{H}(\boldsymbol{u}) \nabla x(\boldsymbol{u}) = \mathcal{W} (\boldsymbol{u}), \
\end{equation}
where $\kappa$ and $\boldsymbol{H}$ control the local range and anisotropy, and $\nabla =\left( \frac{\partial}{\partial{x}}, \frac{\partial}{\partial{y}} \right)$.
One important difference between \citet{lindgren2011explicit} and \citet{fuglstad2011spatial} is that \citet{lindgren2011explicit} have chosen the Neumann boundary condition but \citet{fuglstad2011spatial} has chosen the
periodic boundary condition. With the Neumann boundary condition the precision matrix $\boldsymbol{Q}_1$ is a band matrix. However, the periodic boundary condition gives elements ``in the corners'' of the precision matrix.
\citet{hu2012multivariate} extended the approach to multivariate settings by using systems of SPDEs.
For more information about the SPDE approach, we refer to \citet{lindgren2011explicit}, \citet{fuglstad2011spatial} and \citet{hu2012multivariate}.
First, choose the precision matrix for $\boldsymbol{Q}_1$ that results from the discretization of the SPDE \eqref{eq: cholesky_spde1} with $\alpha = 2, \, d = 2$ and $\kappa = 0.3$.
The sparsity pattern of $\boldsymbol{Q}_1$ is given in Figure \ref{fig: cholesky_SPDE1_Q}. We still assume $\boldsymbol{Q}_2 = \boldsymbol{I}$.
The sparsity patterns of $\boldsymbol{L}_1$, $\boldsymbol{L}_2$, $\boldsymbol{L}$, $\boldsymbol{A}$ and $\boldsymbol{R}$ are given in Figure \ref{fig: cholesky_SPDE1_L1} -\ref{fig: cholesky_SPDE1_R}, respectively.
We notice that the upper triangular matrix $\boldsymbol{R}$ is sparser than the matrix $\boldsymbol{L}$.
The sparsity pattern of the approximated precision matrix $\boldsymbol{\widetilde{Q}}$ is given in Figure \ref{fig: cholesky_SPDE1_Qq}.
The images of the true covariance matrix $\boldsymbol{\Sigma}$, the approximated covariance matrix $\widetilde{\boldsymbol{\Sigma}}$
and the error matrix $\widetilde{\boldsymbol{E}}$ are shown in Figure \ref{fig: cholesky_result_spde1_comparation}. We can notice that the elements in the error matrix $\widetilde{\boldsymbol{E}}$
are reasonably small.
The second precision matrix for $\boldsymbol{Q}_1$ is generated from the SPDE \eqref{eq: cholesky_spde2} with $\kappa = 0.1$ and
\begin{displaymath}
\boldsymbol{H} = 0.1 \times
\begin{pmatrix}
1 & 0.5 \\
0.5 & 1
\end{pmatrix}.
\end{displaymath}
The sparsity pattern of the precision matrix $\boldsymbol{Q}_1$ is given in
Figure \ref{fig: cholesky_SPDE2_Q}. We use the same $\boldsymbol{Q}_2$ as previous examples. The sparsity patterns of $\boldsymbol{L}_1$, $\boldsymbol{L}_2$, $\boldsymbol{L}$, $\boldsymbol{A}$ and $\boldsymbol{R}$
are given in Figure \ref{fig: cholesky_SPDE2_L1} - Figure \ref{fig: cholesky_SPDE2_R}, respectively. We can notice that the upper triangular matrix $\boldsymbol{R}$ is sparser than the matrix $\boldsymbol{L}$.
The sparsity pattern of the approximated precision matrix $\boldsymbol{\widetilde{Q}}$ is given in Figure \ref{fig: cholesky_SPDE2_Qq}.
The images of the true covariance matrix $\boldsymbol{\Sigma}$, the approximated covariance matrix $\widetilde{\boldsymbol{\Sigma}}$
and the error matrix $\widetilde{\boldsymbol{E}}$ are illustrated in Figure \ref{fig: cholesky_result_spde2_comparation}. We can notice that the numerical values in the error matrix
$\widetilde{\boldsymbol{E}}$ are also reasonably small in this case.
\begin{figure}
\caption{ Sparsity patterns of $\boldsymbol{L}_1$ (a), $\boldsymbol{L}_2$ (b), $\boldsymbol{L}$ (c), $\boldsymbol{A}$ (d) and $\boldsymbol{R}$ (e) for the random field from the SPDE \eqref{eq: cholesky_spde1}.}
\label{fig: cholesky_SPDE1_L1}
\label{fig: cholesky_SPDE1_L2}
\label{fig: cholesky_SPDE1_L}
\label{fig: cholesky_SPDE1_A}
\label{fig: cholesky_SPDE1_R}
\label{fig: cholesly_SPDE1}
\end{figure}
\begin{figure}
\caption{Sparsity patterns of $\boldsymbol{Q}$ (a) and $\boldsymbol{\widetilde{Q}}$ (b) for the random field generated from the SPDE \eqref{eq: cholesky_spde1}.}
\label{fig: cholesky_SPDE1_Q}
\label{fig: cholesky_SPDE1_Qq}
\label{fig: cholesky_spde1_QandQq}
\end{figure}
\begin{figure}\label{fig: cholesky_SPDE1_InverseQ}
\label{fig: cholesky_SPDE1_InverseQq}
\label{fig: cholesky_SPDE1_Err}
\label{fig: cholesky_result_spde1_comparation}
\end{figure}
\begin{figure}
\caption{ Sparsity patterns of $\boldsymbol{L}_1$ (a), $\boldsymbol{L}_2$ (b), $\boldsymbol{L}$ (c), $\boldsymbol{A}$ (d) and
$\boldsymbol{R}$ (e) for the random field generated from the SPDE given in \eqref{eq: cholesky_spde2}. }
\label{fig: cholesky_SPDE2_L1}
\label{fig: cholesky_SPDE2_L2}
\label{fig: cholesky_SPDE2_L}
\label{fig: cholesky_SPDE2_A}
\label{fig: cholesky_SPDE2_R}
\label{fig: cholesly_SPDE2}
\end{figure}
\begin{figure}
\caption{Sparsity patterns of $\boldsymbol{Q}$ (a) and $\boldsymbol{\widetilde{Q}}$ (b) for the random field generated from the SPDE given in \eqref{eq: cholesky_spde2}.}
\label{fig: cholesky_SPDE2_Q}
\label{fig: cholesky_SPDE2_Qq}
\label{fig: cholesky_spde2_QandQq}
\end{figure}
\begin{figure}\label{fig: cholesky_SPDE2_InverseQ}
\label{fig: cholesky_SPDE2_InverseQq}
\label{fig: cholesky_SPDE2_Err}
\label{fig: cholesky_result_spde2_comparation}
\end{figure}
\subsection{Sampling from GMRFs} \label{sec: cholesky_sampling}
In this section samples from a GMRF are obtained using the sparse upper triangular matrices $\boldsymbol{R}$ and the Cholesky factors $\boldsymbol{L}$ for the precision matrices $\boldsymbol{Q}$.
Let the precision matrix $\boldsymbol{Q} = \boldsymbol{Q}_1 + \boldsymbol{Q}_2$, where $\boldsymbol{Q}_1$ is from the SPDE \eqref{eq: cholesky_spde1} or \eqref{eq: cholesky_spde2},
and $\boldsymbol{Q}_2$ is a diagonal matrix. The sampling is done as follows.
\begin{itemize}
\item Compute the Cholesky factor $\boldsymbol{L}$ with a Cholesky factorization or compute the sparse upper triangular matrix $\boldsymbol{R}$ from the cTIGO algorithm;
\item Sample $\boldsymbol{z} \sim \mathcal{N}(\boldsymbol{0}, \boldsymbol{I})$;
\item Solve the equation $\boldsymbol{L}^{\mbox{T}}\boldsymbol{x} = \boldsymbol{z}$ or $\boldsymbol{R}\boldsymbol{x} = \boldsymbol{z}$;
\item $\boldsymbol{x}$ is the sample of the GMRF with precision matrix $\boldsymbol{Q}$ or $\widetilde{\boldsymbol{Q}}$.
\end{itemize}
If the mean $\boldsymbol{\mu}$ of the field is not zero, then we just need a last step $\boldsymbol{x} = \boldsymbol{\mu} + \boldsymbol{x}$ to correct the mean. With $\boldsymbol{L}$ the field $\boldsymbol{x}$ has the true covariance matrix
$\boldsymbol{Q}^{-1}$ because
\begin{displaymath}
\Cov(\boldsymbol{x}) = \Cov(\boldsymbol{L^{-T}\boldsymbol{z}}) = (\boldsymbol{L}\boldsymbol{L}^{T})^{-1} = \boldsymbol{Q}^{-1}.
\end{displaymath}
Similarly, with $\boldsymbol{R}$ the field $\boldsymbol{x}$ has the approximated covariance matrix $\widetilde{\boldsymbol{Q}}^{-1}$.
Many other sampling algorithms are provided by \citet[Chapter 2]{rue2005gaussian} for different parametrization of the GMRF.
We cannot notice any large differences between the samples using the Cholesky factor $\boldsymbol{L}$ and the samples using the sparse matrix $\boldsymbol{R}$
based on Figure \ref{fig: cholesky_sampling1} and Figure \ref{fig: cholesky_sampling2}.
\begin{figure}
\caption{Samples from the GMRF with the common Cholesky factor $\boldsymbol{L}$ with \eqref{eq: cholesky_spde1}
(a) and the upper triangular matrix $\boldsymbol{R}$ (b) from the cTIGO algorithm.}
\label{fig: cholesky_sampling1}
\end{figure}
\begin{figure}
\caption{Samples from the GMRF with the common Cholesky factor $\boldsymbol{L}$ (a) with \eqref{eq: cholesky_spde2}
and the upper triangular matrix $\boldsymbol{R}$ (b) from the cTIGO algorithm.}
\label{fig: cholesky_sampling2}
\end{figure}
\subsection{Effect of dropping tolerance} \label{sec: cholesky_result_comparsion}
In this section we choose different values of $\tau$ in order to study the effect of the dropping tolerance. We use the same kinds of structures for the precision matrices as discussed in Section
\ref{sec: cholesky_result_band_matrices} with dropping tolerances
$ \tau = \{0, 0.000001, 0.00001, 0.0001, 0.001, 0.01\} $. The $1$-norm for the error matrix $\boldsymbol{E} = \boldsymbol{Q} - \boldsymbol{\widetilde{Q}}$
is used for the comparisons. The results are given in Table \ref{RWcomparison} and Table \ref{othercomparison}.
From these tables, we can see that as the dropping tolerance $\tau$ becomes smaller and smaller, the error becomes smaller and smaller. Notice that by choosing $10^{-5}$
as the dropping tolerance, the error reaches a level acceptable in many applications. If the dropping tolerance is equal to $0$, then the error is also equal to zero which means no element
has been zeroed out during the Givens rotations and it returns the common Cholesky factor.
\begin{table}[htb]
\caption{Comparisons for RW$1$ Model and RW2 Model} \label{RWcomparison}
\centering
\begin{minipage}[b]{0.35\textwidth}
\begin{tabular}{r|r}
\hline
\multicolumn{2}{c} {Random Walk 1} \\
\hline
tolerance & error \\
\hline
0.01 & 2.55E-04 \\
0.001 & 1.66E-06 \\
0.0001 & 2.13E-08 \\
0.00001 & 2.50E-10 \\
1.00E-6 & 2.34E-12 \\
0 & 4.00E-15 \\
\hline
\end{tabular}
\end{minipage}
\begin{minipage}[b]{0.35\textwidth}
\begin{tabular}{r|r}
\hline
\multicolumn{2}{c} {Random Walk 2} \\
\hline
tolerance & error \\
\hline
0.01 & 0.33 \\
0.001 & 1.80E-05 \\
0.0001 & 1.48E-07 \\
0.00001 & 1.07E-08 \\
1.00E-6 & 2.15E-09 \\
0 & 1.73E-14 \\
\hline
\end{tabular}
\end{minipage} \end{table}
\begin{table}[htb]
\caption{Comparisons for Poisson matrix and Toeplitz matrix} \label{othercomparison}
\centering
\begin{minipage}[c]{0.35\textwidth}
\begin{tabular}{r|r}
\hline
\multicolumn{2}{c} {Poisson matrix} \\
\hline
tolerance & error \\
\hline
0.01 & 0.11 \\
0.001 & 8.51E-03 \\
0.0001 & 6.33E-04 \\
0.00001 & 5.91E-05 \\
1.00E-6 & 3.44E-06 \\
0 & 1.49E-14 \\
\hline
\end{tabular}
\end{minipage}
\begin{minipage}[c]{0.35\textwidth}
\begin{tabular}{r|r}
\hline
\multicolumn{2}{c} {Toeplitz matrix} \\
\hline
tolerance & error \\
\hline
0.01 & 9.15E-03 \\
0.001 & 9.59E-04 \\
0.0001 & 7.91E-05 \\
0.00001 & 8.70E-06 \\
1.00E-6 & 7.19E-07 \\
0 & 5.66E-15 \\
\hline
\end{tabular}
\end{minipage}
\end{table}
\section{Discussion and Conclusion} \label{sec: cholesky_conclusion}
In this paper we use the cTIGO algorithm to find sparse Cholesky factors for specifying GMRFs.
Some commonly used structures of the precision matrices and two precision matrices generated from SPDEs have been tested.
By using the incomplete orthogonal factorization with Givens rotations, a sparse incomplete Cholesky factor can be found and it is usually sparser than the Cholesky factor from the standard Cholesky factorization.
The sparsity of the incomplete Cholesky factor depends on the value of the tolerance. With a good choice for the dropping tolerance, the error between the true covariance matrix and the approximated covariance matrix becomes negligible.
One advantage of this approach is that it is robust. It always produces a sparse incomplete Cholesky factor. Since the algorithm works both for square matrices and for rectangular matrices,
this approach can be applied to GMRFs conditioned on observed data or a subset of the variables.
On the negative side, it seems that our current implementation of the approach is slow when the dimension of the matrix becomes large.
We believe that this is due to the nature of the incomplete orthogonal factorization with dynamic dropping strategy.
The orthogonal factorization is usually slower than the Cholesky factorization. Further, Givens rotations only zero out elements one at a time.
This leads to the slowness of the algorithm. When the computational resources are limited,
we might need to use the fixed pattern dropping strategy. However, implementing a fast cTIGO algorithm is outside the scope of this paper and is left for further research.
\linespread{0.8}{\small {\bibliography {Reference/Ref}}}
\end{document} | arXiv | {
"id": "1307.1368.tex",
"language_detection_score": 0.6638768315315247,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{abstract} Let $R$ be a commutative Noetherian $F$-finite ring of prime characteristic and let $\mathcal{D}$ be a Cartier algebra. We define a self-map on the Frobenius split locus of the pair~$(R,\mathcal{D})$ by sending a point $P$ to the splitting prime of $(R_P, \mathcal{D}_P)$. We prove this map is continuous, containment preserving, and fixes the $\mathcal{D}$-compatible ideals. We show this map can be extended to arbitrary ideals $J$, where in the Frobenius split case it gives the largest $\mathcal{D}$-compatible ideal contained in~$J$. Finally, we apply Glassbrenner's criterion to prove that the prime uniformly $F$-compatible ideals of a Stanley-Reisner ring are the sums of its minimal primes. \end{abstract}
\title{The Cartier core map for Cartier algebras}
\section{Introduction} \label{sec:introduction}
Frobenius splitting is an important tool in characteristic $p$ commutative algebra and algebraic geometry. Locally, Frobenius splitting (or $F$-purity) is a restriction on singularities, analogous to log canonicity for complex singularities. Strong $F$-regularity is a strengthening of Frobenius splitting, analogous to how Kawamata log terminality is a strengthening of log canonicity. There is much interest in understanding the world of Frobenius split objects that are not strongly $F$-regular.
For a local ring, Aberbach and Enescu introduced the \emph{splitting prime} as a way to measure the difference between Frobenius splitting and strong $F$-regularity---the elements in the splitting prime are obstructions to strong $F$-regularity, and in particular, the splitting prime of a domain is zero precisely for strongly $F$-regular rings \cite{Aberbach+Enescu.05}. Aberbach and Enescu's splitting prime can also be described as the largest uniformly $F$-compatible ideal in the sense of Schwede \cite{Schwede.10a}. In this paper, we will be working with a generalization of the splitting prime following two different directions.
Frobenius splitting and strong $F$-regularity have been generalized to further settings, including pairs $(X, \Delta)$ consisting of a $\mathbb{Q}$-divisor $\Delta$ on a smooth variety $X$ of characteristic $p$; pairs $(R,\mf a^t)$ where $R$ is a ring of characteristic $p$, $\mf a$ is an ideal, and the formal exponent $t$ is a positive real number; as well as more general settings \cite{Hara+Watanabe.02,Schwede.08,Schwede.10,Takagi.04}.
The Cartier algebra (see \Cref{full-cart-alg-def}) gives a unified and more general approach, allowing us to talk about Frobenius splitting and strong $F$-regularity for an arbitrary subalgebra of the Cartier algebra \cite{Schwede.11a}.
An important problem is to understand the extent to which strong $F$-regularity fails for an arbitrary Frobenius split subalgebra of the Cartier algebra \cite{Blickle+etal.12,Schwede.10a}.
Fix a Frobenius split pair~$(R,\mc D)$, where $\mc D$ is a Cartier subalgebra (see \Cref{cartier-mod-defs}). One main theme of this paper is to consider splitting primes via the perspective of the Cartier core map \[ \Ccore_{\mc D}:\Spec R \to \Spec R \] which assigns to each prime $P\in \Spec R$ the splitting prime $\Ccore_{\mc D}(P)$ corresponding to the pair~$(R_P, \mc D_P)$. Alternatively, if $R$ is not Frobenius split one can define $\Ccore_{\mc D}$ on the Frobenius split locus, $\mc U_{\mc D}$, of $(R,\mc D)$. In this context, we show the following main result.
\begin{thmA}[\Cref{c-map-summary-thm}] Let $R$ be an $F$-finite Noetherian ring of characteristic $p$, and let $\mc D$ be a Cartier subalgebra. Then the Cartier core map \[ \mathcal U_{\mc D} \rightarrow \Spec R \qquad\qquad P\mapsto C_{\mathcal D}(P) \] is a continuous containment preserving map on the $F$-pure locus $\mathcal U_{\mc D}$ of the pair~$(R,\mc D)$ which fixes the $\mc D$-compatible ideals. The image of $C_{\mathcal D}$ is the set of prime $\mc D$-compatible ideals and is always finite. The image is the set of minimal primes of $R$ precisely when the pair~$(R,\mc D)$ is strongly $F$-regular. \end{thmA}
In another direction, for an arbitrary (not necessarily Frobenius split) pair~$(R,\mc D)$, we can associate to \emph{any} ideal $J$ of $R$ the largest $\mc D$-compatible ideal $\Ccore_{\mc D}(J)$ contained in $J$. We call this ideal the \emph{Cartier core} of $J$, following Badilla-C\'espedes, who considered this earlier for the non-pair setting \cite{Badilla-Cespedes.21}. We will prove some basic facts about the Cartier core, many of which mirror results of Schwede in the triples setting and of Badilla-C\'espedes in the setting where $\mc D$ is the full Cartier algebra.
Returning to the non-pair setting, we also give the following explicit formula for the Cartier core of an arbitrary ideal in a quotient of a regular ring, making use of a criterion for strong $F$-regularity due to Glassbrenner \cite{Glassbrenner.96}.
\begin{thmB}[\Cref{c-ideal-quo-reg-ring}] Let $S$ be a regular $F$-finite ring, let $I\subseteq J$ be ideals of $S$, and let $R=S/I$. Then \[ C_R(J/I) = \left(\bigcap_{e>0} J^{[p^e]} :_S (I^{[p^e]} :_S I)\right)/I. \] \end{thmB}
This presentation of the Cartier core allows us to prove that the Cartier core map commutes with basic operations such as localizing, adjoining a variable, and in the case of quotients of polynomial rings, with homogenization (see \Cref{localization}, \Cref{qr-add-var}, \Cref{qr-homogenization}). As an application of these techniques, we give an exact description of the Cartier core map in the case of Stanley-Reisner rings.
\begin{thmC}[\Cref{stanley-reisner-c-map}, \Cref{stanley-reisner-general-c-map}] Let $R$ be a Stanley-Reisner ring over a field that has prime characteristic and is $F$-finite. Let $Q$ be any prime ideal of $R$. Then \[ C_R(Q) = \sum_{\substack{P \in \Min(R) \\ P\subseteq Q}} P. \] In particular, the image of the Cartier core map on primes, i.e., the set of generic points of $F$-pure centers of $R$, is the set of sums of minimal primes. Further, if $J$ is any ideal, then \[ \Ccore_R(J) = \sum_{\substack{\mc Q\subset \Min(R) \\ \big({\bigcap\limits_{P\in \mc Q}} P\big) \subset J }}
\left( \bigcap_{P\in \mc Q} P \right). \] \end{thmC}
This theorem extends existing work on computing certain specific uniformly $F$-compatible ideals and $\mc D$-compatible ideals, including the splitting prime and test ideals, for Stanley-Reisner rings \cite{Aberbach+Enescu.05,Badilla-Cespedes.21,Enescu+Ilioaea.20,Vassilev.98}.
\begin{assume} All rings in this paper (other than the Cartier algebras) are commutative, Noetherian, and unital. Furthermore, all such rings are of prime characteristic $p$ and are $F$-finite. \end{assume}
\begin{ackblock} I am grateful to my advisor, Karen Smith, for all of her guidance and suggestions. I would also like to thank Shelby Cox and Swaraj Pande for the helpful conversations. Thanks to W\'agner Badilla-C\'espedes, Karl Schwede, Kevin Tucker, Janet Vassilev, and the referee for their feedback on an earlier version of this paper; and especially to Anne Fayolle for pointing out an error. \end{ackblock}
\section{Background} \label{sec:background} For a ring $R$ of prime characteristic $p$, the Frobenius endomorphism is the ring map $F:R\to R$ where $F(r) = r^p$. To distinguish the copies of $R$,
we will write $F_*R$ for the codomain. As a ring, this Frobenius pushforward $F_*R = \{F_*r\: | \: r\in R\}$ is exactly the same as $R$, just with this formal symbol $F_*$ prepended everywhere. For example, multiplication is $(F_* r)(F_* s) = F_*(rs)$. The benefit of this notation is that it clarifies the $R$-module structure induced by $F$---the Frobenius map is now written as $F: R \to F_*R$ so that $F(r) = F_*(r^p)$, and the $R$-module action is now written $rF_*s = F_*(r^p s)$. We can iterate the Frobenius, writing $F^e: R\to F_*^e R$, where $F^e(r) = F_*^e(r^{p^e})$ and $rF_*^e s = F_*^e (r^{p^e}s)$.
We will utilize this $R$-module structure on $F_*^eR$, but first we need a cohesive way to consider only certain maps in $\hom_R(F_*^e R, R)$. First, given any map $\psi \in \hom_R(F_*^dR, R)$, we write $F_*^e \psi: F_*^{e+d}R\to F_*^eR$ for the \emph{Frobenius pushforward} of the map, where \[ (F_*^e\psi)(F_*^{e+d} r) = (F_*^e \psi)(F_*^e(F_*^d r)) = F_*^e(\psi(F_*^d r)). \] Now we define a (non-commutative) multiplication on the abelian group $\bigoplus_e \hom_R(F_*^e R, R)$ as follows. Given maps $\phi \in \hom_R(F_*^e R, R)$ and $\psi \in \hom_R(F_*^d R, R)$, we define their product as \begin{equation} \label{eq:cartier-mult} \phi\cdot \psi = \phi \circ F_*^e \psi. \end{equation} More concretely, for any $r\in R$ we have \[ (\phi\cdot \psi) (F_*^{e+d} r) = \phi \left( F_*^e(\psi(F_*^d r))\right). \]
\begin{defn} \label{full-cart-alg-def} The \emph{(full) Cartier algebra} on $R$ is the graded non-commutative ring \[ \mc C_R = \bigoplus_{e\geq 0} \hom_R(F_*^e R, R), \] where multiplication is as defined in \Cref{eq:cartier-mult}. \end{defn} Note that we are writing $F_*^0R$ to mean $R$ as an $R$-module, so that $(\mc C_R)_0 = \hom_R(R,R)\cong R$. We will often write the ``multiplication by $c$'' map as simply $c$, and its pushforward as $F_*^ec$, so that $(F_*^ec)(F_*^e r) = F_*^e(cr)$. However, this copy of $R$ is rarely central in $\mc C_R$, because for $\phi$ of degree $e$, we have $r\cdot \phi = \phi \cdot r^{p^e} $. In particular, $R$ is central only if $R = \bb F_p$.
\begin{defn} \label{cart-subalg-def} A \emph{Cartier subalgebra} $\mc D$ is a graded subalgebra of $\mc C_R$ such that $\mc D_0 = R$. In particular, $\mc D$ has the form $\mc D = \bigoplus_e \mc D_e$ where $\mc D_e \ \subseteq \hom_R(F_*^eR, R)$ for all $e\geq 0$. \end{defn}
\begin{zb}[{\cite[Rmk.~3.10]{Schwede.11a}}] Let $(R,\mf a^t)$ be a pair where $\mf a$ is an ideal and the formal exponent $t$ is a positive real number. Then the corresponding Cartier subalgebra $\mc C^{\mf a^t}$ has \[ \mc C^{\mf a^t}_e = \hom_R(F_*^e R, R)\cdot \mf a^{\lceil t(p^e-1)\rceil}. \] \end{zb}
\begin{defn} \label{cartier-mod-defs} Let $R$ be a ring of prime characteristic, and let $\mc D$ be a Cartier subalgebra on $R$. \begin{itemize}
\item The pair~$(R,\mc D)$ is \emph{$F$-finite} if $R$ is $F$-finite, i.e., $F_*R$ is a finite $R$-module. Every ring $R$ in this paper will be $F$-finite.
\item The pair~$(R,\mc D)$ is \emph{Frobenius split} or \emph{(sharply) $F$-pure} if there exists some $e>0$ and some $\phi \in \mc D_e$ with $\phi(F_*^e 1)=1$.
\item If $c$ is an element of $R$, then the pair~$(R,\mc D)$ is \emph{eventually Frobenius split along $c$}, or \emph{$F$-pure along $c$} if there exists some $e>0$ and some $\phi \in \mc D_e$ with $\phi(F_*^e c)=1$.
\item The pair~$(R,\mc D)$ is \emph{strongly $F$-regular} if it is eventually Frobenius split along every $c$ which is not in any minimal prime of $R$. \end{itemize} \end{defn}
We will follow the example of Blickle, Schwede, and Tucker and omit the adjective ``sharp'' when discussing $F$-purity of pairs \cite[Def.~2.7]{Blickle+etal.12}. Observe that if $\phi \in \mc D_e$ is a splitting of $F^e$, then there is a splitting in any multiple of the degree, given by $\phi^n \in \mc D_{en}$.
Since we will consider only pairs $(R,\mc D)$ where $R$ is Noetherian and $F$-finite, this means that for any ring $S$ such that $R\to S$ is flat, we have by \cite[Thm.~7.11]{Matsumura.89}, \[ S\otimes_R \hom_R(F_*^eR, R)\cong \hom_S(S\otimes_R F_*^e R, S). \] In the case that $S$ is a localization of $R$ we further know that $S$ commutes with the Frobenius, that is, for any multiplicative set $W$, \begin{align*} W^{-1} R \otimes_R \hom_R(F_*^eR, R) &\cong \hom_{W^{-1} R}(F_*^e (W^{-1} R), W^{-1} R). \end{align*} We will use this isomorphism freely: if $\frac{r}{w} \otimes \phi$ is a pure tensor in $W^{-1} R \otimes \hom_{R}(F_*^e R, R)$, we will identify this with the map in $\hom_{W^{-1} R}(F_*^e (W^{-1} R), W^{-1} R)$ which sends $F_*^e(\frac{s}{u})$ to $\frac{r\phi(F_*^e( su^{p^e-1}))}{wu}$. This identification is easier to understand if we first rewrite $F_*^e(\frac{s}{u})$ as \[ F_*^e\left(\frac{su^{p^e-1}}{u^{p^e}}\right) = \frac{1}{u}\cdot \frac{F_*^e(su^{p^e-1})}{1} = \frac{F_*^e(su^{p^e-1})}{u}. \]
Thus we have a natural containment $W^{-1} R\otimes_R \mc D_e\subseteq (\mc C_{W^{-1} R})_e$. We can therefore construct a new Cartier subalgebra $W^{-1} \mc D$ on $W^{-1} R$ using this isomorphism, so that \begin{align*} (W^{-1} \mc D)_e = W^{-1} R\otimes \mc D_e. \end{align*} When we are localizing at a prime ideal $P$, we write this Cartier subalgebra as $\mc D_P$.
Now that we have the setup to discuss localizations of Cartier subalgebras, we can state and prove the following standard result on the Frobenius split locus in the setting of Cartier algebra pairs. \begin{thm} \label{f-pure-open-local} Let $R$ be an $F$-finite ring, and $\mc D$ a Cartier subalgebra. Then the set of primes $P$ of $R$ at which $(R_P, \mc D_P)$ is $F$-pure is open. Further, the pair~$(R,\mc D)$ is $F$-pure if and only if the localized pair~$(R_P, \mc D_P)$ is $F$-pure for all primes $P$. \end{thm} \begin{proof} For any $e$, we get a module map $\Psi_e: \mc D_e \to R$ via evaluation at~$F_*^e 1$. The pair~$(R,\mc D)$ is $F$-pure exactly when this map is surjective for some $e>0$, or equivalently, when there exists an $e>0$ such that $R/\im \Psi_e=0$. The localization $(\Psi_e)_P$ corresponds to the evaluation map $(\mc D_P)_e \to R_P$, so the pair~$(R_P, \mc D_P)$ is \emph{not} $F$-pure if and only if $R_P/\im (\Psi_e)_P \neq 0$ for all $e$. Thus the non-$F$-pure locus is precisely the closed set $\bigcap_{e>0} \bb V(\im \Psi_e)$.
For the second statement, if $(R,\mc D)$ is $F$-pure, then there exists some $e>0$ and $\phi\in \mc D_e$ with $\phi(F_*^e 1)=1$. By definition, the localization $\phi_P : F_*^e(R_P) \to R_P$ is in $(\mc D_P)_e$, and so $(R_P, \mc D_P)$ is also $F$-pure.
Conversely, if each $(R_P, \mc D_P)$ is $F$-pure, then the complements of the sets $\bb V(\im \Psi_e)$ give an open cover of $\Spec R$. Since $\Spec R$ is quasi-compact, only finitely many are needed, say, the complements of $\bb V(\im \Psi_{e_1}), \ldots, \bb V(\im \Psi_{e_t})$. Then taking $e=e_1\cdots e_t$ to be the product of these indices, we must have that $(R_P, \mc D_P)$ has a splitting in $(\mc D_P)_e$ for every prime $P$. Thus the map $\Psi_e$ is surjective at every prime, and therefore is surjective. \end{proof} This proof in fact shows that for any $c$, the set of primes $P$ such that $(R_P, \mc D_P)$ is not eventually Frobenius split along $c$ is closed. Further, it also shows that $(R,\mc D)$ is eventually Frobenius split along $c$ if and only if $(R_P, \mc D_P)$ is for every prime ideal $P$. In particular, this shows that just like in the classical case, $(R,\mc D)$ is strongly $F$-regular if and only if every $(R_P, \mc D_P)$ is as well.
\section{The Cartier core map}
Fix a pair~$(R,\mc D)$ where $R$ is an $F$-finite and Frobenius split ring and where $\mc D$ is a Cartier subalgebra. In this section we will define an explicit continuous map \[ \Ccore_{\mc D}: \Spec R \to \Spec R \] that has some especially nice properties. The image of our map is the set of $\mc D$-compatible primes of $\Spec R$, which in the case $\mc D = \mc C_R$ is the set of (generic points of) $F$-pure centers.
If $R$ is not Frobenius split, we can instead define $\Ccore_{\mc D}$ on the open locus of Frobenius split points. More generally, the map $\Ccore_{\mc D}$ can be viewed as an endomorphism defined on the set of \emph{all ideals} of $R$ (not necessarily proper), which is especially interesting on the class of radical ideals in a Frobenius split ring.
\begin{defn} Let \(R\) be an $F$-finite ring of prime characteristic. Let \(J\) be an ideal of \(R\). Let $\mc D \subseteq \mc C_R$ be a Cartier subalgebra. Then the \emph{Cartier core} of \(J\) in \(R\) with respect to $\mc D$ is \[
\Ccore_{\mc D}(J) = \left\{r\in R \: | \: \phi(F_*^e r)\in J \ \ \forall e>0,\ \forall\phi\in \mc D_e\right\}. \] \end{defn}
We will write $\Ccore_R(J)$ to mean the Cartier core with respect to the full Cartier algebra $\mc C_R$, and just $\Ccore(J)$ when the ring and Cartier subalgebra are clear from context. In the case that $\mc D = \mc C_R$, the Cartier core $\Ccore_R(J)$ is also denoted (e.g., in \cite{Badilla-Cespedes.21}) as $\mc P(J)$.
\begin{notation} The \emph{\(e\)-th Cartier contraction} of $J$ with respect to $\mc D$ is \[
A_{\mc D_e}(J) = \left\{ r \in R \: | \: \phi(F_*^e r) \in J\ \ \forall \phi \in \mc D_e\right\}. \] \end{notation} We can express the Cartier core in terms of the Cartier contractions as \[ \Ccore_{\mc D}(J) = \bigcap_{e>0} A_{\mc D_e}(J). \] We can also express the Frobenius pushforward of the $e$-th Cartier contraction as \[ F_*^e(A_{\mc D_e}(J)) = \bigcap_{\phi \in \mc D_e}\phi^{-1}(J). \] When $\mc D = \mc C_R$, the \(e\)-th Cartier contraction \(A_{\mc D_e}(J)\) is sometimes denoted by $J_e$.
Note that for an $F$-finite pair~$(R,\mc D)$, $A_{\mc D_e}(J)$ and \(\Ccore_{\mc D}(J)\) are ideals. Both are clearly additively closed, so it suffices to check that if $a\in A_{\mc D_e}(J)$ and $r\in R$, then $ra\in A_{\mc D_e}(J)$. For any $\phi \in \mc D_e$, we have $\phi(F_*^e(ra)) = (\phi \cdot r)(F_*^e a)$, which is in $J$ since $a\in A_{\mc D_e}(J)$.
The Cartier core was defined for the case $\mc C_R = \mc D$ by Badilla-C\'espedes \cite[Def.~4.12]{Badilla-Cespedes.21} as a generalization of Aberbach and Enescu's splitting prime \cite{Aberbach+Enescu.05} and of Brenner, Jeffries, and N\'u\~{n}ez Betancourt's differential core \cite{Brenner+etal.19}. Here we generalize this definition to the context of pairs, similar to Blickle, Schwede, and Tucker's generalization of the splitting prime to the context of pairs \cite{Blickle+etal.12}.
To motivate the definition of the Cartier core, note that the condition \(J\subseteq \Ccore_{\mc D}(J)\), i.e., that \(\phi(F_*^e(J))\subseteq J\) for all $e$ and for all $\phi\in \mc D_e$, is precisely the condition that \(J\) is $\mc D$-compatible. In the case where $\mc D$ is the full Cartier algebra, this is the same as saying $J$ is uniformly $F$-compatible. In fact, it is known that when $R$ is $F$-pure, $\Ccore_{R}(J)$ is the largest uniformly $F$-compatible ideal contained in \(J\) \cite[Prop.~4.11]{Badilla-Cespedes.21}. We will see in \Cref{largest-D-compat} that when the pair~$(R,\mc D)$ is Frobenius split, $\Ccore_{\mc D}(J)$ is the largest $\mc D$-compatible ideal contained in $J$.
Further, as the next two results show, the Cartier core of a prime ideal $P$ carries information about the localization $(R_P, \mc D_P)$.
\begin{prop}[{\cite[Prop.~2.12]{Blickle+etal.12}}] \label{f-split-proper} Let $(R,\mc D)$ be an $F$-finite pair and let $P$ be a prime ideal of $R$. Then $r\notin \Ccore_{\mc D}(P)$ if and only if the pair~$(R_P, \mc D_P)$ is $F$-pure along $r/1$. In particular, $(R_P, \mc D_P)$ is $F$-pure if and only if $\Ccore_{\mc D}(P)$ is proper. \end{prop} \begin{proof} Since $(\mc D_P)_e = \mc D_e\otimes R_P$, saying $\phi(F_*^e(r))\in P$ for some $\phi\in\mc D_e \subseteq \hom_R(F_*^e R, R)$ is equivalent to saying $\phi(F_*^e(r/1))\in PR_P$, viewing $\phi\in (\mc D_P)_e\subseteq \hom_{R_P}(F_*^e(R_P), R_P)$.
The pair~$(R_P,\mc D_P)$ is $F$-pure if and only if there is some $e>0$ and some $\phi\in (\mc D_P)_e$ such that $\phi(F_*^e(1))$ is a unit, i.e., not in $PR_P$, which by the above is equivalent to having $1\notin \Ccore_{\mc D}(P)$. \end{proof}
\begin{prop}[{Cf. \cite[Thm.~2.11, Prop.~2.12]{Blickle+etal.12}}] \label{str-f-reg=in-minl-prime} Let $(R,\mc D)$ be an $F$-finite pair and let $P$ be a prime ideal of $R$. Then the pair~$(R_P, \mc D_P)$ is strongly $F$-regular if and only if $\Ccore_{\mc D}(P)$ is contained in some minimal prime of $R$. \end{prop} \begin{proof} The pair~$(R_P, \mc D_P)$ is strongly $F$-regular if and only if $(R_P, \mc D_P)$ is $F$-pure along every element not contained in any minimal prime, i.e., if and only if $\Ccore_{\mc D}(P)$ is contained in the union of the minimal primes of $R$. Since $\Ccore_{\mc D}(P)$ is an ideal, prime avoidance says this is equivalent to having $\Ccore_{\mc D}(P)$ contained in some minimal prime of $R$. \end{proof}
Now that we have provided some motivation for the Cartier core construction, we will discuss some of its nice properties.
\begin{prop} \label{containment} Let $(R,\mc D)$ be an $F$-finite pair. If $J_1 \subseteq J_2$ in \(R\), then $\Ccore_{\mc D}(J_1)\subseteq \Ccore_{\mc D}(J_2)$. \end{prop} \begin{proof} For every $e$, $A_{\mc D_e}( J_1)\subseteq A_{\mc D_e}(J_2)$, since if $\phi(F_*^e r)\in J_1$ for some $\phi \in \mc D_e$, we also have $\phi(F_*^e r)\in J_2$. Taking the intersection over all $e$ gives our result. \end{proof}
\begin{prop}[{Cf. \cite[Prop~4.6]{Badilla-Cespedes.21}}] \label{arbitrary-intersection-c-ideal} Let $\{J_\alpha\}$ be an arbitrary collection of ideals in an $F$-finite ring $R$, and let $\mc D$ be a Cartier subalgebra. Then \[ \Ccore_{\mc D}\left(\bigcap_\alpha J_\alpha\right) = \bigcap_\alpha \Ccore_{\mc D}(J_\alpha).
\] \end{prop} \begin{proof} We see that \begin{align*}
\Ccore_{\mc D}\left(\bigcap_\alpha J_\alpha\right) &= \left\{r\in R\: | \: \phi(F_*^e r) \in \bigcap_\alpha J_\alpha \; \forall e, \forall \phi\in \mc D_e \right\} \\
&= \bigcap_\alpha \left\{r\in R\: | \: \phi(F_*^e r) \in J_\alpha \ \forall e, \forall \phi\in \mc D_e \right\} \\
&= \bigcap_\alpha \Ccore_{\mc D}(J_\alpha) \qedhere \end{align*} \end{proof} In particular, the set of Cartier cores with respect to $\mc D$ is closed under arbitrary intersection. We will see in \Cref{prop:arbitrary-sum-c-ideals} that this set is also closed under arbitrary sum for $F$-pure pairs.
Our next goal is to show that the Cartier core construction commutes with localization. To do so, we need the following lemma. \begin{lemma} \label{localization} Let $(R,\mc D)$ be an $F$-finite pair, let $Q$ be a $P$-primary ideal of $R$, and let $W$ be a multiplicative set avoiding $P$, so that $W\cap P=\emptyset$. Then \[ \Ccore_{W^{-1} \mc D}(QW^{-1} R) \cap R = \Ccore_{\mc D}(Q). \] \end{lemma} \begin{proof} By our discussion in \Cref{sec:background}, $W^{-1} \mc D_e$ is generated by the maps $\frac{\phi}{w}: F_*^e(W^{-1} R)\to W^{-1} R$ for $\phi\in \mc D_e$ and $w\in W$, where $\frac{\phi}{w}(F_*^e(\frac{s}{u})) = \frac{\phi(F_*^e(su^{p^e-1}))}{wu}$. We will start by showing that $\frac{s}{1}\in A_{W^{-1} \mc D_e}(QW^{-1} R)$ if and only if $s\in A_{\mc D_e}(Q)$.
By definition, $\frac{s}{1}\in A_{W^{-1} \mc D_e}(QW^{-1} R)$ if and only if $\psi(F_*^e(\frac{s}{1}))\in QW^{-1} R$ for all \( \psi \in W^{-1} \mc D_e. \) This is equivalent to having \[ \frac{\phi(F_*^e(s))}{w}\in QW^{-1} R \] for all $\phi \in \mc D_e$ and all $w\in W$. This means that we can write $\frac{\phi(F_*^e(s))}{w} = \frac{j}{u}$ for some $j\in Q$, $u\in W$, i.e., there exists $v\in W$ such that $vu \phi(F_*^e(s)) = vwj$. The latter is in $Q$, but $vu\notin P$, so by $P$-primaryness of $Q$ we must then have $\phi(F_*^e s) \in Q$. This holds for all $\phi$ exactly when $s \in A_{\mc D_e}(Q)$.
Now we have shown our first claim, which implies $A_{\mc D_e}(Q) = A_{W^{-1} \mc D_e}(QW^{-1} R) \cap R$. Intersecting both sides over all $e>0$, we see \[ \Ccore_{W^{-1} \mc D}(QW^{-1} R)\cap R = \Ccore_{\mc D}(Q).\qedhere \] \end{proof}
\begin{thm} \label{thm:localization} Let $(R,\mc D)$ be an $F$-finite pair, let $J$ be an ideal of $R$, and let $W$ be a multiplicative set avoiding every prime in $\Ass(J)$. Then \[ \Ccore_{W^{-1} \mc D}(JW^{-1} R)\cap R = \Ccore_{\mc D}(J) \quad \text{ and } \quad \Ccore_{\mc D}(J)W^{-1} R = \Ccore_{W^{-1} \mc D}(JW^{-1} R). \] \end{thm} \begin{proof} Write $J= Q_1 \cap \cdots \cap Q_t$ as a minimal primary decomposition of $J$ with corresponding primes $P_i = \sqrt {Q_i}$, so that $\Ass(J)=\{P_1,\ldots,P_t\}$. Then since intersection commutes with localization, with applying $\Ccore_{W^{-1}\mc D}$, and with contraction, \begin{align*} \Ccore_{W^{-1} \mc D}(JW^{-1} R)\cap R
&= \bigcap_{i=1}^t (\Ccore_{W^{-1} \mc D}(Q_iW^{-1} R)\cap R). \end{align*} By \Cref{localization}, since $W\cap P_i=\emptyset$ we have $\Ccore_{W^{-1} \mc D}(Q_iW^{-1} R)\cap R = \Ccore_{\mc D}(Q_i)$ and so \[ \Ccore_{W^{-1} \mc D}(JW^{-1} R)\cap R = \bigcap_{i=1}^t \Ccore_{\mc D}(Q_i) = \Ccore_{\mc D}(J). \]
For the second equality, we note \[ \Ccore_{W^{-1} \mc D}(JW^{-1} R) = (\Ccore_{W^{-1} \mc D}(JW^{-1} R)\cap R)W^{-1} R = \Ccore_{\mc D}(J)W^{-1} R \] since every ideal of the localization $W^{-1} R$ is the extension of its contraction to $R$. \end{proof}
Now that we have established the preliminary results for arbitrary ideals, we move to considering prime ideals. Our main results of the rest of this section can be summarized in the following theorem. \begin{thm} \label{c-map-summary-thm} Let $R$ be an $F$-finite Noetherian ring, and let $\mc D$ be a Cartier subalgebra. Then the Cartier core construction with respect to $\mc D$ induces a well-defined, continuous, and containment preserving map on the $F$-pure locus of the pair~$(R,\mc D)$ which fixes $\mc D$-compatible ideals. The image of the map is the set of $\mc D$-compatible ideals in $\mc U_{\mc D}$ and is always finite. The image is the set of minimal primes of $R$ precisely when the pair~$(R,\mc D)$ is strongly $F$-regular. \end{thm}
\begin{proof} We have already seen in \Cref{containment} that the Cartier core is containment preserving, even without restricting to primes. \Cref{c-map-well-defined} will show that the map $\Ccore:\mc U_{\mc D} \to \mc U_{\mc D}$ is well-defined. \Cref{cts-map} will show that this map is continuous, and \Cref{fin-many-C-cores} discusses the finiteness of the image. \Cref{cartier-core=D-compatible} will show that the image is precisely the set of $F$-pure $\mc D$-compatible ideals, which combined with \Cref{prop:c-ideal-stable} shows that all the $\mc D$-compatible ideals in $\mc U_{\mc D}$ are fixed.
The one statement that doesn't have a stand-alone proof elsewhere is the last one. $(R,\mc D)$ is strongly $F$-regular if and only if each $(R_P, \mc D_P)$ is strongly $F$-regular. By \Cref{str-f-reg=in-minl-prime}, this occurs exactly when each $\Ccore_{\mc D}(P)$ is contained in a minimal prime of $R$. But since $\Ccore_{\mc D}(P)$ is prime, this is equivalent to having $\Ccore_{\mc D}(P)$ be a minimal prime. \end{proof}
It is known that the splitting prime, which in our notation is $\Ccore_{R}(\mf m)$ for $(R,\mf m)$ local, is indeed prime \cite[Thm.~3.3]{Aberbach+Enescu.05}, even in the case of an arbitrary Cartier subalgebra \cite[Prop.~2.12]{Blickle+etal.12}. After localizing, the same proof works here, which we repeat for the reader's convenience. \begin{prop}[{\cite[Prop.~2.12]{Blickle+etal.12}}] \label{proper-prime} If \(P\) is prime and \(\Ccore_{\mc D}(P)\) is proper, then \(\Ccore_{\mc D}(P)\) is prime. \end{prop} \begin{proof} Suppose $c_0,c_1\notin \Ccore_{\mc D}(P)$. Then we will show $c_0c_1\notin \Ccore_{\mc D}(P)$. Our assumption means that $(R_P, \mc D_{P})$ is $F$-pure along each $c_i$, i.e., there exists an $e_i$ and $\psi_i\in (\mc D_P)_{e_i}$ such that $\psi_i(F_*^{e_i}c_i) = 1$. Then applying the map $\psi_1\circ F_*^{e_1}\psi_0 \circ F_*^{e_0+e_1}(c_1^{p^{e_0}-1})$ to $F_*^{e_0+e_1}(c_0c_1)$, where we are writing $F_*^{e_0+e_1}(c_1^{p^{e_0}-1})$ to mean multiplication by this ring element, we get \begin{center} \begin{tikzcd}[column sep=large] F_*^{e_0+e_1} (R_P) \rar["F_*^{e_0+e_1}(c_1^{p^{e_0}-1})"]
& F_*^{e_0+e_1}(R_P) \rar["F_*^{e_1}\psi_0"]
& F_*^{e_1}(R_P) \rar["\psi_1"]
& R_P \\ F_*^{e_0+e_1}(c_0c_1) \rar[mapsto]
& F_*^{e_0+e_1}(c_0c_1^{p^{e_0}}) = F_*^{e_1}(c_1F_*^{e_0}(c_0)) \rar[mapsto]
& F_*^{e_1}c_1 \rar[mapsto]
& 1. \end{tikzcd} \end{center} Rewriting this map as $\psi_1 \circ F_*^{e_1}\psi_0 \circ F_*^{e_0+e_1}(c_1^{p^{e_0}-1}) = \psi_1 \cdot \psi_0 \cdot c_1^{p^{e_0}-1}$, we see that it is in $(\mc D_P)_{e_0+e_1}$, and thus that $(R_P, \mc D_P)$ is also $F$-pure along $c_0c_1$, as desired. \end{proof}
\begin{prop} \label{prop:c-ideal-in-primary-original-ideal} Let $R$ be a characteristic $p$, $F$-finite ring, and let $\mc D$ be a Cartier subalgebra. If $Q$ is a $P$-primary ideal of $R$ and $\Ccore_{\mc D}(P)$ is proper, then $\Ccore_{\mc D}(Q)\subseteq Q$. \end{prop} \begin{proof} Since $\Ccore_{\mc D}(P)$ is proper, there is some $e>0$ and $\psi \in \mc D_e$ with $\psi(F_*^e 1)\notin P$. Consider $r\notin Q$ and the map $\psi \circ (F_*^e (r^{p^e-1}))= \psi \cdot r^{p^e-1}$ in $\mc D_e$. Then by $P$-primaryness, \[ r\psi(F_*^e 1) = \psi(F_*^e r^{p^e}) = (\psi\cdot r^{p^e-1})(F_*^e r)\notin Q, \] and so $r\notin \Ccore_{\mc D}(Q)$ as desired.
\end{proof}
\begin{cor} \label{c-map-well-defined} Let $(R, \mc D)$ be an $F$-finite pair, with $F$-pure locus $\mc U_{\mc D}$. Then the Cartier core construction induces a well-defined map $\Ccore_{\mc D}:\mc U_{\mc D}\to \mc U_{\mc D}$. \end{cor} \begin{proof} Let $P$ be a prime ideal in $\mc U_{\mc D}$. Then $(R_P,\mc D_P)$ is Frobenius split, so \Cref{f-split-proper} gives that $\Ccore_{\mc D}(P)$ is proper, and thus prime by \Cref{proper-prime}. This gives a map $\Ccore_{\mc D}:\mc U_{\mc D} \to \Spec R$.
Then \Cref{prop:c-ideal-in-primary-original-ideal} says $\Ccore_{\mc D}(P) \subseteq P$, so that $\Ccore_{\mc D}(P)$ is a generization of $P$. Since the $F$-pure locus is open, and open subsets of $\Spec R$ are closed under generization, this means $\Ccore_{\mc D}(P)$ must also be in the $F$-pure locus. \end{proof}
\begin{cor}[{Cf. \cite[Cor.~4.8]{Schwede.10a}}] \label{c-minl-prime} Suppose the pair~$(R, \mc D)$ is $F$-finite and $F$-pure. If $P$ is a minimal prime of $R$, then $\Ccore_{\mc D}(P) = P$. \end{cor} \begin{proof} Since $(R,\mc D)$ is $F$-pure, $\Ccore_{\mc D}(P)\subseteq P$. Since $\Ccore_{\mc D}(P)$ is prime by \Cref{proper-prime} and $P$ is minimal, we must have that $\Ccore_{\mc D}(P)=P$. \end{proof}
\begin{cor}[{Cf. \cite[Prop.~4.5]{Badilla-Cespedes.21}}] \label{c-ideal-in-original-ideal} If the pair~$(R,\mc D)$ is $F$-finite and $F$-pure, then for any ideal $J$ we have $\Ccore_{\mc D}(J)\subseteq J$. \end{cor} \begin{proof} Write $J = Q_1\cap \cdots \cap Q_t$, where the $Q_i$ give a primary decomposition of $J$. Then by \Cref{arbitrary-intersection-c-ideal}, \[ \Ccore_{\mc D}(J) = \Ccore_{\mc D}(Q_1)\cap \cdots \cap \Ccore_{\mc D}(Q_t). \] Since $(R,\mc D)$ is Frobenius split, for every prime $P$ the pair~$(R_P, \mc D_P)$ is also Frobenius split, and thus has $\Ccore_{\mc D}(P)$ proper by \Cref{f-split-proper}. By \Cref{prop:c-ideal-in-primary-original-ideal}, each $\Ccore_{\mc D}(Q_i)\subseteq Q_i$. Intersecting, we get that $\Ccore_{\mc D}(J)\subseteq J$ as desired. \end{proof}
\begin{prop}[{Cf. \cite[Lemma~3.5]{Schwede.10a}}] \label{prop:arbitrary-sum-c-ideals} Let $(R,\mc D)$ be an $F$-finite, $F$-pure pair, and let $\{J_\alpha\}_{\alpha \in \mc A}$ be a collection of ideals with $\Ccore_{\mc D}(J_\alpha)=J_\alpha$ for all $\alpha\in \mc A$. Then we have \begin{align*} \Ccore_{\mc D}\left(\sum_\alpha J_\alpha\right) &= \sum_\alpha \Ccore_{\mc D}(J_\alpha) \end{align*} \end{prop} \begin{proof} Since $J_\beta\subseteq \sum J_\alpha$, we have $\Ccore_{\mc D}(J_\beta)\subseteq \Ccore_{\mc D}\left(\sum_\alpha J_\alpha\right)$ for all $\beta\in \mc A$ by \Cref{containment}, and so \[ \sum_\alpha \Ccore_{\mc D}(J_\alpha)\subseteq \Ccore_{\mc D}\left(\sum_\alpha J_\alpha\right). \]
For the reverse containment, we use our assumption that $\Ccore_{\mc D}(J_\alpha)=J_\alpha$ and \Cref{c-ideal-in-original-ideal} to see that \[ \Ccore_{\mc D}\left(\sum J_\alpha\right) = \Ccore_{\mc D}\left(\sum \Ccore_{\mc D}(J_\alpha)\right) \subseteq \sum \Ccore_{\mc D}(J_\alpha) \] which is our desired opposite inclusion. \end{proof}
\begin{prop} \label{prop:c-ideal-stable} If the pair~$(R,\mc D)$ is $F$-finite and $F$-pure, then for any ideal $J$ in $R$, \[ \Ccore_{\mc D}(J) = \Ccore_{\mc D}\left(\Ccore_{\mc D}(J)\right). \] \end{prop} \begin{proof} By \Cref{c-ideal-in-original-ideal}, we know that $\Ccore_{\mc D}(J)\subseteq J$. Then $\Ccore_{\mc D}\left(\Ccore_{\mc D}(J)\right)\subseteq \Ccore_{\mc D}(J)$ by \Cref{containment}, so it suffices to show the other direction.
Consider $f\notin \Ccore_{\mc D}\left(\Ccore_{\mc D}(J)\right)$. Thus there exists $e>0$ and $\phi\in \mc D_e$ with $\phi(F_*^e f)\notin \Ccore_{\mc D}(J)$. Then there must also exist $e'$ and $\phi'\in \mc D_{e'}$ with $\phi'\left(F_*^{e'}(\phi(F_*^e f))\right)\notin J$. This term can be rewritten as $(\phi'\cdot \phi)(F_*^{e'+e}(f)) = \phi'\left(F_*^{e'}(\phi(F_*^e f))\right)$, and so $f\notin \Ccore_{\mc D}(J)$. \end{proof}
\begin{rmk} If the pair $(R,\mc D)$ is $F$-finite and $F$-pure, then combining \Cref{c-ideal-in-original-ideal}, \Cref{containment}, and \Cref{prop:c-ideal-stable}, shows that $\Ccore_{\mc D}$ is a relative interior operation on ideals of $R$, in the sense of Epstein, R.G., and Vassilev \cite[Def.~2.2]{Epstein+etal.21}.
\end{rmk}
The following result is known when $\mc D = \mc C_R$ \cite{Badilla-Cespedes.21}, and for triples $(R,\Delta, \mf a^t)$ \cite{Schwede.10a}. The proof in the Cartier algebra setting proceeds the same as Badilla-C\'espedes' proof, with a little care needed for the exponents used. \begin{prop}[{Cf. \cite[Rmk.~4.14]{Badilla-Cespedes.21}, \cite[Cor.~3.3]{Schwede.10a}}] \label{c-ideal-radical} If the pair~$(R,\mc D)$ is $F$-finite and $F$-pure, then for any ideal $J$, the Cartier core $\Ccore_{\mc D}(J)$ is radical. \end{prop} \begin{proof} Suppose $r\in \sqrt{\Ccore_{\mc D}(J)}$. Then there exists some $n$ so that $r^{p^n}\in \Ccore_{\mc D}(J)$. Since the pair is $F$-pure, there also exists some $\psi \in \mc D_d$ so that $\psi(F_*^d1)=1$. Take $e = nd$, so that there is $\phi \in \mc D_e$ with $\phi(F_*^e 1)=1$, and so that \Cref{prop:c-ideal-stable} gives $r^{p^e}\in \Ccore_{\mc D}(J) = \Ccore_{\mc D}(\Ccore_{\mc D}(J))$. Then \[ \phi(F_*^e(r^{p^e})) = r\phi(F_*^e 1) = r \in \Ccore_{\mc D}(J).\qedhere \] \end{proof}
The hypothesis that $(R,\mc D)$ be $F$-pure is necessary. Consider $R=k[x]/\langle x^2\rangle$ where $k$ is an $F$-finite field, and let $\mc D =\mc C_R$. This ring $R$ is non-reduced, so can't be $F$-pure. For any ideal $J\subset k[x]$, use $\olin J$ to denote the image of $J$ in $R$. Now using the presentation from \Cref{c-ideal-quo-reg-ring}, we compute \begin{align*} A_e(\olin{\langle x^2\rangle}) &= \olin{\langle x^2 \rangle^{[p^e]}:_{k[x]}\left(\langle x^2\rangle ^{[p^e]}:_{k[x]}\langle x^2 \rangle\right) } = \olin{ \langle x^{2p^e} \rangle :_{k[x]} \langle x^{2p^e-2}\rangle } = \olin{\langle x^{2}\rangle}. \end{align*} Intersecting over all $e$, we see that $\Ccore_R(\olin{\langle x^2\rangle}) =\olin{\langle x^2\rangle}$, a non-radical ideal.
\begin{thm}[{Cf. \cite[Prop.~4.9, Thm.~4.10]{Badilla-Cespedes.21}}] \label{cartier-core=D-compatible}
If the pair~$(R,\mc D)$ is $F$-finite and $F$-pure, then the set of Cartier cores with respect to $\mc D$, i.e., the set \(\left\{ \Ccore_{\mc D}(J) \: | \: J\text{ an ideal of \(R\)} \right\}\), is precisely the set of $\mc D$-compatible ideals. \end{thm} \begin{proof}
An ideal $J$ is $\mc D$-compatible precisely if $\phi(F_*^e(J))\subseteq J$ for all $e$ and for all $\phi \in \mc D_e$, and thus by construction $J$ is $\mc D$-compatible if and only if $J\subseteq \Ccore_{\mc D}(J)$. By \Cref{c-ideal-in-original-ideal}, if the pair~$(R,\mc D)$ is $F$-pure then this is equivalent to having $J=\Ccore_{\mc D}(J)$. This shows that every $\mc D$-compatible ideal is a Cartier core.
Conversely, the Cartier core $\Ccore_{\mc D}(J)$ is $\mc D$-compatible since by \Cref{prop:c-ideal-stable} we have $\Ccore_{\mc D}(J) = \Ccore_{\mc D}(\Ccore_{\mc D}(J))$. \end{proof}
\begin{cor}[{Cf. \cite[Prop.~4.11]{Badilla-Cespedes.21}}] \label{largest-D-compat} If the pair~$(R,\mc D)$ is $F$-finite and $F$-pure and $J$ is an ideal of $R$, then $\Ccore_{\mc D}(J)$ is the largest $\mc D$-compatible ideal contained in $J$. \end{cor} \begin{proof} $\Ccore_{\mc D}(J)$ is $\mc D$-compatible by the previous result, and it is contained in $J$ by \Cref{c-ideal-in-original-ideal}. Now let $J'$ be any $\mc D$-compatible ideal with $J'\subseteq J$. Compatibility means $J'\subseteq \Ccore_{\mc D}(J')$, and \Cref{containment} gives $\Ccore_{\mc D}(J')\subseteq \Ccore_{\mc D}(J)$. Hence $J'\subseteq \Ccore_{\mc D}(J)$, so $\Ccore_{\mc D}(J)$ is indeed the largest such ideal. \end{proof}
The following result, originally due to Schwede \cite[Cor.~5.10]{Schwede.09} and to Kumar and Mehta \cite[Thm.~1.1]{Kumar+Mehta.09}, captures another nice property of the Cartier core map. Recent work of Datta and Tucker \cite[Prop.~3.4.1]{Datta+Tucker.21} provides an alternate proof that uses similar language to the rest of this paper. \begin{prop}[{\cite[Prop.~3.4.1]{Datta+Tucker.21}}] \label{fin-many-C-cores} If $(R,\mc D)$ is an $F$-finite, $F$-pure pair, then there are only finitely many Cartier cores with respect to $\mc D$, i.e., there are only finitely many $\mc D$-compatible ideals. \end{prop}
\begin{rmk} If additionally $R$ is local, one can in fact get concrete bounds on the number of $\mc D$-compatible ideals. Using Theorem~4.2 of \cite{Schwede+Tucker.10} or the argument from Remark~3.4 of \cite{huneke+Watanabe.15}, the number of prime Cartier cores with respect to $\mc D$ of coheight $d$ is bounded above by $\binom{n}{d}$, where $n$ is the embedding dimension of $R$. \end{rmk}
\begin{thm} \label{cts-map} Let $(R,\mc D)$ be an $F$-finite pair, and let $\mc U_{\mc D}$ denote the $F$-pure locus of $(R,\mc D)$. Then the map $\Ccore_{\mc D}:\mc U_{\mc D} \to \mc U_{\mc D}$ is continuous under the Zariski topology. \end{thm} \begin{proof} We will show that the inverse image of the closed set $ V = \bb V(J)\cap \mc U_{\mc D}$ is also closed, where $J$ is an ideal of $R$. Let $K$ be the intersection of all Cartier cores containing $J$ which come from primes, so that \[ K = \bigcap_{\substack{P \in \mc U_{\mc D}\\ \Ccore_{\mc D}(P)\in \mathbb V(J)}} \Ccore_{\mc D}(P). \] Since the set of Cartier cores with respect to $\mc D$ is closed under infinite intersection by \Cref{arbitrary-intersection-c-ideal}, $K = \Ccore_{\mc D}(K)$ is also a Cartier core. We claim that $\Ccore_{\mc D}^{-1}(V) = \mathbb V(K)\cap \mc U_{\mc D}$.
Suppose $P\in \Ccore_{\mc D}^{-1}(V)$. Then since $P\in \mc U_{\mc D}$, we have $\Ccore_{\mc D}(P)\subseteq P$ by \Cref{prop:c-ideal-in-primary-original-ideal}. Since $\Ccore_{\mc D}(P)\in \bb V(J)$,
we have $K\subseteq \Ccore_{\mc D}(P)$ by construction. Thus $K\subseteq P$ and so $\Ccore_{\mc D}^{-1}(V) \subseteq \bb V(K)\cap \mc U_{\mc D}$.
Conversely, if $P\in \bb V(K)\cap \mc U_{\mc D}$, then $K\subseteq P$ and by \Cref{containment}, \[ J\subseteq K = \Ccore_{\mc D}(K) \subseteq \Ccore_{\mc D}(P). \] Thus $\bb V(K)\cap \mc U_{\mc D} \subseteq \Ccore_{\mc D}^{-1}(V)$. \end{proof}
\section{Quotients of Regular Rings} \label{sec:quo-regular-ring} Now that we have seen some abstract properties of the Cartier core map, $\Ccore_{\mc D}$, we shift our focus to actually computing it. In this section we give a concrete description of the Cartier core in the case when $R$ is presented as a quotient of a regular ring and $\mc D$ is the full Cartier algebra $\mc C_R$. We will then use this concrete description to show that the Cartier core commutes with adjoining a variable and with homogenization (in the case that our regular ring is a polynomial ring).
One reason to focus on this case is that regularity of $S$ forces $F_*^eS$ and $\hom_S(F_*^eS, S)$ to be well-behaved, as the following result of Kunz and result of Fedder illustrate.
\begin{thm}[{\cite[Cor.~2.7]{Kunz.69}}] If $R$ is a Noetherian ring of prime characteristic, then $R$ is regular if and only if $F_*R$ is a flat $R$-module. \end{thm}
\begin{lemma}[{\cite[Lemma~1.6]{Fedder.83}}] If $S$ is an $F$-finite regular local ring, then $\hom_S(F_*^eS, S)$ is a free $F_*^eS$-module of rank one. \end{lemma}
Further, Glassbrenner, building on work of Fedder, gives us the following description of the $R$-module structure on maps in the local case.
\begin{lemma}[{Fedder's Lemma \cite[Lemma~2.1]{Glassbrenner.96}}] \label{qr-hom-presentation} Let $S$ be an $F$-finite regular local ring and let $R=S/I$ for some ideal $I$. Then \[ \hom_{R}(F_*^e R, R) \cong F_*^e\left( \frac{I^{[p^e]}:I}{I^{[p^e]}} \right) \] as $R$-modules. \end{lemma}
This description of $\hom_R(F_*^eR, R)$ is the core of Fedder's criterion and of Glassbrenner's criterion.
\begin{prop}[{\cite[Prop.~1.7]{Fedder.83}}] Let $(S,\mf m)$ be an $F$-finite regular local ring of prime characteristic $p$, let $I$ be an ideal of $S$, and set $R = S/I$. Then $R$ is $F$-pure if and only if $(I^{[p]}:I)\not\subseteq \mf m^{[p]}$. \end{prop}
\begin{lemma}[{\cite[Lemma~2.2]{Glassbrenner.96}}] \label{lem:e-split-locus} Let $(S,\mf m)$ be an $F$-finite regular local ring of prime characteristic $p$. Let $I$ be an ideal of $S$. Then the map $S/I \to F_*^e(S/I)$, where $1\mapsto F_*^ec$, splits as an $(S/I)$-module map exactly when $c\notin \mf m^{[p^e]} :(I^{[p^e]}:I)$. \end{lemma}
Since the Cartier core of $J$ is composed precisely of the elements which cannot be split in this manner, the technique of this lemma naturally leads to the following result. For the following, we use $\olin J$ to denote the image of an ideal $J$ in a quotient ring, and similarly $\olin c$ to denote the image of an element $c$.
\begin{thm} \label{c-ideal-quo-reg-ring} Let $S$ be a regular $F$-finite ring, let $I\subseteq J$ be ideals of $S$, and let $R=S/I$. Fix $e\geq 1$, $c\in S$. Then there exists some $\phi \in \hom_R(F_*^eR, R)$ with $\phi(F_*^e\olin c)\notin \olin{J}$ if and only if $c\notin J^{[p^e]}:(I^{[p^e]}:I)$. In particular, \[ A_{e;R}(\olin J) = \olin{J^{[p^e]}:_S (I^{[p^e]}:_S I)} \quad\textrm{ and }\quad C_R(\olin J) = \olin{\bigcap_e J^{[p^e]}:_S (I^{[p^e]}:_S I)}. \] \end{thm} \begin{proof} The representations of $A_e$ and $\Ccore_R$ follow directly from the first statement, so it suffices to prove that $c\notin J^{[p^e]}:(I^{[p^e]}:I)$ if and only if there is some $\phi \in \hom_R(F_*^eR, R)$ with $\phi(F_*^e\olin c)\notin \olin J$.
For our fixed $e$, let $E: \hom_R(F_*^e R, R)\to R$ be the ``evaluation at $c$'' map, so that $E(\phi) = \phi(F_*^e c)$. Our goal is to show $\im(E)\subseteq \olin J$ if and only if $c\in J^{[p^e]}:(I^{[p^e]}:I)$. By the discussion in \Cref{sec:background}, we can view the localization of $E$ as a map $\hom_{R_P}(F_*^e(R_P), R_P)\to R_P$ so that $(\im E)_P \cong \im (E_P)$. Since localization also commutes with Frobenius and with ideal colon, we can without loss of generality assume that $(S,\mf m)$ is local.
Let $\Psi$ be a generator of $\hom_S(F_*^e S, S)$ as an $F_*^e S$ module. By \Cref{qr-hom-presentation}, the maps $\phi\in \hom_R(F_*^eR, R)$ are exactly those maps induced by something of the form $\Psi\circ F_*^e(s)$ where $s\in I^{[p^e]}:I$. Thus \[ \phi(F_*^e(\olin c)) = \olin{(\Psi\circ F_*^es)(F_*^ec)} = \olin{\Psi(F_*^e(sc))} \] and so there exists $\phi$ with $\phi(F_*^e(\olin c))\notin \olin J$ if and only if there exists $s\in I^{[p^e]}:I$ with $\Psi(F_*^e(sc))\notin J$, i.e., if and only if \[ \Psi\left(F_*^e\left( c(I^{[p^e]}:I)\right)\right) = (F_*^ec \cdot \Psi)(I^{[p^e]}:I)\not\subseteq J. \] Using \cite[Lemma~1.6]{Fedder.83}, this occurs if and only if \[ F_*^e(c)\notin (J F_*^eS):(F_*^e(I^{[p^e]}:I)) = F_*^e(J^{[p^e]}):F_*^e(I^{[p^e]}:I). \] Since $S$ is regular, the flat Frobenius commutes with colon and is injective, thus this is equivalent to \[ c\notin J^{[p^e]}:(I^{[p^e]}:I).\qedhere \] \end{proof}
We will frequently move between considering $\Ccore_R(\olin J)$ in $R$ and its lift $\bigcap_{e>0}J^{[p^e]}:(I^{[p^e]}:I)$ in $S$, which we will denote as either $\wt{\Ccore}_R(J)$ or $\wt{\Ccore}_R(\olin J)$. Similarly, we will denote the lift of $A_{e;R}(\olin J)$ as $\wt A_{e;R}(J)$ or $\wt A_{e;R}(\olin J)$.
We now prove results which let us connect Cartier cores of related ideals computed in different, related rings.
\begin{lemma} \label{qr-flat-extension-containment} Let $S_1\to S_2$ be a flat map of regular $F$-finite rings. Consider ideals $I\subseteq J_1$ in $S_1$, and ideal $J_2$ in $S_2$ contracting to $J_1$. Let $R_1 = S_1/I$ and $R_2 = S_2/IS_2$. Then \[ C_{R_1}(\olin{J_1})R_2 \subseteq C_{R_2}(\olin{J_2}). \] \end{lemma} \begin{proof} Finite intersections always commute with flat base change. Thus for any sequence of ideals $\{K_e\}_{e\in \mathbb{N}}$ and for any $n$, \[ \left(\bigcap_{e=1}^\infty K_e\right) S_2 \subseteq \left(\bigcap_{e=1}^n K_e\right) S_2 = \bigcap_{e=1}^n (K_eS_2) \] and in particular we must have $\left(\bigcap_{e=1}^\infty K_e\right) S_2 \subseteq \bigcap_{e=1}^\infty (K_eS_2)$. Colon commutes with flat base change when the ideals are finitely generated \cite[Thm.~7.4]{Matsumura.89}. Thus \begin{align*} \left(\bigcap_{e\geq 1}J_1^{[p^e]}:(I^{[p^e]}:I)\right) S_2 &\subseteq \bigcap_{e\geq 1} \left( (J_1S_2)^{[p^e]}:((IS_2)^{[p^e]}:IS_2) \right)
\subseteq \bigcap_{e\geq 1} \left( J_2^{[p^e]}:((IS_2)^{[p^e]}:IS_2) \right), \end{align*} which by using \Cref{c-ideal-quo-reg-ring} to pass to the quotient gives \[ C_{R_1}(\olin{J_1})R_2 \subseteq C_{R_2}(\olin{J_2}).\qedhere \] \end{proof} In the case of a general flat map, even a general faithfully flat map, containment is the best we can do. For example, consider $S_1=k[x^p]$ and $S_2 = k[x]$ where $k$ is a perfect field. The inclusion of $S_1$ into $S_2$ is faithfully flat since it corresponds to the Frobenius on the regular ring $k[x]$. Now consider $I=J_1=\langle x^p\rangle\subset S_1$ and $J_2 = \langle x \rangle \subset S_2$. Then $R_1=S_1/I\cong k$, which is a field and hence Frobenius split, so $C_{R_1}(\olin J_1) = \olin J_1$. But $R_2 = S_2/IS_2 = k[x]/\langle x^p\rangle$ is not reduced, thus cannot be Frobenius split. Since $\olin J_2$ is a prime ideal, this means $C_{R_2}(\olin J_2)=R_2$.
However, it turns out that in the case of adjoining a variable, we can get a stronger result.
\begin{prop} \label{qr-add-var} Let $R$ be a quotient of a regular $F$-finite ring, let $J$ be an ideal of $R$, and let $J'$ be an ideal of $R[x]$ such that $JR[x] \subseteq J' \subseteq JR[x]+\langle x \rangle$. Then \[ C_{R}(J)R[x] = C_{R[x]}(J') \quad \textrm{ and } \quad C_{R[x]}(J')\cap R = C_R(J). \] \end{prop} \begin{proof} By \Cref{containment}, \[ C_{R[x]}(JR[x]) \subseteq C_{R[x]}(J') \subseteq C_{R[x]}(JR[x]+\langle x\rangle). \] Our first step will be to show \[ C_{R[x]}(JR[x]) \supseteq C_{R[x]}(JR[x] + \langle x\rangle), \] which will then give us $C_{R[x]}(JR[x])=C_{R[x]}(J') = C_{R[x]}(JR[x] + \langle x \rangle)$.
To do so, note that by assumption we can write $R=S/I$ where $S$ is a regular $F$-finite ring, and so we can also write $R[x] = S[x]/IS[x]$. We use $\wt{\ }$ to denote lifting an ideal from $R$ or $R[x]$ to $S$ or $S[x]$, as appropriate. Consider $S[x]$ to be $\mathbb{N}$-graded by $x$. Since $\wt{JR[x]+\langle x \rangle}$, the lift of $JR[x]+\langle x\rangle$ to $S[x]$, is homogeneous, as is $IS[x]$, our lift of the Cartier core \[ \wt{\Ccore}_{R[x]}(JR[x]+\langle x\rangle) = \bigcap_{e>0} (\wt{JR[x]+\langle x\rangle})^{[q]}:(IS[x]^{[q]}:IS[x]), \quad\textrm{where } q=p^e, \] is also homogeneous. Consider some homogeneous $g$ in this lift of the Cartier core. Ideal colon commutes with flat maps, and $S\to S[x]$ and the Frobenius are both flat. Thus for every $q=p^e$ we have \[ IS[x]^{[q]}:IS[x] = (I^{[q]}:I)S[x]. \] Since $g\in \wt{A}_{e;R[x]}(JR[x]+\langle x\rangle)$ for every $e$, we must have $g(I^{[q]}:I)\subseteq (\wt{JR[x]+\langle x\rangle})^{[q]}$. However, any element of $(\wt{JR[x]+\langle x\rangle})^{[q]}$ of degree less than $q$ must be expressible in terms of elements of $\wt{JR[x]}^{[q]}$. In particular, if $q>\deg g$ then $g(I^{[q]}:I) \subseteq \wt{JR[x]}^{[q]}$. Thus for $e\gg 0$, we have \[ g\in \wt{JR[x]}^{[q]}:(IS[x]^{[q]}:IS[x]) = \wt{A}_{e;R[x]}(JR[x]). \]
By \cite[Prop.~4.15]{Badilla-Cespedes.21}, since $\Ccore_{R[x]}(JR[x]) = \bigcap_{e\gg0}A_{e;R[x]}(JR[x])$, this tells us that \[ \Ccore_{R[x]}(JR[x]+\langle x \rangle) \subseteq \Ccore_{R[x]}(JR[x]) \] as desired.
Now we have shown $C_{R[x]}(JR[x])=C_{R[x]}(J')$, and it suffices to show $C_R(J)R[x]=C_{R[x]}(JR[x])$. To do so, we will show that adjoining a variable commutes with infinite intersection. Consider an arbitrary ideal $K = \bigcap_\alpha K_\alpha$ in $S$. As a set, each $K_\alpha S[x]$ consists of the polynomials with coefficients in $K_\alpha$, and so the polynomials in $\bigcap_\alpha K_\alpha S[x]$ are those with coefficients in $K_\alpha$ for every $\alpha$, which is precisely $KS[x]$, as desired.
This lets us repeat the argument in \Cref{qr-flat-extension-containment} but with equalities, and thus \[ C_R(J)R[x] = C_{R[x]}(JR[x]) = C_{R[x]}(J') \] as desired. The contraction result then follows directly from the fact that adjoining a variable is faithfully flat, so that \[ C_R(J) = C_R(J)R[x] \cap R = C_{R[x]}(J')\cap R. \qedhere \]
\end{proof}
If $R$ is a quotient of a polynomial ring by a homogeneous ideal, we can also look at how the Cartier core behaves under homogenization. More concretely, take $R=S/I$ for $S=k[x_1,\ldots, x_d]$ and $I$ a homogeneous ideal of $S$, so that $R$ is $\mathbb{N}$-graded. If $f\in R$, we let $f^h$ denote the minimal homogenization of $f$ in $R[t]$, so that \[ f^h = t^{\deg f} f\left(\frac{x_1}{t}, \ldots, \frac{x_d}{t}\right). \]
If $J$ is an ideal of $R$, we define its homogenization in $R[t]$ to be $J^h = \langle f^h \: | \: f\in J\rangle$.
For any degree-preserving lift of $f$ to $S$, there is a corresponding lift of $f^h$ to $S[t]$ so that the lift of the homogenization is the homogenization of the lift. This means we can freely consider a given homogenization to live either in $R[t]$ or in $S[t]$. Further, the ideals $\wt{(J^h)}$ and $(\wt J)^h$ are the same: $\wt{(J^h)}$ is generated by the lifts of the homogenizations of elements of $J$, and $(\wt J)^h$ is generated by homogenizations of lifts of elements of $J$.
There is also a corresponding dehomogenization map $\delta:R[t]\to R$ defined by $\delta(t)=1$, which ensures that $\delta(f^h) = f$.
We recall the following straightforward facts about homogenization. \begin{lemma} Let $R$ be a quotient of a polynomial ring by a homogeneous ideal. Let $I,J$ be ideals of $R$, and $\{I_\alpha\}$ a family of ideals. Let $f$ be an element of $R$. Then the following statements all hold. \begin{itemize} \item $f\in I$ if and only if $f^h\in I^h$. \item $(I:J)^h=I^h:J^h$ and $\left( \bigcap I_\alpha\right)^h = \bigcap (I_\alpha^h)$. \item $(I^{h})^{[p^e]} = (I^{[p^e]})^h$. \end{itemize} \end{lemma} \begin{proof} For the first two bullets, see Problems~3.15 and~3.17 in \cite{Ene+Herzog.12}. For the third bullet, use Proposition~3.15 of \cite{Ene+Herzog.12} and Theorem~6.2 of \cite{Herzog+Trung.92}. \end{proof}
Using these facts, we will prove the following lemma. \begin{lemma} \label{qr-homogenization} Let $R=S/I$ where $S$ is a polynomial ring over an $F$-finite field and $I$ is a homogeneous ideal. Let $J$ be an ideal of $R$. Then \[ \left(C_R(J)\right)^h = C_{R[t]}(J^h) \quad\textrm{ and }\quad C_R(J) = \delta\left( C_{R[t]}(J^h)\right) \] \end{lemma} \begin{proof} If we lift to $S[t]$ using \Cref{c-ideal-quo-reg-ring} and the above discussion on lifting and homogenization, then \begin{align*} \wt{\left(C_R(J)\right)^h} &= \left(\wt C_R(J)\right)^h \\
&= \left(\bigcap_{e>0} (\wt J)^{[q]}:(I^{[q]}:I)\right)^h \\
&= \bigcap_{e>0} \left((\wt{J}^h)^{[q]}:((I^h)^{[q]}:I^h) \right) \\
&= \bigcap_{e>0} \left((\wt{J^h})^{[q]}:(I^{[q]}:I) \right) \\
&= \wt{C}_{R[t]}(J^h) \end{align*} and so contracting back to $R[t]$ via \Cref{c-ideal-quo-reg-ring}, \[ (C_R(J))^h = C_{R[t]}(J^h). \]
The last statement follows directly from dehomogenizing each side of the equation. \end{proof}
\section{Stanley-Reisner Rings} \label{sec:stanley-reisner} A ring $R$ is a \emph{Stanley-Reisner ring} if it can be written as $R = S/I$, where $S$ is a polynomial ring and $I$ is a square-free monomial ideal.
The following theorem gives a complete description of the Cartier core map for $\Spec R$ where $R$ is a Stanley-Reisner ring.
\begin{thm} \label{stanley-reisner-c-map} Let $R$ be a Stanley-Reisner ring over a field that has prime characteristic and is $F$-finite. Let $Q$ be any prime ideal. Then \[ C_R(Q) = \sum_{\substack{P \in \Min(R) \\ P\subseteq Q}} P. \] In particular, the set of prime Cartier cores of $R$, i.e., the set of generic points of $F$-pure centers of $R$, is the set of sums of minimal primes. \end{thm}
This theorem extends some earlier results. Aberbach and Enescu showed that the splitting prime of a Stanley-Reisner ring, which is its largest proper uniformly $F$-compatible ideal, is the sum of the minimal primes \cite[Prop~4.10]{Aberbach+Enescu.05}. For the reader's convenience, we will reprove this in our proof of \Cref{stanley-reisner-c-map}. At the other extreme, Vassilev showed that the test ideal of a Stanley-Reisner ring, which is its smallest non-zero uniformly $F$-compatible ideal, is $\sum_{i=1}^t \bigcap_{j\neq i} P_j$ where $P_1,\ldots, P_t$ are the minimal primes of $R$ \cite[Thm.~3.7]{Vassilev.98}.
In a related but different direction, for a specific choice of $\phi:F_*^e R\to R$, Enescu and Ilioaea showed that the $\phi$-compatible primes of $R$ are precisely the prime monomial ideals which \emph{contain} a minimal prime of $R$. They used this to give a combinatorial description of the test ideal of the pair~$(R,\phi)$ \cite[Prop.~3.9, Prop.~3.10]{Enescu+Ilioaea.20}.
Badilla-C\'espedes showed that if $P'$ is a prime monomial ideal, then $\Ccore(P')$ as well as each $A_e(P')$ is also a monomial ideal, and more explicitly that $A_e(P') = (P')^{[p^e]}+\Ccore(P')$ in this setting \cite[Lemma~4.16,Prop~4.17]{Badilla-Cespedes.21}. Meanwhile, \`Alvarez~Montaner, Boix, and Zarzuela gave a concrete description of $I^{[p^e]}:_S I$ in terms of the minimal primes of $I$, which could be used to explicitly compute the Cartier contractions for any ideal $J$ \cite[Prop~3.2]{AlvarezMontaner+etal.12}.
\begin{proof}[Proof of \Cref{stanley-reisner-c-map}] Our proof will proceed as follows: First we will reduce to the case where every minimal prime is contained in $Q$. Then we will homogenize and trap $Q^h$ between a sum of minimal primes and the homogeneous maximal ideal, and use \Cref{containment} and the convenient form of monomial primes to get our desired equality.
Let $\wt{\ }$ denote the lift of any ideal to $S$, let $I' = \displaystyle\bigcap_{P\in \Min(R),\ P \subseteq Q} \wt P$ be the intersection of the minimal primes contained in $Q$, and let $R'=S/I'$. Then \[ R_{Q} \cong S_{\wt Q}/I_{\wt Q} = S_{\wt Q}/I'_{\wt Q} \cong R'_{Q} \] and so by \Cref{localization}, \[ C_R(Q)R_{Q} = C_{R_{Q}}(Q) = C_{R'_{Q}}(Q) = C_{R'}(Q)R'_{Q}. \] Stanley-Reisner rings are $F$-pure \cite[Prop.~5.8]{Hochster+Roberts.76}, and so $C_R(Q)\subseteq Q$ by \Cref{c-ideal-in-original-ideal}, and thus when we lift back to $S$ using \Cref{c-ideal-quo-reg-ring}, we see \[ \bigcap_{e>0} \wt Q^{[p^e]}: (I^{[p^e]}: I) = \bigcap_{e>0} \wt Q^{[p^e]}:(I'^{[p^e]}:I'). \] Thus we can use $I'$ as our new $I$, and so we can assume $P\subseteq Q$ for all minimal primes $P$.
Relabel the variables so that $\sum_{P\in \Min(R)} P = \langle x_1,\ldots, x_c\rangle$ and define $A = k[x_1,\ldots, x_c]/I$, so that $R = A[x_{c+1}, \ldots, x_{d}]$. Now we homogenize, so $Q^h \subseteq \mf m$ where $\mf m$ is the homogeneous maximal ideal in $S[t]$. Then \Cref{containment} tells us \[ C_{R[t]}\left(\sum_{P\in \Min(R)} P^h\right) \subseteq C_{R[t]}(Q^h) \subseteq C_{R[t]}(\olin{\mf m}). \] Each minimal prime $P$ of $R$ remains a minimal prime of $R[t]$ after homogenizing, so \Cref{c-minl-prime} says $C_{R[t]}(P^h) = P^h$, and \Cref{prop:arbitrary-sum-c-ideals} then says that their sum is also preserved by the Cartier core map. Applying \Cref{qr-add-var} to $\olin{\mf m}$, we get \[ \langle x_1,\ldots, x_c\rangle R[t] = C_{A}(P_1+\cdots+P_t)A[x_{c+1},\ldots, x_d,t] = C_{R[t]}(\olin{\mf m}). \]
Thus \[ \langle x_1,\ldots, x_c\rangle = \Ccore_{R[t]}\left(\sum_{P\in \Min(R)} P^h \right) \subseteq C_{R[t]}(Q^h) \subseteq C_{R[t]}(\olin{\mf m}) = \langle x_1,\ldots, x_c\rangle \] and by \Cref{qr-homogenization}, \[ (C_{R}(Q))^h = C_{R[t]}(Q^h) = \langle x_1,\ldots, x_c\rangle. \] Dehomogenizing the homogenization always gives back the original ideal, and so \[ C_R(Q) = \langle x_1,\ldots, x_c\rangle = \sum_{P\in \Min(R)} P. \]
For the last statement of the theorem, note that since each minimal prime of $R$ corresponds to an ideal of $S$ which is generated by variables, any sum of minimal primes is also prime, and thus is fixed by the Cartier core map. \end{proof}
Since taking the Cartier core commutes with intersection, \Cref{stanley-reisner-c-map} immediately gives a formula for the Cartier core of any radical ideal in terms of the Cartier cores of its minimal primes. The following corollary instead gives a formula for the Cartier core of an arbitrary ideal which is more analogous to the previous one.
\begin{cor} \label{stanley-reisner-general-c-map} Let $R$ be a Stanley-Reisner ring over a field that has prime characteristic and is $F$-finite. Let $J$ be any ideal. Then \[ \Ccore_R(J) = \sum_{\substack{\mc Q\subset \Min(R) \\ \big({\bigcap\limits_{P\in \mc Q}} P\big) \subset J }}
\left( \bigcap_{P\in \mc Q} P \right). \] \end{cor} \begin{proof}
First, we will show our desired formula gives an ideal contained in $\Ccore_R(J)$. The restriction on the $P$'s appearing ensures that the resulting ideal is contained in $J$. Further, each $P$ appearing is minimal, so $P= \Ccore_R(P)$ by \Cref{c-minl-prime}. Using Propositions~\ref{arbitrary-intersection-c-ideal} and~\ref{prop:arbitrary-sum-c-ideals} (our intersection and sum results) to apply $\Ccore_R$ to the formula and then using \Cref{containment} to preserve the containment gives \[ \sum_{\substack{\mc Q\subset \Min(R) \\ \big({\bigcap\limits_{P\in \mc Q}} P\big) \subset J }}
\left( \bigcap_{P\in \mc Q} P \right)
=\Ccore_R\left( \sum_{\substack{\mc Q\subset \Min(R) \\ \big({\bigcap\limits_{P\in \mc Q}} P\big) \subset J }}
\left( \bigcap_{P\in \mc Q} P \right)\right)
\subset \Ccore_R(J). \]
To show equality, we will show that summing over a specific smaller subset in fact already yields $\Ccore_R(J)$, and so the larger sum above must yield $\Ccore_R(J)$ as well. Since $R$ is Frobenius split, $\Ccore_R(J)$ is radical by \Cref{c-ideal-radical}, so we can write $\Ccore_R(J) = \bigcap_{i=1}^n Q_i$ as the intersection of its minimal primes. By the same argument as in the proof of \Cref{cts-map}, applying \Cref{proper-prime}, \Cref{c-ideal-in-original-ideal}, and \Cref{prop:c-ideal-stable} shows that $\Ccore_R(Q_i)=Q_i$ for each $i$.
Now since the Cartier core commutes with intersection, we use \Cref{stanley-reisner-c-map} to see \[ \Ccore_R(J) = \bigcap_i \Ccore_R(Q_i) = \bigcap_i \left( \sum_{\substack{P\in \Min(R)\\P\subset Q_i}} P\right). \] Writing $R=S/I$ as a quotient of a polynomial ring and lifting back up to $S$, this says that the lift of $\Ccore_R(J)$ is an intersection of sums of monomial ideals. Sum and intersection of monomial ideals commute \cite[Problem~1.17]{Ene+Herzog.12},
and so passing back to the quotient gives \[ \Ccore_R(J) = \sum_{\substack{P_1,\ldots, P_n \in \Min(R) \\ P_i \subset Q_i }} \left(\bigcap_{i=1}^n P_i\right). \] For each possibility for $P_1,\ldots, P_n$ in the above sum, we have $\bigcap_{i=1}^n P_i \subset \Ccore_R(J) \subset J$, and so this sum is a subset of our desired formula, which thus must be equal to $\Ccore_R(J)$ as well. \end{proof}
\end{document} | arXiv | {
"id": "2203.01911.tex",
"language_detection_score": 0.7412700057029724,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\maketitle
\pdfbookmark[0]{\contentsname}{Contents}
\begin{abstract} In dimension $N\geq 5$ and given $0< s<4$ and $\gamma\in\rr$, we study the existence of nontrivial weak solutions for the doubly critical problem
$$\Delta^2 u-\frac{\gamma}{|x|^4}u= |u|^{2^\star_0-2}u+\frac{|u|^{\crit-2}u}{|x|^s}\hbox{ in }\rr_+^N,\; u=\Delta u=0\hbox{ on }\partial \rr_+^N,$$ where $\crit:=\frac{2(N-s)}{N-4}$ is the critical Hardy–Sobolev exponent. For $N\geq 8$ and $0< \gg<\frac{(N^2-4)^2}{16}$, we show the existence of nontrivial solution using the Mountain-Pass theorem by Ambrosetti-Rabinowitz. The method used is based on the existence of extremals for certain Hardy-Sobolev embeddings that we prove in this paper.
\end{abstract} \section{Introduction} Let $\rr_+^N:=\{x\in \rr^N \bb{ such that } x_1>0\}$ be the half-space domain, $N\geq 5$ and $0\leq s<4$. In this work, we establish the existence of nontrivial weak solutions for the following doubly critical problem: \begin{equation}\label{eq:doublehardybi}
\left\{\begin{array}{ll}
	\Delta^2 u -\frac{\gg}{|x|^4} u=|u|^{2^\star_0-2}u+\frac{|u|^{\crit-2}u}{|x|^s} &\hbox{ in } \rr_+^N, \\
u=\Delta u = 0 &\hbox{ on } \partial \rr_+^N,
\end{array}\right. \end{equation} where $\Delta=\hbox{div}\left( \nabla\right)$ is the Laplacian, $\gg\in \rr$ and $\crit:=\frac{2(N-s)}{N-4}$ is the critical Hardy–Sobolev exponent. In order to write a variational formulation, the relevant space is the following: given $\Omega$ be a smooth domain in $\rr^N$, define \begin{equation*}
\tilde{H}_0(\Omega)=\hbox{ completion of }\{u\in C^2_c(\overline{\Omega})\hbox{ s.t. }u_{|\partial\Omega}\equiv 0\}\hbox{ for the norm }u\mapsto \Vert\Delta u\Vert_2. \end{equation*}
\noindent Note that $\tilde{H}_0(\rn)$ is the usual Beppo-Levi space $D^{2,2}(\rn)$. We say that $u\in \H$ is a weak solution of \eqref{eq:doublehardybi}, if \begin{eqnarray*}
\int_{\e} \Delta u \Delta \varphi \, dx-\gg\int_{\e}\frac{u\varphi}{|x|^4}\, dx=\int_{\e} \left|u \right|^{\crito-2}u\varphi\, dx+\int_{\e}\frac{\left|u\right|^{\crit-2}u\varphi}{ |x|^s}\,dx, \end{eqnarray*}
for all $\varphi\in \H$. For any $u\in \H$, Sobolev's embedding yields $u\in L^{\crito}(\e)\cap L^{\crit}(\e, |x|^{-s})$ (see \eqref{ineq:sobo} below), and therefore the definition of weak solution makes sense.\par
\noindent The existence of weak solution to \eqref{eq:doublehardybi} on $\rr^N$ has been studied by Filippucci-Pucci-Robert \cite{FPR} for the p--Laplacian. Equations like \eqref{eq:doublehardybi} have been studied for the Fractional Laplacian (see Ghoussoub-Shakerian \cite{GS}) and the bi-laplacian in $\rn$ (see Bhakta \cites{B1,B2} and Bhakta-Musina \cite{BM}).\par
\noindent In the present paper, we tackle this type of nonlinear singular problem on $\rr_+^N$ when $0\in \partial \rr_+^N$. The case $0\in\partial\Omega$, where $\Omega$ is a domain of $\rr^N$ $(N\geq 3)$, for equations like \eqref{eq:doublehardybi} with the Laplace operator $-\Delta$ and a single critical nonlinearity, was initiated by Ghoussoub–Kang \cite{GK} and studied by Chern-Lin \cite{CL} and Ghoussoub-Robert \cites{GRGAFA,GR} when $\Omega$ is a smooth domain. For non-smooth domains modeled on cones, we refer to the more recent works of Cheikh-Ali \cites{HCA1,HCA2}.\par
\noindent Our main result is the following: \begin{theo}\label{theo2}
	Let $N\geq 8$, $0<s<4$ and $0<\gg <\frac{(N^2-4)^2}{16}$. Then, there exists a nontrivial weak solution of \eqref{eq:doublehardybi}. \end{theo} Let us discuss the hypotheses of the Theorem. Our problem depends on the constant $\gg\in \rr$. In the sequel, given an arbitrary domain $\Omega\subset \rn$, $N\geq 5$, we define the Hardy-Rellich constant
\begin{equation}\label{def:hardy:X}
\gg_H(\Omega,X):=\inf\left\lbrace \frac{ \int_{\Omega}\left| \Delta u\right| ^2\, dx}{\int_{\Omega}\frac{u^2}{|x|^4}\, dx}; u\in X\backslash\{0\} \right\rbrace,
\end{equation} for a suitable space $X$. There are several references around this constant. A first version of this Hardy inequality has been introduced by Rellich in 1953 \cites{R1, R2} (see also Mitidieri \cite{EM}), and reads \begin{equation}\label{ineq:hardy:rn} \gg_{H}(\rr^N,C^2_c(\rn))= \frac{N^2(N-4)^2}{16}. \end{equation} As a consequence, for $\Omega$ be a bounded domain in $\rr^N$ with $0\in \Omega$, Perez-Llamos-Primo \cite{PP} proved that $$\gg_{H}(\Omega,H^{2}(\Omega)\cap H_0^1(\Omega))= \frac{N^2(N-4)^2}{16}.$$ In particular, the value of the constant is independent of the domain as long as $0$ is an interior point.
\noindent The situation is different when $0\in\partial\Omega$. Let us consider cones. For any regular domain $\Sigma$ in the unit sphere $\mathbb{S}^{N-1}$, we define the cone
$$C_{\Sigma}:=\{r\sigma \, | \,r>0, \sigma \in \Sigma\}.$$ Caldiroli-Musina \cite{CM} proved that $$\gg_{H}(C_{\Sigma},X_0(C_\Sigma)) =\hbox{dist}\left(-\frac{N(N-4)}{4}, \Lambda(\Sigma)\right)^2,$$ where
$$X_0(\Omega):=\{u\in C^2(\overline{\Omega})\cap C^2_c(\rn\setminus\{0\})\hbox{ s.t. }u_{|\partial\Omega}\equiv 0\}$$ and $\Lambda(\Sigma)$ denotes the spectrum of the Laplace–Beltrami operator on $\Sigma$. Here and in what follows, we write $\gg_N:=\frac{N(N-4)}{4}$. For instance:
\begin{itemize}
\item If $\Sigma=\mathbb{S}^{N-1}$, then $C_{\Sigma}= \rr^N\backslash \{0\}$. Therefore, we have
\begin{align*}
\gg_{H}(\rr^N\backslash \{0\},X_0(\rr^N\backslash \{0\})) &=\min_{k\in\{0,1,...\}}\left|\gg_N+k(N-2+k) \right|^2=\frac{N^2(N-4)^2}{16}.
\end{align*}
\item If $\Sigma$ is the half-sphere $S^{N-1}_+$, then $C_{\Sigma}=\e$. Therefore, we have
\begin{align}\label{eq:hardyinC2}
\gg_{H}(\e,X_0(\e)) =\min_{k\in\{1,2,...\}}\left|\gg_N+k(N-2+k) \right|^2=\frac{(N^2-4)^2}{16}.
\end{align}
\end{itemize} In the sequel, we write for convenience $$\gg_{H,+}:=\gg_{H}(\e,\H).$$ In order to prove Theorem \ref{theo2}, we will prove in Section \ref{sec:HardyRellich} that
$$\gg_{H,+} =\gg_{H}(\e,X_0(\e))=\frac{(N^2-4)^2}{16}.$$ In order to tackle the nonlinear problem \eqref{eq:doublehardybi}, let us consider the Sobolev inequality \begin{equation}\label{ineq:sobo}
\left(\int_{\rn}\frac{|u|^{\crit}}{|x|^s}\, dx\right)^{\frac{2}{\crit}}\leq C\int_{\rn}|\Delta u|^2\, dx\hbox{ for all }u\in C^\infty_c(\rn). \end{equation}
\noindent Let us fix a domain $\Omega\subset\rn$. Interpolating the Hardy inequality \eqref{def:hardy:X} and the Sobolev inequality \eqref{ineq:sobo} and using that $X_0(\Omega)\subset C^\infty_c(\rn)$, given $s\in [0,4]$, we get the existence of $C(N,s,\Omega)>0$ such that
\begin{equation}\label{eq:hardys}
\left( \int_{\Omega}|x|^{-s}|u|^{\crit}\,dx\right)^{\frac{2}{\crit}} \leq C(N,s,\Omega) \int_{\Omega}\left| \Delta u\right|^2\, dx \bb{ for any } u\in C^\infty_c(\Omega)
\end{equation} Using again \eqref{def:hardy:X}, for any $s\in [0,4]$ and any $\gamma<\gamma_H(\Omega, \tilde{H}_0(\Omega))$, we get the Hardy-Sobolev inequality
\begin{equation}\label{ineq:hardysobolev:omega}
\left( \int_{\Omega}|x|^{-s}|u|^{\crit}\,dx\right)^{\frac{2}{\crit}} \leq C\, \int_{\rr^N}\left( \left| \Delta u\right|^2-\gamma\frac{u^2}{|x|^4}\right) \, dx \bb{ for all }u\in \tilde{H}_0(\Omega).
\end{equation}
Let us define \begin{equation}\label{eq:qgammas}
Q_{\gamma, s}(\Omega)=\inf\Big\{I_{\gamma, s}^{\Omega}(u)/u\in \tilde{H}_0(\Omega)\backslash\{0\}\Big\}, \hbox{ with }I_{\gamma, s}^{\Omega}(u):=\frac{\int_{\Omega}\left( \left| \Delta u\right| ^2 -\gamma\frac{u^2}{|x|^4}\right) \,dx}{\left(\int_{\Omega}\frac{|u|^{\crit}}{|x|^s}\, dx\right)^{\frac{2}{\crit}}}. \end{equation}
In order to prove the existence of weak solutions for \eqref{eq:doublehardybi}, we need extremals for the best constant $Q_{\gg,s}(\e)$ in \eqref{eq:qgammas}, that is $u\in\H\setminus\{0\}$ such that $I_{\gamma, s}^{\e}(u)=Q_{\gg,s}(\e)$. If $u\in \H \backslash\{0\}$ is an extremal for $Q_{\gamma, s}(\e)>0$, then, up to a constant factor, $u$ is a solution to the following Euler-Lagrange equation: \begin{equation}\label{eq:hardybi}
\left\{\begin{array}{cc}
\Delta^2 u -\frac{\gg}{|x|^4} u=\frac{|u|^{\crit-2}u}{|x|^s} &\hbox{ in } \rr_+^N, \\
u=\Delta u = 0 &\hbox{ on } \partial \rr_+^N.
\end{array}\right. \end{equation} The existence of extremals is mostly ruled by the following Theorem that we prove in Section \ref{sec:extr}: \begin{theo}\label{theo1}
For $s\in [0,4)$ and $\gg < \gg_{H,+}$, we have that
\begin{itemize}
\item[(a)] If $\{s>0\}$ or $\{ s=0, \gg>0, N\geq 8\}$, then there are extremals for $Q_{\gg,s}(\rr_+^N)$.
\item[(b)] If $\{s=0\bb{ and } \gg\leq 0\}$, there are no extremals for $Q_{\gg,s}(\rr_+^N)$.
\item[(c)] If there are no extremals for $Q_{\gg,0}(\e)$, then $Q_{\gg,0}(\e)=S_N$,
\end{itemize} where \begin{equation}\label{eq:bestconstantsoblevrn}
S_N:=\inf_{u\in \tilde{H}_0(\rn)\backslash\{0\}} \frac{\int_{\rn}\left| \Delta u\right| ^2\, dx}{\left(\int_{\rn}|u|^{\crito}\, dx \right)^{\frac{2}{\crito}}}. \end{equation} \end{theo} \noindent The remaining case, that is $\{s=0\, ,\,\gg>0\hbox{, and }N=5,6,7\}$, is not clear. This limitation is due to the lack of localization of the $L^2-$norm in the computation \eqref{eq:i1ep}.
The study of fourth-order Hardy-Sobolev problem with a singularity on the boundary of a smooth domain is the object of the work \cite{HCA3}.\par
\noindent With all these elements, we will construct weak solutions for the doubly critical problem \eqref{eq:doublehardybi} by finding critical points of the corresponding functional on $\H$. The method to obtain these critical points is the Mountain-Pass Theorem of Ambrosetti and Rabinowitz. Since \eqref{eq:doublehardybi} is invariant under the conformal one-parameter transformation group: $$\left\{\begin{array}{cccc}
T_r: & \H &\to & \H \\
 & u& \mapsto & T_r[u]:=\{x\mapsto r^{\frac{N-4}{2}}u(rx)\} \end{array}\right\}, \bb{ where } r>0,$$ then the Mountain-Pass theorem will not yield critical points, but only Palais-Smale sequences. We will use the strategy of Filippucci-Pucci-Robert \cite{FPR}. As in \cite{FPR}, the main difficulty will be the asymptotic competition between the energy carried by the two critical nonlinearities. Hence, the crucial point here is to balance the competition to avoid the domination of one term over another. Otherwise, there is vanishing of the weakest one, and we get a solution for the same equation but with only one critical nonlinearity. To deal with this problem, we will choose a Palais-Smale sequence at a minimax energy level. In such a way, after a careful analysis of concentration, we will show that there is a balance between the energies of the two nonlinearities mentioned above, and therefore none can dominate the other. This will yield a solution to \eqref{eq:doublehardybi}.
\section{Profile of solutions and the value of the Hardy--Rellich constant $\gg_{H,+}$}\label{sec:HardyRellich} As a first result, we determine the value of the Hardy--Rellich constant $\gg_{H,+}:=\gg_{H}\left( \e,\H\right)$ when $0\in \partial \rr_+^N$.
\begin{lem}\label{lemma:extraonhalfspace}
We have that
\begin{eqnarray}\label{eq:lemmaext}
\gg_{H,+}=\gg_{H}\left( \e,X_0(\rn_+)\right)=\frac{(N^2-4)^2}{16}.
\end{eqnarray} \end{lem}
\noindent \textit{Proof of Lemma \ref{lemma:extraonhalfspace}:} We prove the Lemma in three steps: the first two establish the inequality, and the third shows that the constant is optimal.
\begin{step}\label{step1:lemma1}
We claim that
\begin{align}\label{eq:inversHS}
		\int_{\rr_+^N}\left| \Delta v\right|^2\, dx \geq \frac{(N^2-4)^2}{16}\, \int_{\rr_+^N}\frac{v^2}{|x|^4}\, dx \bb{ for all } v\in C^{2}_c(\overline{\rr_+^N})\hbox{ s.t. }v_{|\partial\rn_+}=0.
	\end{align} \end{step} \noindent{\it Proof of Step \ref{step1:lemma1}:} Fix $v\in C^{2}_c(\overline{\rr_+^N})$ such that $v_{|\partial\rn_+}=0$. We choose $\eta\in C^\infty(\rr^N)$
such that $\eta(x)_{|B_1(0)}\equiv 0$, $\eta(x)_{|B_2(0)^c}\equiv 1$ and $0\leq \eta\leq 1$. For $\ep>0$, we set $v_\ep(x):=\eta_\ep(x) v(x) , \bb{ where } \eta_\ep(x):=\eta(\frac{x}{\ep}) \bb{ for all } x\in\rr^N.$ We have that $v_\ep\in X_0(\rn_+)$. It follows from Caldiroli-Musina \cite{CM} (see \eqref{eq:hardyinC2}) that \begin{align}\label{eq:inversHSep}
\int_{\rr_+^N}\left|\Delta v_\ep\right|^2\, dx \geq \frac{(N^2-4)^2}{16}\, \int_{\rr_+^N}\frac{v_\ep^2}{|x|^4}\, dx. \end{align}
\noindent\textit{Step \ref{step1:lemma1}.1} We claim that \begin{align}\label{eq:delta1ep}
\int_{\rr_+^N}\left| \Delta v_\ep \right|^2\,dx= \int_{\rr_+^N} \left|\eta_\ep \right|^2 \left|\Delta v\right|^2\,dx+o(1) \bb{ as }\ep\to 0. \end{align}
\noindent\textit{Proof of this claim:} For convenience $A_{+,\ep}:=\rr_+^N\cap \left( B_{2\ep}(0)\backslash B_{\ep}(0) \right).$ Using the definition of $v_\ep$ yields \begin{equation}\label{eq:variationDelta}
\int_{\rr_+^N}|\Delta v_\ep|^2\,dx=\int_{\rr_+^N}|\eta_\ep|^2|\Delta v|^2 \,dx+R_{\ep},\hbox{ with } \end{equation}\begin{equation}\label{aDelta0ep}
R_\ep:= \int_{A_{+,\ep} }\Big[ |\Delta \eta_\ep|^2v^2+4\nabla \eta_\ep\cdot\nabla v \Delta(\eta_\ep)v+\,2\, \Delta(\eta_\ep)\eta_\ep v\Delta v+4\, (\nabla \eta_\ep\cdot\nabla v)^2+4\, \nabla \eta_\ep\cdot\nabla v \eta_\ep\Delta v\Big]\,dx. \end{equation} We claim that \begin{equation}\label{eq:Rep}
R_\ep=O\left(\int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\,dx+\left( \int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\,dx\right)^{\frac{1}{2}}+\left( \int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\,dx\right)^{\frac{1}{4}} \right). \end{equation}
\begin{proof} We estimate each term of $R_\ep$. Since $v_{|\partial\rn_+}=0$, integrating by parts yield
\begin{eqnarray*}
\int_{A_{+,\ep}} \nabla \eta_\ep\cdot\nabla v \Delta(\eta_\ep)v\, dx
&=&O \left( -\int_{A_{+,\ep}}\left( |\Delta \eta_\ep|^2+ \nabla \eta_\ep\cdot \nabla (\Delta \eta_\ep)\right) v^2 \,dx\right. \\
&&\left. +\int_{\rr_+^N\cap\partial\left(B_{2\ep}(0)\backslash B_{\ep}(0) \right) }v^2 \Delta \eta_\ep\partial_{\nu}\eta_\ep \,d\sigma\right) ,
\end{eqnarray*}
where $\nu$ is the outer normal vector of $B_{2\ep}(0)\backslash B_{\ep}(0)$. Since $\partial_{\nu} \eta_\ep=0$ on $\rr_+^N\cap\partial\left(B_{2\ep}(0)\backslash B_{\ep}(0) \right)$, we have
\begin{align}
\int_{A_{+,\ep}} \nabla \eta_\ep\cdot\nabla v \Delta(\eta_\ep)v\, dx
&=O\left( \frac{1}{\ep^4}\vv (\Delta \eta)^2+ \nabla \eta\cdot \nabla (\Delta \eta)\vv_{\infty}\int_{A_{+,\ep}} v^2\, dx\right)\nonumber \\
&=O\left( \int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\, dx\right).\label{e12ep}
\end{align} By Hölder's inequality and $v\in C^2_c(\overline{\e})$, we get \begin{eqnarray}
\int_{A_{+,\ep}} \Delta(\eta_\ep)\eta_\ep v\Delta v\, dx&&=O\left( \left( \int_{A_{+,\ep}}\left|\Delta v \right|^2\, dx\right)^{\frac{1}{2}}\left( \int_{A_{+,\ep}}\left|\Delta(\eta_\ep)\eta_\ep v \right|^2\, dx\right)^{\frac{1}{2}}\right) \nonumber\\
&&=O\left( \left( \int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\,dx\right)^{\frac{1}{2}} \right). \label{e14ep} \end{eqnarray} It follows from the Cauchy-Schwarz and H\"older inequalites and integrations by parts that
\begin{eqnarray}
\int_{A_{+,\ep}}\Big( \nabla \eta_\ep\cdot\nabla v\Big)^2 \,dx &=&O\left( \int_{A_{+,\ep}}|\nabla \eta_\ep|^2|\nabla v|^2 \, dx\right)\nonumber\\
&=&O\left( \int_{A_{+,\ep}} v\Delta v |\nabla \eta_\ep|^2 \, dx+\int_{A_{+,\ep}}v\nabla v\nabla(|\nabla \eta_\ep|^2 )\, dx\right)\nonumber\\
&=&O\left( \left( \int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\,dx\right)^{\frac{1}{2}}+\int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\, dx\right).\label{e13ep}
\end{eqnarray} Using again Hölder's inequality yields \begin{eqnarray}
\int_{A_{+,\ep}} \nabla \eta_\ep\cdot \nabla v \eta_\ep\Delta v\,dx &=&O\left( \left( \int_{A_{+,\ep}}|\nabla \eta_\ep|^2|\nabla v|^2 \, dx\right)^{\frac{1}{2}}\right)\nonumber\\
&=&O\left( \left( \int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\,dx\right)^{\frac{1}{2}}+\left( \int_{A_{+,\ep}}\frac{|v|^2}{|x|^4}\,dx\right)^{\frac{1}{4}}\right).\label{e15ep} \end{eqnarray} We inject \eqref{e12ep}, \eqref{e14ep}, \eqref{e13ep} and \eqref{e15ep} in \eqref{aDelta0ep}, we obtain \eqref{eq:Rep}. \end{proof} \noindent It follows from \eqref{eq:Rep} that $R_\ep=o(1)$ as $\ep \to 0$. Therefore, by \eqref{eq:variationDelta} we obtain as $\ep \to 0$ that \eqref{eq:delta1ep}. This ends the proof of Step \ref{step1:lemma1}.\qed\par
\noindent Using again the inequality \eqref{eq:inversHSep} and by \eqref{eq:delta1ep}, we find that \begin{align*}
\int_{\rr_+^N}|\eta_\ep|^2\left| \Delta v\right|^2\, dx+o(1)\geq \frac{(N^2-4)^2}{16}\, \int_{\rr_+^N}\frac{|\eta_\ep|^2 v^2}{|x|^4}\, dx . \end{align*} Therefore, passing $\ep \to 0$, we get \eqref{eq:inversHS}. This proves Step \ref{step1:lemma1}.\qed \begin{step}\label{step2:lemma1} We claim that \begin{align}\label{eq:inversHS1}
\int_{\rr_+^N}|\Delta u|^2\, dx\geq\frac{(N^2-4)^2}{16}\, \int_{\rr_+^N}\frac{u^2}{|x|^4}\, dx \bb{ for all } u\in \H. \end{align} \end{step} \noindent{\it Proof of Step \ref{step2:lemma1}:} We fix $u\in \H$. We then take a sequence $(u_n)_n$ such that $u_n\in C^{2}_c(\overline{\rr_+^N})$ and $u_n(x)=0$ for all $x\in\partial\rn_+$ and $\lim_{n\to +\infty}u_n=u$ for the norm $\Vert\Delta\cdot\Vert_2$. Therefore, \begin{equation*}
\lim_{n\to +\infty} \int_{\rr_+^N}\frac{u_n^2}{|x|^4}\, dx=\int_{\rr_+^N}\frac{u^2}{|x|^4}\, dx\hbox{ and } \lim_{n\to +\infty} \int_{\rr_+^N}\left|\Delta u_n\right|^2\, dx =\int_{\rr_+^N} \left| \Delta u\right|^2\, dx. \end{equation*} It then follows from \eqref{eq:inversHS} that \begin{align*}
\int_{\rr_+^N}|\Delta u_n|^2\, dx\geq \frac{(N^2-4)^2}{16}\, \int_{\rr_+^N}\frac{u_n^2}{|x|^4}\, dx . \end{align*} Letting $n\to+\infty$, we get \eqref{eq:inversHS1}. This proves Step \ref{step2:lemma1}. \qed \par \begin{step}\label{step3:lemma1} We claim that \eqref{eq:inversHS1} is optimal. \end{step} \noindent{\it Proof of Step \ref{step3:lemma1}:} This will be achieved via test-function estimates. We define
$$v(x):=x_1|x|^{-\frac{N-2}{2}}\hbox{ for all } x\in\rr_+^N\backslash \{0\}.$$ We have that $v\in C^2(\overline{\rr_+^N}\backslash\{0\})$, $v=0$ on $\partial\rr_+^N$ and
$$-\Delta v(x)=\frac{N^2-4}{4}\frac{v(x)}{|x|^{2}}.$$
Let $\varphi,\psi\in C^\infty(\rr^N)$ be such that $$\left\{\begin{array}{l}
\varphi(0)=0,\, |\varphi(x)|\leq c|x|\hbox{ if }|x|<1\hbox{ and }\varphi(x)=1\hbox{ if }|x|\geq 1;\\
\psi(x)=1\hbox{ if }|x|< 1 \hbox{ and } \psi(x)=0 \hbox{ if }|x|>2, \end{array}\right.$$ for some constant $c>0$. For $0<\ep\ll 1$, we define the function $v_{\ep}\in \H$ as follows: \begin{equation*} v_{\ep}(x):=\varphi\left(\frac{x}{\ep}\right)\psi(\ep x)v(x)=\left\{\begin{array}{cc}
\varphi\left(\frac{x}{\ep}\right) v(x) &\hbox{ if }|x|<\ep,\\
v(x) &\hbox{ if }\ep\leq |x|<\frac{1}{\ep},\\
\psi(\ep x)v(x) &\hbox{ if }|x|\geq\frac{1}{\ep}. \end{array}\right. \end{equation*}
It follows from the definition of $v_\ep$ that
\begin{equation}\label{hb1}
\int_{\rn_+\backslash \overline{B}_{\ep^{-1}}(0)}\left| \Delta v_{\ep}\right| ^2dx=O(1) \bb{ and } \int_{\rn_+\cap B_{\ep}(0)}\left| \Delta v_{\ep}\right| ^2\, dx=O(1) \bb{ as } \ep\to 0. \end{equation}
It remains to compute one more term; we have \begin{eqnarray}
\int_{B_{\ep^{-1}}(0)\backslash\overline{B}_{\ep}(0)}\left| \Delta v_{\ep}\right| ^2\, dx
&=&\frac{(N^2-4)^2}{16}\int_{B_{\ep^{-1}}(0)\backslash\overline{B}_{\ep}(0)}\frac{v_{\ep}^2}{|x|^4}\, dx\label{hb30}. \end{eqnarray} Moreover, we calculate that \begin{eqnarray}
\int_{B_{\ep^{-1}}(0)\backslash\overline{B}_{\ep}(0)}\frac{v_{\ep}^2}{|x|^4}dx&=&\int_{B_{\ep^{-1}}(0)\backslash\overline{B}_{\ep}(0)}x_1^2|x|^{-N-2}dx\nonumber\\
&=&2w(2)\ln\left(\frac{1}{\ep}\right)\label{hb40}, \end{eqnarray} where $w(2):=\int_{\mathbb{S}_+^{N-1}}x_1^2\, d\sigma$. It follows from \eqref{hb1}, \eqref{hb30} and \eqref{hb40} that \begin{eqnarray}
\int_{\rr_+^N}\left| \Delta v_{\ep}\right| ^2\, dx=2w(2)\frac{(N^2-4)^2}{16}\ln\left(\frac{1}{\ep}\right)+O(1),\label{hb50} \end{eqnarray} when $\ep \to 0$. Using again the definition of $v_\ep$ yields, \begin{eqnarray}\label{hb05}
\int_{\rn_+\backslash \overline{B}_{\ep^{-1}}(0)}\frac{v_{\ep}^2}{|x|^4}\, dx=O(1) &\bb{ and }& \int_{ \rn_+\cap B_{\ep}(0)}\frac{v_{\ep}^2}{|x|^4}\, dx=O(1). \end{eqnarray} Therefore, it follows from \eqref{hb40}, we get as $\ep\to 0$ that \begin{eqnarray}
\int_{\rr_+^N}\frac{v_{\ep}^2}{|x|^4}\, dx=2w(2)\ln\left(\frac{1}{\ep}\right)+O(1).\label{hb6} \end{eqnarray} Combining \eqref{hb50} and \eqref{hb6}, we get \begin{eqnarray*}
\frac{ \int_{\rr_+^N}\left| \Delta v_{\ep}\right| ^2\, dx}{\int_{\rr_+^N}\frac{v_{\ep}^2}{|x|^4}\, dx}=\frac{(N^2-4)^2}{16}+o(1), \end{eqnarray*} as $\ep\to 0$. Since $v_\ep\in\H$, we get that $\gamma_H(\rn_+, \H)\leq \frac{(N^2-4)^2}{16}$. This completes the proof of Lemma \ref{lemma:extraonhalfspace}\qed\par
\noindent To conclude this section, we discuss the model solutions for the homogeneous equation.
\begin{lem}\label{lemma:sol} Given $\alpha\in\rr$, for $x\in \rn_+$, $N\geq 5$, we define $v_\alpha(x):=x_1|x|^{-\alpha}$.
Then for $-N^2\leq \gg <\gg_{H,+}$, we have that
\begin{equation}\label{eq:doublehardybi0}
\left\{\begin{array}{ll}
\Delta^2v_\alpha -\frac{\gg}{|x|^4} v_\alpha=0 &\hbox{ in } \rr_+^N, \\
v_\alpha=\Delta v_\alpha = 0 &\hbox{ on } \partial \rr_+^N,
\end{array}\right.
\end{equation}
if and only if $\alpha\in \{\alpha_{-}(\gg),\alpha_{+}(\gg),\beta_{-}(\gg),\beta_{+}(\gg)\}$ where
\begin{equation}\label{p00}
\alpha_{\pm}(\gg):=\frac{N-2}{2}\pm \frac{1}{2} \sqrt{N^2+4- 4\sqrt{N^2+\gg}},
\end{equation}
and,
\begin{equation}\label{p01}
\beta_{\pm}(\gg):=\frac{N-2}{2}\pm \frac{1}{2} \sqrt{N^2+4+ 4\sqrt{N^2+\gg}}.
\end{equation}
\end{lem}
\noindent \textit{Proof of Lemma \ref{lemma:sol}:} First, it follows from $-\Delta v=\alpha(N-\alpha)\frac{x_1|x|^{-\alpha}}{|x|^2} \bb{ on } \rr_+^N$ that
$$ \Delta^2v -\frac{\gg}{|x|^4} v=\left\{ \alpha(N-\alpha)(\alpha+2)(N-\alpha-2)-\gg\right\} \frac{v}{|x|^{4}}.$$
To get our result, we want to solve the following equation:
\begin{eqnarray}\label{p1}
\alpha^4-2(N-2)\alpha^3+(N^2-6N+4)\alpha^2+(2N^2-4N)\alpha-\gg=0.
\end{eqnarray}
We denote $ a=1 \bb{ , } b=-2(N-2) \bb{ , } c=N^2-6N+4 \bb{ and } d=2N^2-4N$. Since $b^3-4abc+8a^2d=0$, we can transform \eqref{p1} into a biquadratic equation. We take $ \alpha:=t -\frac{b}{4a},$ and it follows from \eqref{p1} that
\begin{eqnarray}\label{p3}
t^4-\left[ (N+2)^2+(N-2)^2 \right]\frac{t^2}{4}+\frac{(N+2)^2(N-2)^2}{16}-\gg=0.
\end{eqnarray}
It's easy to find the roots of \eqref{p3}. Since $ -N^2\leq \gg < \gg_{H,+}$, we find that
\begin{eqnarray*}
t_{\pm}( \gg):=\pm\frac{1}{2} \sqrt{N^2+4\pm 4\sqrt{N^2+\gg}}.
\end{eqnarray*}
Therefore, since $\alpha=t +\frac{N-2}{2}$ we get \eqref{p00}. Then, our function $v_\alpha$ is a solution of equation \eqref{eq:doublehardybi0} when $\alpha\in\{ \alpha_{\pm}(\gg),\beta_{\pm}(\gg)\}$. This ends the proof of Lemma \ref{lemma:sol}. \qed\par
\noindent \textbf{Remarks about this Lemma:} First, we note that $$\beta_{-}(\gg)\leq \alpha_{-}(\gg)\leq \alpha_{+}(\gg)\leq \beta_{+}(\gg).$$ If $\gg=\gg_{H,+}$, then we have that $\alpha_{\pm}(\gg)=\frac{N-2}{2}.$ Hence, for $\gg\in[0,\gg_{H,+})$, we find that $$\alpha_{-}(\gg)\in \left[0,\frac{N-2}{2}\right)\bb{ and }\alpha_{+}(\gg)\in \left(\frac{N-2}{2}, N-2\right].$$ Moreover, notice that if $\gg\in[0,\gg_{H,+})$, then $$\beta_{-}(\gg)\in\left(\beta_-(\gg_{H,+}),-2\right] \bb{ and }\beta_{+}(\gg)\in\left[N,\beta_+(\gg_{H,+})\right).$$
We also remark that $x_1|x|^{-\beta_{-}}$ is bounded and $x_1|x|^{-\alpha_{-}}$ is the singular solution that is locally in $\H$. The following graph concerns the localizations of $\alpha_{\pm}$, $\beta_{\pm}$ when $\gg\in[0,\gg_{H,+})$:
\section{Existence of extremals for $Q_{\gg,s}(\rr_+^N)$: proof of Theorem \ref{theo1}}\label{sec:extr}
We fix $\gg < \gg_{H,+}=\frac{(N^2-4)^2}{16}$ and $0\leq s<4$. Recall that \begin{equation}\label{HShalfspace}
Q_{\gg,s}(\rr_+^N)=\inf_{u\in \H\backslash \{0\}}\frac{\int_{\rr_+^N}\left( \left| \Delta u\right|^2-\gamma\frac{|u|^2}{|x|^4}\right) \, dx}{\left( \int_{\rr_+^N}\frac{|u|^{\crit}}{|x|^s}\,dx\right)^{\frac{2}{\crit}}}>0. \end{equation} In order to prove the existence of extremals for \eqref{HShalfspace}, we proceed as in Ghoussoub-Robert \cite{GR} (see Filippucci-Pucci-Robert \cite{FPR}): these proofs rely on Lions's proof of the existence of extremals for the Sobolev inequality \cite{Lio2}. We let $(\bar{u}_m)_{m\in \nn}\in \H$ be a minimizing sequence for $Q_{\gg,s}(\rr_+^N)$ in \eqref{HShalfspace} such that \begin{eqnarray*}
\int_{\rr_+^N}\frac{|\bar{u}_m|^{\crit}}{|x|^s}\,dx =1 \bb{ and } \lim_{m\to+\infty}\left( \int_{\rr_+^N}\left| \Delta \bar{u}_m\right|^2\,dx-\gamma\int_{\rr_+^N}\frac{|\bar{u}_m|^2}{|x|^4}\,dx\right) = Q_{\gg,s}(\rr_+^N). \end{eqnarray*}
For all $m\in \nn$, since $\int_{\rr_+^N}\frac{|\bar{u}_m|^{\crit}}{|x|^s}\,dx =1$, then, up to considering a subsequence, there exists $\rho_m>0$ such that $\int_{\rr_+^N\cap B_{\rho_m}(0)}\frac{|\bar{u}_m|^{\crit}}{|x|^s}\,dx =\frac{1}{2}$. We define $$ u_m(x):= \rho_m^{\frac{N-4}{2}}\bar{u}_m(\rho_m x) \bb{ for any } x\in \rr_+^N.$$ It is easy to check that $u_m \in \H$ for all $m\in \nn$, and \begin{eqnarray}\label{a00}
\lim_{m\to +\infty}\left( \int_{\rr_+^N}\left| \Delta u_m\right|^2\,dx-\gamma\int_{\rr_+^N}\frac{|u_m|^2}{|x|^4}\,dx\right) =Q_{\gg,s}(\rr_+^N), \end{eqnarray} and \begin{eqnarray}\label{a0}
\int_{\rr_+^N}\frac{|u_m|^{\crit}}{|x|^s}\,dx=1 \bb{ and } \int_{\rr_+^N\cap B_{1}(0)}\frac{|u_m|^{\crit}}{|x|^s}\,dx=\frac{1}{2}, \end{eqnarray} for all $m\in \nn$. Since $\gg <\gg_{H,+}$, there exists $C>0$ such that \begin{equation}\label{eq:coercive}
\int_{\rr_+^N}\left( \left| \Delta u\right|^2-\gamma\frac{|u|^2}{|x|^4}\right) \, dx\geq C\, \int_{\rr_+^N}\left| \Delta u\right|^2\, dx \bb{ for all } u\in \H. \end{equation} Therefore, with \eqref{a00}, there exists $C>0$ such that $C\,\vv u_m\vv^2 \leq Q_{\gg, s}(\rr_+^N)+o(1)$ as $m\to +\infty$. Hence, $(u_m)_{m\in \nn}$ is bounded in $\H$. As a consequence, up to the extraction of a subsequence, there exists $u\in \H$ such that \begin{eqnarray*}
\left\{\begin{array}{ll}
u_m \rightharpoonup u \hbox{ weakly in } \H,\\
u_m \to u \hbox{ strongly in } L^q_{loc}(\rr^N)\bb{ for any } 1\leq q <\crito:=\frac{2N}{N-4}.
\end{array}\right. \end{eqnarray*} We define now the measures $ \nu_m, \la_m$ on $\rr^N$ as \begin{eqnarray}\label{eq:definitionmesures}
\nu_m:=\frac{ |u_m|^{\crit}}{|x|^s}\mathbf{1}_{\rr_+^N}\, dx \hbox{ and }\la_m:= \left(\left| \Delta u_m\right|^2-\frac{\gg}{|x|^4}u_m^2 \right)\mathbf{1}_{\rr_+^N}\, dx. \end{eqnarray} Using \eqref{a0} and \eqref{a00} yield \begin{eqnarray}\label{a000}
\int_{\rr^N} d\nu_m =1 \bb{ and } \lim_{m\to +\infty} \int_{\rr^N}d\la_m=Q_{\gg,s}(\rr_+^N). \end{eqnarray} Up to extracting a subsequence, there exist two measures $\la$, $\nu$ on $\rr^N$ such that \begin{eqnarray}\label{eq:convmesures}
\la_m\rightharpoonup\la \bb{ and } \nu_m \rightharpoonup \nu \bb{ weakly in the sense of measures as } m\to +\infty. \end{eqnarray}
We now apply Lions's first concentration-compactness Lemma \cite{Lio2} to the sequence of measures $(\nu_m)_{m\in \nn}$. Indeed, up to a subsequence, three situations may occur: \begin{itemize}
\item[(a)] (Compactness) There exists a sequence $(x_m)_{m\in \nn}$ in $\rr^N$ such that for any $\ep >0$ there exists $R_\ep>0$ with the property that
$$ \int_{B_{R_{\ep}}(x_m)} d\nu_m \geq 1- \ep \hbox{ for all } m\in \nn \bb{ large}.$$
\item[(b)] (Vanishing) For all $R>0$ there holds
$$ \lim_{m\to +\infty} \left(\sup_{x\in \rr^N} \int_{B_R(x)} \, d\nu_m \right) =0.$$
\item[(c)] (Dichotomy) There exists $\alpha \in (0,1)$ such that for any $\ep >0$ there exists $R_\ep>0$ and a sequence $(x_m^\ep)_{m\in \nn}\in \rr^N$, with the following property: given $R^\prime >R_\ep$, there are non-negative measures $\nu_m^1$ and $\nu_m^2$ such that
\begin{align*}
\hspace{1cm} 0\leq \nu_m^1&+\nu_m^2\leq \nu_m,\, Supp(\nu_m^1)\subset B_{R_\ep}(x_m^\ep), \, Supp(\nu_m^2)\subset \rr^N\backslash B_{R^\prime}(x_m^\ep),\\
& \nu_m^1=\nu_m \left|_{B_{R_\ep}(x_m^\ep)}\right., \hspace{0.8cm} \nu_m^2=\nu_m \left|_{\rr^N\backslash B_{R^\prime}(x_m^\ep)}\right.\\
\lim_{m\to +\infty}&\sup \left(\left|\alpha-\int_{\rr^n} \, d\nu_m^1 \right| + \left|(1-\alpha)-\int_{\rr^n} \, d\nu_m^2 \right| \right)\leq \ep.
\end{align*} \end{itemize} \begin{step}\label{step:lemma1}
We claim that point $(a)$ (Compactness) holds. In particular, we have that $\int_{\rr^N} \, d\nu =1$. \end{step}
\noindent \textit{Proof of Step \ref{step:lemma1}}: Indeed, it follows from \eqref{a0} that point (b) does not hold. Assume by contradiction that point (c) holds, i.e., that there exists $\alpha\in (0,1)$ such that (c) above holds. Taking $\ep=(m+1)^{-1}$, we can assume that, up to a subsequence, there exist $(R_m)_{m\in \nn}$ in $\rr_+$, $(x_m)_{m\in \nn}$ in $\rr^N$ and two sequences of non-negative measures, $(\nu_m^1)_{m\in \nn}$ and $(\nu_m^2)_{m\in \nn}$, such that $\lim_{m\to +\infty} R_m=\infty$ and \begin{equation}\label{eq:convergencemesure}
\left\{\begin{array}{ll}
0\leq \nu_m^1+\nu_m^2\leq \nu_m&,\hspace{0.4cm} Supp(\nu_m^1)\subset B_{R_m}(x_m), \, Supp(\nu_m^2)\subset \rr^N\backslash B_{2R_m}(x_m),\\
\nu_m^1=\nu_m \left|_{B_{R_m}(x_m)}\right.&, \hspace{0.4cm} \nu_m^2=\nu_m \left|_{\rr^N\backslash B_{2R_m}(x_m)},\right.\\
\lim\limits_{m\to +\infty} \int_{\rr^N} \, d\nu_m^1=\alpha &,\hspace{0.12cm}\lim\limits_{m\to +\infty} \int_{\rr^N} \, d\nu_m^2=1-\alpha.
\end{array}\right. \end{equation} We define $D_m:=B_{2R_m}(x_m)\backslash B_{R_m}(x_m)$, it follows from \eqref{a000} and \eqref{eq:convergencemesure} that \begin{equation}\label{eq:convergenceDm}
\lim_{m\to +\infty} \int_{D_m} \, d\nu_m=0. \end{equation}
Taking $\eta\in C_c^\infty(\rr^N)$ be such that $\eta_{|B_1(0)}\equiv1$, $\eta_{|B_2(0)^c}\equiv0$ and $0\leq \eta\leq 1$. For $m\in \nn$, we define $\eta_m(x):=\eta\left( R_m^{-1}(x-x_m)\right) \bb{ for all } x\in\rr^N.$ Using \eqref{eq:convergencemesure} and \eqref{eq:convergenceDm} \begin{eqnarray}
1&=&\left( \int_{\rr^N} \eta_m^{\crit}\, d\nu_m^1+ \int_{\rr^N} (1-\eta_m)^{\crit}\, d\nu_m^2\right)^{\frac{2}{\crit}}+o(1)\nonumber\\
&\leq& \left( \int_{\rr^N} \eta_m^{\crit}\, d\nu_m^1\right)^{\frac{2}{\crit}}+ \left( \int_{\rr^N} (1-\eta_m)^{\crit}\, d\nu_m^2\right)^{\frac{2}{\crit}}+o(1)\label{eq:etam1}. \end{eqnarray} On the other hand, it follows from the \eqref{HShalfspace} that \begin{eqnarray}
&&\left( \int_{\rr^N} \eta_m^{\crit}\,d\nu_m^1\right)^{\frac{2}{\crit}}+ \left( \int_{\rr^N} (1-\eta_m)^{\crit}\,d\nu_m^2\right)^{\frac{2}{\crit}}+o(1)\nonumber\\
&\leq& \left( \int_{\rr_+^N} \frac{|\eta_mu_m|^{\crit}}{|x|^s}\, dx\right)^{\frac{2}{\crit}}+ \left( \int_{\rr_+^N} \frac{|(1-\eta_m)u_m|^{\crit}}{|x|^s}\, dx\right)^{\frac{2}{\crit}}+o(1)\nonumber\\
&\leq& Q_{\gg,s}(\rr_+^N)^{-1} \int_{\rr_+^N}\left( \left|\Delta (\eta_mu_m)\right|^2-\gamma\frac{|\eta_mu_m|^2}{|x|^4}\right) \,dx \nonumber\\
&&+\,Q_{\gg,s}(\rr_+^N)^{-1}\int_{\rr_+^N}\left( \left| \Delta ((1-\eta_m)u_m)\right|^2-\gamma\frac{|(1-\eta_m)u_m|^2}{|x|^4}\right) \,dx+o(1).\label{eq:ineqdelta12} \end{eqnarray}
\noindent{\bf Step \ref{step:lemma1}.1:} We claim that, as $m\to +\infty$, \begin{eqnarray}
\int_{\rr_+^N}\left| \Delta (\eta_mu_m)\right|^2\,dx&=& \int_{\rr_+^N}|\eta_m|^2\left|\Delta u_m\right|^2\,dx+o(1),\label{eq:delta1}\\
\int_{\rr_+^N}\left| \Delta ((1-\eta_m)u_m)\right| ^2\,dx&=&\int_{\rr_+^N}|(1-\eta_m)|^2\left| \Delta u_m\right|^2\,dx+o(1).\label{eq:delta2} \end{eqnarray}
\noindent \textit{Proof of the claim:} We write for convenience $A_{+,m}:=\rr_+^N\cap \left( B_{2R_m}(x_m)\backslash B_{R_m}(x_m)\right) $. Since $(u_m)_{m\in \nn}$ is bounded in $\H$, similarly to \eqref{eq:variationDelta} and \eqref{eq:Rep}, we get \begin{eqnarray}\label{eq:variationDelta2}
&&\int_{\rr_+^N}|\Delta (\eta_mu_m)|^2\,dx=\int_{\rr_+^N}|\eta_m|^2|\Delta u_m|^2 \,dx+O\left( \int_{A_{+,m}}\frac{|u_m|^2}{|x-x_m|^4}\,dx\right.\\
&&+\left. \left( \int_{A_{+,m}}\frac{|u_m|^2}{|x-x_m|^4}\,dx\right)^{\frac{1}{2}}+\left( \int_{A_{+,m}}\frac{|u_m|^2}{|x-x_m|^4}\,dx\right)^{\frac{1}{4}}\right).\nonumber \end{eqnarray}
We claim that, \begin{equation}\label{eq:convergenceumx4}
\lim_{m\to +\infty}\int_{A_{+,m}}\frac{u_m^2}{|x-x_m|^4}\,dx=0. \end{equation}
\textit{Proof.} Indeed, by Hölder's inequality and since $ N(1-\frac{2}{\crit})+\frac{2s}{\crit}=4$, we get that \begin{align}
\int_{A_{+,m}}\frac{u_m^2}{|x-x_m|^4}\,dx&\leq R_m^{-4} \left[\int_{A_{+,m}}\, dx\right]^{1-\frac{2}{\crit}} \left[\int_{A_{+,m}} u_m^{\crit}\,dx\right]^{\frac{2}{\crit}}\nonumber\\
&\leq c\,w_{N-1}^{1-\frac{2}{\crit}}{R_m}^{\frac{2s}{\crit}-4}\left[ \int_{R_m}^{2R_m}r^{N-1}\,dr\right]^{1-\frac{2}{\crit}}\left[\int_{A_{+,m}} \frac{u_m^{\crit}}{|x|^s}\,dx\right]^{\frac{2}{\crit}}\nonumber\\
&\leq c\,w_{N-1}^{1-\frac{2}{\crit}}\,\left[\int_{A_{+,m}} \frac{u_m^{\crit}}{|x|^s}\,dx\right]^{\frac{2}{\crit}},\label{e16} \end{align} where $w_{N-1}$ is the volume of the canonical $(N-1)$--sphere. Using \eqref{a0} yields \begin{equation*}
\lim_{m\to +\infty} \int_{\rr_+^N }\frac{|u_m|^{\crit}}{|x|^s}\,dx=1. \end{equation*}
In addition, by \eqref{eq:convergenceDm} we infer that $\lim\limits_{m\to+\infty} \int_{A_{+,m}}\frac{|u_m|^{\crit}}{|x|^s}\,dx=0$. Therefore, it follows from \eqref{e16} that \eqref{eq:convergenceumx4} holds. This ends the proof of this claim.\qed\par
\noindent Combining \eqref{eq:variationDelta2} and \eqref{eq:convergenceumx4}, we get the result. Similarly we prove \eqref{eq:delta2}. This ends the proof of Step \ref{step:lemma1}.1.\qed\par
\noindent It follows from \eqref{eq:ineqdelta12}, \eqref{eq:delta1}, \eqref{eq:delta2} and \eqref{a000} that \begin{eqnarray*}
&&\left( \int_{\rr^N} \eta_m^{\crit}d\nu_m^1\right)^{\frac{2}{\crit}}+ \left( \int_{\rr^N} (1-\eta_m)^{\crit}d\nu_m^2\right)^{\frac{2}{\crit}}\\
&\leq& Q_{\gg,s}(\rr_+^N)^{-1} \int_{\rr_+^N} \eta_m^2\left(|\Delta u_m|^2 -\gamma\frac{|u_m|^2}{|x|^4}\right)\, dx \end{eqnarray*} \begin{eqnarray*}
&&+\,Q_{\gg,s}(\rr_+^N)^{-1} \int_{\rr_+^N} (1-\eta_m)^2\left(|\Delta u_m|^2 -\gamma\frac{|u_m|^2}{|x|^4}\right)\, dx+o(1)\\
&\leq& Q_{\gg,s}(\rr_+^N)^{-1} \int_{\rr^N}\left(\eta_m^2+ (1-\eta_m)^2\right)\,d\la_m+o(1)\\
&\leq& 1+2\, Q_{\gg,s}(\rr_+^N)^{-1}\int_{\rr_+^N} \eta_m(1-\eta_m) \frac{|u_m|^2}{|x|^4}\, dx+o(1)\\
&\leq&1+O\left(\int_{A_{+,m} }\frac{|u_m|^2}{|x-x_m|^4}\, dx \right) +o(1). \end{eqnarray*} Letting $m \to +\infty$ and using \eqref{eq:convergencemesure}, \eqref{eq:etam1} and \eqref{eq:convergenceumx4} yields $\alpha^{\frac{2}{\crit}}+(1-\alpha)^{\frac{2}{\crit}}=1$. This is impossible when $\alpha\in (0,1)$ and $\crit>2$. This contradiction proves Step \ref{step:lemma1}. \qed \begin{step}\label{step:lemma2}
There exists $I\subset \nn$ at most countable, and a family $\{x_i\}_{i\in I}\in \rr^N$ such that
\begin{equation}\label{eq:limitedenu}
\nu=\frac{ |u|^{\crit}}{|x|^s}\mathbf{1}_{\rr_+^N}\, dx+\sum_{i\in I}\nu_i \delta_{x_i},
\end{equation} where $\nu_i:=\nu(x_i)>0$ for all $i\in I$. In particular, $\{x_i, i\in I\}\subset \{0\}$ when $s>0$. Moreover, there exists a bounded non–negative measure $\la_0\geq 0$ with no atoms (that is $\la_0(\{x\})=0$ for all $x\in \rr^N$) and \begin{equation}\label{eq:ll}
\la=\la_0+\left( \left|\Delta u\right|^2- \gg \frac{ |u|^{2}}{|x|^4}\right) \mathbf{1}_{\rr_+^N}\, dx+\sum_{i\in I} \la_i \delta_{x_i}, \end{equation} with $\la_i=\la(\{x_i\})>0$ and $\la_i \geq Q_{\gg,s}(\rr_+^N) \nu_i^{\frac{2}{\crit}}$. \end{step}
\noindent \textit{Proof of Step \ref{step:lemma2}:} For $s=0$, \eqref{eq:limitedenu} is a consequence of Lions's second concentration–compactness Lemma \cite{Lio2}. Take now $s>0$ so that $\crit<\frac{2N}{N-4}$; then $u_m\to u$ strongly in $L^{\crit}_{loc}(\rr_+^N)$. Therefore, we obtain that \begin{equation}\label{eq:ucrit}
\nu= \frac{ |u|^{\crit}}{|x|^s}\mathbf{1}_{\rr_+^N}\, dx+\nu(\{0\})\delta_0. \end{equation} This proves \eqref{eq:limitedenu} in the case $s\geq 0$.\par
\noindent We now prove \eqref{eq:ll} of Step \ref{step:lemma2}. We start by the following claim.\par
\noindent \noindent{\bf Step \ref{step:lemma2}.1:} We claim that
\begin{equation}\label{eq:controlenulambda}
\left( \nu(\{x\})\right)^{\frac{2}{\crit}}\leq Q_{\gg,s}(\rr_+^N)^{-1}\la(\{x\}) \bb{ for all } x\in \rr^N.
\end{equation}
\noindent \textit{Proof of the claim:} Indeed, let $\varphi\in C^\infty(\rr^N)$ be such that $\varphi(x)=1$ for $x\in B_{1}(0)$, $\varphi(x)=0$ for $x\in \rr^N\backslash B_{2}(0)$ and $0\leq \varphi\leq 1$.
Given $y\in\rr^N$ and $\delta >0$, we define $\varphi_\delta(x)=\varphi(\frac{x-y}{\delta})\bb{ for all } x\in \rr^N.$ Since $\varphi_\delta u_m\in \H$, the definition \eqref{eq:qgammas} yields \begin{equation}\label{eq:Delta123}
\left(\int_{\rr_+^N }\frac{|\varphi_\delta u_m|^{\crit}}{|x|^s}\,dx \right)^{\frac{2}{\crit}} \leq Q_{\gg,s}(\rr_+^N)^{-1}\int_{\rr_+^N}\left( \left| \Delta( \varphi_\delta u_m)\right| ^2-\gamma\frac{|\varphi_\delta u_m|^2}{|x|^4}\right) \,dx. \end{equation} As in the last proof of Step \ref{step:lemma1}.1 (see \eqref{eq:variationDelta2}), we have that \begin{eqnarray*}
\int_{\rr_+^N}|\Delta (\varphi_\delta u_m)|^2\,dx= \int_{\rr_+^N}|\varphi_\delta|^2|\Delta u_m|^2\,dx+R_{m,\delta}+o(1), \end{eqnarray*} where $o(1)\to 0$ as $m\to +\infty$ and, \begin{eqnarray*}
R_{m,\delta}:=O\left(\int_{A_{+,\delta}} \frac{u_m^{2}}{|x-y|^4}\,dx+\left( \int_{A_{+,\delta}} \frac{u_m^{2}}{|x-y|^4}\,dx\right)^\frac{1}{2}\right. \left.+ \left( \int_{A_{+,\delta}}\frac{|u_m|^2}{|x-y|^4}\,dx\right)^{\frac{1}{4}} \right). \end{eqnarray*} where $A_{+,\delta}:=\rr_+^N\cap \left(B_{2\delta}(y)\backslash B_{\delta}(y) \right)$. Therefore, for all $\delta >0$, using \eqref{eq:Delta123} yields \begin{align*}
\left(\int_{\rr^N }\left| \varphi_\delta\right| ^{\crit} \, d\nu_m \right)^{\frac{2}{\crit}}& \leq Q_{\gg,s}(\rr_+^N)^{-1}\int_{\rr^N}\varphi_\delta^2 \,d\la_m+R_{m,\delta}+o(1), \end{align*} letting $m\to +\infty$ and then $\delta \to 0$, we get that \eqref{eq:controlenulambda}. \qed \par
\noindent Up to extraction, let $\la^\prime$ be the weak limit of $|\Delta u_m|^2\mathbf{1}_{\rr_+^N}\, dx$ as $m\to +\infty$. Since $u_m\rightharpoonup u$ weakly in $\H$ as $m\to +\infty$, we get that $\la^\prime \geq |\Delta u|^2\mathbf{1}_{\rr_+^N}\, dx.$ Hence \begin{equation}\label{eq:llprime}
\la^\prime= \la_0 +\left| \Delta u\right|^2\mathbf{1}_{\rr_+^N}\, dx+\sum_{j\in J} \la^\prime(\{z_j\})\delta_{z_j}+\la^\prime_0\delta_0, \end{equation} where $\la_0\geq 0$ has no atoms, and the points $z_j$, with $j\in J$ at most countable, are the atoms of $\la^\prime$. \noindent As in \eqref{eq:ucrit} above, we have that there exists $L\geq 0$ such that \begin{equation}\label{eq:limiteux4}
\frac{ |u_m|^{2}}{|x|^4}\mathbf{1}_{\rr_+^N}\, dx\rightharpoonup \frac{ |u|^{2}}{|x|^4}\mathbf{1}_{\rr_+^N}\, dx+L \, \delta_0, \end{equation} It follows from \eqref{eq:llprime} and \eqref{eq:limiteux4} that \begin{equation}\label{mino:ll}
\la =\la_0+ \left( |\Delta u|^2- \gg \frac{ |u|^{2}}{|x|^4}\right) \mathbf{1}_{\rr_+^N}\, dx-\gg L\delta_0+\sum_{j\in J} \la^\prime(\{z_j\})\delta_{z_j}. \end{equation} First, using \eqref{mino:ll} and \eqref{eq:controlenulambda} yields \begin{equation}\label{eq:nu0} 0< \left( \nu(\{0\})\right)^{\frac{2}{\crit}} Q_{\gg,s}(\rr_+^N)\leq \la(\{0\}) =\la^\prime_0-\gg L . \end{equation} On the other hand, for $z_j\neq 0$ we have \begin{equation}\label{eq:nuj} 0< \left( \nu(\{z_j\})\right)^{\frac{2}{\crit}} Q_{\gg,s}(\rr_+^N)\leq \la(\{z_j\}) =\la^\prime(\{z_j\}). \end{equation} From \eqref{mino:ll}, \eqref{eq:nu0} and \eqref{eq:nuj} we get the result \eqref{eq:ll}. This proves Step \ref{step:lemma2}.\qed \begin{step}\label{step:lemma12}
We claim that one and only one of the two following situations occur:
\begin{eqnarray*}
&\hbox{either} \left\lbrace \nu =\frac{ |u|^{\crit}}{|x|^s}\mathbf{1}_{\rr_+^N}\, dx \hbox{ and } \int_{\rr_{+}^n} \frac{ |u|^{\crit}}{|x|^s} \, dx=1\right\rbrace \\
& or\, \Big\{ \hbox{there exists $x_0\in \rr^N$ such that } \nu =\delta_{x_0} \hbox{ and } u=0 \Big\} .
\end{eqnarray*}
\end{step}
\noindent \textit{Proof of Step \ref{step:lemma12}:} Indeed, it follows from Step \ref{step:lemma1} that, \begin{align}\label{eq:i1}
1&=\left( \int_{\rr^N} d\nu \right)^{\frac{2}{\crit}}= \left(\int_{\rr_+^N}\frac{|u|^{\crit}}{|x|^s}\, dx +\sum_{i\in I} \nu^i \int_{\rr^N} \delta_{x_i}\, dx \right)^{\frac{2}{\crit}} \bb{ from \eqref{eq:limitedenu}}\nonumber\\
&=\left(\int_{\rr_+^N}\frac{|u|^{\crit}}{|x|^s}\, dx +\sum_{i\in I} \nu^i \right)^{\frac{2}{\crit}}\leq \vv u\vv_{\crit,|x|^{-s}}^{2}+\sum_{i\in I}\nu_i^{\frac{2}{\crit}}. \end{align} Now, using again \eqref{HShalfspace} and \eqref{eq:controlenulambda} yields, \begin{align}\label{eq:i2}
&\vv u\vv_{\crit,|x|^{-s}}^{2}+\sum_{i\in I}\nu_i^{\frac{2}{\crit}}\nonumber\\
&\leq Q_{\gg,s}(\rr_+^N)^{-1}\left(\int_{\rr_+^N} \left( |\Delta u|^2-\gg \frac{u^2}{|x|^4}\right) \, dx+\sum_{i\in I} \la^i\right) \leq Q_{\gg,s}(\rr_+^N)^{-1} \int_{\rr^N} d\la \end{align} from \eqref{eq:ll}. Combining \eqref{eq:i1} and \eqref{eq:i2}, we have that $ \int_{\rr^N} d\la\geq Q_{\gg,s}(\rr_+^N)$.\par
\noindent We claim now that $ \int_{\rr^N} d\la\leq Q_{\gg,s}(\rr_+^N).$ Indeed, we let $f\in C^\infty(\rr^N)$ be such that $f(x)=0$ for $x\in B_{1}(0)$, $f(x)=1$ for $x\in \rr^N\backslash B_{2}(0)$ and $0\leq f\leq 1$. Given $\rho>0$, we let $f_\rho(x)=f(\rho^{-1}x)$ for all $x\in \rr^N$. So $(1-f_\rho^2)u_m\in \H$ and therefore \begin{align*}
\int_{\rr^N} (1-f_\rho^2)\, d\la_m&= \int_{\rr^N} \, d\la_m- \int_{\rr_+^N} \left( f_\rho^2|\Delta u_m|^2-\gg \frac{|f_\rho u_m|^2}{|x|^4}\right) \, dx\\
&=\int_{\rr^N} \, d\la_m- \int_{\rr_+^N} \left( |\Delta (f_\rho u_m)|^2-\gg \frac{|f_\rho u_m|^2}{|x|^4}\right) \, dx\\
&\qquad +\int_{\rr_+^N} \left( |\Delta (f_\rho u_m)|^2-f_\rho^2|\Delta u_m|^2\right) \, dx\\
&\leq \int_{\rr^N} \, d\la_m +\int_{\rr_+^N} \left( |\Delta (f_\rho u_m)|^2-f_\rho^2|\Delta u_m|^2\right) \, dx\bb{ from \eqref{eq:coercive}}\\
&\leq Q_{\gg,s}(\rr_+^N)+R_{m,\rho}+o(1)\bb{ from \eqref{eq:variationDelta2}}, \end{align*} where $o(1)\to 0 \bb{ as }m\to +\infty$, and \begin{eqnarray*}
R_{m,\rho}:=O\left(\int_{A_{+,\rho}} \frac{u_m^{2}}{|x|^4}\,dx+\left( \int_{A_{+,\rho}} \frac{u_m^{2}}{|x|^4}\,dx\right)^\frac{1}{2}\right. \left.+ \left( \int_{A_{+,\rho}}\frac{|u_m|^2}{|x|^4}\,dx\right)^{\frac{1}{4}} \right), \end{eqnarray*} where $A_{+,\rho}:=\rr_+^N\cap\left( B_{2\rho}(0)\backslash B_{\rho}(0) \right)$. Therefore, letting $m\to +\infty$, and then $\rho\to +\infty$, we obtain the claim.
\noindent This implies that $\int_{\rr^N} \, d\la=Q_{\gg,s}(\rr_+^N)$. Therefore, it follows from \eqref{eq:i1} and \eqref{eq:i2} that
$ \vv u\vv_{\crit,|x|^{-s}}^{2}+\sum_{i\in I}\nu_i^{\frac{2}{\crit}}= 1.$ By convexity, we have that one and only one term in \eqref{eq:limitedenu} is nonzero,
then there exists $i_0\in I$ such that, setting $x_0:=x_{i_0}$,
$$\left\lbrace\nu^{i_0}=1 \hbox{ and } \int_{\rr_+^N}\frac{|u|^{\crit}}{|x|^s}\, dx=0\right\rbrace \hbox{ or } \left\lbrace \nu^{i_0}=0 \hbox{ and } \int_{\rr_+^N}\frac{|u|^{\crit}}{|x|^s}\, dx=1\right\rbrace .$$ Together with \eqref{eq:limitedenu}, there exists $x_0\in \rr^N$ such that the claim of Step \ref{step:lemma12} follows.\qed\par \begin{step}\label{step:extunozero}
Suppose that $u\not\equiv 0$. We claim that $u$ is an extremal for $Q_{\gg,s}(\rr_+^N)$. \end{step}
\noindent \textit{Proof of Step \ref{step:extunozero}:} Since $u\not\equiv0$, it follows from the previous Step that we have $\vv u\vv_{\crit,|x|^{-s}}^{\crit}=1$.
Using again the Hardy-Sobolev inequality \eqref{HShalfspace} yields, \begin{align*}
Q_{\gg,s}(\rr_+^N)\leq \int_{\rr_+^N}\left( |\Delta u|^2-\gg\frac{u^2}{|x|^4}\right)\, dx. \end{align*} On the other hand, we have $u_m\rightharpoonup u$ as $m\to +\infty$ and we get that
$$\int_{\rr_+^N}\left( |\Delta u|^2-\gg\frac{u^2}{|x|^4}\right)\, dx \leq \liminf_{m\to +\infty}\int_{\rr_+^N}\left( |\Delta u_m|^2-\gg\frac{u_m^2}{|x|^4}\right)\, dx=Q_{\gg,s}(\rr_+^N).$$ Therefore, we get the equality $I_{\gamma, s}^{\e}(u)=Q_{\gg,s}(\rr_+^N)$. That is, $u$ is an extremal for $Q_{\gg,s}(\rr_+^N)$. We obtain the result of Step \ref{step:extunozero}.\qed
\begin{step}\label{step:melsun}
We suppose that $u\equiv 0$. Then, we have\begin{align*}
s=0, \lim_{m\to +\infty}\int_{\rr_+^N}\frac{ |u_m|^2}{|x|^4}\, dx=0 \bb{ and } |\Delta u_m|^2\, dx \rightharpoonup Q_{\gg,0}(\rr_+^N) \delta_{x_0},
\end{align*}
as $m\to +\infty$ in the sense of measures. \end{step} \noindent\textit{Proof of step \ref{step:melsun}:} Since $u\equiv0$, and it follows from the Step \ref{step:lemma12} that there exists $x_0\in \rr^N$ such that $\nu =\delta_{x_0}$.\par
\noindent We claim that $x_0\neq 0$. Indeed, if $x_0=0$, we get that $\int_{B_{1/2}(0)} \, d\nu=1$ which contradicts \eqref{a0}.
Therefore $x_0\neq 0$. Since $u_m\rightharpoonup 0$ weakly in $\H$ as $m\to +\infty$, then for any $1\leq q< \frac{2N}{N-4}$, we have $u_m\to 0$ strongly in $L^q_{loc}(\rr_+^N)$.\par
\noindent We claim that $s=0$. Indeed, we argue by contradiction and assume that $s>0$; then $\crit <\frac{2N}{N-4}$. Let $r>0$. Since $x_0\neq 0$ and $u_m\to 0$ strongly in $L^{\crit}_{loc}(\rr_+^N)$, we have $\lim\limits_{m\to +\infty} \int_{B_{r}(x_0)\cap\rr_+^N }\frac{|u_m|^{\crit}}{|x|^s}dx=0$.
On the other hand, it follows from \eqref{eq:convmesures} and $\nu= \delta_{x_0}$ that $\lim\limits_{m\to +\infty} \int_{B_{r}(x_0)\cap\rr_+^N }\frac{|u_m|^{\crit}}{|x|^s}dx=1$
for all $r>0$ small enough, a contradiction.
\noindent Therefore $s=0$, and we prove the rest of this Step. Let $\rho>0$ and $f\in C^\infty(\rr^N)$ be such that $f(x)=0$ for $x\in B_{\rho}(x_0)$, $f(x)=1$ for $x\in \rr^N\backslash B_{2\rho}(x_0)$ and $0\leq f\leq 1$. We now define, $\varphi:=1-f^2 \hbox{ and } \psi:=f\sqrt{2-f^2}.$ Clearly $\varphi,\psi\in C^\infty(\rr^N)$ and $\varphi^2+\psi^2=1$. It follows from \eqref{HShalfspace} and \eqref{eq:variationDelta2} that \begin{eqnarray*}
&&Q_{\gg,0}(\rr_+^N)\left(\int_{\rr_+^N }|\varphi u_m|^{2^{\star\star}_0}\,dx \right)^{\frac{2}{2^{\star\star}_0}}
\leq \int_{\rr_+^N}\left( |\Delta( \varphi u_m)|^2-\gamma\frac{|\varphi u_m|^2}{|x|^4}\right) \,dx\\
&&\leq\int_{\rr_+^N}\varphi^2\left( |\Delta u_m|^2-\gamma\frac{ |u_m|^2}{|x|^4}\right) \,dx+R_{m,\rho}+o(1), \end{eqnarray*} where $o(1)\to 0$ as $m\to +\infty$, and \begin{eqnarray*}
R_{m,\rho}:=O\left(\int_{A_{+,\rho}} \frac{u_m^{2}}{|x-x_0|^4}\,dx+\left( \int_{A_{+,\rho}} \frac{u_m^{2}}{|x-x_0|^4}\,dx\right)^\frac{1}{2}\right. \left.+ \left( \int_{A_{+,\rho}}\frac{|u_m|^2}{|x-x_0|^4}\,dx\right)^{\frac{1}{4}} \right), \end{eqnarray*} where $A_{+,\rho}:=\rr_+^N\cap\left( B_{2\rho}(x_0)\backslash B_{\rho}(x_0) \right)$.
Using $u_m\to 0$ in $L^2_{loc}(\rr^N)$ yields $R_{m,\rho}=o(1)$ as $m\to +\infty$. And, so by $\varphi^2=1-\psi^2$ \begin{align}
Q_{\gg,0}(\rr_+^N)\left(\int_{\rr_+^N }|\varphi u_m|^{2^{\star\star}_0}\,dx \right)^{\frac{2}{2^{\star\star}_0}}&\leq \int_{\rr_+^N}\left( |\Delta u_m|^2-\gamma\frac{ |u_m|^2}{|x|^4}\right) \,dx\nonumber\\
&\quad-\int_{\rr_+^N}\psi^2\left( |\Delta u_m|^2-\gamma\frac{ |u_m|^2}{|x|^4}\right) \,dx+o(1), \label{eq:phipsi} \end{align} $\bb{ as }m\to +\infty.$ It follows from \eqref{eq:convmesures} and Step \ref{step:lemma12} that \begin{equation}\label{eq:phium}
\left(\int_{\rr_+^N }|\varphi u_m|^{2^{\star\star}_0}\,dx \right)^{\frac{2}{2^{\star\star}_0}}=\left( |\varphi (x_0)|^{\crito}+o(1)\right)^{\frac{2}{2^{\star\star}_0}}=1+o(1) \bb{ as } m\to +\infty. \end{equation} Plugging \eqref{eq:phium} into \eqref{eq:phipsi} we get that \begin{align}
Q_{\gg,0}(\rr_+^N)+o(1)&\leq \int_{\rr_+^N}\left( |\Delta u_m|^2-\gamma\frac{ |u_m|^2}{|x|^4}\right) \,dx\nonumber\\
&-\int_{\rr_+^N}\psi^2\left( |\Delta u_m|^2-\gamma\frac{ |u_m|^2}{|x|^4}\right) \,dx+o(1),\label{eq:new1} \end{align} as $m\to +\infty$. From \eqref{a00}, we have \begin{equation}\label{eq:psi}
\int_{\rr_+^N}\psi^2\left( |\Delta u_m|^2-\gamma\frac{ |u_m|^2}{|x|^4}\right) \,dx\leq o(1) \hbox{ as } m \to +\infty. \end{equation}
As in the proof of \eqref{eq:delta1}, we obtain that $ \int_{\rr_+^N} |\Delta( \psi u_m)|^2\,dx=\int_{\rr_+^N}\psi^2 |\Delta u_m|^2\,dx+o(1)$ as $m\to +\infty$. Plugging this expansion into \eqref{eq:psi} yields
$$\lim\limits_{m\to+\infty}\int_{\rr_+^N}\left( |\Delta (\psi u_m)|^2-\gamma|x|^{-4} |\psi u_m|^2\right) \,dx=0.$$
Hence, by the coercivity \eqref{eq:coercive}, we get
\begin{equation}\label{convergenceDelta}
\lim_{m\to +\infty} \vv \Delta (\psi u_m) \vv_2=0. \end{equation}
With the result of the Lemma \ref{lemma:extraonhalfspace}, we have that $$ \lim_{m\to +\infty}\int_{\rr_+^N}\frac{ |\psi u_m|^2}{|x|^4}\, dx=0.$$ We then have $\lim_{m\to +\infty}\int_{\rr_+^N\backslash B_{2\rho}(x_0)}\frac{ |u_m|^2}{|x|^4}\, dx=0.$
Moreover, taking $\rho>0$ small enough, since $x_0\neq 0$ and $u_m\to 0$ in $L^2_{loc}(\rr_+^N)$ around $x_0\neq 0$ yields $$\lim\limits_{m\to +\infty}\int_{\rr_+^N}\frac{ |u_m|^2}{|x|^4}\, dx=0,$$ which implies by \eqref{a00} that $\lim\limits_{m\to +\infty}\vv \Delta u_m\vv^2=Q_{\gg,0}(\rr_{+}^N)$. Hence, using \eqref{convergenceDelta} yields the third part of claim. Step \ref{step:melsun} is proved.\qed \begin{step}\label{step:mq00rn}
We now claim that, if $u\equiv0$, then $s=0$ and $Q_{\gg,s}(\rr_+^N)=S_N$, where $S_N$ is defined in \eqref{eq:bestconstantsoblevrn}. \end{step} \noindent\textit{Proof of step \ref{step:mq00rn}:} We have already seen that $s=0$. Since $u_m\in \H\subset \tilde{H}_0(\rn)$, we have that \begin{eqnarray*}
S_N\left(\int_{\rr^N} |u_m|^{2^{\star\star}(0)} \, dx \right)^{\frac{2}{2^{\star\star}(0)}}
&\leq&\int_{\rr^N} |\Delta u_m|^2\, dx\\
&\leq&\int_{\rr_+^N} \left( |\Delta u_m|^2-\gg \frac{u_m^2}{|x|^4}\right) \, dx\\
&&+\int_{\rr^N\backslash\rr_+^N} |\Delta u_m|^2\, dx+\,\gg\int_{\rr_+^N}\frac{u_m^2}{|x|^4} \, dx, \end{eqnarray*}
so with \eqref{a00} and by the result of Step \ref{step:melsun}, we get \begin{eqnarray*}
S_N\left(\int_{\rr^N} |u_m|^{2^{\star\star}(0)} \, dx \right)^{\frac{2}{2^{\star\star}(0)}}& \leq& Q_{\gg,0}(\rr_+^N) +o(1). \end{eqnarray*} It follows then from \eqref{a0} that $S_N\leq Q_{\gg,0}(\rr_+^N) +o(1)$.
Letting $m\to +\infty$ we obtain that $S_N\leq Q_{\gg,0}(\rr_+^N)$. Conversely, it follows from the computations of Proposition \ref{prop:nonext} below that $Q_{\gg,0}(\rr_+^N)\leq S_N$. Hence, we have $Q_{\gg,0}(\rr_+^N)= S_N$. This proves Step \ref{step:mq00rn} \qed \begin{step}\label{step:casegammaneagtive}
We assume that $s=0$ and $\gg\leq 0$. Then, we have that $Q_{\gg,0}(\rr_+^N)= S_N$, where $S_N$ is defined in \eqref{eq:bestconstantsoblevrn}. \end{step} \noindent\textit{Proof of step \ref{step:casegammaneagtive}:} Indeed, since $\gg \leq 0$, it follows from Proposition \ref{prop:nonext}, applied with the smooth domain $\Omega= \rr_+^N$, that $Q_{\gg,0}(\rr_+^N)= S_N.$ \qed \begin{step}\label{step:casegammapositive}
Taking $\{s=0, \gg>0 \bb{ and } N\geq 8\}$, we claim that
\begin{equation}\label{eq:conditonexistence}
Q_{\gg,0}(\rr_+^N)< S_N.
\end{equation} Therefore, we get that $Q_{\gg,0}(\e)$ is attained. \end{step}
\noindent\textit{Proof of step \ref{step:casegammapositive}:} We fix $x_0\in \rr_+^N$ such that $x_0\neq 0$. We define $U(x):=(1+|x|^2)^{-\frac{N-4}{2}}$ for all $x\in\rn$. It follows from Lions \cite{Lio1,Lio2} that $U\in \tilde{H}_0(\rn)=D^{2,2}(\rn)$ is an extremal for \eqref{eq:bestconstantsoblevrn}, that is
$$S_N:=\frac{\int_{\rn}\left| \Delta U\right| ^2\, dx}{\left(\int_{\rn}|U|^{\crito}\, dx \right)^{\frac{2}{\crito}}}.$$
Let $\eta \in C_c^{\infty}(\rr_+^N)$ and $0<\delta<|x_0|/2$ be such that $\eta(x)=1$ for $x\in B_\delta(x_0)$. We consider the test function \begin{equation}\label{def:Ue}
U_\ep(x):=\eta(x)u_\ep(x) \hbox{ for all } x\in \rr_+^N\bb{ and } \ep >0, \end{equation} \begin{equation}\label{eq:defuepsilon}
\hbox{where } u_\ep(x):=\ep^{-\frac{N-4}{2}}U\left(\frac{x-x_0}{\ep}\right)=\left( \frac{\ep}{\ep^2+|x-x_0|^2}\right)^{\frac{N-4}{2}}\hbox{ for all }x\in\rn. \end{equation}
\noindent{Step \ref{step:casegammapositive}.1:} We claim as $\ep \to 0$ that: \begin{eqnarray}\label{eq:i1ep}
\int_{\rr_+^N} \frac{U_\ep^2}{|x|^4}\, dx=\left\{\begin{array}{ll}
\frac{\ep^{4}}{2}|x_0|^{-4}\int_{\rr^N}U^2\, dx+o(\ep^{4}) &\bb{ if } N\geq 9,\\
\ep^4 \ln(\frac{1}{\ep})|x_0|^{-4}+o\left(\ep^4 \ln\left(\frac{1}{\ep}\right)\right) &\bb{ if } N=8,\\
O(\ep^{N-4})&\bb{ if } N=5,6,7.
\end{array}\right. \end{eqnarray}
\noindent \textit{Proof of the claim:} Indeed, for $\delta >0$ we begin by noticing that \begin{equation}\label{eq:Uepx4}
\int_{\rr_+^N} \frac{U_\ep^2}{|x|^4}\, dx
=|x_0|^{-4} I_{1,\ep}+I_{2,\ep}+O(\ep^{N-4}) \bb{ as } \ep \to 0, \end{equation}
where: \begin{eqnarray}
I_{1,\ep}:=\int_{B_\delta(x_0)\cap\e}u_\ep^2\, dx \bb{ and } I_{2,\ep}:=\int_{B_\delta(x_0)\cap\e}\left(\frac{1}{|x|^4} -\frac{1}{|x_0|^4}\right) u_\ep^2\, dx. \end{eqnarray} First, we claim that \begin{eqnarray}\label{eq:i11ep}
I_{1,\ep}=\left\{\begin{array}{ll}
\frac{\ep^{4}}{2}\int_{\rr^N}U^2\, dx+O(\ep^{N-4}) &\bb{ if } N\geq 9,\\
\ep^4 \ln(\frac{1}{\ep})+O(\ep^4) &\bb{ if } N=8,\\
O(\ep^{N-4})&\bb{ if } N=5,6,7,
\end{array}\right.. \end{eqnarray} \begin{proof} For $N\geq 9$, $U\in L^2(\rn)$, and we get as $\ep \to 0$ that \begin{equation*}
I_{1,\ep}=\frac{\ep^{4}}{2}\int_{\rr^N}U^2\, dx+O(\ep^{N-4}). \end{equation*} Take now the case $N=8$. It follows from the change of variable to polar coordinates that \begin{align*}
I_{1,\ep}
&=\ep^4\, w_7\left[O(1)+\int_{1}^{\delta\ep^{-1}}\frac{1}{r}\, dr+\int_{1}^{\delta\ep^{-1}}r^{7}\left[ \frac{1}{\left( 1+r^2\right)^4 }-\frac{1}{r^8}\right]\, dr \right]\\
&=\ep^4w_7 \ln\left(\frac{\delta}{\ep}\right)+O(\ep^4). \end{align*} As one checks, we have that $I_{1,\ep}=O(\ep^{N-4})$ for $N=5,6,7$. This proves \eqref{eq:i11ep}. \end{proof} \noindent Next, we claim as $\ep \to 0$ that \begin{eqnarray}\label{eq:i2ep}
I_{2,\ep}=\left\{\begin{array}{ll}
o(\ep^4) &\bb{ if } N\geq 9,\\
o(\ep^4\ln(\frac{1}{\ep})) &\bb{ if } N=8,\\
O(\ep^{N-4})&\bb{ if } N=5,6,7.
\end{array}\right. \end{eqnarray} \begin{proof} It follows from the definition of $u_\ep$ that
\begin{align*}
\left| I_{2,\ep}\right| \leq \ep^{N-4}\, \int_{B_\delta(0)\cap\e}\frac{|A(x)|}{\left( \ep^{2}+|x|^{2}\right)^{N-4} }\, dx,
\end{align*}
where $A(x):=\frac{1}{|x+x_0|^4}-\frac{1}{|x_0|^4}$. Fix $\alpha \in (0,\delta)$, with a change of variables we write
\begin{equation*}
\ep^{N-4}\, \int_{(B_{\delta}(0)\backslash B_\alpha(0))\cap\e}\frac{|A(x)|}{\left( \ep^{2}+|x|^{2}\right)^{N-4} }\, dx
\leq \ep^{N-4}\, \vv A\vv_{\infty} \frac{\delta^{N}w_{N-1}}{\alpha^{2(N-4)}}:=C_{1,\alpha}\ep^{N-4}.
\end{equation*} On the other hand, we have that \begin{equation}\label{eq:casgeneral}
\ep^{N-4}\, \int_{ B_\alpha(0)\cap \e}\frac{|A(x)|}{\left( \ep^{2}+|x|^{2}\right)^{N-4} }\, dx\\
\leq \ep^{4}\left(\sup_{|x|\leq \alpha}|A(x)|\right) w_{N-1}\int_{ 0}^{\alpha \ep^{-1}}\frac{r^{N-1}}{\left( 1+r^{2}\right)^{N-4} }\, dr. \end{equation} From here, we distinguish our proof to three cases:\par
\noindent \textbf{Case 1:} If $N\geq 9$, we have $U\in L^2(\rr^N)$, and then there exists $C_{2, \alpha}>0$ such that \begin{equation*}
\ep^{N-4}\, \int_{ B_\alpha(0)\cap\e}\frac{|A(x)|}{\left( \ep^{2}+|x|^{2}\right)^{N-4} }\, dx
\leq C\ep^4 \,\left(\sup_{|x|\leq \alpha}|A(x)|\right) . \end{equation*}
Take now $\theta>0$. Then there exists $\alpha_0>0$ such that for all $\alpha<\alpha_0$ we have that $C\left(\sup_{|x|\leq \alpha}|A(x)|\right) \leq \frac{\theta}{2}$. On the other hand, we have $\lim_{\ep\to 0}C_{1,\alpha} \ep^{N-8}=0$, then there exists $\ep_0:=\ep(\theta)>0$ such that $C_{1,\alpha}\ep^{N-8}\leq \frac{\theta}{2}$. Then, we have $I_{2,\ep}=o(\ep^4)$ as $\ep\to 0$. \par
\noindent \textbf{Case 2:} The proof of case $N=8$ is similar to the proof of \eqref{eq:i11ep}. \end{proof}
\noindent Plugging \eqref{eq:i11ep} and \eqref{eq:i2ep} into \eqref{eq:Uepx4}, we obtain \eqref{eq:i1ep}. This proves Step \ref{step:casegammapositive}.1. \qed\par \noindent For $N\geq 5$, it is also classical as $\ep \to 0$ that: \begin{eqnarray}
\int_{\rr_+^N} \left|\Delta U_\ep (x) \right|^2\, dx&=& \int_{\rr^N}\left| \Delta U\right|^2\, dx+O(\ep^{N-4}),\label{eq:la}\\ \int_{\rr^N_+}| U_{\ep}|^{\crito}\, dx&=& \int_{\rr^N}| U|^{\crito}\, dx+O(\ep^{N}).\label{eq:U2**} \end{eqnarray}
\noindent Combining \eqref{eq:i1ep}, \eqref{eq:la} and \eqref{eq:U2**}, we have that \begin{eqnarray*}
I_{\gamma, 0}^{\e}(U_\ep)=\left\{\begin{array}{ll}
S_N-\gg|x_0|^{-4}c\,\ep^{4}+o(\ep^{N-4}) &\bb{ if } N\geq 9,\\
S_N-\gg|x_0|^{-4} c\,\ep^4\ln(\frac{1}{\ep})+o(\ep^4 \ln(\frac{1}{\ep})) &\bb{ if } N=8,
\end{array}\right. \end{eqnarray*} where $c$ is a positive constant. Since $\gg >0$ and $x_0\neq 0$, we then have $Q_{\gg,0}(\rr_+^N)< S_N$. Therefore, it follows from Step \ref{step:mq00rn} that $u\not\equiv 0$, and $u$ is an extremal for $Q_{\gg,0}(\e)$. This ends Step \ref{step:casegammapositive}.\qed
\noindent All these cases end the proof of Theorem \ref{theo1}.
\section{Proof of Theorem \ref{theo2}}\label{sec:theo2} In this section, we use the existence of extremals for Hardy-Sobolev inequality, established in Section \ref{sec:extr} to prove that there exists a nontrivial weak solution for double critical equation \eqref{eq:doublehardybi}.\par
\noindent For any function $G\in C^1(E,\rr)$ where $(E,\vv \cdot\vv)$ is a Banach space, we say that $(u_m)_{m\in \nn}\in E$ is a Palais-Smale sequence of $G$ if there exists $\beta\in\rr$ such that \begin{eqnarray*}
G(u_m)\to \beta \hbox{ and } G^{\prime}(u_m)\to 0 \hbox{ in } E^{\prime} \hbox{ as } m\to +\infty. \end{eqnarray*} Here, we say that the Palais-Smale sequence is at level $\beta$. The main tool is the Mountain-Pass Lemma of Ambrosetti-Rabinowitz \cite{AR}: \begin{theo}[Mountain-Pass Lemma \cite{AR}]\label{thAR}
We consider $G\in C^{1}(X,\rr)$ where $(X,\vv \cdot\vv)$ is a Banach space. We assume that $G(0)=0$ and that
\begin{itemize}
\item There exist $\lambda, r>0$ such that $G(u)\geq \lambda$ for all $u\in X$ such that $\vv u \vv=r$,
\item There exists $u_0$ in $X$ such that $\limsup_{t\to +\infty}G(tu_0)<0$.
\end{itemize}
We consider $t_0>0$ sufficiently large such that $\vv t_0u_0\vv >r$ and $G(t_0u_0)<0$, and
$$\beta=\inf_{c\in \Gamma} \sup_{t\in [0,1]} G(c(t)),$$
$$ \hbox{where }\Gamma:=\{c\in C^0([0,1],X) \hbox{ s.t. } c(0)=0, \, c(1)=t_0u_0\}.$$
Then, there exists a Palais-Smale sequence at level $\beta$ for $G$. Moreover, we have that $ \beta \leq \sup_{t\geq 0} G(tu_0)$. \end{theo} \noindent We define the energy functional, denoted by $E$, as \begin{equation*}
E(u):=\frac{1}{2}\int_{\e}\left( \left| \Delta u\right| ^2-\gg\frac{u^2}{|x|^4}\right)\, dx-\frac{1}{\crit}\int_{\e}\frac{|u|^{\crit}}{ |x|^s}\,dx- \frac{1}{\crito}\int_{\e} |u|^{\crito}\, dx, \end{equation*} for any $u\in \H$. Any weak solution to \eqref{eq:doublehardybi} is a critical point of $E$. In the sequel, since $\gg <\gg_{H,+}$, then \eqref{eq:coercive} holds.
\begin{prop}\label{prop0}
We assume that \eqref{eq:coercive} holds. Fix $u_0\in \H$ such that $u_0\geq 0$, $u_0\not\equiv 0$. Then there exists a sequence $(u_m)_{m\in\nn}\in \H$ that is a Palais-Smale sequence for $E$ at level $\beta$ such that $0<\beta\leq \sup_{t\geq 0}E(tu_0)$. \end{prop} \noindent{\it Proof of Proposition \ref{prop0}:} Indeed, clearly $E\in C^1(\H)$. Note that $\E(0)=0$. It follows from \eqref{eq:coercive} and the Sobolev and Hardy-Sobolev embeddings that there exist $c_0,c_1,c_2>0$ such that \begin{equation}\label{1}
\E(u)\geq c_0 \vv u \vv^2 -c_1\vv u\vv^{\crit}-c_2\vv u\vv^{\crito} \hbox{ for all } u\in \H. \end{equation} Define $f(r)=r^2 \left[ c_0-c_1r^{\crit-2}-c_2r^{\crito-2}\right] :=r^2 g(r)$ and since $\crit,\crito>2$ we have $g(r)\to c_0$ as $r\to 0$. Then there exists $r_0>0$ such that, for all $r<r_0$, we have $g(r)>\frac{c_0}{2}$. Therefore, for all $u\in\H$ such that $\vv u \vv =\frac{r_0}{2}$ and by \eqref{1}, we have $\E(u)\geq \frac{c_0r_0^2}{8}:=\lambda$. We fix $u_0\in\H$, $u_0\not\equiv 0$, and \begin{eqnarray*}
\E(tu_0)&=&\frac{t^2}{2}\int_{\e}\left( \left| \Delta u_0\right| ^2-\frac{\gg}{|x|^4}u_0^2\right) dx\\
&-&\frac{t^{\crit}}{\crit}\int_{\e}\frac{|u_0|^{\crit}}{|x|^s}dx
- \frac{t^{\crito}}{\crito} \int_{\e}|u_0|^{\crito}\, dx\\
&:=&\frac{t^2}{2}R_1
-\frac{t^{\crit}}{\crit}R_2
- \frac{t^{\crito}}{\crito}R_3\leq t^{\crit}\left( \frac{t^{2-\crit}}{2}R_1
-R_2\right), \end{eqnarray*} where $R_1,R_2>0$ and $R_3\geq 0$. Since $\crit>2$, we have $E(tu_0)\to -\infty$ as $t\to +\infty$. Then $\limsup_{t\to +\infty} E(tu_0)<0$. We consider $t_0>0$ large such that $\vv t_0u_0\vv >r$ and $E(t_0u_0)<0$. For $t\in[0,1]$, we have $E(c(t))\geq \lambda$ and then there exists \begin{eqnarray*}
\beta:=\inf_{c\in \Gamma} \sup_{t\in [0,1]} E(c(t))\geq \lambda >0. \end{eqnarray*} We apply Theorem \ref{thAR} to get the expected Palais-Smale sequence. This ends the proof of Proposition \ref{prop0}.\qed \begin{prop}\label{prop00} Suppose $0<\gg <\gg_{H,+}$, $0\leq s<4$ and $N\geq 8$. We assume that \eqref{eq:coercive} holds. Then there exists a sequence $(u_m)_{m\in\nn}\in \H$ that is a Palais-Smale sequence for $E$ at level $\beta$ such that \begin{equation}\label{eq:levelbeta}
0<\beta <\beta^{\star}:=\min\left\lbrace\frac{2}{N}\,Q_{\gg,0}(\e)^{\frac{N}{4}}, \frac{4-s}{2(N-s)}Q_{\gg,s}(\e)^{\frac{N-s}{4-s}}\right\rbrace . \end{equation}
\end{prop}
\noindent{\it Proof of Proposition \ref{prop00}:} From Theorem \ref{theo1}, we know that there exists an extremal $u_0\in\H$ for $Q_{\gg,0}(\e)$ whenever $\gg>0$ and $N\geq 8$. It follows then from Proposition \ref{prop0} that there exists a sequence $(u_m)_{m\in\nn}\in \H$ that is a Palais-Smale sequence for $\E$ at level $\beta$ such that $$\beta\leq \sup_{t\geq 0}E(tu_0)\leq \sup_{t\geq 0} f_1(t),$$
where: $$f_1(t):= \frac{t^2}{2}\int_{\e}\left( \left| \Delta u_0\right| ^2-\frac{\gg}{|x|^4}u_0^2\right)\, dx- \frac{t^{\crito}}{\crito}\int_{\e} |u_0|^{\crito}\, dx \hbox{ for all } t>0.$$
Simple computations yield that $f_1(t)$ attains its maximum at the point $$t_{max}=\left(\frac{\int_{\e}\left( \left| \Delta u_0\right| ^2-\frac{\gg}{|x|^4}u_0^2\right)\, dx}{\int_{\e} |u_0|^{\crito}\, dx} \right)^{\frac{1}{\crito-2}}. $$ Therefore, using $\crito=\frac{2N}{N-4}$ and $u_0$ is an extremal for $Q_{\gg,0}(\e)$ yields, \begin{align*}
\sup_{t\geq 0} f_1(t)&=\left[\frac{1}{2}-\frac{1}{\crito} \right]\left(\frac{\int_{\e}\left( \left| \Delta u_0\right| ^2-\frac{\gg}{|x|^4}u_0^2\right)\, dx}{\left( \int_{\e} |u_0|^{\crito}\, dx\right)^\frac{2}{\crito} } \right)^{\frac{\crito}{\crito-2}}=\frac{2}{N}Q_{\gg,0}(\e)^{\frac{N}{4}}. \end{align*} Thus, $\beta \leq \sup_{t\geq 0} f_1(t)=\frac{2}{N}\,Q_{\gg,0}(\e)^{\frac{N}{4}}.$ We now prove that this inequality is strict. Assume by contradiction that $$\sup_{t\geq 0}E(tu_0)= \sup_{t\geq 0} f_1(t),$$ and we consider $t_1>0$ (resp. $t_2>0$) where $\sup_{t\geq 0}E(tu_0)$ (resp. $\sup_{t\geq 0} f_1(t)$) is attained. We obtain that
$$f_1(t_1)-\frac{t_1^{\crit}}{\crit}\int_{\e}\frac{|u_0|^{\crit}}{|x|^s}\, dx = f_1(t_2),$$ which gives $f_1(t_1)-f_1(t_2)>0$ (because $t_1>0$). This contradicts the fact that $t_2$ is a maximum point of $f_1(t)$. Therefore, we have $\beta <\frac{2}{N}\,Q_{\gg,0}(\e)^{\frac{N}{4}}$. Similarly, we get $\beta < \frac{4-s}{2(N-s)}Q_{\gg,s}(\e)^{\frac{N-s}{4-s}}$ whenever $s>0$. Thus, we can define $\beta^{\star}$ as in \eqref{eq:levelbeta} such that $0<\beta<\beta^{\star}$. This proves Proposition \ref{prop00}.\qed
\begin{prop}\label{prop2} Suppose $0<\gg < \gg_{H,+}$, $0\leq s<4$ and $N\geq 8$. We assume that $(u_m)_{m\in \nn}$ is a Palais-Smale sequence of $E$ at energy level $\beta \in (0, \beta^\star)$. If $u_m \rightharpoonup 0 \hbox{ weakly in } \H$ as $m\to +\infty$, then there exists $\ep:=\ep(N,\gamma,s,\beta)>0$ such that, for every $r>0$, \begin{eqnarray*}
\hbox{ either }\lim_{m\to +\infty}\sup \int_{B_r(0)} \left| u_m\right|^{\crito}\, dx =\lim_{m\to +\infty}\sup \int_{B_r(0)} \frac{\left| u_m\right|^{\crit}}{|x|^s}\, dx=0; \end{eqnarray*} \begin{eqnarray*}
\hbox{ or } \lim_{m\to +\infty}\sup \int_{B_r(0)} \left| u_m\right|^{\crito}\, dx\geq\ep \hbox{ and } \lim_{m\to +\infty}\sup \int_{B_r(0)} \frac{\left| u_m\right|^{\crit}}{|x|^s}\, dx\geq \ep. \end{eqnarray*}
\end{prop}
\noindent{\it Proof of Proposition \ref{prop2}:} Indeed, the proof of this proposition is divided into several steps :
\begin{step}\label{step:00} For $s\in(0,4)$, let $K$ be an arbitrary compact set in $\e\backslash \{0\}$. We claim that
\begin{align*}
&\lim_{m\to +\infty} \int_{K}\frac{u_m^{\crit}}{|x|^s}\, dx=\lim_{m\to +\infty}\int_{K}\frac{u_m^2}{|x|^4}\, dx=0,\\
&\lim_{m\to +\infty}\int_K\left| \Delta u_m\right|^2\, dx=\lim_{m\to +\infty}\int_{K}|u_m|^{\crito}\, dx =0.
\end{align*} \end{step} \noindent{\it Proof of Step \ref{step:00}:} Note that $u_m \rightharpoonup 0 \hbox{ weakly in } \H$ implies that
$u_m \to 0\hbox{ strongly in } L^q_{loc}(\e)$ for $1\leq q<\crito$. Therefore, since $2<\crit<\crito$ because $0<s<4$ and the fact $|x|^{-1}$ is bounded on $K$, we get \begin{equation}\label{eq:step5.1:0}
\int_{K}\frac{u_m^{\crit}}{|x|^s}\, dx=o(1) \bb{ and }\int_{K}\frac{u_m^2}{|x|^4}\, dx=o(1)\bb{ as } m\to +\infty. \end{equation} We take $\eta \in C^\infty_{c}(\e\backslash\{0\})$ such that $\eta=1$ in $K$ and $0\leq \eta \leq1$. \par
\noindent\textit{Step \ref{step:00}.1} We claim $\bb{ as } m\to +\infty$ that \begin{equation}\label{eq:etaDeltaum}
\int_{\e}\left| \eta\Delta u_m\right|^2\, dx
\leq \left( \int_{\e}|\eta u_m|^{\crito}\, dx \right)^{\frac{2}{\crito}}\left( \int_{\e}| u_m|^{\crito}\, dx \right)^{\frac{\crito-2}{\crito}}+\, o(1) . \end{equation}
\noindent \textit{Proof of Step \ref{step:00}.1 :} Indeed, using $\lim\limits_{m\to +\infty}\langle \E^{\prime}(u_m), \eta^2u_m\rangle=0$ yields, \begin{eqnarray}\label{eq:001}
o(1)&=&\langle \E^{\prime}(u_m), \eta^2u_m\rangle
=\int_{\e} \langle \Delta u_m, \Delta(\eta^2u_m)\rangle\, dx -\gg\int_{\e}\frac{\left|\eta u_m \right|^2 }{|x|^4}\, dx\nonumber\\
&&\hspace{4cm}- \int_{\e}\frac{\eta^2|u_m|^{\crit}}{ |x|^s}\,dx- \int_{\e}\eta^2 |u_m|^{\crito}\, dx. \end{eqnarray} Regarding the first term, we have \begin{eqnarray}\label{eq:01}
\int_{\e} \langle \Delta u_m, \Delta(\eta^2u_m)\rangle\, dx&=&\int_{\e}\left| \eta\Delta u_m\right|^2\, dx+\int_{\e} u_m\Delta u_m\Delta(\eta^2)\, dx\nonumber\\
&&+2\int_{\e}\Delta u_m\langle\nabla (\eta^2), \nabla u_m\rangle\, dx. \end{eqnarray} From H\"older's inequality and since $\nabla u_m \to 0$ in $L_{loc}^2(\e)$ as $m\to +\infty$, we get \begin{align}\label{eq:02}
\int_{\e}\Delta u_m\langle\nabla (\eta^2), \nabla u_m\rangle\, dx
&=O\left(\vv \nabla (\eta^2)\vv_\infty\int_{supp\left( \nabla(\eta^2)\right)}\left| \Delta u_m\right| \left| \nabla u_m\right| \, dx \right)\nonumber \\
&=O\left(\vv \nabla (\eta^2)\vv_\infty\vv u_m\vv\left( \int_{supp\left( \nabla(\eta^2)\right) } \left| \nabla u_m\right|^2 \, dx \right)^{\frac{1}{2}}\right)\nonumber\\
&=o(1) \bb{ as } m\to +\infty. \end{align} Also, since $ u_m \to 0$ in $L_{loc}^2(\e)$ as $m\to +\infty$, we obtain that \begin{align}\label{eq:03} \int_{\e} u_m\Delta u_m\Delta(\eta^2)\, dx=o(1) \bb{ as } m\to +\infty. \end{align} Plugging \eqref{eq:02} and \eqref{eq:03} in \eqref{eq:01} yields \begin{align*}
\int_{\e} \langle \Delta u_m, \Delta(\eta^2u_m)\rangle\, dx=\int_{\e}\left| \eta\Delta u_m\right|^2\, dx+o(1) \bb{ as } m\to +\infty. \end{align*} We write $D:=supp(\eta)$. It follows from \eqref{eq:001} that \begin{equation}\label{eq:prop:1}
o(1)
=\int_{\e}\left| \eta\Delta u_m\right|^2\, dx -\gg\int_{D}\frac{\left|\eta u_m \right|^2 }{|x|^4}\, dx
- \int_{D}\frac{\eta^2|u_m|^{\crit}}{ |x|^s}\,dx- \int_{\e}\eta^2 |u_m|^{2^{\star\star}(0)}\, dx. \end{equation} Arguing as for \eqref{eq:step5.1:0}, we get \begin{equation*}
\lim_{m\to +\infty} \int_{D}\frac{\left|\eta u_m \right|^2 }{|x|^4}\, dx=\lim_{m\to +\infty}\int_{D}\frac{\eta^2|u_m|^{\crit}}{ |x|^s}\,dx=0. \end{equation*} Therefore, by \eqref{eq:prop:1} and using again the Hölder's inequality, we find as $m\to +\infty$ that \begin{align*}
\int_{\e}\left| \eta\Delta u_m\right|^2\, dx
&\leq \left( \int_{\e}|\eta u_m|^{\crito}\, dx \right)^{\frac{2}{\crito}}\left( \int_{\e}| u_m|^{\crito}\, dx \right)^{\frac{\crito-2}{\crito}}+o(1). \end{align*}
This proves Step \ref{step:00}.1.\qed\par
\noindent\textit{Step \ref{step:00}.2} We claim that
\begin{equation}\label{eq:Delta(etaum)}
\int_{\e} \left| \Delta(\eta u_m)\right|^2\, dx =\int_{\e}\left|\eta \Delta u_m\right|^2\, dx+o(1) \bb{ as } m\to +\infty.
\end{equation}
\noindent \textit{Proof of Step \ref{step:00}.2:} Indeed, using the following inequality for $X, Y\in \rr$
$$ \left| \left| X+Y\right|^2-\left| X\right|^2\right| \leq C\, \left( \left| X\right| \left| Y\right| +\left| Y\right|^2\right), $$ with $X:= \eta \Delta u_m$ and $Y:= u_m\Delta \eta+2\nabla \eta \nabla u_m$, and we obtain that \begin{align}
\int_{\e}\left( \left| \Delta(\eta u_m)\right|^2-\left|\eta \Delta u_m\right|^2\right) \, dx
&=O\left( \int_{\e}\left| \eta \Delta u_m\right| \left|u_m\Delta \eta+2\nabla \eta \nabla u_m \right|\, dx \right.\label{eq:step01:0} \\
&\left. +\int_{\e}\left|u_m\Delta \eta+2\nabla \eta \nabla u_m \right|^2\, dx \right). \nonumber \end{align} Using Hölder's inequality and $u_m\to 0$ in $L^2_{loc}(\e)$ and $H^1_{loc}(\e)$ as $m\to +\infty$ \begin{align}\label{eq:step01:1}
\int_{\e}\left| \eta \Delta u_m\right| \left|u_m\Delta \eta+2\nabla \eta \nabla u_m \right|\, dx&=O\left( \vv \eta \vv_{\infty}\vv \Delta \eta\vv_\infty \vv u_m\vv \left( \int_{supp\left( \eta\right)\cap supp\left( \Delta\eta\right)} u_m^2\, dx \right)^{\frac{1}{2}}\right.\nonumber \\
&\left. + \vv \eta \vv_{\infty}\vv \nabla \eta\vv_\infty \vv u_m\vv \left( \int_{supp\left( \eta\right)\cap supp\left( \nabla\eta\right)} \left| \nabla u_m\right|^2\, dx \right)^{\frac{1}{2}}\right)\nonumber\\
&=o(1) \bb{ as } m\to +\infty. \end{align} In a similar way, we have \begin{equation}\label{eq:step01:2}
\int_{\e}\left|u_m\Delta \eta+2\nabla \eta \nabla u_m \right|^2\, dx=o(1) \bb{ as } m\to +\infty. \end{equation} Injecting \eqref{eq:step01:1} and \eqref{eq:step01:2} in \eqref{eq:step01:0}, we obtain \eqref{eq:Delta(etaum)}. This proves Step \ref{step:00}.2. \qed \par
\noindent Using \eqref{eq:etaDeltaum} and \eqref{eq:Delta(etaum)} yields, as $m\to +\infty$, \begin{equation}\label{eq:step01:3}
\int_{\e} \left| \Delta(\eta u_m)\right|^2\, dx
\leq \left( \int_{\e}|\eta u_m|^{\crito}\, dx \right)^{\frac{2}{\crito}}\left( \int_{\e}| u_m|^{\crito}\, dx \right)^{\frac{\crito-2}{\crito}}+o(1). \end{equation}
Now, since $\eta u_m\in \H$, and we go back to the definition of $Q_{\gg,0}(\rr_+^N)$, and $\lim\limits_{m\to +\infty} \int_{supp(\eta)}\frac{\left|\eta u_m \right|^2 }{|x|^4}\, dx=0$, we have that \begin{equation}\label{ineq:step:01:sobolev}
\left( \int_{\e}|\eta u_m|^{\crito}\, dx \right)^{\frac{2}{\crito}}\leq Q_{\gg,0}(\e)^{-1}\vv \eta u_m\vv^2+o(1) \bb{ as } m\to +\infty. \end{equation} It follows from \eqref{eq:step01:3} and \eqref{ineq:step:01:sobolev} that \begin{align}\label{eq:step01:5}
\left[ 1- Q_{\gg,0}(\e)^{-1} \left( \int_{\e}| u_m|^{\crito}\, dx \right)^{\frac{\crito-2}{\crito}}\right] \vv \eta u_m\vv^2\leq o(1) \bb{ as } m\to +\infty. \end{align} Since $E(u_m)=\beta+o(1)$ and $E^{\prime}(u_m)=o(1)$ as $m\to +\infty$, we have that \begin{align}
\beta+o(1)&=E(u_m)-\frac{1}{2}\langle E^{\prime}(u_m), u_m\rangle\nonumber\\
&=\left[\frac{1}{2}-\frac{1}{\crit} \right]\int_{\e}\frac{|u_m|^{\crit}}{|x|^s}\, dx +\left[\frac{1}{2}-\frac{1}{\crito} \right]\int_{\e}|u_m|^{\crito}\, dx.\label{eq:step01:4} \end{align} Therefore, since $\crit >2$ when $0\leq s<4$, we obtain that \begin{align}\label{eq:step01:05}
\int_{\e}\frac{|u_m|^{\crit}}{|x|^s}\, dx\leq 2\, \beta\left[ \frac{N-s}{4-s}\right] +o(1) \bb{ and } \int_{\e}|u_m|^{\crito}\, dx \leq \, \frac{N}{2} \beta+ o(1) \bb{ as } m\to +\infty. \end{align} Therefore, It follows from \eqref{eq:step01:5} and $\frac{\crito-2}{\crito}=\frac{4}{N}$ that \begin{align}\label{eq:step01:6}
\left[ 1- Q_{\gg,0}(\e)^{-1} \left( \frac{N}{2} \beta \right)^{\frac{4}{N}}\right] \vv \eta u_m\vv^2\leq o(1) \bb{ as } m\to +\infty. \end{align}
Since $\beta \in (0, \beta^\star)$, we have $ \left[ 1- Q_{\gg,0}(\e)^{-1} \left( N\,\frac{\beta}{2} \right)^{\frac{4}{N}}\right]>0$. Moreover, using inequality \eqref{eq:step01:6} yields $\lim\limits_{m\to+\infty} \vv \eta u_m\vv^2=0$. But $\eta=1$ on the compact set $K$, so $\lim_{m\to +\infty}\int_K\left| \Delta u_m\right|^2\, dx=0$; from this and by the Sobolev inequality, we obtain that $\lim\limits_{m\to +\infty}\int_{K}|u_m|^{\crito}\, dx =0$. The proof of Step \ref{step:00} is complete. \qed\par
\noindent For $R>0$, we define \begin{align*}
I_{1,R}:=\lim_{m\to +\infty}\sup \int_{B_R(0)}|u_m|^{\crito}\, dx \bb{ , } I_{2,R}:=\lim_{m\to +\infty}\sup \int_{B_R(0)}\frac{|u_m|^{\crit}}{|x|^s}\, dx, \end{align*} and \begin{equation*}
I_{3,R}:=\lim_{m\to +\infty}\sup \int_{B_R(0)}\left(\left|\Delta u_m\right|^2-\gg \frac{|u_m|^2}{|x|^4} \right)\, dx. \end{equation*} \begin{step}\label{step:01} For $R>0$, we claim that
\begin{align*}
I_{1,R}^{\frac{2}{\crito}}\leq Q_{\gg,0}(\e)^{-1}I_{3,R} \,\bb{ ; }\,I_{2,R}^{\frac{2}{\crit}}\leq Q_{\gg,0}(\e)^{-1}I_{3,R}\, \bb{ and }\, I_{3,R}\leq I_{1,R}+I_{2,R}.
\end{align*} \end{step}
\noindent{\it Proof of Step \ref{step:01}:} Indeed, for $R>0$ we take a cut-off function $\zeta\in C^{\infty}_{c}(\e)$ such that $\zeta=1$ in $B_{R}(0)$ and $0\leq \zeta\leq1$. Since $\zeta u_m \in \H$ and by the definition of $Q_{\gg,0}(\e)$, we get
\begin{eqnarray}\label{ineq:step:02:sobolev}
\left( \int_{\e}|\zeta u_m|^{\crito}\, dx \right)^{\frac{2}{\crito}}&\leq&
Q_{\gg,0}(\e)^{-1} \left[ \int_{B_R(0)}\left( \left| \Delta u_m\right| ^2-\gg\frac{| u_m|^2}{|x|^4} \right) \, dx\right. \nonumber\\
&&\left. +\, \int_{supp(\zeta)\backslash B_R(0)}\left( \left|\Delta( \zeta u_m)\right| ^2-\gg\frac{| \zeta u_m|^2}{|x|^4} \right)\, dx\right] .
\end{eqnarray} It follows from $supp(\zeta)\backslash B_R(0)\subset \e\backslash\{0\}$ and Step \ref{step:00} that
$$\lim_{m\to +\infty} \int_{supp(\zeta)\backslash B_R(0)}\left( \left|\Delta( \zeta u_m)\right| ^2-\gg\frac{| \zeta u_m|^2}{|x|^4}\right) \, dx =0.$$ Therefore, using \eqref{ineq:step:02:sobolev} and since $\zeta=1$ in $B_{R}(0)$ yields \begin{align*}
\left( \int_{B_R(0)}|u_m|^{\crito}\, dx \right)^{\frac{2}{\crito}}
&\leq Q_{\gg,0}(\e)^{-1} \int_{B_R(0)}\left( \left| \Delta u_m\right| ^2-\gg\frac{| u_m|^2}{|x|^4} \right) \, dx+o(1), \end{align*} as $m\to +\infty$, and we have that $I_{1,R}^{\frac{2}{\crito}}\leq Q_{\gg,0}(\e)^{-1}I_{3,R}$. The proof of $I_{2,R}^{\frac{2}{\crit}}\leq Q_{\gg,0}(\e)^{-1}I_{3,R}$ is similar. \par
\noindent Since $\zeta^2 u_m\in \H$ and with $\lim\limits_{m\to +\infty}\langle \E^{\prime}(u_m), \zeta^2u_m\rangle=0$, we have \begin{align}\label{eq:step:01:01}
o(1)&=\langle \E^{\prime}(u_m), \zeta^2u_m\rangle\nonumber\\
&=\int_{\e} \langle \Delta u_m, \Delta(\zeta^2u_m)\rangle\, dx -\gg\int_{\e}\frac{\left|\zeta u_m \right|^2 }{|x|^4}\, dx\\
&- \int_{\e}\frac{\zeta^2|u_m|^{\crit}}{ |x|^s}\,dx- \int_{\e}\zeta^2 |u_m|^{\crito}\, dx.\nonumber \end{align} As in the proof of Step \ref{step:00}, we have as $m\to +\infty$ that
$$\int_{\e} \langle \Delta u_m, \Delta(\zeta^2u_m)\rangle\, dx=\int_{\e} \left| \Delta( \zeta^2u_m)\right|^2\, dx+o(1).$$ Therefore, by \eqref{eq:step:01:01}, since $\zeta=1$ in $B_R(0)$, $supp(\zeta)\backslash B_R(0)\subset \rr_+^N\backslash \{0\}$ and with the result of Step \ref{step:00}, we have that \begin{align*}
\int_{B_{R}(0)}\left( \left| \Delta u_m\right|^2-\gg \frac{\left|\zeta u_m \right|^2 }{|x|^4}\right) \, dx&\leq
\int_{\e}\left( \left| \Delta\left( \zeta^2 u_m\right) \right|^2-\gg \frac{\left|\zeta u_m \right|^2 }{|x|^4}\right) \, dx\\
&= \int_{B_R(0)}\frac{|u_m|^{\crit}}{ |x|^s}\,dx+ \int_{B_{R}(0)}\left| u_m\right| ^{\crito}\, dx+o(1). \end{align*} Taking $m\to +\infty$ on both sides yields $I_{3,R}\leq I_{1,R}+I_{2,R}$. This proves Step \ref{step:01}.\qed\par
\noindent We now complete the proof of Proposition \ref{prop2}. Using Step \ref{step:01} yields \begin{align*} I_{1,R}^{\frac{2}{\crito}}\leq Q_{\gg,0}(\e)^{-1}I_{1,R}+Q_{\gg,0}(\e)^{-1}I_{2,R}, \end{align*} which gives \begin{equation}\label{eq:prop:0} I_{1,R}^{\frac{2}{\crito}}\left[1-Q_{\gg,0}(\e)^{-1}I_{1,R}^{\frac{4}{N}} \right]\leq Q_{\gg,0}(\e)^{-1}I_{2,R}. \end{equation} It follows from \eqref{eq:step01:05} and the definition of $I_{1,R}$ that $I_{1,R}\leq \frac{N}{2}\beta.$ Therefore, by \eqref{eq:prop:0}, \begin{equation*}
I_{1,R}^{\frac{2}{\crito}}\left[1-Q_{\gg,0}(\e)^{-1}\left( \frac{N}{2}\beta\right) ^{\frac{4}{N}} \right]\leq Q_{\gg,0}(\e)^{-1}I_{2,R}. \end{equation*} Since $\beta <\beta^{\star}\leq \frac{2}{N}Q_{\gg,0}(\e)^{\frac{N}{4}}$, there exists a constant $C_1(N,\gg,\beta)>0$ such that $I_{1,R}^{\frac{2}{\crito}}\leq C_1(N,\gg,\beta)\, I_{2,R}.$ Similarly, there exists $C_2(N,\gg,s,\beta)>0$ such that $I_{2,R}^{\frac{2}{\crit}}\leq C_2(N,\gg,s,\beta)\, I_{1,R}$. Combining these two inequalities we find that \begin{equation*}
I_{2,R} ^{\frac{2}{\crit}}\left[ 1-C_2(N,\gg,s,\beta)C_1(N,\gg,\beta)^{\frac{\crito}{2}}I_{2,R}^{\frac{N}{2(N-4)}}\right]\leq 0. \end{equation*} Therefore, we have $I_{2,R}=0 \bb{ or there exists } \ep:=\ep(N,\gg,s,\beta) \bb{ such that } I_{2,R}\geq \ep.$ Similarly, we have $I_{1,R}=0 \bb{ or there exists } \ep \bb{ such that } I_{1,R}\geq \ep.$ This ends the proof of Proposition \ref{prop2}.\qed\par
\noindent \underline{\textbf{End of proof of Theorem \ref{theo2}} :} Indeed, we let $(u_m)_{m\in \nn}$ be the Palais-Smale sequence for $E$ that was constructed in Proposition \ref{prop0}. First, we claim that \begin{eqnarray}\label{eq:crit000}
\lim_{m\to +\infty} \sup\int_{\e}\left|u_m \right|^{\crito}\, dx >0. \end{eqnarray}
\noindent Indeed, otherwise $\lim\limits_{m\to +\infty} \sup\int_{\e}\left|u_m \right|^{\crito}\, dx =0.$ Using again $\lim\limits_{m\to +\infty}\langle \E^{\prime}(u_m), u_m\rangle=0$ yields,
\begin{equation*}
\int_{\e}\left( \left| \Delta u_m\right|^2-\gg \frac{\left|u_m \right|^2 }{|x|^4}\right) \, dx= \int_{\e}\frac{|u_m|^{\crit}}{ |x|^s}\,dx+o(1).
\end{equation*} Therefore, we go back to the definition of $Q_{\gg,s}(\e)$ \begin{align*}
\vv u_m \vv_{\crit,s}^{2}
\leq Q_{\gg,s}(\e)^{-1}\vv u_m \vv_{\crit,s}^{\crit}+o(1). \end{align*} This gives us \begin{equation*} \vv u_m \vv_{\crit,s}^{2}\left[1-Q_{\gg,s}(\e)^{-1} \vv u_m \vv_{\crit,s}^{\crit-2} \right] \leq o(1). \end{equation*} It follows then from the left inequality of \eqref{eq:step01:05} that \begin{equation*}
\vv u_m \vv_{\crit,s}^{2} \left[1-Q_{\gg,s}(\e)^{-1}\left(2 \frac{N-s}{4-s}\beta\right) ^{\frac{\crit-2}{\crit}} \right]\leq o(1) \end{equation*}
Since $0<\beta<\beta^{\star}$, we have the quantity between the brackets is positive. Thus, we get $ \lim\limits_{m\to +\infty} \int_{\e}\frac{|u_m|^{\crit}}{ |x|^s}\,dx=0.$ Therefore, using \eqref{eq:step01:4} yields $\beta=0$ which contradicts the fact that $\beta \in (0,\beta^{\star})$. This proves the claim.\qed\par
\noindent Next, we claim that the sequence $(u_m)_{m\in \nn}$ is bounded in $\H$. \par
\noindent Indeed, since $u_m$ is a Palais-Smale sequence for $\E$ and using \eqref{eq:coercive} yields \begin{align*}
\beta+o(1)&=\E(u_m)-\frac{1}{\crit}\langle \E^{\prime}(u_m), u_m\rangle\nonumber\\
&=\left[\frac{1}{2}-\frac{1}{\crit} \right]\int_{\e}\left( \left| \Delta u_m\right|^2-\gg\frac{u_m^2}{|x|^4}\right)\, dx +\left[\frac{1}{\crit}-\frac{1}{\crito} \right]\int_{\e}|u_m|^{\crito}\, dx\\
&\geq C \left[\frac{1}{2}-\frac{1}{\crit} \right]\int_{\e}\left| \Delta u_m\right|^2\, dx, \end{align*} where $C$ is a positive constant. It follows then from $2<\crit<\crito$ that $u_m$ is bounded in $\H$. This proves the claim. \qed\par
\noindent Since $u_m$ is bounded in $\H$, then there exists $u\in \H$ such that $u_m \rightharpoonup u\hbox{ weakly in } \H$. If $u\not\equiv 0$, we get that $u$ is a nontrivial weak solution of \eqref{eq:doublehardybi}. \par
\noindent If $u\equiv0$, we have $u_m \rightharpoonup 0\hbox{ weakly in } \H$. We claim that, for small enough $\ep^\prime>0$, there exists another Palais-Smale sequence $(v_m)_{m\in \nn}$ satisfying the properties of Proposition \ref{prop2} and
$$\int_{B_1(0)}\left|v_m \right|^{\crito}\, dx =\ep^{\prime} \bb{ ; $v_m$ is bounded in $\H$ for all } m\in \nn.$$
\noindent Indeed, by \eqref{eq:crit000}, we can take $c:=\lim\limits_{m\to +\infty} \sup\int_{\e}\left|u_m \right|^{\crito}\, dx$. We set $\ep_0:=\min\{c, \frac{\ep}{2}\}$, where $\ep>0$ is the same which we obtain from Proposition \ref{prop2}. Therefore, for any $\ep^\prime \in (0, \ep_0)$, there exists a sequence $(r_m)_{m\in \nn}>0$ such that up to a subsequence $\int_{B_{r_m}(0)}\left|u_m \right|^{\crito}\, dx =\ep^\prime$. Define now $v_m(x):=r_m^{\frac{N-4}{2}}u_m(r_mx) \bb{ for all } x\in \rr_+^N.$ With a change of variables, we write \begin{equation}\label{eq:0}
\int_{B_{1}(0)}\left|v_m \right|^{\crito}\, dx = \int_{B_{r_m}(0)}\left|u_m \right|^{\crito}\, dx =\ep^\prime. \end{equation} As one checks, $(v_m)_{m\in \nn}$ is also a Palais-Smale sequence for $\E$ that satisfies the properties of Proposition \ref{prop2}. Using the definition of $v_m$ and the boundedness of the sequence $u_m$, we get that $v_m$ is bounded in $\H$. This ends the proof of the claim. \qed \par
\noindent Hence, we can assume that there exists $v\in \H$ such that, up to a subsequence, $v_m \rightharpoonup v\hbox{ weakly in } \H$.\par
\noindent We claim now that $v$ is a nontrivial weak solution of problem \eqref{eq:doublehardybi}.\par
\noindent Indeed, suppose that $v\equiv 0$. It follows from the result of Proposition \ref{prop2} that \begin{eqnarray*}
\hbox{ either }\limsup_{m\to +\infty} \int_{B_1(0)} \left| v_m\right|^{\crito}\, dx =0 \hbox{ or } \limsup_{m\to +\infty} \int_{B_1(0)} \left| v_m\right|^{\crito}\, dx \geq \ep. \end{eqnarray*} Since $\ep^\prime \in (0, \frac{\ep}{2})$, this is a contradiction with \eqref{eq:0}. Hence $v\not\equiv 0$.\qed\par
\noindent Since $(v_m)_{m\in \nn}$ is a Palais-Smale sequence for $\E$, we have \begin{eqnarray} o(1)&=& \langle \E^\prime(v_m), \varphi \rangle \nonumber\\
&=&\int_{\e} \langle \Delta v_m, \Delta \varphi \rangle \, dx-\gg\int_{\e}\frac{v_m\varphi}{|x|^4}\, dx\label{eq:000}\\
&&-\int_{\e} \left|v_m \right|^{\crito-2}v_m\varphi\, dx-\int_{\e}\frac{\left|v_m\right|^{\crit-2}v_m\varphi}{ |x|^s}\,dx,\nonumber \end{eqnarray} for all $\varphi\in C^\infty_c(\e)$. Using $v_m \rightharpoonup v\hbox{ weakly in } \H$ yields \begin{equation}\label{eq:convergefaible} \lim_{m\to +\infty} \int_{\e} \langle \Delta v_m, \Delta \varphi \rangle \, dx = \int_{\e} \langle \Delta v, \Delta \varphi \rangle \, dx \bb{ for all }\varphi\in C^\infty_c(\e). \end{equation}
Since $v_m$ is bounded in $\H$, we get that $v_m$, $\left|v_m \right|^{\crito-2}v_m$ and $\left|v_m\right|^{\crit-2}v_m$ are bounded in $L^2(|x|^{-4}, \e)$, $L^{\frac{\crito}{\crito-1}}(\e)$ and $L^{\frac{\crit}{\crit-1}}(|x|^{-s}, \e)$ respectively. Therefore, we get that \begin{eqnarray*}
\left\{\begin{array}{ll}
v_m \rightharpoonup v \bb{ weakly in } L^2(|x|^{-4}, \e),\\
\left|v_m \right|^{\crito-2}v_m \rightharpoonup\left|v \right|^{\crito-2}v \bb{ weakly in } L^{\frac{\crito}{\crito-1}}(\e),\\
\left|v_m\right|^{\crit-2}v_m\rightharpoonup\left|v\right|^{\crit-2}v \bb{ weakly in }
L^{\frac{\crit}{\crit-1}}(|x|^{-s}, \e).
\end{array}\right. \end{eqnarray*} Moreover, passing to the limit $m\to +\infty$ in \eqref{eq:000} and using \eqref{eq:convergefaible} yields \begin{eqnarray*}
o(1)
&=&\int_{\e} \langle \Delta v, \Delta \varphi \rangle \, dx-\gg\int_{\e}\frac{v\varphi}{|x|^4}\, dx\\
&&-\int_{\e} \left|v \right|^{\crito-2}v\varphi\, dx-\int_{\e}\frac{\left|v\right|^{\crit-2}v\varphi}{ |x|^s}\,dx \bb{ for all }\varphi\in C^\infty_c(\e). \end{eqnarray*} Thus, $v$ is a weak solution of \eqref{eq:doublehardybi}. This completes the proof of Theorem \ref{theo2}.\qed \section{Appendix} \begin{prop}\label{prop:nonext}
Let $\Omega \subset\rr^N$ be a smooth domain such that $0\in \partial \Omega\neq\emptyset$ (no boundedness is assumed). If $\gg \leq 0$ and $s=0$, we have that $Q_{\gg,0}(\Omega)= S_N$, where $S_N$ is defined in \eqref{eq:bestconstantsoblevrn}, and there is no extremal. \end{prop}
\noindent \textit{Proof of Proposition \ref{prop:nonext}:} Indeed, we recall that $\crito=\frac{2N}{N-4}$. Since $\gg\leq 0$, we find \begin{eqnarray*}
\frac{\int_{\Omega} \left( |\Delta u|^2-\gg \frac{u^2}{|x|^4}\right) \, dx}{\left(\int_\Omega |u|^{\crito}\, dx \right)^{\frac{2}{\crito}}}\geq \frac{\int_{\Omega} |\Delta u|^2 \, dx}{\left(\int_\Omega |u|^{\crito}\, dx \right)^{\frac{2}{\crito}}}\geq S_N, \end{eqnarray*} and we have that $Q_{\gg,0}(\Omega)\geq S_N$.
Fix $x_0\in \Omega$ such that $x_0\neq 0$. We define $(U_\ep)_\ep$ as in \eqref{def:Ue}. It follows from \eqref{eq:i1ep} that
$\lim\limits_{\ep\to 0}\int_\Omega \frac{U_\ep^2}{|x|^4}\, dx=0$. Moreover, with \eqref{eq:la} and \eqref{eq:U2**}, we get that
\begin{equation*}
\lim_{\ep\to 0} \frac{\int_{\Omega}|\Delta U_\ep|^2\, dx}{\left(\int_\Omega |U_\ep|^{\crito}\, dx \right)^{\frac{2}{\crito}} }=S_N. \end{equation*} We then get $Q_{\gg,0}(\Omega)\leq S_N$. This proves that $Q_{\gg,0}(\Omega)=S_N$. If there were an extremal for $Q_{\gg,0}(\Omega)$, it would also be an extremal for $S_N$, and its support would be the whole of $\rr^N$; this contradicts the fact that any extremal for $Q_{\gg,0}(\Omega)$ is supported in the smooth domain $\Omega\neq \rr^N$. This proves Proposition \ref{prop:nonext}.\qed
\end{document} | arXiv | {
"id": "2303.09641.tex",
"language_detection_score": 0.45592379570007324,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Cloning and Joint Measurements of Incompatible Components of Spin} \author{Thomas Brougham, Erika Andersson and Stephen M. Barnett} \affiliation{SUPA, Department of Physics, University of Strathclyde, Glasgow G4 0NG, UK} \date{\today} \pacs{ 03.67.-a, 03.65.Ta}
\begin{abstract} A joint measurement of two observables is a {\it simultaneous} measurement of both quantities upon the {\it same} quantum system. When two quantum-mechanical observables do not commute, then a joint measurement of these observables cannot be accomplished by projective measurements alone. In this paper we shall discuss the use of quantum cloning to perform a joint measurement of two components of spin associated with a qubit system. We introduce a cloning scheme which is optimal with respect to this task. This cloning scheme may be thought to work by cloning two components of spin onto its outputs. We compare the proposed cloning machine to existing cloners.
\end{abstract} \maketitle
\section{Introduction} Quantum information has some fundamental differences from classical information. One of the most famous of these is the inability to perfectly clone an arbitrary quantum state. This important observation, first enunciated by Wootters and Zurek, is referred to as the no-cloning theorem \cite{noclone}. While quantum mechanics precludes perfect cloning it does allow one to create approximate clones. The first such scheme was implicit in the proof of the no-cloning theorem. This cloning procedure perfectly cloned states from a designated orthogonal basis, but fared less well with states that were a superposition within the prescribed basis.
Since this work many other cloning machines have been devised. Although the various cloners all share the same common goal of trying to copy a quantum state, the precise way that this is achieved differs due to the different specifications that they are subject to. One example is the {\it universal cloner} of Buzek and Hillery \cite{universal}, which was designed to clone all qubit states equally well and produces two identical output cloned states. Dropping the requirement that the clones should be identical is sometimes useful and leads to the {\it universal asymmetric cloner} \cite{cloning4, cloning5}. Another approach is to try to clone a restricted set of states as well as possible. Such cloners are known as state dependent cloners \cite{cloning1}. The cloning machine of Wootters and Zurek is an example of a state dependent cloner, where the states of interest are orthogonal. The examples given thus far have applied to cloning 2-level systems (qubits); imperfect cloning of $n$-level systems is, however, also possible \cite{cloning6, cloning7}. Yet another important development is the experimental implementation of cloning \cite{experiment1, experiment2, experiment3}. For a thorough review of all these topics see \cite{cloningRev}.
The cloners described thus far are sometimes referred to as {\it deterministic}, as they always return the same output for each particular input state. {\it Probabilistic cloners} also exist \cite{cloning2, cloning3}. These allow a state, drawn from a specific linearly independent set, to be cloned exactly with a certain probability, in such a way that one knows when the cloning has succeeded and when it has failed.
The relationship between deterministic and probabilistic cloners is similar to the relationship between minimum error state discrimination and unambiguous state discrimination \cite{cloning3}. In minimum error state discrimination, we are given a quantum state drawn from a known set of states, and we seek to determine which one. If, however, the states we wish to distinguish between are non-orthogonal, then we cannot distinguish them perfectly.
Therefore we aim to distinguish between the states as well as possible, by minimising the overall probability of error. In unambiguous state discrimination, on the other hand, we seek to never mis-identify a state. The price we must pay to achieve this is to accept that we will sometimes not get an answer at all. To illustrate this, consider the case of two non-orthogonal states $\{|s_1\rangle$, $|s_2\rangle\}$. For unambiguous discrimination we would require three outcomes corresponding to the state being $|s_1\rangle$, $|s_2\rangle$, and to the outcome of the experiment being undetermined. For a review of state discrimination see \cite{chefles}.
In this paper we shall explore another connection between measurement and cloning. One way of performing a {\it joint measurement} of two observables would be to first clone the quantum system to produce two copies, and then to measure one quantum observable on each clone. While this approach can be used to jointly measure any pair of observables, it is important to note that current cloning procedures have not been devised with this in mind. We will derive the optimal cloning procedure with respect to measuring two incompatible spin components of a spin-1/2 particle. The paper will be organised in the following way: In section \ref{sharpness}, we introduce joint quantum measurements. We evaluate how effective current cloners are for the task of performing a joint measurement of two spin components of a spin-1/2 particle. After this we shall describe an existing scheme for performing a joint measurement of two components of spin. This will lead us, in section \ref{construct}, to a cloning scheme which is optimal with respect to the task of performing a joint measurement of spin. In section \ref{fid} we shall calculate the global fidelity for this cloning procedure. We conclude with a discussion in Section \ref{discussion}.
\section{ Joint quantum measurements} \label{sharpness} A joint quantum measurement of two observables is a {\it single} measurement of a quantum system that allows us to simultaneously give values to {\it both} observables. When the two observables of interest commute, then the joint measurement can be accomplished with standard von Neumann or projective quantum measurements. When the observables do not commute then we must adopt a more generalized view of quantum measurements. This is provided by the probability operator measures (POMs), also called positive operator valued measures (POVMs). In the POM formalism each measurement outcome has assigned to it a measurement operator $\hat\Pi_i$, with the probability for outcome $i$ given by $\text{Tr}\{\hat\rho\hat\Pi_i\}$ for any measured state $\hat\rho$. This requirement leads to the fact that the measurement operators $\hat\Pi_i$ must have eigenvalues which are either positive or zero. There is no requirement, however, that the measurement operators are projectors onto eigenstates. The measurement operators must also sum to the identity operator, as the sum of all the probabilities for different outcomes is one. We should point out that one feature of generalized measurements is that the number of measurement operators and outcomes is not restricted to be less than or equal to the number of dimensions of the measured quantum system. A detailed discussion of this elegant approach can be found in references \cite{nch, preskill, peres}.
One of the earliest investigations of joint measurements was that of Arthurs and Kelly \cite{AK}, who discussed simultaneous measurements of position and momentum. Their method was to introduce two ancillary systems that would track both the position and momentum of the particle. This method was extended by Arthurs and Goodman to yield an uncertainty principle for any two jointly measured observables \cite{AG}.
It is frequently assumed that joint measurements satisfy the {\it joint unbiasedness condition} \cite{AG, erikas, hall}\footnote{It is not necessary to assume the joint unbiasedness condition. Relaxing this condition would lead to a more general description of joint measurements \cite{hall}.}. This condition states that the expectation values for the jointly measured observables should be proportional to the expectation values for the observables measured by themselves. For the case of jointly measuring two components of a spin-$1/2$ system, we shall denote the directions of the two components by the unit vectors ${\bf a}$ and ${\bf b}$. The observables that we seek to measure are $\hat A={\bf a\cdot\hat\sigma}$ and $\hat B={\bf b\cdot\hat\sigma}$, where ${\bf\hat\sigma}$ is a vector, the cartesian components of which are the familiar Pauli matrices. The joint unbiasedness condition will now take the form $\langle\hat A_J\rangle=\alpha\langle \hat A\rangle$ and $\langle\hat B_J\rangle=\beta\langle \hat B\rangle$, where
$\langle\hat A_J\rangle$ and $\langle\hat B_J\rangle$ are the expectation values for the jointly measured observables. We will assume that the measurement outcomes are $\pm 1$, so that the magnitude of the real constants $|\alpha|$ and $|\beta|$ will vary between one and zero. If they assume the value one, then we say that the measurement of the associated component is completely sharp, and it will correspond to a projective or von Neumann measurement of the component. Alternatively if one of the constants is zero then the measurement of the associated component is said to be completely unsharp and we would have done no worse by guessing the outcome.
The price for performing a joint measurement of incompatible observables is that the uncertainty in the estimates of the observables will increase. In other words, the variances of jointly measured spin components will be larger than the variances of the spin components measured by themselves. This leads us to say that joint measurements are unsharp \cite{opQM}. For the case of a spin-1/2 particle, the values of $\alpha$ and $\beta$ will be restricted by the inequality \cite{busch} \begin{equation} \label{ineq}
|\alpha{\bf a}+\beta{\bf b}|+|\alpha{\bf a}-\beta{\bf b}|\le 2. \end{equation} If a joint measurement scheme allows us to saturate inequality (\ref{ineq}), then, for given directions {\bf a} and {\bf b}, this measurement gives the largest possible values of $\alpha$ for a given $\beta$. Any joint measurement of two components of spin for which inequality (\ref{ineq}) is saturated shall be called an {\it optimal} joint measurement.
We can now evaluate how effective the various existing cloners are for performing joint measurements. It turns out that none of these allow us to saturate the inequality (\ref{ineq}) for all states, as they have not been devised with joint measurements in mind. As an example we will consider the universal cloner \cite{universal}. This cloner produces two identical cloned states, each having a Bloch vector pointing in the same direction as the original Bloch vector of the state. The magnitude of the Bloch vectors is, however, reduced by the factor ${2\over 3}$. Hence if we initially had the state $\hat \rho={1\over 2}(\hat 1+{\bf c}\cdot\hat\sigma)$, then the universal cloner would return two identical states $\hat \rho_c={1\over 2}(\hat 1+{2\over 3}{\bf c}\cdot\hat\sigma)$. If we measure the ${\bf a}$ component of spin on one clone, and the ${\bf b}$ component of spin on the other clone, then we can realise a joint measurement of the two components with $\alpha$ and $\beta$ both equal to ${2\over 3}$. It can be seen that the inequality (\ref{ineq}) can never be saturated for these values of $\alpha$ and $\beta$. Even if we use a cloner that operates under less restrictive conditions, such as the universal asymmetrical cloner \cite{cloning4, cloning5}, (\ref{ineq}) cannot be saturated except in the trivial cases of $\alpha=0$, $\beta=1$ or $\alpha=1$, $\beta=0$. This raises the question of whether it is possible to construct a cloning machine that can be used to perform an optimal joint measurement. In section \ref{construct}, we shall show this can be done.
\begin{figure}
\caption{A diagram showing the ${\bf m}$ and ${\bf l}$ axes in relation to the ${\bf a}$ and ${\bf b}$ axes.}
\label{fig1}
\end{figure}
\section{an optimal joint measurement} \label{realisation} As has just been stated, none of the existing cloning machines can be used to perform an optimal joint measurement. However such measurement schemes do exist, and a particularly simple one is outlined in \cite{busch2, demuynck, steves, erikas}. The essence of this procedure is to introduce two new axes to measure along, which are shown in Fig. \ref{fig1} as ${\bf m}$ and ${\bf l}$. Each time a measurement is performed, a choice of which axis to measure along is made. The probability of measuring along ${\bf m}$ is given by $p$ and the probability of measuring along ${\bf l}$ by $1-p$. If we measure along ${\bf m}$ and obtain the outcome spin up along ${\bf m}$, then this is interpreted as spin along both ${\bf a}$ and ${\bf b}$ being spin up. Conversely if we obtain spin down along ${\bf m}$, then this is interpreted as spin down along both ${\bf a}$ and ${\bf b}$. For the case when we measure along ${\bf l}$, the result spin up along ${\bf l}$ is taken as the component of spin along ${\bf a}$ being spin up and the component of spin along ${\bf b}$ being spin down, and the result spin down along ${\bf l}$ is interpreted as ${\bf a}$ being spin down and the ${\bf b}$ being spin up.
It can be seen that the four measurement operators that describe the measurement are \begin{eqnarray} \label{mo} \hat\Pi^{ab}_{\pm\pm}&=&\frac{p}{2}(\hat 1\pm{\bf m}\cdot\hat\sigma), \nonumber \\ \hat\Pi^{ab}_{\pm\mp}&=&\frac{1-p}{2}(\hat 1\pm{\bf l}\cdot\hat\sigma). \end{eqnarray} The joint probability distribution $P^{ab}_{ij}$ can be calculated by taking the expectation values of the measurement operators (\ref{mo}). The marginal probability distributions, $P^{\alpha a}_i$ and $P^{\beta b}_j$, can then be obtained from the joint probability distribution. Equivalently, we may obtain the marginal probability distributions directly from the marginal measurement operators $\hat\Pi^{\alpha{\bf a}}_\pm=\hat\Pi^{ab}_{\pm\pm}+\hat\Pi^{ab}_{\pm\mp}$ and $\hat\Pi^{\beta{\bf b}}_\pm=\hat\Pi^{ab}_{\pm\pm}+\hat\Pi^{ab}_{\mp\pm}$.
The directions of ${\bf m}$ and ${\bf l}$ can be deduced from the condition of joint unbiasedness. This criterion imposes the constraint $\langle\hat A_J\rangle=\alpha\langle\hat A\rangle$, which implies that \begin{eqnarray} \label{ju1} \langle\hat A_J\rangle= \langle\hat\Pi^{\alpha{\bf a}}_+\rangle-\langle\hat\Pi^{\alpha{\bf a}}_-\rangle&=&\alpha\langle{\bf a}\cdot\hat\sigma\rangle, \nonumber\\ \iff \langle p{\bf m}\cdot\hat\sigma\rangle +\langle(1-p){\bf l}\cdot\hat\sigma\rangle&=&\alpha\langle{\bf a}\cdot\hat\sigma\rangle. \end{eqnarray} Similarly, the constraint $\langle\hat B_J\rangle=\beta\langle\hat B\rangle$ implies that \begin{equation} \label{ju2} \langle p{\bf m}\cdot\hat\sigma\rangle-\langle(1-p){\bf l}\cdot\hat\sigma\rangle=\beta\langle{\bf b}\cdot\hat\sigma\rangle. \end{equation} The conditions (\ref{ju1}) and (\ref{ju2}) suggest that a suitable choice for ${\bf m}$ and ${\bf l}$ would be \begin{eqnarray} \label{m&l} {\bf m}=\frac{1}{2p}(\alpha{\bf a}+\beta{\bf b}), \nonumber \\ {\bf l}=\frac{1}{2(1-p)}(\alpha{\bf a}-\beta{\bf b}). \end{eqnarray} As these are unit vectors, it can be seen that \begin{eqnarray} \label{prob}
p={1\over 2}|\alpha{\bf a}+\beta{\bf b}|, \\
1-p={1\over 2}|\alpha{\bf a}-\beta{\bf b}|, \end{eqnarray} and thus the condition that $p+(1-p)=1$ ensures that this measurement scheme is optimal. A more detailed discussion can be found in \cite{erikas}. This measurement procedure can be used to construct a cloner that enables us to perform an optimal joint measurement, which will be considered next.
\section{Construction of Cloner} \label{construct}
It should not be surprising that the current cloning procedures do not represent optimal means of realising joint measurements. This is as they have been conceived with the notion of maximising the fidelity between the input state and the output cloned states. The cloning device that we shall outline is designed to saturate inequality (\ref{ineq}), and as such it should be expected that its fidelity will be lower than that of some of the existing cloners. Heuristically we can imagine that this cloner operates by cloning the ${\bf a}$ and ${\bf b}$ components of spin of the input state onto its outputs. Now let $|a\pm\rangle$ and $|b\pm\rangle$ represent the eigenstates of spin along ${\bf a}$
and ${\bf b}$. In addition to this let $|\psi\rangle$ represent the state that is to be cloned and let $|0\rangle$ represent the second `blank' qubit to which information will be transferred. Then we should expect that the action of our cloner is \begin{eqnarray} \label{action}
\hat U|\psi\rangle_1|0\rangle_2 =|c\rangle_{12}&=&\lambda_1|a+\rangle_1 |b+\rangle_2 + \lambda_2|a+\rangle_1 |b-\rangle_2 \\
&+&\lambda_3|a-\rangle_1 |b+\rangle_2 +\lambda_4|a-\rangle_1|b-\rangle_2,\nonumber \end{eqnarray}
where $|\lambda_1|^2=P^{ab}_{++}$, $|\lambda_2|^2=P^{ab}_{+-}$, $|\lambda_3|^2=P^{ab}_{-+}$ and $|\lambda_4|^2=P^{ab}_{--}$, where these joint probabilities are those of an optimal joint measurement.
Thus we find that the squares of the magnitudes of the coefficients in (\ref{action}) yield the same joint probability distribution as we would calculate from the measurement operators (\ref{mo}). Now a projective measurement along the ${\bf a}$ direction on the first qubit and along ${\bf b}$ on the second qubit of $|c\rangle_{12}$ will implement a joint measurement of the ${\bf a}$ and ${\bf b}$ components of spin of the state $|\psi\rangle$.
To construct the unitary operator $\hat U$ that implements the cloning process, we require an important result that applies to POMs. It can be shown that any POM can be realised as a projective measurement by extending the dimensions of the system's Hilbert space. This result is known as {\it Naimark's theorem}, a proof of which can be found in \cite{preskill, peres}. To illustrate this theorem consider the pertinent situation of performing a joint measurement of two components of spin of a spin-1/2 particle. The state $|\psi\rangle$ of the particle is described in a two-dimensional Hilbert space. We can add another spin-1/2 particle which is prepared in the state $|b+\rangle$. To describe this new combined system, with state vector $|\psi\rangle_1|b+\rangle_2$, we require a four-dimensional Hilbert space. Naimark's theorem assures us that there exists an orthonormal basis $\{|\phi_{ij}\rangle_{12}\}$, where $i,j=\pm$, which has the property \begin{equation} \label{basiscondition}
|_{12}\langle\phi_{ij}|\psi\rangle_1|b+\rangle_2|^2=\langle \psi|\hat\Pi^{ab}_{ij}|\psi\rangle=P^{ab}_{ij}. \end{equation}
Hence we may realise the joint measurement by performing a projective measurement upon the state $|\psi\rangle_1|b+\rangle_2$. If we find such a measurement basis
$\{|\phi_{ij}\rangle_{12}\}$, then it is clear that \begin{eqnarray} \label{lambdas}
|\lambda_1|=|\langle\phi_{++}|\psi\rangle|b+\rangle|,\text{ }|\lambda_2|=|\langle\phi_{+-}|\psi\rangle|b+\rangle|, \nonumber \\
|\lambda_3|=|\langle\phi_{-+}|\psi\rangle|b+\rangle|,\text{ }|\lambda_4|=|\langle\phi_{--}|\psi\rangle|b+\rangle|. \end{eqnarray} This observation suggests that a suitable form for $\hat U$ is\footnote{From (\ref{lambdas}) it is clear that we are free to choose the phase factors of the terms that appear in the $\hat U$, without affecting the joint probability distribution for the outcomes of the joint measurement. The choice of phases that appear in (\ref{Cloner}) was made with a view to enhancing the fidelities of the cloning process.} \begin{eqnarray} \label{Cloner}
\hat U=|a+\rangle_1|b+\rangle_2\langle\phi_{++}|+|a+\rangle_1|b-\rangle_2\langle\phi_{+-}|\\
-|a-\rangle_1|b+\rangle_2\langle\phi_{-+}|-|a-\rangle_1|b-\rangle_2\langle\phi_{--}|.\nonumber \end{eqnarray}
For this unitary operator to effect the cloning procedure we would require that the state $|0\rangle_2$ in (\ref{action}) is prepared as $|b+\rangle$.
The task of constructing the cloner has been reduced to finding a suitable orthonormal basis $\{|\phi_{ij}\rangle_{12}\}$, which is accomplished by performing a Naimark extension. Examples of performing such extensions may be found in \cite{preskill, atomic}. To aid us in performing the Naimark extension we shall introduce the states $|m\pm\rangle$ and $|l\pm\rangle$ as the eigenstates of ${\bf m}\cdot\hat\sigma$ and ${\bf l}\cdot\hat\sigma$ respectively. Thus we may express the POM operators as $\hat\Pi^{ab}_{\pm\pm}=p|m\pm\rangle\langle m\pm|$
and $\hat\Pi^{ab}_{\pm\mp}=(1-p)|l\pm\rangle\langle l\pm|$. It can now be shown that one choice of basis is \begin{eqnarray} \label{basis}
|\phi_{++}\rangle_{12}&=&\sqrt{p}|m+\rangle_1|b+\rangle_2+\sqrt{1-p}|a+\rangle_1|b-\rangle_2, \nonumber \\
|\phi_{--}\rangle_{12}&=&\sqrt{p}|m-\rangle_1|b+\rangle_2+\sqrt{1-p}|a-\rangle_1|b-\rangle_2, \nonumber \\
|\phi_{+-}\rangle_{12}&=&\sqrt{1-p}|l+\rangle_1|b+\rangle_2 -\sqrt{p}(\cos(\epsilon)|a+\rangle_1 \nonumber \\
&+&\sin(\epsilon)|a-\rangle_1)|b-\rangle_2, \\
|\phi_{-+}\rangle_{12}&=&\sqrt{1-p}|l-\rangle_1|b+\rangle_2+\sqrt{p}(\sin(\epsilon)|a+\rangle_1\nonumber \\
&-&\cos(\epsilon)|a-\rangle_1)|b-\rangle_2, \nonumber \end{eqnarray} where $\epsilon$ is half the angle between the vectors ${\bf m}$ and ${\bf l}$. It can be verified that this basis satisfies (\ref{basiscondition}). It may also be verified that the basis states are orthonormal as required. The basis (\ref{basis}) is expressed in terms of $p$ and $\epsilon$ which relate to the measurement scheme of
\cite{erikas}. However for this cloning machine it would be more natural to express $\hat U$ in terms of $\alpha$, $\beta$ and $\eta$, the angle between ${\bf a}$ and ${\bf b}$. In section \ref{sharpness} we explained that $p$ can be expressed as $p={1\over 2}|\alpha{\bf a}+\beta{\bf b}|$ and $1-p={1\over 2}|\alpha{\bf a}-\beta{\bf b}|$, hence all that remains is to find how $\epsilon$ can be expressed in terms of $\alpha$, $\beta$ and $\eta$. This may be achieved by remembering that $2\epsilon$ is the angle between ${\bf m}$ and ${\bf l}$, and thus ${\bf m}\cdot{\bf l}=\cos(2\epsilon)$. From (\ref{m&l}) it is clear that \begin{equation} \label{epsilon} {\bf m}\cdot{\bf l}=\frac{\alpha^2-\beta^2}{4p(1-p)}=\cos(2\epsilon). \end{equation}
We may view the action of the cloner (\ref{action}) as taking information about the measurement statistics and transferring it into our new basis $\{|a\pm\rangle_1, |b\pm\rangle_2\}$ in such a way that the information pertaining to ${\bf a}$ being $\pm$ is associated with the basis states
$|a\pm\rangle_1$, and likewise for ${\bf b}$ and the states $|b\pm\rangle_2$. If we use this cloner to aid us in performing a joint measurement of the ${\bf a}$ and ${\bf b}$ components of spin, then we would find that the probabilities for each of the four outcomes occurring is the same as for the joint measurement scheme outlined in \cite{erikas}. As that measurement scheme saturated the inequality (\ref{ineq}), then this implies that a joint measurement implemented using this cloning machine will also saturate (\ref{ineq}) and as such will represent an optimal joint measurement.
An important point to note is that in general the state $|c\rangle_{12}$ will be entangled, and thus if we wished to consider only one of the cloned qubits, then the reduced states $\hat\rho_a=Tr_2(|c\rangle\langle c|)$ and $\hat\rho_b=Tr_1(|c\rangle\langle c|)$ will be mixed states. Thus, the lengths of the Bloch vectors ${\bf c}_a$ and ${\bf c}_b$ for $\hat\rho_a$ and $\hat\rho_b$ respectively, will both be less than one. It is straightforward to show that \begin{equation} \label{bloch1} {\bf a}\cdot{\bf c}_a=\alpha({\bf a}\cdot{\bf c}), \end{equation}
where ${\bf c}$ is the Bloch vector of the initial pure state $|\psi\rangle$. This means that the component of $\hat\rho_a$'s Bloch vector along the ${\bf a}$ axis is just the component of the original Bloch vector of the state, along ${\bf a}$, shrunk by the factor $\alpha$. A similar result can be found for $\hat\rho_b$, \begin{equation} \label{bloch2} {\bf b}\cdot{\bf c}_b=\beta({\bf b}\cdot{\bf c}). \end{equation} This supports the view that the cloning process imprints the individual components of spin onto its outputs. It also shows that the expectation values for $\hat A$ and $\hat B$ satisfy the joint unbiasedness condition introduced in section \ref{sharpness}. It is interesting to examine other components of ${\bf c}_a$ and ${\bf c}_b$. Thus let ${\bf n}$ be the unit normal vector of the plane spanned by ${\bf a}$ and ${\bf b}$, i.e. ${\bf n}\cdot{\bf a}={\bf n}\cdot{\bf b}=0$. It can be shown that \begin{equation} \label{orthocomp} {\bf n}\cdot{\bf c}_a=\sqrt{1-\beta^2}{\bf n}\cdot{\bf c} \quad\text{ and }\quad{\bf n}\cdot{\bf c}_b=0, \end{equation} and thus ${\bf c}_b$ is confined to the ${\bf a}{\bf b}$ plane. This is not entirely unexpected, as our cloning procedure is asymmetric. It ideally leaves as much information as possible about the {\bf a} component of spin in the original qubit state, and imprints as much information as possible about the {\bf b} component of spin on the blank auxiliary qubit. Relation (\ref{orthocomp}) shows that some of the information related to a spin component orthogonal to both {\bf a} and {\bf b} is left in the original qubit, but none of it makes its way onto the blank qubit. This is not surprising, as we only ever intended to transfer information about the {\bf b} component onto the blank qubit.
The cloning procedure outlined so far applies to pure states. It is, however possible to clone mixed states using the same cloning procedure. If we denote the state that is to be cloned as $\hat\rho$, then the output cloned state $\hat\rho_c$ will simply be $\hat\rho_c=\hat U\hat\rho\otimes|b+\rangle\langle b+|\hat U^+$. A joint measurement of ${\bf a\cdot\sigma}$ and ${\bf b\cdot\sigma}$ can now be realised in the same manner as for the pure state cloning.
\section{Fidelities} \label{fid} An important quantity to consider when analysing any cloning procedure is the fidelity, which quantifies how close the cloned states are to the original state. Here we shall take our definition of fidelity, $F$, to be \begin{equation} \label{fidelity}
F=|_1\langle\psi|_2\langle\psi|c\rangle_{12}|^2, \end{equation} which is the probability that the state produced is found, by a suitable measurement, to be a pair of perfect copies. The fidelity (that is obtained) will depend upon the initial state and thus, as expected, it differs from the fidelity obtained using the universal cloner. However it also differs from the fidelities of traditional state dependent cloners, which are optimised for a restricted set of states, as this cloner is optimal with respect to (\ref{ineq}) for all pure qubit states. It is interesting to look at how the fidelity varies with the angle between ${\bf a}$ and ${\bf b}$, and how it varies for different intended measurement sharpness. As the fidelity is a function of the input state, we shall average $F$ over all possible initial pure states. This task is best accomplished in terms of the Bloch sphere picture, where the input states are represented by unit vectors in ${\mathbb R}^3$. Choosing ${\bf a}$ to be along the $z$-axis, we define $\theta$ to be the angle made by the Bloch vector of the state and the $z$-axis. We also define $\phi$ to be the angle made by the projection of the Bloch vector of the state in the $xy$ plane with the $x$-axis. Assuming all initial states to be equally probable, the averaged fidelity will be given by \begin{equation} \label{average} F_{av}=\frac{1}{4\pi}\int^{2\pi}_0{\int^{\pi}_0{F_{(\theta, \phi)}\sin\theta d\phi d\theta}}. \end{equation} This averaged fidelity is equivalent to the global fidelity of \cite{cloning1}. With some effort it can be shown that \begin{eqnarray} \label{averaged} F_{av}&=&{1\over 4}+\frac{\alpha}{12}+\frac{\beta}{12}+\frac{\alpha\beta}{12}\cos^2(\eta)+\frac{\sqrt{1-\beta^2}}{12}\nonumber \\ &+&\frac{\sqrt{1-\alpha^2}}{12}\sin(\eta)+\frac{1}{24p} [ \alpha\sqrt{1-\beta^2}+\nonumber \\ &+&\beta\sqrt{1-\beta^2}\cos(\eta)+\beta\sqrt{1-\alpha^2}\sin(\eta) ], \end{eqnarray} where $p$ is given by (\ref{prob}). 
A plot of the averaged fidelity is shown in figure \ref{plot3}. In this figure, $\beta$ was chosen to have its largest possible value consistent with the choice of $\alpha$ and $\eta$. It was found in \cite{universal} that the universal cloner produces clones with a fidelity of ${5\over 6}$. Thus the two particle fidelity of the universal cloner is $({5\over 6})^2\approx 0.6944$. Comparing this to figure \ref{plot3} we observe that the universal cloner produces clones with a higher fidelity than the cloner we are considering.
We can also define fidelities for the single particle reduced states $\hat\rho_a$ and $\hat\rho_b$ to be $F_a=\langle \psi|\hat\rho_a|\psi\rangle$ and $F_b=\langle \psi|\hat\rho_b|\psi\rangle$. These fidelities can then be averaged in the same manner as (\ref{average}). This yields the results \begin{eqnarray} \label{fa} F_a&=&{1\over 2}+{\alpha\over 6}+{1\over 6}\sqrt{1-\beta^2}+{1\over 12p}(\alpha\sqrt{1-\beta^2}+ \nonumber\\ &+&\beta\cos(\eta)\sqrt{1-\beta^2}+\beta\sin(\eta)\sqrt{1-\alpha^2}),\\ \label{fb} F_b&=&{1\over 2}+{\beta\over 6}. \end{eqnarray} The fact that the averaged fidelities are not equal is to be expected, as the process that we are performing is inherently asymmetric with respect to the {\bf a} and {\bf b} directions.
To elaborate further upon this, the reduced states $\hat\rho_a$ and $\hat\rho_b$ contain information about the measurement statistics of measurements along the ${\bf a}$ and ${\bf b}$ axes of the state $|\psi\rangle$. Provided ${\bf a}\ne{\bf b}$, we should not expect $\hat\rho_a$ and $\hat\rho_b$ to be equal, and thus we should not expect $F_a$ to equal $F_b$. For the case when ${\bf a}={\bf b}$, i.e. $\eta=0$, it is possible to have $\alpha=\beta=1$ and thus the equations (\ref{fa}) and (\ref{fb}) will both yield the answer of $2\over 3$ for the averaged fidelity. The cloning process we have considered leaves information relating to spin along the {\bf a} direction in the original qubit state, and transfers information relating to spin along {\bf b} onto a blank qubit. We could, of course, equally well consider a cloning process which would leave the information relating to {\bf b} in the original qubit, and copy the information relating to {\bf a} onto the blank state. The fidelities in equations (\ref{fa}) and (\ref{fb}) would then be reversed with respect to $\alpha, \bf a$ and $\beta, \bf b$. One could also consider devising a cloning procedure which would be more symmetric with respect to {\bf a} and {\bf b} as far as the fidelities for the single particle reduced states are concerned. This cloning procedure would still give the same measurement statistics. The possibility of many different cloning procedures, all yielding the same measurement statistics, is due to the fact that there are infinitely many ways of realising the joint quantum measurement, in terms of how to make the Naimark extension.
\begin{figure}
\caption{A plot showing $F_{av}$ (the upper surface) in relation to $F_m$ (the lower surface). The parameter $\eta$ is the angle between the ${\bf a}$ and ${\bf b}$ axes.}
\label{plot3}
\end{figure}
In section \ref{realisation} a way of realising a joint measurement of two components of spin was outlined. As our cloner is primarily concerned with replicating measurement statistics, it may seem more natural to use the measurement scheme of section \ref{sharpness} in a more direct fashion to prepare the clones.
We could just perform the joint measurement and then prepare one of the basis states $|a\pm\rangle_1|b\pm\rangle_2$ corresponding to the obtained outcome. This process would, on average, yield the mixed state \begin{equation} \label{mixed}
\hat\rho_{12}=\sum_{i,j=+,-}{P^{ab}_{i,j}|a_i\rangle_1|b_j\rangle_2\,{}_1\langle a_i|\,{}_2\langle b_j|}. \end{equation}
The state (\ref{mixed}) is simpler to create and gives the same measurement statistics as $|c\rangle$. There is, however, a difference between the two cloning procedures. This difference manifests itself within the averaged fidelities of the two processes. We can define the fidelity, $F_m$, of $\hat\rho_{12}$ with the original state
$|\psi\rangle$ as \begin{equation}
F_m={ }_1\langle\psi|_2\langle\psi |\hat\rho_{12}|\psi\rangle_1|\psi\rangle_2. \end{equation} When we average $F_m$ over all the pure qubit states, then we find that the averaged fidelity is less than (\ref{averaged}), as is shown in figure \ref{plot3}. It is informative to consider the two reduced states $\hat\rho^a_1=\text{Tr}_2(\hat\rho_{12})$ and $\hat\rho^b_2=\text{Tr}_1(\hat\rho_{12})$. The averaged fidelities may be calculated for these states and are found to be \begin{eqnarray} F_{ma}&=&{1\over 2}+{\alpha\over 6}, \\ F_{mb}&=&{1\over 2}+{\beta\over 6}. \end{eqnarray} It may be seen that $F_{mb}=F_b$ but that $F_{ma}\ne F_a$. This result again shows that only information pertaining to the ${\bf b}$ component is transferred to the second qubit, whereas the first qubit retains additional information about the original state.
\section{Conclusion} \label{discussion} We have looked at how quantum cloning can be used to perform joint quantum measurements of two components of spin. A criterion for judging the optimality of a joint measurement was discussed. We then introduced a cloning machine which can be used to perform an optimal joint measurement of spin. This cloning scheme could be thought to act by cloning the ${\bf a}$ and ${\bf b}$ components of spin onto its outputs. Fidelities for the cloner were also investigated. We could compare these results to that of the universal cloner, which provides clones with a fidelity of $5\over 6$ for all input states. Thus its two particle averaged fidelity would simply be $({5\over 6})^2\approx 0.6944$. Hence the fidelity of the universal cloner is, as expected, higher than that of the cloner presented in section \ref{construct}.
Finally we compared the cloning procedure to a more direct approach producing the mixed state output (\ref{mixed}). It was found that the averaged fidelity for the cloning procedure outlined in section \ref{construct} was greater than or equal to the fidelity of the mixed state procedure. Thus the added complexity of the method outlined in section \ref{construct} may be balanced against the higher fidelity that it provides.
\acknowledgments We would like to thank Prof. Alain Aspect for suggesting the question of how to make a joint quantum measurement using cloning. EA acknowledges the Royal Society for financial support.
\end{document} | arXiv | {
"id": "0601098.tex",
"language_detection_score": 0.8687189221382141,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{abstract}
We introduce a new notion of viscosity solutions for the level set formulation of the motion by crystalline mean curvature in three dimensions. The solutions satisfy the comparison principle, stability with respect to an approximation by regularized problems, and we also show the uniqueness and existence of a level set flow for bounded crystals.
\end{abstract}
\title{A level set crystalline mean curvature flow of surfaces}
\tableofcontents \section{Introduction}
A crystalline mean curvature flow is a typical example of an anisotropic mean curvature flow, which can be regarded as a mean curvature flow under a Minkowski or Finsler metric \cite{BP96}. A crystalline mean curvature flow was proposed by S.~B.~Angenent and M.~E.~Gurtin \cite{AG89} and independently by J.~Taylor \cite{T91} to describe the motion of an anisotropic antiphase boundary in materials science. There is a large amount of literature devoted to the study of the motion by crystalline mean curvature. However, even local-in-time unique solvability of its initial value problem has been a long-standing open problem except in the case of planar motion or convex initial data. The main reason is that the surface energy density is not smooth and hence the speed of evolution is determined by a nonlocal quantity.
Our goal in this paper is to solve this long-standing open problem for purely crystalline mean curvature flow in $\ensuremath{\mathbb{R}}^3$. In fact, we shall introduce a new notion of solutions which corresponds to a generalization of a level set flow for the mean curvature flow equation and establish its unique existence.
To motivate the problem, let us explain an example of anisotropic mean curvature flow equation and its level set formulation; see e.g.\ \cite{CGG,GG92,G06}. Let $\gamma:S^2 \to (0,\infty)$ be a given interfacial energy density on the unit sphere $S^2$. For a given closed surface $\Gamma$ we define the interfacial energy \[ I_\gamma(\Gamma) = \int_\Gamma \gamma(\mathbf{n})\;d\mathcal{H}^2, \] and call $I_\gamma$ the interfacial energy of $\Gamma$ with density $\gamma$. Here $\mathbf{n}$ denotes the unit exterior normal of $\Gamma$ and $d\mathcal{H}^2$ denotes the area element. The anisotropic mean curvature $\kappa_\gamma$ is the first variation of $I_\gamma$ with respect to change of volume enclosed by $\Gamma$. Its explicit form is \[ \kappa_\gamma = -\operatorname{div}_\Gamma \left(\nabla_p \gamma(\mathbf{n})\right) \] where $\gamma$ is $1$-homogeneously extended as
$\gamma(p)=|p|\gamma\left(p/|p|\right)$ for $p\in \ensuremath{\mathbb{R}}^3\backslash\{0\}$ and $\gamma(0)=0$; $\operatorname{div}_\Gamma$ denotes the surface divergence \cite{Si83,G06}. If $\gamma(p)=|p|$, $I_\gamma$ is the surface area and $\kappa_\gamma=-\operatorname{div}_\Gamma \mathbf{n}$, which is nothing but (two times) the classical mean curvature. When the interfacial energy density $\gamma$ is not a constant function on $S^2$, we say $\kappa_\gamma$ is an anisotropic mean curvature. Let $\{\Gamma_t\}_{t>0}$ be a smooth family of closed surfaces in $\ensuremath{\mathbb{R}}^3$ and let $V$ be its normal velocity in the direction of $\mathbf{n}$. The equation for $\{\Gamma_t\}$ of the form \[ V = \kappa_\gamma \quad \text{on} \quad \Gamma_t \]
is a simple example of an anisotropic mean curvature flow equation. Of course, if $\gamma(p)=|p|$, then this equation is nothing but the standard mean curvature flow equation $V=\kappa$. A typical feature of this equation is that even if one starts with a smooth surface $\Gamma_0$, the solution $\Gamma_t$ may pinch in finite time, for example a dumbbell with thin neck \cite{Gr89}. So a weak formulation is necessary to track the evolution after the formation of singularities. There are two standard approaches for the (isotropic) mean curvature flow equation. One is a variational way like a varifold solution initiated by K.~Brakke \cite{B78} and developed further by T.~Ilmanen \cite{Il93} and K.~Takasao and Y.~Tonegawa \cite{TT}. Another approach is a level set method based on a comparison principle introduced by \cite{CGG,ES}. As already noted in \cite{CGG} the level set method is very flexible and it applies to anisotropic curvature flow equation \cite{GG92} while a varifold solution is still limited to the isotropic mean curvature flow equation.
Let us explain the idea of the level set formulation. We introduce an auxiliary function $u:\ensuremath{\mathbb{R}}^3\times [0,\infty)\to\ensuremath{\mathbb{R}}$ so that its zero level set agrees with $\Gamma_t$. To fix the idea we assume that $u>0$ in a region $D_t$ enclosed by $\Gamma_t$ and $u<0$ outside of $D_t \cup \Gamma_t$. Then the equation $V=\kappa_\gamma$ is represented as \[
\frac{u_t}{|\nabla u|}=-\operatorname{div}\left(\nabla_p \gamma\left(-\frac{\nabla u}{|\nabla u|}\right)\right) \quad \text{on} \quad \Gamma_t \]
since $V=u_t/|\nabla u|$, $\mathbf{n}=-\nabla u/|\nabla u|$. The idea of the level set method is to consider this equation not only on $\Gamma_t$ but also in $\ensuremath{\mathbb{R}}^3$, i.e.\ each level set of $u$ is required to move by $V=\kappa_\gamma$. In other words, we consider \begin{equation} \label{level set mcf}
u_t-|\nabla u|\left(-\operatorname{div}\left(\nabla_p \gamma \left(-\nabla u/|\nabla u|\right)\right)\right) = 0 \quad \text{in} \quad \ensuremath{\mathbb{R}}^3 \times (0,\infty) \end{equation} with initial condition \begin{equation} \label{level set mcf initial} u(x,0)=u_0(x), \ x \in \ensuremath{\mathbb{R}}^3. \end{equation}
Here $u_0$ is taken so that $\Gamma_0$ is its zero level set. In the case $\gamma(p)=|p|$, \eqref{level set mcf} is nothing but the famous level set mean curvature flow equation \[
u_t - |\nabla u|\operatorname{div} \left(\nabla u/|\nabla u|\right)=0. \] The level set equation \eqref{level set mcf} is degenerate even if $\gamma$ is convex. One does not expect the problem to be solvable, even locally in time, in the classical sense, even if $u_0$ is smooth.
Fortunately, if $\gamma$ is $C^2$ on $\ensuremath{\mathbb{R}}^3 \setminus \set0$ and convex, the notion of viscosity solutions \cite{CIL} is adjustable to solve \eqref{level set mcf}--\eqref{level set mcf initial} uniquely and globally-in-time for any uniformly continuous initial data \cite{CGG,G06}. One shall notice that there is a large freedom to choose $u_0$ for given $\Gamma_0$. However, it is known \cite{CGG,G06} that the zero level set is uniquely determined by $\Gamma_0$ (independently of the choice of $u_0$). Although the zero level set of $u$ may fatten, it is often called a level set flow (solution) of $V=\kappa_\gamma$ with initial data $\Gamma_0$. The theory is based on a comparison principle for viscosity solutions and it applies when $\gamma$ is not necessarily $C^2$ but the singularity is weak. For example, in planar motion even if the second derivative of $\gamma\in C^1\left(\ensuremath{\mathbb{R}}^2 \backslash\{0\}\right)$ is allowed to jump at finitely many points in $S^1$, the result of \cite{CGG} is extendable \cite{OS93,GSS}; see \cite{I96} for the higher dimensional problem. However, if the singularity of $\gamma$ is strong, such that the first derivative of $\gamma$ may have jumps, then the situation is completely different. The equation becomes very singular in the sense that the speed becomes a nonlocal quantity and establishing the level set method becomes totally non-trivial even if only a planar motion is considered, although it has been established in \cite{GG01}. However, it has been a long-standing open problem for surface evolution even if $\gamma$ is (purely) crystalline, i.e.\ $\gamma$ is piecewise linear and convex in $\ensuremath{\mathbb{R}}^3$. Such functions are often in convex analysis referred to as \emph{polyhedral} \cite{Rockafellar}.
Our purpose is to establish a level set method for a crystalline mean curvature flow, whose typical example includes $V=\kappa_\gamma$ for crystalline $\gamma$. Our theory can apply to more general equations such as $V=\kappa_\gamma+1$. We shall introduce a new notion of viscosity solutions so that the following well-posedness result holds.
\begin{theorem}[Unique existence] \label{th:unique existence}
Let $\gamma$ be crystalline in $\ensuremath{\mathbb{R}}^3$. Assume that $f=f(m,\lambda)$ is continuous on $S^2\times\ensuremath{\mathbb{R}}$ and $\lambda \mapsto f(m,\lambda)$ is non-decreasing. Assume that $\left|f(m,\lambda)\right|/\left(|\lambda|+1\right)$ is bounded in $S^2\times\ensuremath{\mathbb{R}}$. Let $D_0$ be a bounded open set in $\ensuremath{\mathbb{R}}^3$ with the boundary $\Gamma_0=\partial D_0$. Then there exists a global unique level set flow $\{\Gamma_t\}_{t\geq 0}$ with \begin{equation} \label{general mcf} V=f(\mathbf{n},\kappa_\gamma) \quad \text{on} \quad \Gamma_t \end{equation} and initial data $\Gamma_0$. \end{theorem}
The assumption of the linear growth for $f$ in $\lambda$ is just for simplicity. One can remove it by introducing a special class of test functions \cite{IS,G06} or by a flattening argument \cite{Go}.
To prove the uniqueness part a key step is to establish a comparison principle for the level set equation of \eqref{general mcf} which is of the form \begin{equation} \label{P} u_t + F \left( \nabla u, \operatorname{div}\partial W(\nabla u) \right) = 0, \end{equation} where \begin{align} \label{geometric F}
F(p,\Lambda) = -|p|f \left(-p/|p|,\Lambda \right),\ W(p)=\gamma(-p). \end{align} Here we rather use the subdifferential notion $\partial W$ instead of $\nabla W$ since $W$ is piecewise linear and so not everywhere differentiable. To prove the existence part, one cannot unfortunately apply Perron's method since the nonlocal quantity ``$\operatorname{div}\partial W(\nabla u)$'' is not constant in a flat part of the solution (which is different from planar case.) We thus construct a solution by smoothing $W$. Here we need to establish a stability of our viscosity solutions. The basic idea of proofs is an elaboration on the idea for establishing uniqueness based on the comparison principle and stability for the total variation flow of non-divergence type \cite{GGP13JMPA,GGP13AMSA}. We shall establish comparison principle for a more general nonlinearity $F$ than \eqref{geometric F}, see Remark~\ref{rem:general F} below.
The bibliography of \cite{GGP13AMSA} includes many references on unique solvability. We take this opportunity to mention related results for evolution of closed surfaces by crystalline or more general singular interface energy. In three dimensions and higher, the crystalline mean curvature $\kappa_\gamma$ is not only a nonlocal quantity as mentioned above, but it might be non-constant on facets of the crystal \cite{BNP99}. In fact, it might be discontinuous, and in general it is known to be only a function of bounded variation \cite{BNP01a,BNP01b}. Therefore facet breaking and bending might occur and we cannot restrict the solutions only to surfaces with facets parallel to those of the Wulff shape corresponding to the crystalline energy density $\gamma$. A more general notion of solutions is necessary. The variational approach has led to significant progress by understanding the properties of $\kappa_\gamma$. A notion of solutions via an approximation by reaction-diffusion equations for $V = \gamma \kappa_\gamma$ was established in \cite{BGN00,BN00}. An approximation via minimizing movements was used in \cite{CasellesChambolle06,BCCN06,BCCN09}. However, all these results only provide existence for \emph{convex} initial data.
We also establish a convergence result which is useful to discuss approximation by an Allen-Cahn type equation.
\begin{theorem}[Convergence] \label{th:convergence} Under the assumption of Theorem~\ref{th:unique existence}, let $u$ be a viscosity solution of \eqref{P} with initial data
$u_0 \in C(\ensuremath{\mathbb{R}}^3)$ such that $u_0(x)=-c$ for $|x|\geq R$ with some $R$ and $c>0$. Assume that $\gamma_\varepsilon$ is smooth in $\ensuremath{\mathbb{R}}^3\setminus\set0$, convex and $1$-homogeneous and $\gamma_\varepsilon\to\gamma$ uniformly on $S^2$. Let $u^\varepsilon$ be a viscosity solution of \eqref{P} with $W=W_\varepsilon(p)=\gamma_\varepsilon(-p)$, with initial data $u_0^\varepsilon$ such that
$u_0^\varepsilon(x)=-c$ for $|x|\geq R$. Assume that $u_0^\varepsilon\to u_0$ uniformly. Then $u^\varepsilon$ converges locally uniformly to $u$ in $\ensuremath{\mathbb{R}}^3\times [0,\infty)$. \end{theorem}
This gives a convergence of diffuse interface model to the sharp interface model even if $\gamma$ is crystalline; see \cite{GOS,TC98}.
After this work had been completed, the authors learned of a recent work by A. Chambolle, M. Morini and M. Ponsiglione \cite{CMP}, where they established a unique global solvability (up to fattening) for $V=\gamma \kappa_\gamma$ for any convex $\gamma$ by introducing a new notion of a solution related to the anisotropic distance function. Their approach applies to all dimensions and all initial data not necessarily bounded. However, their approach requires a special form of the equation so that the mobility is proportional to the interfacial energy density $\gamma$ and it does not apply to $V=\kappa_\gamma$ or $V=\kappa_\gamma +1$. Our approach applies to all $V=f(\mathbf{n},\kappa_\gamma)$ including these equations but the dimension $n$ is limited to $n \leq 3$ and $\gamma$ is limited to crystalline. It is not yet clear whether or not our solution agrees with theirs in the case when both approaches are available although it is very likely.
\begin{remark} \label{rem:general F} In full generality, we will assume that $F \in C(\Rd \times \ensuremath{\mathbb{R}})$, $n \geq 1$, and that it is nonincreasing in the second variable, that is, \begin{align} \label{F ellipticity} F(p, \xi) \leq F(p, \eta) \qquad \text{for all } p \in \Rd,\ \xi \geq \eta. \end{align} For simplicity, we shall also assume that \begin{align*} F(0,0) = 0. \end{align*} In particular, constants are solutions of \eqref{P}. \end{remark}
\subsection*{Viscosity solutions and the contribution of this paper}
We extend the notion of viscosity solutions to the problem \eqref{P} with crystalline $W$. The main strength of the viscosity solution approach is that it can handle general problems that are not of divergence form by exploiting their comparison principle structure \cite{CIL,G06}.
The main difficulty in defining a solution of \eqref{P} is the singular, nonlocal operator $\operatorname{div} \partial W(\nabla \cdot)$. We interpret this operator as the minimal section (also known as the canonical restriction) of the subdifferential of the anisotropic total variation energy in the Hilbert space $L^2(\Omega)$, \begin{align*} E(\psi) := \begin{cases} \int_\Omega W(D \psi) \;dx, & \psi \in L^2(\Omega) \cap BV(\Omega),\\ +\infty, & \text{otherwise}, \end{cases} \end{align*} where $\Omega$ is the flat torus $\ensuremath{\mathbb{R}}^n / L\ensuremath{\mathbb{Z}}^n$ for some $L > 0$, $n \geq 1$, and $BV(\Omega)$ is the space of functions of bounded variation. That is, we only consider this energy for periodic functions $\psi$ to avoid issues with handling the boundary of $\Omega$. Since $D\psi$ is in general only a Radon measure, the functional $E$ is understood as the lower semi-continuous envelope (closure) of the functional defined for Sobolev functions $W^{1,1}(\Omega)$.
It is well-known that the subdifferential of $E$ defined above is the set of divergences of certain vector fields, often called \emph{Cahn-Hoffman vector fields} \cite{Moll}. More precisely, if $\psi$ is a Lipschitz function on $\Omega$, then \begin{align*} \partial E(\psi) = \set{-\operatorname{div} z: z(x) \in \partial W(\nabla \psi(x)) \text{ for a.e. $x$, } \operatorname{div} z \in L^2(\Omega)}. \end{align*} The subdifferential $\partial E(\psi)$ is a closed convex, possibly empty subset of the Hilbert space $L^2(\Omega)$. If it is nonempty, we say that $\psi \in \operatorname{\mathcal{D}}(\partial E)$ and the unique element of the subdifferential with the minimal $L^2$-norm is called the minimal section of $\partial E(\psi)$ and is denoted as $\partial^0 E(\psi)$. In such a case we will interpret $\operatorname{div} \partial W(\nabla \psi)$ as $- \partial^0 E(\psi)$.
This interpretation is consistent with the classical theory of monotone operators for the solvability of problems of the form \begin{align*} u'(t) \in - \partial E(u(t)). \end{align*} Indeed, it is known that a solution is right-differentiable and the right derivative $d^+u/dt (t) = -\partial^0 E(u(t))$. As we noted above, the mean curvature flow can be viewed as the gradient flow of the surface energy functional.
The viscosity solutions are defined via a comparison with a suitable class of test functions. It is therefore necessary to identify a sufficiently large class of functions for which we can define $\operatorname{div} \partial W(\nabla \cdot)$ so that they can serve as test functions in the definition of viscosity solutions. In particular, it must be possible to prove both uniqueness (via a comparison principle) and existence (via a stability property of solutions).
Since the energy density $W$ is crystalline, that is, piecewise linear, the domain of the subdifferential of $E$ can be understood as functions that have flat parts with gradients that fall into the set where $W$ is not differentiable. These flat parts then correspond to the features of the crystal---facets and edges---depending on the dimension of the subdifferential $\partial W(\nabla \psi)$ on the given flat part of $\psi$. This then leads to an idea of energy stratification with respect to the subdifferential dimension. It turns out that the value of $\operatorname{div} \partial W(\nabla \psi)$ at a point $x$ depends only on the shape of $\psi$ in the directions parallel to $\partial W(\nabla \psi(x))$, and it is basically independent of the shape in the orthogonal direction.
Because of the simple structure of $W$, the local behavior of $W$ (and $\partial W$) in a neighborhood of a given gradient $p$ can be completely captured by a one-homogeneous function that is linear in directions orthogonal to the subspace spanned by the directions in $\partial W(p)$, Proposition~\ref{pr:direction-decomposition}. We therefore for a given slope $p$ define a sliced energy $E^{\rm sl}_p$ to capture the interesting behavior, and reduce the analysis to a space $\ensuremath{\mathbb{R}}^k$, where $k$ is the dimension of $\partial W(p)$. Then we consider \emph{stratified faceted functions} by separating the variables into the directions parallel to $\partial W(p)$, in which we assume that the function has a ``nice'' facet, and the orthogonal directions where the function can be of any form (as long as it is differentiable), Definition~\ref{def:strat-faceted-test-function}.
It can be easily seen that $\operatorname{div} \partial W(\nabla \psi)(x) = 0$ whenever $\psi$ is twice continuously differentiable in a neighborhood of $x$ and $W$ is differentiable at $\nabla \psi(x)$. We therefore have to identify the value of this operator at points where $\partial W(\nabla \psi)$ is not a singleton, that is, on the flat parts of the stratified faceted functions. These flat parts can be thought of as $k$-dimensional facets, and they can be described by a pair of open sets $(A_-, A_+)$, which specify where the function is below ($A_-$) or above $(A_+)$ the flat part. It turns out that $\operatorname{div} \partial W(\nabla \psi)$ is independent of the particular choice of $\psi$, Corollary~\ref{co:lambda support func indep}, but only depends on the sets $(A_-, A_+)$ and the slope $p = \nabla \psi$ of the flat part. We call this value $\Lambda_p(\psi)$ to emphasize this dependence on $p$, and connect this to the previous results \cite{GGP13JMPA,GGP13AMSA}, see Section~\ref{sec:crystalline curvature}. While $\Lambda_p(\psi)$ might be discontinuous on the flat parts, it satisfies a comparison principle property with respect to a natural ordering of the $k$-dimensional facets.
We use the stratified faceted functions as the test functions for the definition of viscosity solutions. Heuristically speaking, a continuous function $u$ is a viscosity solution of \eqref{P} if it satisfies a comparison principle with all stratified faceted functions that are local solutions of \eqref{P}.
To show that this definition of viscosity solutions is reasonable, we have to establish a general comparison principle and stability of solutions (with respect to approximation by regularized problems). For the comparison principle, we need a sufficiently large class of stratified faceted test functions. In particular, for any given gradient $p$ such that $\partial W(p)$ is not a singleton and a pair of smooth disjoint open sets $(A_-, A_+)$ in $\ensuremath{\mathbb{R}}^k$, $k = \dim \partial W(p)$, we need to be able to construct a $k$-dimensional facet arbitrarily close to the facet given by $(A_-, A_+)$ such that there exists a stratified faceted function with this facet, and for which $\Lambda_p(\psi)$ is well-defined. See Corollary~\ref{co:approximate pair sliced} for details. This unfortunately seems to be quite nontrivial, and we currently know how to do this construction in one and two dimensions. This allows us to prove the comparison principle for \eqref{P} in three dimensions. However, if this approximated admissible facet construction in Corollary~\ref{co:approximate pair sliced} can be extended to higher dimensions, our results Theorem~\ref{th:unique existence} and Theorem~\ref{th:convergence} will automatically apply to the higher dimensions as well.
The proof of the comparison principle Theorem~\ref{th:comparison principle} follows the standard doubling-of-variables argument with an additional parameter as in \cite{GGP13JMPA,GGP13AMSA}. This is substantially extended to handle the stratified energy and the stratified faceted test functions. We consider two solutions $u$, $v$ of \eqref{P} that are ordered as $u \leq v$ at $t = 0$ and consider the function \begin{align*} \Phi_{\zeta,\ensuremath{\varepsilon}}(x,t,y,s) := u(x,t) - v(y,s) - \frac{\abs{x-y-\zeta}^2}{2\ensuremath{\varepsilon}}-
S_\ensuremath{\varepsilon}(t,s), \end{align*} on $(x, t, y, s) \in \ensuremath{\mathbb{R}}^n \times (0, T) \times \ensuremath{\mathbb{R}}^n \times (0, T)$, where $S_\ensuremath{\varepsilon}$ is defined in \eqref{Se}, and $T, \ensuremath{\varepsilon} > 0$ are fixed. We then analyze the maxima of $\Phi_{\zeta, \ensuremath{\varepsilon}}$ for $\zeta \in \ensuremath{\mathbb{R}}^n$ small. This extra parameter $\zeta$ allows us to recover additional information about the behavior of $u$ and $v$ near the maximum of $\Phi_{\zeta, \ensuremath{\varepsilon}}$. We then argue by contradiction: if $u > v$ at some point, we can construct stratified faceted test functions for $u$ and $v$ near the maximum of $\Phi_{\zeta, \ensuremath{\varepsilon}}$. These test functions have ordered facets, which then together with the comparison principle for $\Lambda_p$ yields a contradiction.
The stability of solutions with respect to approximation of \eqref{P} by regularized problems then follows from an extension of the argument developed in \cite{GGP13JMPA}. We have to again overcome the discrepancy between the test functions of the regularized problem, which are only smooth functions, and the stratified faceted functions for the limit problem \eqref{P}. This is related to the fact that we are approximating a singular, nonlocal operator by local operators. The idea is to perturb the test function by solving the resolvent problem for the energy $E$ and the regularized (elliptic) energy $E_m$ with a small parameter $a > 0$: \begin{align*} \psi_a = (I + a \partial E)^{-1} \psi, \qquad \psi_{a,m} = (I + a \partial E_m)^{-1} \psi, \end{align*} which amounts to solving one step of the implicit Euler discretization of the gradient flow of those energies. This transfers the nonlocal information onto the perturbed test function and allows passing in the limit, Theorem~\ref{th:stability quadratic}. The main extension in this paper is the handling of the sliced energy. An elaboration on this argument yields also stability with respect to an approximation by one-homogeneous energies $E_m$, Theorem~\ref{th:linear growth stability}.
Combining the above results we obtain the existence of a unique solution of \eqref{P}. Since the level set of the solution does not depend on the choice of the initial level set function, we have uniqueness of the level set flow.
\subsection*{Outline}
We open with a review of the theory for convex functionals with linear growth in Section~\ref{sec:convex func lin growth}. This will allow us to introduce the idea of energy stratification and the slicing of the energy density $W$ according to its features, Section~\ref{sec:energy-stratification}. We then define the crystalline mean curvature $\Lambda$ on various features of the evolving surface such as edges and facets, Section~\ref{sec:crystalline curvature}, and establish its properties, including a comparison principle. At this point we introduce the notion of viscosity solutions, Section~\ref{sec:viscosity solutions}, and construct faceted test functions in Section~\ref{sec:faceted functions}. The comparison principle for viscosity solutions is established in Section~\ref{sec:comparison principle}, followed by the stability results, Section~\ref{se:stability}. Finally, the main result on the well-posedness of \eqref{P} is presented in Section~\ref{sec:well-posedness}.
\section{Convex functionals with linear growth} \label{sec:convex func lin growth}
There are a considerable number of publications on the topic of convex functionals with linear growth, see \cite{ACM} for a list of references. In this section we review the rather standard notation and results that we will use throughout the paper, and prove two important lemmas that will allow us to better understand the crystalline mean curvature later.
Suppose that $W: \ensuremath{\mathbb{R}}^d \to \ensuremath{\mathbb{R}}$, $d \geq 1$, is a convex function that satisfies the growth condition \begin{align} \label{growth-condition} \abs{W(p)} \leq M (1 + \abs{p}), \qquad p \in \ensuremath{\mathbb{R}}^d, \end{align} for some $M > 0$. Note that it is usually also assumed that $W(p) \geq c\abs{p}$ for some $c > 0$, or that $W(p) = W(-p)$, but we make no such assumption since they are unnecessary for our purposes, and in fact we need the generality.
Let $\Omega$ be either a bounded open subset of $\ensuremath{\mathbb{R}}^d$ or the $d$-dimensional flat torus $\ensuremath{\mathbb{R}}^d / L \ensuremath{\mathbb{Z}}^d$. We are interested in the functional $E_W(\cdot; \Omega): L^2(\Omega) \to \ensuremath{\mathbb{R}}$ defined as \begin{align} \label{EW} E_W(\psi; \Omega) &= \begin{cases} \int_\Omega W(D\psi) & \psi \in L^2(\Omega) \cap BV(\Omega),\\ +\infty & \text{otherwise} \end{cases} \end{align} that is understood as the \emph{relaxation} (also the \emph{closure} or the \emph{lower semi-continuous envelope}) of the functional \begin{align} \label{functional-w11} \psi \mapsto \begin{cases} \int_\Omega W(\nabla\psi) & \psi \in L^2(\Omega) \cap W^{1,1}(\Omega),\\ +\infty & \text{otherwise}. \end{cases} \end{align}
The relaxed functional $E_W$ can be expressed more explicitly following \cite{GiaquintaModicaSoucek,BouchitteDalMaso}. Indeed, we introduce the recession function of $W$, \begin{align*} W0^+(p) = \lim_{\lambda\to 0+} \lambda W(\lambda^{-1} p), \end{align*} which is a positively one-homogeneous convex function on $\ensuremath{\mathbb{R}}^d$ due to the growth condition \eqref{growth-condition}. If $W$ is one-homogeneous itself, we have $W0^+ = W$.
For $\psi \in BV(\Omega)$, $\nabla\psi$ will denote the Radon-Nikod\'{y}m derivative of the absolutely continuous part of $D\psi$ with respect to the Lebesgue measure $L^d \lfloor \Omega$ and $D^s \psi$ will be the singular part. Then we have \begin{align*} D \psi = \nabla \psi L^d \lfloor \Omega + D^s \psi, \end{align*} and we can write $E_W$ as \begin{align} \label{ew-decomp} E_W(\psi; \Omega) = \int_\Omega W(\nabla \psi) \;dx + \int_\Omega W0^+\pth{\frac{D^s \psi}{\abs{D^s \psi}}} \;d\abs{D^s\psi}, \end{align} where $\frac{D^s \psi}{\abs{D^s \psi}}$ is the Radon-Nikod\'{y}m derivative of $D^s\psi$ with respect to $\abs{D^s\psi}$. We note that if $\psi \in L^2(\Omega) \cap W^{1,1}(\Omega)$, or even $\psi \in {\rm Lip}(\Omega)$, then this formula simplifies to \eqref{functional-w11} since $D^s \psi = 0$.
\subsection{Subdifferentials}
Since $E_W(\cdot; \Omega)$ is a proper closed (that is, lower semi-continuous) convex functional on $L^2(\Omega)$, its subdifferential \begin{align*} \partial E_W(\psi; \Omega) = \set{ v \in L^2(\Omega): E_W(\psi + h; \Omega) - E_W(\psi; \Omega) \geq (h, v) \text{ for all $h \in L^2(\Omega)$}} \end{align*} is a closed convex, possibly empty subset of the Hilbert space $L^2(\Omega)$ equipped with the inner product $(h, v) := \int_\Omega h v \;dx$. If $\partial E_W(\psi; \Omega)$ is nonempty, we say that $\psi \in \operatorname{\mathcal{D}}(\partial E_W(\cdot; \Omega))$, the \emph{domain} of the subdifferential, and we define the \emph{minimal section} (also known as the \emph{canonical restriction}) $\partial^0 E_W(\psi; \Omega)$ of the subdifferential as the unique element of $\partial E_W(\psi; \Omega)$ with the minimal norm in $L^2(\Omega)$.
The characterization of the subdifferential of $E_W$ is well-known when $W$ is a positively one-homogeneous function, that is, when \begin{align*} W(tp) = t W(p) \qquad t \geq 0. \end{align*} We will need this characterization for Lipschitz functions only, and we therefore present it in this simplified setting. Let $\Omega$ be an open subset of $\ensuremath{\mathbb{R}}^d$ or a $d$-dimensional torus $\ensuremath{\mathbb{R}}^d / L \ensuremath{\mathbb{Z}}^d$ for some $L > 0$. Following \cite{Anzellotti}, let us introduce the space of vector fields with $L^2$ divergence, \begin{align*} X_2(\Omega) = \set{z \in L^\infty(\Omega; \ensuremath{\mathbb{R}}^d): \operatorname{div} z \in L^2(\Omega)}. \end{align*} For given $\psi \in {\rm Lip}(\Omega)$, we define the set of \emph{Cahn-Hoffman vector fields} on $\psi$ as \begin{align} \label{cahn-hoffman} {CH}_W(\psi; \Omega) := \set{z \in X_2(\Omega): z(x) \in \partial W(\nabla \psi(x)) \text{ a.e. $x \in \Omega$}}. \end{align} Note that the set \begin{align} \label{div-cahn-hoffman} \operatorname{div} {CH}_W(\psi; \Omega) := \set{\operatorname{div} z: z \in {CH}_W(\psi; \Omega)} \end{align} is a closed convex, possibly empty subset of $L^2(\Omega)$. We have the well-known characterization of the subdifferential of $E_W$ in the periodic case, see \cite[Section~1.3]{ACM} or \cite{Moll}. \begin{proposition} \label{pr:subdiff-char-periodic} Let $\Omega = \ensuremath{\mathbb{R}}^d / L \ensuremath{\mathbb{Z}}^d$ for some $d \in \ensuremath{\mathbb{N}}$ and $L > 0$, and assume that $W$ is a positively one-homogeneous convex function on $\ensuremath{\mathbb{R}}^d$. If $\psi \in {\rm Lip}(\Omega)$ then \begin{align*} \partial E_W(\psi; \Omega) = \set{-\operatorname{div} z: z \in {CH}_W(\psi; \Omega)} = -\operatorname{div} {CH}_W(\psi; \Omega). \end{align*} \end{proposition}
\begin{remark} If $\Omega$ is a bounded open subset of $\ensuremath{\mathbb{R}}^d$ with a Lipschitz boundary, then the subdifferential is given by the vector fields $z \in {CH}_W(\psi; \Omega)$ such that $[z \cdot \nu] = 0$ on $\partial\Omega$; see \cite{ACM} for details. We will work on periodic domains to not have to deal with this technicality. We will see later (Lemma~\ref{le:cahn-hoffman-patch} and Properties~\ref{pr:lambda-well-defined}) that this does not change the value of the crystalline curvature on the facet. \end{remark}
Let us also mention one trivial result concerning the subdifferential of one-homogeneous convex functions on $\ensuremath{\mathbb{R}}^d$.
\begin{lemma} \label{le:one-homogeneous-subdiff} Suppose that $W$ is a positively one-homogeneous convex function on $\ensuremath{\mathbb{R}}^d$. Then $\partial W(p) \subset \partial W(0)$ for any $p \in \ensuremath{\mathbb{R}}^d$. We also have $(x - y) \perp p$ for any $x, y \in \partial W(p)$ and any $p \in \ensuremath{\mathbb{R}}^d$. \end{lemma}
\subsection{The resolvent problem and the approximation by regularized functionals} \label{sec:resolvent-approximation}
Let $W$ be a convex function satisfying the growth condition \eqref{growth-condition}. For some flat torus $\Gamma = \ensuremath{\mathbb{R}}^d / L \ensuremath{\mathbb{Z}}^d$, $d \geq 1$, we want to approximate $E_W(\cdot; \Gamma)$ defined in \eqref{EW} by certain regularized functionals.
Suppose therefore that $\set{W_m}_{m\in \ensuremath{\mathbb{N}}}$ is a sequence of convex functions on $\ensuremath{\mathbb{R}}^d$ that satisfies the following: \begin{enumerate} \item $\set{W_m}_{m\in\ensuremath{\mathbb{N}}}$ is a decreasing sequence, \item $W_m \in C^2(\ensuremath{\mathbb{R}}^d)$, \item $W_m \searrow W$ as $m\to\infty$ locally uniformly on $\ensuremath{\mathbb{R}}^d$, \item there exist positive numbers $a_m$ such that $a_m^{-1} I \leq \nabla_p^2 W_m(p) \leq a_m I$ for all $p \in \ensuremath{\mathbb{R}}^d$, $m\in\ensuremath{\mathbb{N}}$, where $I$ is the $d\times d$ identity matrix. \end{enumerate}
We introduce the regularized functionals \begin{align*} E_m(\psi; \Gamma) := \begin{cases} \int_\Gamma W_m(\nabla \psi) \;dx & \psi \in H^1(\Gamma),\\ +\infty & \psi \in L^2(\Gamma) \setminus H^1(\Gamma), \end{cases} \end{align*} where $H^k(\Gamma) := W^{k,2}(\Gamma)$ is the standard Sobolev space of $L\ensuremath{\mathbb{Z}}^d$-periodic functions.
Let us give an example of a regularized $W_m$ first.
\begin{example} \label{ex:wm-example} Let $\eta_m$ be the standard mollifier with support of radius $1/m$. Define the smoothing \begin{align*} W_m(p) = (W * \eta_m)(p) + \frac 1{2m} \abs{p}^2 \qquad p \in \Rd. \end{align*} By convexity we have $W_m \geq W$ and $W_m$ convex, $W_m \in C^\infty(\ensuremath{\mathbb{R}}^d)$, $\nabla^2 W_m \geq \frac 1m I$ and $W_m \searrow W$ as $m\to\infty$ locally uniformly. The uniform upper bound on $\nabla^2 W_m$ follows immediately from $\partial_{p_i p_j}(W * \eta_m) = \partial_{p_i} W * \partial_{p_j} \eta_m$, and the right-hand side is bounded since $\nabla W$ is bounded. \end{example}
We need the following result similar to \cite[Proposition~5.1]{GGP13JMPA}.
\begin{proposition} \label{pr:energy-convergence} \begin{enumerate} \item $E_m(\cdot; \Gamma)$ form a decreasing sequence of proper closed convex functionals on $L^2(\Gamma)$. \item The subdifferential $\partial E_m$ is a singleton for all \begin{align*} \psi \in \operatorname{\mathcal{D}}(\partial E_m) = H^2(\Gamma) \end{align*} containing the unique element \begin{align*} -\operatorname{tr} \bra{\pth{\nabla_p^2 W_m} \pth{\nabla \psi} \nabla^2 \psi} \qquad \text{a.e.} \end{align*} \item $(\inf_m E_m(\cdot; \Gamma))_* = E_W(\cdot; \Gamma)$, the lower semi-continuous envelope of $\inf_m E_m$ in $L^2(\Gamma)$. \end{enumerate} \end{proposition}
\begin{proof} For (a) and (b) see \cite[Section~9.6.3]{Evans}.
(c): $\set{E_m(\cdot; \Gamma)}$ is decreasing since $\set{W_m}$ is decreasing. Therefore $E_m(\psi; \Gamma) \to E_W(\psi; \Gamma)$ for any $\psi \in H^1(\Gamma)$ by the Dominated convergence theorem, since $E_W$ is of the form \eqref{functional-w11} in this case. If $\psi \notin H^1(\Gamma)$, $E_m(\psi; \Gamma) = \infty$ by definition and therefore $E_W(\cdot; \Gamma) \leq \inf_m E_m(\cdot; \Gamma)$, with equality on $H^1(\Gamma)$. Let us now denote $F(\psi) = \inf_m E_m(\psi; \Gamma)$. By a standard approximation result, for any $\psi \in BV(\Gamma)$ there exists a sequence $\set{\psi_k} \subset C^\infty(\Gamma) \cap BV(\Gamma) \subset H^1(\Gamma)$ such that $\psi_k \to \psi$ in $L^2(\Gamma)$ and $\int_\Gamma \abs{D\psi_k} \to \int_\Gamma \abs{D\psi}$, which yields $E_W(\psi_k; \Gamma) \to E_W(\psi; \Gamma)$ due to \cite{Resetnjak}; see \cite{GiaquintaModicaSoucek}. In particular, \begin{align*} F_*(\psi) \leq \liminf_{k\to\infty} F(\psi_k) = \liminf_{k\to\infty} E_W(\psi_k; \Gamma) = E_W(\psi; \Gamma). \end{align*} Hence $F_* = E_W$ by the lower semi-continuity of $E_W$. \end{proof}
We will need the following approximation and convergence result for the resolvent problems.
\begin{proposition} \label{pr:resolvent-problems} For $\psi \in {\rm Lip}(\Gamma)$, and $m \in \ensuremath{\mathbb{N}}$, $a > 0$, the resolvent problems \begin{align*} \psi_a + a \partial E_W(\psi_a; \Gamma) \ni \psi,\\ \psi_{a,m} + a \partial E_m(\psi_{a,m}; \Gamma) \ni \psi, \end{align*} admit unique solutions $\psi_a$ and $\psi_{a,m}$ in $L^2(\Gamma)$, respectively. Moreover, $\psi_a$ and $\psi_{a,m}$ are Lipschitz continuous and \begin{align*} \norm{\nabla \psi_a}_\infty, \norm{\nabla \psi_{a,m}}_\infty \leq \norm{\nabla \psi}_\infty. \end{align*} Finally, $\psi_{a,m} \in C^{2,\ensuremath{\alpha}}(\Gamma)$ for some $\ensuremath{\alpha} = \alpha_m > 0$.
We also introduce the functions \begin{align*} h_a := \frac{\psi_a - \psi}{a}, &&& h_{a,m} := \frac{\psi_{a,m} - \psi}{a} = -\operatorname{tr}\bra{(\nabla_p^2 W_m)(\nabla \psi_{a,m}) \nabla^2 \psi_{a,m}}. \end{align*} Then, for fixed $a>0$, \begin{align*} \psi_{a,m} &\rightrightarrows \psi_a && \text{uniformly as $m \to \infty$, and}\\ h_{a,m} &\rightrightarrows h_a && \text{uniformly as $m \to \infty$}. \intertext{Moreover,} \psi_a &\rightrightarrows \psi && \text{uniformly as $a \to 0$.} \intertext{If furthermore $\psi \in \operatorname{\mathcal{D}}\pth{\partial E_W(\cdot; \Gamma)}$ then also} h_a &\to -\partial^0 E_W(\psi; \Gamma)&& \text{in $L^2(\Gamma)$ as $a \to 0$.} \end{align*} \end{proposition}
\begin{proof} We follow the proof of \cite[Proposition~5.3]{GGP13JMPA}. Due to Proposition~\ref{pr:energy-convergence}(a), \cite[Theorem~3.20]{Attouch} implies the \emph{Mosco convergence} of $E_m$ to $E$. This yields the resolvent convergence \cite[Theorem~3.26]{Attouch}, namely, for fixed $a > 0$ we have \begin{align} \label{res-l2-conv} \psi_{a,m} \to \psi_a \quad \text{in $L^2(\Gamma)$.} \end{align}
The $C^{2,\alpha}$ regularity of $\psi_{a,m}$ is standard from the elliptic theory, as $I + a \partial^0 E_m(\cdot; \Gamma)$ is a quasilinear uniformly elliptic operator as noted in Proposition~\ref{pr:energy-convergence}.
Since the $E_m$-resolvent problem is translation invariant and has a maximum principle, we find that $\psi_{a,m}$ is Lipschitz since $\psi$ is Lipschitz, and \begin{align*} \norm{\nabla \psi_{a,m}}_\infty \leq \norm{\nabla \psi}_\infty. \end{align*} Therefore the Arzel\'{a}-Ascoli theorem and \eqref{res-l2-conv} yield the uniform convergence of $\psi_{a,m} \to \psi_a$ and $h_{a,m} \to h_a$ as $m\to\infty$ for fixed $a > 0$, and hence also the Lipschitz bound $\norm{\nabla \psi_a}_\infty \leq \norm{\nabla \psi}_\infty$. Moreover, since the $E_m$-resolvent problem has a maximum principle, the $E_W$-resolvent problem has a maximum principle as well.
Finally, a standard result implies that $\psi_a \to \psi$ in $L^2(\Gamma)$ as $a\to0$ \cite[Theorem~3.24]{Attouch}, therefore with Arzel\'{a}-Ascoli and the uniform Lipschitz bound we conclude that $\psi_a \to \psi$ uniformly. If furthermore $\psi \in \operatorname{\mathcal{D}}(\partial E_W(\cdot; \Gamma))$, also $h_a \to -\partial^0 E_W(\psi; \Gamma)$ \cite[Proposition~3.56]{Attouch}. \end{proof}
We give a lemma on the Mosco convergence of functionals with linear growth.
\begin{lemma} \label{le:lingrowthapproximation} Suppose that $W_m$ are convex positively one-homogeneous functions such that $W_m \rightrightarrows W$ uniformly on the unit ball. Then $E_m(\psi) = \int_\Gamma W_m(\nabla \psi)$ Mosco-converges to $E(\psi) = \int_\Gamma W(D\psi)$ as $m \to \infty$. \end{lemma}
\begin{proof} By \cite[Proposition~3.19]{Attouch}, we need to show that for every $\psi$, $\psi_m \stackrel{w}{\to} \psi$ weakly in $L^2(\Gamma)$ we have $E(\psi) \leq \liminf_m E_m(\psi_m)$ and that for every $\psi \in L^2(\Gamma)$ there exists a sequence $\psi_m \to \psi$ strongly in $L^2(\Gamma)$ such that $E(\psi) = \lim_m E_m(\psi_m)$.
If $\psi_m \stackrel{w}\to \psi$ weakly in $L^2(\Gamma)$, we can deduce $E(\psi) \leq \liminf_m E_m(\psi_m)$ from the formula \cite{AB} \begin{align*} E(\psi) := \sup \Big\{\int_\Gamma \psi \operatorname{div} \varphi: &\varphi \in C^1(\Gamma), \norm{\varphi}_\infty \leq 1,\\ &\varphi(x) \cdot p \leq 1 \text{ whenever } W(p) \leq 1, x \in \Gamma\Big\}. \end{align*}
By a standard approximation result, for any $\psi \in BV(\Gamma)$ there exists a sequence $\set{\psi_k} \subset C^\infty(\Gamma) \cap BV(\Gamma) \subset W^{1, 1}(\Gamma)$ such that $\psi_k \to \psi$ in $L^2(\Gamma)$ and $\int_\Gamma \abs{D\psi_k} \to \int_\Gamma \abs{D\psi}$, which yields $E(\psi_k) \to E(\psi)$ by the theorem of Re\v{s}etnjak \cite{Resetnjak}. On the other hand, by the uniform convergence of $W_m$ to $W$ on the unit ball we have for any $\xi \in W^{1,1}(\Gamma)$ \begin{align*}
\abs{\int_\Gamma W_m(\nabla\xi) - W(\nabla \xi) \;dx} \leq &\int_\Gamma |\nabla \xi|
\abs{W_m\pth{\frac{\nabla\xi}{|\nabla \xi|}} - W\pth{\frac{\nabla\xi}{|\nabla \xi|}}} \;dx \\
&\leq \int_\Gamma |\nabla \xi| \;dx\norm{W_m - W}_{L^\infty(B_1(0))}. \end{align*} Therefore $E(\psi) = \lim_m E_m(\psi_m)$. \end{proof}
\subsection{Cahn-Hoffman vector field patching}
We shall use the minimal section $\partial^0 E_W(\psi; \Omega)$ of the subdifferential of $E_W$ to define the crystalline curvature for a given Lipschitz function $\psi$ on $\Omega$. However, the minimal section is a solution of a variational problem and therefore its value might depend strongly on the set $\Omega$, and nonlocally on the values of $\psi$. Fortunately, the situation is not as dire as it might appear at first, and in fact, the minimal section is nonlocal only on flat parts (facets) of $\psi$. This restriction of nonlocality is expressed by the following lemma. Intuitively, we can patch the Cahn-Hoffman vector fields as much as we please as long as we do it across the level sets of $\psi$.
\begin{lemma} \label{le:cahn-hoffman-patch} Let $W: \ensuremath{\mathbb{R}}^d \to \ensuremath{\mathbb{R}}$ be a positively one-homogeneous convex function, $d \geq 1$. Suppose that $\psi_1 \in {\rm Lip}(\Omega_1)$ and $\psi_2 \in {\rm Lip}(\Omega_2)$ are two Lipschitz functions on two open subsets $\Omega_1, \Omega_2$ of $\ensuremath{\mathbb{R}}^d$. Let $G = \set{x \in \Omega_1: a < \psi_1(x) < b}$ for some $a < b$ such that $\cl G \subset \Omega_1 \cap \Omega_2$ and $\psi_1 = \psi_2$ on $G$. If $z_i \in {CH}_W(\psi_i; \Omega_i)$ are two Cahn-Hoffman vector fields, then \begin{align} \label{z-patch} z(x) = \begin{cases} z_2(x) & x \in G,\\ z_1(x) & x \in \Omega_1 \setminus G, \end{cases} \end{align} is also a Cahn-Hoffman vector field $z \in {CH}_W(\psi_1; \Omega_1)$, and \begin{align} \label{divz-patch} \operatorname{div} z(x) = \begin{cases} \operatorname{div} z_2(x) & \text{a.e. } x \in G,\\ \operatorname{div} z_1(x) & \text{a.e. } x \in \Omega_1 \setminus G. \end{cases} \end{align} \end{lemma}
\begin{proof} Since adding the same constant to both $\psi_1$ and $\psi_2$ does not change anything, we can assume that $a = -\delta$ and $b = \delta$ for some $\delta > 0$. For given $\ensuremath{\varepsilon} \in (0, \delta)$ we introduce the Lipschitz function \begin{align*} \zeta_\ensuremath{\varepsilon}(x) = 1 + \max \pth{ -1, \min \pth{0, \frac{\abs{\psi_1(x)} - \delta}\ensuremath{\varepsilon}}}. \end{align*} Note that $\zeta_\ensuremath{\varepsilon} = 0$ on $\set{\abs{\psi_1} \leq \delta - \ensuremath{\varepsilon}}$ and $\zeta_\ensuremath{\varepsilon} = 1$ on $\set{\abs{\psi_1} \geq \delta}$. Furthermore, \begin{align} \label{zeta-deriv} \nabla \zeta_\ensuremath{\varepsilon}(x) = \begin{cases} \operatorname{sign} \psi_1(x)\frac{\nabla \psi_1(x)}\ensuremath{\varepsilon} & \delta - \ensuremath{\varepsilon} < \abs{\psi_1(x)} < \delta,\\ 0 & \text{otherwise} \end{cases} \end{align} for a.e. $x$. Finally, $\zeta_\ensuremath{\varepsilon} \searrow \chi_{\Omega_1 \setminus G}$ monotonically pointwise as $\ensuremath{\varepsilon} \to 0$.
Now for $\rho > 0$ we define $z_i^\rho = z_i * \eta_\rho$, where $\eta_\rho$ is the standard mollifier with radius $\rho$, and we extend $z_i$ as $0$ to $\Omega_i^c$. We have $z_i^\rho \to z_i$ in $L^\infty(\Omega_i)$-weak$^*$ and strongly in $L^p_{\rm loc}(\Omega_i)$ for any $1 \leq p < \infty$ as well as $\operatorname{div} z_i^\rho \to \operatorname{div} z_i$ strongly in $L^2_{\rm loc}(\Omega_i)$ as $\rho \to 0$, $i = 1,2$. Define \begin{align*} z_\ensuremath{\varepsilon}^\rho = z_1^\rho \zeta_\ensuremath{\varepsilon} + z_2^\rho (1 - \zeta_\ensuremath{\varepsilon}). \end{align*} This function is clearly Lipschitz.
On $G$ we have $z_i(x) \in \partial W(\nabla \psi_1(x)) = \partial W(\nabla \psi_2(x))$ for a.e. $x$. Therefore $(z_1(x) - z_2(x)) \cdot \nabla \psi_1(x) = 0$ for a.e. $x \in G$ by Lemma~\ref{le:one-homogeneous-subdiff}, which together with \eqref{zeta-deriv} implies \begin{align} \label{zetae-ortho} \nabla \zeta_\ensuremath{\varepsilon} \cdot (z_1 - z_2) = 0 \qquad \text{a.e.} \end{align} Thus we have for any $\varphi \in C^\infty_c(\Omega_1)$ \begin{align*} \int z_\ensuremath{\varepsilon}^\rho \cdot \nabla \varphi &= -\int \varphi \operatorname{div} z_\ensuremath{\varepsilon}^\rho\\ &= -\int \varphi \bra{\zeta_\ensuremath{\varepsilon} \operatorname{div} z_1^\rho + (1- \zeta_\ensuremath{\varepsilon}) \operatorname{div} z_2^\rho + \nabla \zeta_\ensuremath{\varepsilon} \cdot (z_1^\rho - z_2^\rho)}. \end{align*} Now we send $\rho \to 0$ and obtain \begin{align*} \int z_\ensuremath{\varepsilon} \cdot \nabla \varphi &= -\int \varphi \bra{\zeta_\ensuremath{\varepsilon} \operatorname{div} z_1 + (1- \zeta_\ensuremath{\varepsilon}) \operatorname{div} z_2 + \nabla \zeta_\ensuremath{\varepsilon} \cdot (z_1 - z_2)}\\ &= -\int \varphi \bra{\zeta_\ensuremath{\varepsilon} \operatorname{div} z_1 + (1- \zeta_\ensuremath{\varepsilon}) \operatorname{div} z_2}, \end{align*} where we used \eqref{zetae-ortho}. Finally we send $\ensuremath{\varepsilon} \to 0$ and use the Dominated convergence theorem to conclude that \begin{align*} \int z \cdot \nabla \varphi = - \int \varphi \bra{\chi_{\Omega_1 \setminus G} \operatorname{div} z_1 + \chi_G \operatorname{div} z_2}. \end{align*} Since this holds for any test function, we see that $\operatorname{div} z \in L^2(\Omega_1)$ and it can be expressed as in \eqref{divz-patch}. \end{proof}
\begin{remark} \label{arb-convex-patch} We can take an arbitrary convex combination of $z_1$ and $z_2$ on $G$ in \eqref{z-patch}. Indeed, take $z$ as in \eqref{z-patch}. Then $\lambda z_1 + (1-\lambda) z = (\lambda z_1 + (1 - \lambda) z_2) \chi_G + z_1 \chi_{\Omega_1 \setminus G} \in {CH}_W(\psi_1; \Omega_1)$ by convexity. \end{remark}
\begin{remark} \label{re:patching-on-facet-boundaries} In the proof of \cite[Proposition~2.10]{GGP13JMPA} in the case of $W$ with a smooth $1$-level set we used the fact that Cahn-Hoffman vector fields can be patched across the boundary of a facet arbitrarily, as a consequence of \cite[Proposition~2.8]{GGP13JMPA}. This is stronger than Lemma~\ref{le:cahn-hoffman-patch} above where we can patch the Cahn-Hoffman vector field only if the support functions coincide on a neighborhood of the facet. We believe that this requirement can be removed as in \cite{GGP13JMPA}, but we do not pursue this matter further in the current paper. \end{remark}
Finally, let us briefly consider the characterization of the subdifferential of $E_W$ in the case when $W$ is not positively one-homogeneous. Proposition~\ref{pr:subdiff-char-periodic} does not apply in such a case. However, if $W$ is equal to a positively one-homogeneous function $W'$ in the neighborhood of the origin, the subdifferentials of $E_W$ and $E_{W'}$ coincide at least for functions with small Lipschitz constant.
\begin{lemma} \label{le:subdiff-homog-relation} Suppose that $W$ is a convex function and $W'$ is a positively one-homogeneous convex function on $\ensuremath{\mathbb{R}}^d$, $d \geq 1$, and there exists $\ensuremath{\varepsilon} > 0$ such that $W(p) = W'(p)$ for $\abs p < \ensuremath{\varepsilon}$. Suppose that $\Omega$ is a bounded open subset of $\ensuremath{{\mathbb{R}^{\dimension}}}$ or the torus $\ensuremath{\mathbb{R}}^d / L \ensuremath{\mathbb{Z}}^d$ for some $L > 0$. If $\psi \in {\rm Lip}(\Omega)$ and $\norm{\nabla \psi}_\infty < \ensuremath{\varepsilon}$, then \begin{align*} \partial E_W(\psi; \Omega) = \partial E_{W'}(\psi; \Omega). \end{align*} \end{lemma}
\begin{proof} We shall denote the functionals as $E$ and $E'$ for short. Fix $\psi \in {\rm Lip}(\Omega)$ with $\norm{\nabla \psi}_\infty < \ensuremath{\varepsilon}$. By definition of the functionals and our assumption on the equality of $W$ and $W'$, we have \begin{align} \label{en-equality} E(\psi + h) = E'(\psi + h) \qquad h \in {\rm Lip}(\Omega),\ \norm{\nabla h}_\infty < \delta = \ensuremath{\varepsilon} - \norm{\nabla \psi}_\infty. \end{align}
The convexity of $W$, $W'$, and one-homogeneity of $W'$ imply for $p \in \ensuremath{\mathbb{R}}^d$ and $\lambda \in (0,1)$ such that $\lambda \norm p < \ensuremath{\varepsilon}$ \begin{align*} \lambda W(p) \geq W(\lambda p) - (1 -\lambda) W(0) = W'(\lambda p) = \lambda W'(p). \end{align*} In particular, $W(p) \geq W'(p)$ on $\ensuremath{\mathbb{R}}^d$. Therefore $E(\psi + h) - E(\psi) \geq E'(\psi + h) - E'(\psi)$ for all $h \in L^2(\Omega)$ since $E(\psi) = E'(\psi)$. We conclude that $\partial E'(\psi) \subset \partial E(\psi)$.
To prove the opposite inclusion, take $v \in \partial E(\psi)$, if such an element exists. We want to prove \begin{align} \label{subdiff-def} E'(\psi + h) - E'(\psi) \geq (h, v) \qquad \text{for all $h \in L^2(\Omega)$.} \end{align} If $h \notin BV(\Omega)$, $E'(\psi + h) = \infty$ by definition. Thus we can assume that $h \in BV(\Omega)$. By a standard approximation result, there exists a sequence $\set{h_m} \subset C^\infty(\Omega) \cap BV(\Omega)$ such that $h_m \to h$ in $L^2(\Omega)$ and $Dh_m \to Dh$ weakly$^*$ as measures, which yields $E'(\psi + h_m) \to E'(\psi + h)$ due to \cite{Resetnjak}; see \cite{GiaquintaModicaSoucek}. But we can choose $\lambda_m \in (0,1)$ such that $\lambda_m \norm{\nabla h_m}_\infty < \delta$.
Then \eqref{en-equality} implies \begin{align*} E'(\psi + \lambda_m h_m) - E'(\psi) = E(\psi + \lambda_m h_m) - E(\psi) \geq \lambda_m (h_m, v). \end{align*} By convexity, we have \begin{align*} E'(\psi + h_m) - E'(\psi) \geq (h_m, v). \end{align*} Indeed, \begin{align*} \lambda_m E'(\psi + h_m) + (1-\lambda_m) E'(\psi) &\geq E'(\lambda_m(\psi + h_m) + (1-\lambda_m)\psi)\\ &=E'(\psi +\lambda_m h_m) \geq E'(\psi) +\lambda_m(h_m, v). \end{align*} Sending $m \to \infty$ yields \eqref{subdiff-def}. \end{proof}
\section{Energy stratification} \label{sec:energy-stratification}
In this section we shall assume that $W$ is a convex \textbf{polyhedral} function on $\Rd$. Since $W$ is polyhedral, it can be locally viewed as a positively one-homogeneous convex function; we will give a detailed explanation in this section. The features of $W$ correspond to the dual features of the crystal such as facets, edges and vertices, depending on the dimension. For each gradient, we will decompose the space into orthogonal subspaces of interesting directions, corresponding to the given feature of the crystal, and the directions in which $W$ is linear and therefore its behavior simple.
\subsection{Slicing of $W$} \label{sec:slicing of W}
To perform the decomposition, we need a few standard concepts from convex analysis (see for example \cite{Rockafellar}). For a given convex set $C$ let $\operatorname{aff} C$ denote the affine hull of $C$, that is, the smallest affine space containing $C$. The dimension of the convex set is defined as the dimension of its affine hull, $\dim C := \dim \operatorname{aff} C$. Let $\operatorname{ri} C$ be the relative interior of $C$ with respect to $\operatorname{aff} C$. A convex set is said to be relatively open if $C = \operatorname{ri} C$. We know that $\operatorname{ri} C \neq \emptyset$ if $C \neq \emptyset$ (\cite[Theorem~6.2]{Rockafellar}). We say that $\operatorname{aff} C$ is parallel to a subspace $V \subset \Rd$ if $\operatorname{aff} C = p + V$ for some $p \in \Rd$.
We can decompose $\Rd$ based on the features of the crystal, which correspond to the value of $\partial W$.
\begin{proposition}[Feature decomposition] \label{pr:feature-decomposition} For given $W$ polyhedral with $W < \infty$ on $\Rd$ there exist a finite number of mutually disjoint maximal sets $\Xi_i$, $i \in \mathcal N$, such that $\Rd = \bigcup_{i \in \mathcal N} \Xi_i$ and $\partial W$ is constant on each $\Xi_i$. Furthermore, each $\Xi_i$ is a relatively open convex set and $\operatorname{aff} \Xi_i \perp \operatorname{aff} \partial W(p)$ for $p \in \Xi_i$ in the sense that whenever $p, q \in \Xi_i$ and $\xi, \zeta \in \partial W(p)$ then $p - q \perp \xi - \zeta$. \end{proposition}
\begin{proof} We use the projections of relative interiors of the non-empty faces of the epigraph $\operatorname{epi} W := \set{(p, \lambda): \lambda \geq W(p), p \in \Rd}$, other than $\operatorname{epi} W$ itself, onto $\Rd$. For the definition of a face of a convex set see \cite[Section~18]{Rockafellar}. By \cite[Corollary~18.1.3]{Rockafellar}, all faces of $\operatorname{epi} W$ other than $\operatorname{epi} W$ itself must lie in the relative boundary of $\operatorname{epi} W$. The relative boundary of $\operatorname{epi} W$, the set $\operatorname{epi} W \setminus \operatorname{ri} \operatorname{epi} W$, is just the regular boundary and therefore it is the graph of $W$, $\operatorname{graph} W := \set{(p, W(p)): p \in \Rd} \subset \ensuremath{\mathbb{R}}^{d+1}$. By \cite[Theorem~18.2]{Rockafellar}, the relative interiors $\hat \Xi_i$ of the faces of $\operatorname{epi} W$ other than $\operatorname{epi} W$ itself form a partition of $\operatorname{graph} W$. By projecting these relative interiors $\hat \Xi_i$ onto $\Rd$ we obtain sets $\Xi_i$, which form a partition of $\Rd$ and are again relatively open by \cite[Theorem~6.6]{Rockafellar}.
Let us now prove that $\partial W$ is constant on $\Xi_i$. Fix two points $p, q \in \Xi_i$. Since $\hat \Xi_i$ are relatively open, there exists $\mu > 1$ such that $W(\mu p + (1 - \mu) q) = \mu W(p) + (1- \mu) W(q)$. Let $\xi \in \partial W(p)$. By definition of the subdifferential, we have $W(\mu p + (1 - \mu) q) \geq W(p) + (\mu - 1) \xi \cdot (p - q)$ and $W(q) \geq W(p) + \xi \cdot (q - p)$. Using the equality in the first inequality and dividing by $\mu - 1$ we obtain $W(q) \leq W(p) + \xi \cdot (q - p)$. Therefore $W(q) - W(p) = \xi \cdot (q - p)$ and we deduce that $\xi \in \partial W(q)$. Finally, if $\zeta \in \partial W(p)$ as well, we have $(\zeta - \xi) \cdot (q - p) = 0$. Maximality, that is, that $\partial W(p) \neq \partial W(q)$ for $p \in \Xi_i$, $q \in \Xi_j$, $i \neq j$, follows from the definition of convex faces. \end{proof}
\begin{lemma} \label{le:aff Xi origin} Suppose that $\Xi_i$ are as in Proposition~\ref{pr:feature-decomposition} and suppose that $W$ is also positively one-homogeneous. Then $0 \in \operatorname{aff} \Xi_i$ for every $i$. \end{lemma}
\begin{proof} This follows immediately from one-homogeneity since $\partial W(p) = \partial W(tp)$ for any $p \in \ensuremath{\mathbb{R}}^d$, $t > 0$. \end{proof}
Since $W$ is finite everywhere, $\partial W(p)$ is a nonempty closed convex set for any $p \in \Rd$. For given $p_0 \in \Rd$ we introduce the one-sided directional derivative of $W$ at $p_0$ with respect to a vector $p \in \Rd$ as (\cite[Section~23]{Rockafellar}) \begin{align*} W_{p_0}'(p) := \lim_{\lambda \to 0+} \frac{W(p_0 + \lambda p) - W(p_0)}{\lambda}. \end{align*} Then $W_{p_0}'$ is a positively one-homogeneous convex function, and (\cite[Theorem~23.4]{Rockafellar}) \begin{align} \label{W'-convex-conjugate} W_{p_0}'(p) \equiv \delta^*(p \mid \partial W(p_0)) := \sup \set{p \cdot \xi: \xi \in \partial W(p_0)}. \end{align} In particular, $W_{p_0}'$ is the convex conjugate of the indicator function of $\partial W(p_0)$. Therefore by \cite[Theorem~13.4]{Rockafellar} the lineality space of $W_{p_0}'$ (the subspace of directions in which $W_{p_0}'$ is affine) is the orthogonal complement of the subspace parallel to $\operatorname{aff} \partial W(p_0)$. This provides the orthogonal decomposition of $\Rd$ for a given gradient.
\begin{proposition}[Direction decomposition] \label{pr:direction-decomposition} Let $W$ be a polyhedral convex function on $\Rd$ finite everywhere and let $p_0 \in \Rd$. Let $V$ be the subspace of $\Rd$ parallel to $\operatorname{aff} \partial W(p_0)$ and set $U = V^\perp$. Then $W_{p_0}'$ is linear on $U$ and \begin{align} \label{W'-linear} W_{p_0}'(p) = W_{p_0}'(P_V p) + \xi \cdot P_U p \qquad \text{for any $p \in \Rd$, $\xi \in \operatorname{aff} \partial W(p_0)$}, \end{align} where $P_U$ and $P_V$ are the orthogonal projections onto $U$ and $V$, respectively.
Moreover, there exists $\delta > 0$ such that \begin{align*} W(p) = W_{p_0}'(p - p_0) + W(p_0) \qquad \text{for all $\abs{p - p_0} < \delta$.} \end{align*} \end{proposition}
\begin{proof} \eqref{W'-linear} follows from \eqref{W'-convex-conjugate} and from the orthogonality of $U$ and $V$.
The existence of $\delta > 0$ can be proved by contradiction: suppose that there exists a sequence $\set{p_k}$, $p_k \to p_0$ such that $W(p_k) - W(p_0) > W_{p_0}'(p_k - p_0)$ (it is clear that $W_{p_0}'(p - p_0) \leq W(p) - W(p_0)$ by convexity). Since $W$ is polyhedral, it is given as the maximum of a finite number of affine functions, and therefore by taking a subsequence we can assume that $W(p_k) = \xi \cdot p_k + c$ for some fixed $\xi \in \Rd$, $c \in \ensuremath{\mathbb{R}}$. By continuity we have $W(p_k) - W(p_0) = \xi \cdot (p_k - p_0)$. Therefore $\xi \in \partial W(p_0)$. But this yields a contradiction since then \eqref{W'-convex-conjugate} implies $W_{p_0}'(p_k - p_0) \geq \xi \cdot (p_k - p_0)$. \end{proof}
The previous proposition tells us that the behavior of $W$ is interesting only in the directions parallel to $\operatorname{aff} \partial W$. That motivates the following notation. For given $W: \Rd \to \ensuremath{\mathbb{R}}$ convex polyhedral and $p \in \Rd$ let $V$ be the subspace of $\Rd$ parallel to $\operatorname{aff} \partial W(p)$, $U = V^\perp$, $k = \dim V$, and we fix an arbitrary rotation \begin{align} \label{rotation} {\mathcal T}: \Rd \to \Rd \end{align} that maps $\ensuremath{\mathbb{R}}^k \times \set 0$ onto $V$ and $\set 0 \times \ensuremath{\mathbb{R}}^{n-k}$ onto $U$. For given $x \in \Rd$, we define the unique $x' \in \ensuremath{\mathbb{R}}^k$ and $x'' \in \ensuremath{\mathbb{R}}^{n-k}$ such that \begin{align} \label{rotation'} {\mathcal T}(x', x'') = x. \end{align} We set ${\mathcal T}_V: \ensuremath{\mathbb{R}}^k \to V$ and ${\mathcal T}_U: \ensuremath{\mathbb{R}}^{n-k} \to U$ by \begin{align} \label{rotationUV} {\mathcal T}_V x' = {\mathcal T}(x', 0), \qquad {\mathcal T}_U x'' = {\mathcal T}(0, x''). \end{align} In the above we also allow for $k = 0$ and $k = n$, in which case terms containing $x'$ respectively $x''$ simply do not appear in the formulas, and ${\mathcal T}_V$ respectively ${\mathcal T}_U$ are trivial maps. Note that \begin{align*} \left( {\mathcal T}_V z \right)' = z, \quad \left( {\mathcal T}_U w \right)'' = w, \qquad z \in \ensuremath{\mathbb{R}}^k, w \in \ensuremath{\mathbb{R}}^{n-k}, \end{align*} and \begin{align*} {\mathcal T}_V x' = P_V x, \quad {\mathcal T}_U x'' = P_U x, \qquad x \in \ensuremath{\mathbb{R}}^n, \end{align*} where $P_V$ and $P_U$ are respectively the orthogonal projections on $V$ and $U$. Since ${\mathcal T}$ is a linear isometry, it preserves the inner product \begin{align*} z_1 \cdot z_2 = {\mathcal T}_V z_1 \cdot {\mathcal T}_V z_2, \qquad z_1, z_2 \in \ensuremath{\mathbb{R}}^k, \end{align*} and similarly for ${\mathcal T}_U$.
\begin{remark} We are free to choose any such ${\mathcal T}$, as long as we keep this choice consistent throughout the paper for given $W$ and $p$. We can in fact choose the same ${\mathcal T}$ for all $p \in \Xi_i$ from Proposition~\ref{pr:feature-decomposition}. \end{remark}
We will introduce the sliced energy density $W^{\rm sl}$ that locally captures the behavior of $W$ in the directions of $V$.
\begin{definition} \label{def:sliced-W} We define the \emph{sliced density} $W^{\rm sl}_p: \ensuremath{\mathbb{R}}^k \to \ensuremath{\mathbb{R}}$ as \begin{align} \label{W-red} \begin{aligned} W^{\rm sl}_p &:= W_p' \circ {\mathcal T}_V. \end{aligned} \end{align} \end{definition}
\begin{lemma}[Decomposition] \label{le:decomposition-subdiff-W} For any fixed $p_0 \in \Rd$ we have \begin{align*} \partial W_{p_0}'(p) = \set{{\mathcal T} (\zeta', \xi''): \zeta' \in \partial W^{\rm sl}_{p_0}(p')} \qquad \text{for all $p \in \Rd$, $\xi \in \partial W_{p_0}'(0)$}. \end{align*} \end{lemma}
The following lemma states that the behavior of $W$ in the neighborhood of some $p$ is completely captured by the sliced density $W^{\rm sl}_p$.
\begin{lemma} \label{le:W-decomposition} For every $p_0 \in \ensuremath{\mathbb{R}}^n$ there exists $\ensuremath{\varepsilon} > 0$ such that \begin{align} \label{W-decomposition} W(p) = W^{\rm sl}_{p_0}(p'-p_0') + P_U \xi \cdot (p - p_0) + W(p_0) \end{align} for any $p \in \Rd$, $\abs{p - p_0} <\ensuremath{\varepsilon}$, $\xi \in \partial W(p)$. Since ${\mathcal T}$ is an isometry, we have $P_U \xi \cdot (p - p_0) = \xi'' \cdot (p'' - p_0'')$. \end{lemma}
\begin{proof} The claim follows from Definition~\ref{def:sliced-W} and Proposition~\ref{pr:direction-decomposition}. \end{proof}
\begin{lemma} \label{le:linear growth SW} Suppose that $p_0 \in \Rd$ and $\xi_0 \in \operatorname{ri} \partial W(p_0)$. Then there exists $\delta > 0$ such that \begin{align*}
W^{\rm sl}_{p_0}(z) - \xi_0' \cdot z \geq \delta |z|, \qquad z \in \ensuremath{\mathbb{R}}^k, \end{align*} where $k = \dim \operatorname{aff} \partial W(p_0)$. \end{lemma}
\begin{proof} Let again $V$ be the subspace parallel to $\operatorname{aff} \partial W(p_0)$. Then $\operatorname{aff} \partial W(p_0) = \xi_0 + V$. Since $\xi_0 \in \operatorname{ri} \partial W(p_0)$, there exists $\delta > 0$ with $\xi \in
\partial W(p_0)$ for all $|\xi - \xi_0| \leq \delta$, $\xi \in \xi_0 + V$. Take $z \in \ensuremath{\mathbb{R}}^k$ and set $\zeta = \xi_0 + \delta \frac{{\mathcal T}_V z}{|z|} \in \partial W(p_0)$. From the definition of $W^{\rm sl}_{p_0}$ we have from \eqref{def:sliced-W} and \eqref{W'-convex-conjugate} \begin{align*}
W^{\rm sl}_{p_0}(z) = W_{p_0}'({\mathcal T}_V z) =\sup \set{{\mathcal T}_V z \cdot \xi : \xi \in \partial W(p_0)} \geq {\mathcal T}_V z \cdot \zeta = \xi_0'\cdot z + \delta |z|. \end{align*} This yields the lower bound. \end{proof}
\subsection{Sliced energy}
Suppose now that $p \in \Rd$ such that $k = \dim \partial W(p) > 0$ and recall the definition of ${\mathcal T}$ in \eqref{rotation}. We shall consider the rotated flat torus $\Gamma = \ensuremath{\mathbb{R}}^n / L {\mathcal T} \ensuremath{\mathbb{Z}}^n$ for some $L > 0$. We can write $\Gamma = {\mathcal T}(\Gamma' \times \Gamma'')$, where $\Gamma' = \ensuremath{\mathbb{R}}^k / L \ensuremath{\mathbb{Z}}^k$ and $\Gamma'' = \ensuremath{\mathbb{R}}^{n - k} / L\ensuremath{\mathbb{Z}}^{n-k}$, and $x \in \Gamma$ is given as $x = {\mathcal T}(x', x'')$ for $x' \in \Gamma'$, $x'' \in \Gamma''$.
We define the functionals \begin{align*} E_p(\psi) &:= E_{W(\cdot + p) - W(p)}(\psi; \Gamma), && \psi \in L^2(\Gamma),\\ E'_p(\psi) &:= E_{W_p'}(\psi; \Gamma), && \psi \in L^2(\Gamma),\\ E^{\rm sl}_p(\psi) &:= E_{W^{\rm sl}_p}(\psi; \Gamma'), && \psi \in L^2(\Gamma'). \end{align*} All three functionals are proper closed convex functions on $L^2(\Gamma)$ resp. $L^2(\Gamma')$.
Since $W_p'$ and $W^{\rm sl}_p$ are positively one-homogeneous, the characterization of the subdifferential in Proposition~\ref{pr:subdiff-char-periodic} applies.
The function $q \mapsto W(q + p) - W(p)$ is not one-homogeneous in general, however, and therefore the same characterization does not apply for the subdifferential of $E_p$. Nevertheless, it coincides with the subdifferential of $E_p'$ at $\psi \in {\rm Lip}(\Gamma)$ when $\norm{\nabla \psi}_\infty$ is small by Lemma~\ref{le:subdiff-homog-relation}. This observation allows us to use the simpler, positively one-homogeneous energy $E_p'$ when defining the crystalline curvature of a facet.
What follows is the main justification of the energy stratification. We show that since $W_p'$ is linear on the subspace $U$, we need to only consider the directions in $V = U^\perp$ when computing the crystalline curvature of a \textbf{stratified} function.
\begin{lemma} \label{le:subdiff-slicing} Let $p$ be as above. Suppose that $\bar\psi \in {\rm Lip}(\Gamma')$ and $f \in C^1(\Gamma'')$ are given functions and let $\psi(x) = \bar\psi(x') + f(x'')$. Let $\psi_a$ and $\bar \psi_a$ be the unique solutions of the resolvent problems \begin{align*} \psi_a + a \partial E_p'(\psi_a) &\ni \psi,\\ \bar\psi_a + a \partial E^{\rm sl}_p(\bar\psi_a) &\ni \bar\psi, \end{align*} for given $a > 0$. Then \begin{align*} \psi_a(x) = \bar \psi_a(x') + f(x''), \qquad \text{$x = {\mathcal T}(x', x'') \in \Gamma$}. \end{align*} or, equivalently, \begin{align*} (I +a\partial E_p')^{-1}(\psi)(x) = (I +a \partial E^{\rm sl}_p)^{-1}(\bar\psi)(x') + f(x''). \end{align*} If moreover $\bar\psi \in \operatorname{\mathcal{D}}(\partial E^{\rm sl}_p)$, then $\psi \in \operatorname{\mathcal{D}}(\partial E_p')$, $\partial^0 E_p'(\psi)$ is independent of $x''$ and \begin{align} \label{minsectionslicing} \partial^0 E_p'(\psi)(x) = \partial^0 E^{\rm sl}_p(\bar \psi)(x') \qquad \text{a.e. $x = {\mathcal T}(x', x'') \in \Gamma$}. \end{align} \end{lemma}
\begin{proof} Suppose that $\psi(x) = \bar \psi(x') + f(x'')$ for some $\bar \psi \in {\rm Lip}(\Gamma')$ and $f \in C^1(\Gamma'')$. By the characterization of the subdifferentials in Proposition~\ref{pr:subdiff-char-periodic}, we have \begin{align*} \partial E'_p(\psi) = - \operatorname{div} {CH}_{W_p'}(\psi; \Gamma), \qquad \partial E^{\rm sl}_p(\bar \psi) = - \operatorname{div} {CH}_{W^{\rm sl}_p}(\bar\psi; \Gamma'). \end{align*} The decomposition lemma~\ref{le:decomposition-subdiff-W} implies \begin{align} \label{subdiffslicing} \partial W'_p(\nabla \psi(x)) = \set{{\mathcal T}(\xi', \xi''): \xi' \in \partial W^{\rm sl}_p(\nabla \bar\psi(x'))} \end{align} for some fixed $\xi'' \in \ensuremath{\mathbb{R}}^{n-k}$ since \begin{align*} \nabla \psi(x) = {\mathcal T} (\nabla \bar \psi(x'), \nabla f(x'')). \end{align*}
By Proposition~\ref{pr:resolvent-problems}, both $\psi_a$ and $\bar\psi_a$ are Lipschitz. As $\bar\psi_a$ is the unique solution of the resolvent problem, the characterization of the subdifferential of $E^{\rm sl}_p$ above yields that there exists $\bar z_a \in {CH}_{W^{\rm sl}_p}(\bar\psi_a; \Gamma')$ such that $\bar\psi_a - \bar\psi = a \operatorname{div} \bar z_a$. Set $z_a(x) = {\mathcal T}(\bar z_a(x'), \xi'')$ for some fixed $\xi''$ as above and $\zeta_a(x) = \bar\psi_a(x') + f(x'')$. Note that $z_a \in {CH}_{W_p'}(\zeta_a; \Gamma)$ by \eqref{subdiffslicing}. Moreover $\operatorname{div}_x z_a(x) = \operatorname{div}_{x'} \bar z_a(x')$. Therefore \begin{align} \label{hgyfhbs} \begin{aligned} \zeta_a(x) - \psi(x) &= \bar\psi_a(x') +f(x'') - \bar\psi(x') + f(x'') = \bar \psi_a(x') - \bar\psi(x')\\ &= a \operatorname{div}_{x'} \bar z_a(x') = a \operatorname{div}_x z_a(x). \end{aligned} \end{align} The characterization of the subdifferential of $E_p'$ above implies that $\zeta_a - \psi \in - \partial E_p'(\zeta_a; \Gamma)$ and therefore $\zeta_a$ is a solution of the resolvent problem. However, the solution is unique and therefore $\psi_a = \zeta_a$ almost everywhere.
Now we suppose that $\bar \psi \in \operatorname{\mathcal{D}}(\partial E^{\rm sl}_p)$, that is, that $\partial E^{\rm sl}_p(\bar\psi)$ is nonempty. And so there exists $\bar z\in {CH}_{W^{\rm sl}_p}(\bar\psi; \Gamma')$. But then $z(x) = {\mathcal T}(\bar z(\bar x), \xi'') \in {CH}_{W_p'}(\psi; \Gamma)$ as we just observed. In particular, $\psi \in \operatorname{\mathcal{D}}(\partial E_p')$.
Let us set $h_a = (\psi_a - \psi)/a$ and $\bar h_a = (\bar \psi_a - \bar\psi)/a$ as in Proposition~\ref{pr:resolvent-problems}. Observe that due to \eqref{hgyfhbs} \begin{align*} h_a(x) = \bar h_a(x'). \end{align*} Since $-h_a \to \partial^0 E_p'(\psi; \Gamma)$ in $L^2(\Gamma)$ and $-\bar h_a \to \partial^0 E^{\rm sl}_p(\bar \psi; \Gamma')$ in $L^2(\Gamma')$ as $a \to 0$, we conclude \eqref{minsectionslicing}. \end{proof}
\section{Crystalline curvature} \label{sec:crystalline curvature}
We introduce an operator $\Lambda_p$ that assigns the crystalline curvature to a facet with slope $p$ given by a faceted function, as long as the faceted function is admissible in a certain sense.
\subsection{Facets} To describe facets, let us recall the notation for pairs that was introduced in \cite{GGP13AMSA}. Since we need to construct facets of various dimensions, depending on the dimension of $\partial W(p)$, $\mathcal P^k$ will denote the set of pairs on $\ensuremath{\mathbb{R}}^k$:
\begin{definition}[cf. \cite{GGP13AMSA}] For any $k \in \ensuremath{\mathbb{N}}$ we will denote by $\mathcal P^k$ the set of all ordered pairs $(A_-, A_+)$ of disjoint sets $A_\pm \subset \ensuremath{\mathbb{R}}^k$, $A_- \cap A_+ = \emptyset$.
We will introduce a partial ordering $(\mathcal P^k, \preceq)$ by \begin{align*} (A_-, A_+) \preceq (B_-, B_+) \qquad \Leftrightarrow \qquad A_+ \subset B_+ \text{ and } B_- \subset A_- \end{align*} for $(A_-, A_+), (B_-, B_+) \in \mathcal P^k$, as well as the \emph{reversal} \begin{align*} -(A_-, A_+) := (A_+, A_-). \end{align*} Clearly, if $(A_-, A_+) \preceq (B_-, B_+)$ then $-(B_-, B_+) \preceq -(A_-, A_+)$.
A pair $(A_-, A_+) \in \mathcal P^k$ is said to be \emph{open} if both $A_-$ and $A_+$ are open.
A \emph{smooth} pair is then an open pair $(A_-, A_+) \in \mathcal P^k$ for which we also have \begin{enumerate}[(i)] \item $\operatorname{dist}(A_-, A_+) > 0$, where we use the convention $\operatorname{dist}(\emptyset, E) = + \infty$ for any $E$, and \item $\partial A_- \in C^\infty$ and $\partial A_+ \in C^\infty$. \end{enumerate}
We will refer to the set \begin{align*} \ensuremath{\mathbb{R}}^k \setminus \pth{A_- \cup A_+} = A_-^c \cap A_+^c \end{align*} as the \emph{facet} of the pair $(A_-, A_+) \in \mathcal P^k$. \end{definition}
\begin{remark} We will drop $k$ if the dimension is understood from the context or is irrelevant. \end{remark}
We will add the notion of a bounded pair. \begin{definition} We say that a pair $(A_-, A_+) \in \mathcal P^k$ is \emph{bounded} if either $A_-^c$ or $A_+^c$ is bounded. \end{definition}
\begin{remark} Note that if $(A_-, A_+)$ is a bounded pair, then the facet $A_-^c \cap A_+^c$ is bounded. If $(A_-, A_+)$ is an open pair, the reverse implication also applies. \end{remark}
Let us also recall the useful notion of a support function.
\begin{definition}[cf. \cite{GGP13AMSA}] A Lipschitz function $\psi \in {\rm Lip}(\ensuremath{\mathbb{R}}^k)$ is called a \emph{support function} of an open pair $(A_-, A_+) \in \mathcal P^k$ if \begin{align*} \psi(x) \begin{cases} > 0 & x \in A_+,\\ = 0 & x \in A_-^c \cap A_+^c,\\ < 0 & x \in A_-. \end{cases} \end{align*}
On the other hand, for any function $\psi$ on $\ensuremath{\mathbb{R}}^k$ we define the pair \begin{align*} \operatorname{Pair}(\psi) := \pth{\set{x \in \ensuremath{\mathbb{R}}^k : \psi(x) <0}, \set{x \in \ensuremath{\mathbb{R}}^k: \psi(x) >0}}. \end{align*} \end{definition}
\begin{example} \label{ex:trivial-support-function} For any open pair $(A_-, A_+) \in \mathcal P^k$ the function \begin{align*} \psi(x) := \operatorname{dist}(x, A_+^c) - \operatorname{dist}(x, A_-^c) \end{align*} is a support function of the pair $(A_-, A_+)$. \end{example}
Finally, let us recall the notion of a generalized neighborhood of a subset of $\ensuremath{\mathbb{R}}^k$.
\begin{definition}[cf. \cite{GGP13AMSA}] For any set $E \subset \ensuremath{\mathbb{R}}^k$ and $\rho \in \ensuremath{\mathbb{R}}$ the \emph{generalized neighborhood} is defined as \begin{align*} \nbd\rho(E) := \begin{cases} E + \cl B_\rho(0) & \rho > 0,\\ E & \rho = 0,\\ \set{x \in E : \cl B_{\abs\rho}(x) \subset E} & \rho < 0. \end{cases} \end{align*}
For a pair $(A_-, A_+) \in \mathcal P^k$ we introduce the generalized neighborhood \begin{align*} \nbd\rho(A_-,A_+) := \pth{\nbd{-\rho}(A_-),\nbd{\rho}(A_+)}. \end{align*} \end{definition}
A part of the following proposition was stated in \cite{GGP13AMSA} for the $n$-dimensional torus, but it can be easily restated for $\Rd$. The proof is straightforward.
\begin{proposition} \label{pr:nbd-properties} \begin{enumerate} \item $\mathcal U^{-\rho}(A) \subset A \subset \mathcal U^\rho(A)$ for $\rho > 0$. \item (complement) \begin{align} \label{compl-nbd} \pth{\nbd\rho(A)}^c = \nbd{-\rho}(A^c) \qquad \text{for any set $A \subset \Rd$ and $\rho \in \ensuremath{\mathbb{R}}$} \end{align} \item (monotonicity) \begin{align*} \nbd\rho(A_1) \subset \nbd\rho(A_2)\qquad \text{for $A_1 \subset A_2 \subset \Rd$ and $\rho \in \ensuremath{\mathbb{R}}$.} \end{align*} \item $\nbd\rho(A_1 \cap A_2) \subset \nbd\rho(A_1) \cap \nbd\rho(A_2)$
for all $\rho \in \ensuremath{\mathbb{R}}$, with equality for $\rho \leq 0$. \item $\nbd{r}(\nbd\rho(A)) \subset \nbd{r+\rho}(A)$ for $r \geq 0$ and $\rho \in \ensuremath{\mathbb{R}}$; equality holds if $\rho \geq 0$. \item For any $\rho \in \ensuremath{\mathbb{R}}$, we have $\nbd\rho(A_1) \subset A_2$ if and only if $A_1 \subset \nbd{-\rho} (A_2)$. \item (interior and closure) \begin{align*} \bigcup_{\rho > 0} \nbd{-\rho}(A) = \operatorname{int} A \subset A \subset \cl A = \bigcap_{\rho>0} \nbd\rho(A) \qquad \text{for any set $A \subset \Rd$.} \end{align*} \item (distance) \begin{align*} \operatorname{dist}(A_1, A_2) = \sup \set{\rho \geq 0: \nbd\rho(A_1) \subset A_2^c} \qquad \text{for all $A_1, A_2 \subset \Rd$.} \end{align*} \end{enumerate} \end{proposition}
\subsection{Definition of crystalline curvature} We assume for the rest of the paper that $W$ is a convex polyhedral function on $\Rd$. Let $p \in \Rd$ be such that $k = \dim \partial W(p) > 0$. Let $\psi$ be a support function of a bounded open pair $(A_-, A_+) \in \mathcal P^k$. We say that $\psi$ is a \emph{$p$-admissible support function} if there exists an open set $G \supset A_-^c \cap A_+^c$ such that the set of Cahn-Hoffman vector fields \begin{align*} CH^{\rm sl}_p(\psi; G) := {CH}_{W^{\rm sl}_p}(\psi; G) \end{align*} is nonempty. We denote this for short as $\psi \in \operatorname{\mathcal{D}}(\Lambda_p)$. If for a given bounded open pair $(A_-, A_+)$ there exists at least one $p$-admissible support function, we say that $(A_-, A_+)$ is a \emph{$p$-admissible pair}. If $p$ is understood from the context, we refer to them as an admissible support function and an admissible pair.
Let $\psi \in \operatorname{\mathcal{D}}(\Lambda_p)$ be an admissible support function of an admissible pair $(A_-, A_+)$. We define the function $\Lambda_p[\psi] \in L^2(A_-^c \cap A_+^c)$ on the facet as \begin{align} \label{crystalline-curvature} \Lambda_p[\psi](x) = \operatorname{div} z_{\rm min}(x), \qquad x \in A_-^c \cap A_+^c, \end{align} where $z_{\rm min}$ is an element of $CH^{\rm sl}_p(\psi; G)$ that minimizes $\norm{\operatorname{div} z}_{L^2(G)}$. We call $\Lambda_p$ the \emph{crystalline curvature}.
\begin{remark} As we shall see later in Corollary~\ref{co:lambda support func indep} at the end of this section, the crystalline curvature satisfies a comparison principle and therefore its value on a facet of a given admissible pair is independent of the choice of an admissible support function of this pair. \end{remark}
We first prove that the crystalline curvature $\Lambda_p$ is well-defined.
\begin{proposition} \label{pr:lambda-well-defined} The quantity $\Lambda_p[\psi]$ is well-defined in the sense that the value is unique a.e. and it does not depend on $G$ nor on the value of $\psi$ away from the facet. More precisely, if $\psi_1$ and $\psi_2$ are two support functions of a bounded open pair $(A_-, A_+) \in \mathcal P^k$ with $\psi_i \in \operatorname{\mathcal{D}}(\Lambda_p)$ for some $p \in \Rd$ with $k = \dim \partial W(p) > 0$ such that $\psi_1 = \psi_2$ on a neighborhood of the facet $A_-^c \cap A_+^c$, then $\Lambda_p[\psi_1] = \Lambda_p[\psi_2]$ a.e. on $A_-^c \cap A_+^c$. \end{proposition}
\begin{proof} Let $\psi_i \in \operatorname{\mathcal{D}}(\Lambda_p)$, $i = 1,2$, be two support functions that satisfy the hypothesis. Then there are open sets $G_i \supset A_-^c \cap A_+^c$ and associated Cahn-Hoffman vector fields $z_i \in CH^{\rm sl}_p(\psi_i; G_i)$ that minimize $\norm{\operatorname{div} z_i}_{L^2(G_i)}$ over $CH^{\rm sl}_p(\psi_i; G_i)$. Since the facet $A_-^c \cap A_+^c$ is assumed to be bounded, we can find a bounded open set $H \supset A_-^c \cap A_+^c$ with $\psi_1 = \psi_2$ on $H$ and $H \subset G_1 \cap G_2$. Let us take $0 < \delta < \min_{\partial H} \abs{\psi_1}$ and set $G = \set{x \in H: \abs{\psi_1} < \delta} \subset\subset H$.
Set $z = z_1 \chi_{G_1 \setminus G} + z_2 \chi_G$. By Lemma~\ref{le:cahn-hoffman-patch} we have that $z \in CH^{\rm sl}_p(\psi_1; G_1)$ and therefore $\norm{\operatorname{div} z}_{L^2(G_1)} \geq \norm{\operatorname{div} z_1}_{L^2(G_1)}$, which with \eqref{divz-patch} implies \begin{align*} \norm{\operatorname{div} z_2}_{L^2(G)} \geq \norm{\operatorname{div} z_1}_{L^2(G)}. \end{align*} Reversing the roles of $\psi_1$ and $\psi_2$, and $G_1$ and $G_2$, we get the opposite inequality.
Therefore the strict convexity of the $L^2$-norm implies that $\operatorname{div} z_1 = \operatorname{div} z_2$ a.e. on $G$. Indeed, if they are not equal, we can decrease the norm by taking the vector field $z = \frac 12 \pth{z_1 + z_2}$ on $G$ which is still admissible due to Remark~\ref{arb-convex-patch}. \end{proof}
The following crucial result will allow us to express the crystalline curvature as the minimal section of the subdifferential of the sliced energy on a periodic domain.
\begin{proposition} \label{pr:curvature-as-min-section} Let $p \in \Rd$ be such that $k = \dim \partial W(p) > 0$. Suppose that $\psi_1 \in \operatorname{\mathcal{D}}(\Lambda_p)$, that is, $\psi_1$ is an admissible support function of a bounded open pair $(A_-, A_+)$. Let $L > 0$ be such that $A_-^c \cap A_+^c \subset B_{L/4}(0)$. Denote $\Gamma = \ensuremath{\mathbb{R}}^k / L \ensuremath{\mathbb{Z}}^k$.
There exists an $L$-periodic Lipschitz function $\psi_2 \in {\rm Lip}(\Gamma)$ such that $\psi_2$ is a support function of the open pair $(A_- + L \ensuremath{\mathbb{Z}}^k, A_+ + L \ensuremath{\mathbb{Z}}^k)$ and $CH^{\rm sl}_p(\psi_2; \Gamma)$ is nonempty, and for some open set $H$, $A_-^c \cap A_+^c \subset H \subset B_{L/4}(0)$ we have $\psi_1 = \psi_2$ on $H$. Moreover, \begin{align} \label{lambda-per-expr} \Lambda_p[\psi_1](x) = -\partial^0 E^{\rm sl}_p(\psi_2; \Gamma)(x) \qquad \text{a.e. $x \in A_-^c \cap A_+^c$.} \end{align} \end{proposition}
\begin{proof} Let us first show \eqref{lambda-per-expr} if we have function $\psi_2$ with the properties stated in the proposition. We use the characterization of the differential in Proposition~\ref{pr:subdiff-char-periodic}. Let therefore $z_2 \in CH^{\rm sl}_p(\psi_2; \Gamma)$ be a Cahn-Hoffman vector field that minimizes $\norm{\operatorname{div} z_2}_2$ in this set. Note that we have $\partial^0 E^{\rm sl}_p(\psi_2; \Gamma)(x) = - \operatorname{div} z_2$ by the characterization of the subdifferential in Proposition~\ref{pr:subdiff-char-periodic}.
Now we can proceed as in the proof of Proposition~\ref{pr:lambda-well-defined}. Let $z_1$ minimize $\norm{\operatorname{div} z_1}_2$ in $CH^{\rm sl}_p(\psi_1; G_1)$ for some open set $G_1 \supset A_-^c \cap A_+^c$. We can assume that $G_1 \subset H$. We proceed as follows: given that $G \subset\subset H$ with $G$ as defined in that proof, Lemma~\ref{le:cahn-hoffman-patch} can be applied to $\psi_1$ on $G_1$ and $\psi_2$ on $G_2 = H$, and since we are only modifying the vector fields away from the boundary of $H$, replacing $z_2$ by $z_1$ on the set $G$ yields again a vector field in $CH^{\rm sl}_p(\psi_2; \Gamma)$. We again deduce that $\operatorname{div} z_1 = \operatorname{div} z_2$ on $A_-^c \cap A_+^c$, which proves \eqref{lambda-per-expr}.
We shall now construct $\psi_2$. Since $\psi_1$ is admissible there exist an open set $G \supset A_-^c \cap A_+^c$, $G \subset B_{L/4}(0)$, and a vector field $z \in CH^{\rm sl}_p(\psi_1; G)$. Let us choose a positive $\delta$ such that $\delta < \min_{\partial G} \abs{\psi_1}$. This is possible since $\psi_1$ is continuous and $\partial G \subset A_- \cup A_+ = \set{\psi_1 \neq 0}$. We set \begin{align*} \xi(x) = \begin{cases} -\delta & x \in A_- \setminus G,\\ \max(-\delta, \min(\delta, \psi_1(x))) & x \in G,\\ \delta & x \in A_+ \setminus G. \end{cases} \end{align*} Note that $\xi$ is Lipschitz on $\ensuremath{\mathbb{R}}^k$, $\nabla \xi(x) = \nabla \psi_1$ whenever $\abs{\xi(x)} < \delta$ and $\nabla \xi(x) = 0$ if $\abs{\xi(x)} = \delta$, almost everywhere. Moreover, we see that \begin{align} \label{xi-H-eq} \xi = \psi_1 \qquad \text{on } H := \set{x \in G: \abs{\psi_1} < \delta}. \end{align} Since the complement of $B_{L/4}(0)$ is connected and $A_-$, $A_+$ are open disjoint sets, we must have either $A_- \subset B_{L/4}(0)$ or $A_+ \subset B_{L/4}(0)$. In any case, $\xi$ is constant outside of $B_{L/4}(0)$.
Let $\phi \in C^\infty(\ensuremath{\mathbb{R}}^k)$ be such that $0 \leq \phi \leq 1$, $\operatorname{supp} \phi \subset G$ and $\phi = 1$ on $\set{x \in G: \abs{\psi_1} \leq \delta}$. Define \begin{align*} w(x) = \begin{cases} z(x) \phi(x) & x \in G,\\ 0 & \text{otherwise}. \end{cases} \end{align*} Clearly $w \in L^\infty(\ensuremath{\mathbb{R}}^k; \ensuremath{\mathbb{R}}^k)$, $\operatorname{div} w \in L^2(\ensuremath{\mathbb{R}}^k)$, $\operatorname{supp} w \subset G$. Moreover, since $\partial W^{\rm sl}_p(q) \subset \partial W^{\rm sl}_p(0) \ni 0$ for any $q \in \ensuremath{\mathbb{R}}^k$ by Lemma~\ref{le:one-homogeneous-subdiff} and $\partial W^{\rm sl}_p(0)$ is convex, we have $w(x) \in \partial W^{\rm sl}_p(\nabla \xi(x))$ for a.e. $x \in \ensuremath{\mathbb{R}}^k$. Therefore $\xi$ is an admissible support function of the pair $(A_-, A_+)$.
Since $\xi = \psi_1$ in a neighborhood of $A_-^c \cap A_+^c$, we conclude that $\Lambda_p[\xi] = \Lambda_p[\psi_1]$ a.e. on $A_-^c \cap A_+^c$ due to Proposition~\ref{pr:lambda-well-defined}.
Now we $L$-periodically extend $\xi$ and $w$ from $[-L/2, L/2)^k$ to $\ensuremath{\mathbb{R}}^k$ and call them $\psi_2$ and $z_2$, respectively. This gives a support function of an open pair $(A_- + L \ensuremath{\mathbb{Z}}^k, A_+ + L \ensuremath{\mathbb{Z}}^k)$ and clearly $CH^{\rm sl}_p(\psi_2; \Gamma) \ni z_2$.
By construction, $\psi_2 = \psi_1$ on $H$ due to \eqref{xi-H-eq}. \end{proof}
\subsection{Comparison principle for the crystalline curvature}
We can prove the following comparison theorem for the crystalline curvature of ordered facets, as in \cite{GGP13JMPA}. This will imply that $\Lambda_p[\psi]$ on a given admissible pair is in fact independent of the choice of an admissible support function $\psi$, Corollary~\ref{co:lambda support func indep} below.
\begin{proposition}[Comparison principle for $\Lambda_p$] \label{pr:comparison Lambda} Let $p \in \Rd$ such that $k = \dim \partial W(p) > 0$. Suppose that $(A_{1,-}, A_{1,+})$ and $(A_{2,-}, A_{2,+})$ are two $p$-admissible pairs in $\mathcal P^k$. If the pairs are ordered in the sense of \begin{align*} (A_{1,-}, A_{1,+}) \prec (A_{2,-}, A_{2,+}), \end{align*} then for any two $p$-admissible support functions $\psi_1$ and $\psi_2$ of the respective pairs we have \begin{align} \label{curv-ordered-facet} \Lambda_p[\psi_1](x) \leq \Lambda_p[\psi_2](x) \quad \text{a.e. $x \in A_{1,-}^c \cap A_{1,+}^c \cap A_{2,-}^c \cap A_{2,+}^c$.} \end{align} \end{proposition}
Before proceeding with the proof, we first give a technical lemma, which is a variant of \cite[Lemma 4.2.9]{G06}; such a result goes back to \cite{CGG,ES} to establish the uniqueness of a level set flow.
\begin{lemma} \label{le:Lipschitz ordering} Suppose that $\psi$ and $\varphi$ are two nonnegative periodic Lipschitz functions on $\ensuremath{\mathbb{R}}^d$, $d \geq 1$, such that $\set{\psi = 0} \subset \set{\varphi = 0}$. Then there exists a Lipschitz continuous function $\theta: [0, \infty) \to [0, \infty)$ such that $\theta(0) = 0$, $\theta(s) > 0$ for $s > 0$ and $\theta'(s) > 0$ for almost every $s > 0$ and we have \begin{align*} \theta \circ \varphi \leq \psi \qquad \text{on $\ensuremath{\mathbb{R}}^d$.} \end{align*} \end{lemma}
\begin{proof} We may assume that $\set{\psi = 0} \neq \emptyset$, otherwise the statement is trivial. We define \begin{align*} \eta(s) := \inf \set{\psi(x): \varphi(x) \geq s}. \end{align*} Clearly by compactness $\eta(0) = 0$, $\eta(s) > 0$ for $s > 0$. Furthermore, $\eta$ is nondecreasing since $s \mapsto \set{\varphi \geq s}$ is nonincreasing. Finally, $\eta \circ \varphi \leq \psi$ as \begin{align*} \eta(\varphi(x)) = \inf \set{\psi(y): \varphi(y) \geq \varphi(x)} \leq \psi(x). \end{align*}
As $\eta$ can have jumps or be infinite, we now consider \begin{align*}
\sigma(s) := \inf \set{\eta(t) + |s - t|: 0 \leq t \leq s}. \end{align*} We immediately obtain $0\leq \sigma(s) \leq \eta(s)$ and $\sigma(0) = 0$. On the other hand,
$\eta(t) + |s - t| \geq \min \set{\frac s2, \eta(\frac s2)} > 0$ for $s > 0$, $t \in [0, s]$, and so $\sigma(s) > 0$ for $s > 0$. As for monotonicity, a simple estimate for $s \geq u \geq 0$ yields \begin{align*}
\sigma(s) &= \min \set{\inf \set{\eta(t) + |s - t|: 0 \leq t \leq u}, \inf \set{\eta(t) + |s - t|: u \leq t \leq s}}\\
&\geq \min \set{\sigma(u) + |s - u|, \eta(u)}\\ &\geq \sigma(u). \end{align*} We also show that $\sigma$ is Lipschitz. Take $0 \leq u \leq s$ and $\delta > 0$ and find $t \in
[0,u]$ such that $\sigma(u) > \eta(t) + |u - t| - \delta$. Then we have \begin{align*}
\sigma(s) \leq \eta(t) + |s - t| = \eta(t) + |u - t| + |s - u| < \sigma(u) + |s - u| + \delta. \end{align*} Since $\delta$ was arbitrary, $\sigma$ is Lipschitz.
Finally, set \begin{align*} \theta(s) := (1 - e^{-s}) \sigma(s). \end{align*} Clearly $\theta(0) = 0$, $\theta(s) > 0$ for $s > 0$. The product rule yields $\theta'(s) > 0$ for almost every $s > 0$. By construction, \begin{align*} \theta \circ \varphi \leq \sigma \circ \varphi \leq \eta \circ \varphi \leq \psi. \end{align*} \end{proof}
Now we complete the proof of the comparison principle for the crystalline curvature $\Lambda_p$.
\begin{proof}[Proof of Proposition~\ref{pr:comparison Lambda}] By Proposition~\ref{pr:curvature-as-min-section}, we can for a sufficiently large $L > 0$ find $L$-periodic functions, called $\tilde\psi_1$ and $\tilde\psi_2$, such that $CH^{\rm sl}_p(\tilde\psi_i; \Gamma)$ is nonempty, $\Gamma = \ensuremath{\mathbb{R}}^k / L \ensuremath{\mathbb{Z}}^k$, and $\tilde\psi_i$ coincides with the original $\psi_i$ on a neighborhood of the facet $A_{i,-}^c \cap A_{i,+}^c$, $i = 1, 2$, and that \begin{align} \label{l-is-cononrest} \Lambda_p[\psi_i] = -\partial^0 E^{\rm sl}_p(\tilde\psi_i; \Gamma) \qquad \text{a.e. on $A_{i,-}^c \cap A_{i,+}^c$, $i =1,2$.} \end{align}
Since the pairs are ordered, if we consider the sets $A_{i, \pm}$ as subsets of $\Gamma$ we have \begin{align*} \{\tilde\psi_{2,+} = 0\} = A_{2,+}^c \subset A_{1,+}^c = \{\tilde\psi_{1,+} = 0\},\\ \{\tilde\psi_{1,-} = 0\} = A_{1,-}^c \subset A_{2,-}^c = \{\tilde\psi_{2,-} = 0\}, \end{align*} where $\tilde\psi_{i,\pm} := \max (\pm\tilde\psi_i, 0)$ denote the positive and negative parts. By Lemma~\ref{le:Lipschitz ordering}, there exist Lipschitz functions $\theta^-$ and $\theta^+$ on $[0, \infty)$ such that $\theta^\pm(0) = 0$, $\theta^\pm(s) > 0$ for $s > 0$, and $(\theta^\pm)'(s) > 0$ for almost all $s > 0$, such that $\theta^+ \circ \tilde\psi_{1,+} \leq \tilde\psi_{2,+}$ and $\theta^- \circ \tilde\psi_{2,-} \leq \tilde\psi_{1,-}$. We introduce \begin{align*} \theta_1(s) := \begin{cases} s, & s <0,\\ \theta^+(s), & s \geq 0, \end{cases} \qquad \theta_2(s) := \begin{cases} -\theta^-(-s), & s <0,\\ s, & s \geq 0. \end{cases} \end{align*} and \begin{align*} \xi_1 := \theta_1 \circ \tilde\psi_1, \qquad \xi_2 := \theta_2 \circ \tilde\psi_2. \end{align*} By construction we have that $\xi_i$ are Lipschitz on $\Gamma$, \begin{align*} \xi_1 \leq \xi_2, \end{align*} and the chain rule for Lipschitz functions yields \begin{align*} \nabla \xi_i(x) = \theta_i'(\tilde\psi_i(x)) \nabla \tilde\psi_i(x), \qquad \text{for almost every $x$}, \end{align*} if we interpret the right-hand side to be equal to zero if $\nabla \tilde\psi_i(x)$ is zero, no matter if $\theta_i$ is differentiable at $\tilde\psi_i(x)$ or not. Since $\theta_i'(s) > 0$ for almost every $s \in \ensuremath{\mathbb{R}}$, we have by the positive one-homogeneity of $W^{\rm sl}_p$ \begin{align*} \partial W^{\rm sl}_p(\nabla \xi_i(x)) = \partial W^{\rm sl}_p(\nabla \tilde\psi_i(x)) \qquad \text{for almost every $x$}, \end{align*} and therefore \begin{align} \label{subd-bent-same} CH^{\rm sl}_p(\xi_i; \Gamma) = CH^{\rm sl}_p(\tilde\psi_i; \Gamma) \neq \emptyset. \end{align}
The functional $E^{\rm sl}_p(\cdot; \Gamma)$ is proper closed convex and therefore the resolvent problems \begin{align*} \zeta_i + \lambda \partial E^{\rm sl}_p(\zeta_i; \Gamma) \ni \xi_i \end{align*} have unique solutions $\zeta_i \in L^2(\Gamma)$.
By approximation by smooth problems that have a comparison principle, as in Proposition~\ref{pr:resolvent-problems} and its proof, we can deduce that $\zeta_i$ are Lipschitz since $\xi_i$ are Lipschitz, and \begin{align*} \zeta_1 \leq \zeta_2. \end{align*} On the intersection of the facets $K = A_{1,-}^c \cap A_{1,+}^c \cap A_{2,-}^c \cap A_{2,+}^c$ we have $\xi_1 = \xi_2 = 0$ and therefore \begin{align} \label{resol-ordering} \frac{\zeta_1 - \xi_1}\lambda \leq \frac{\zeta_2 - \xi_2}\lambda \qquad \text{on $K$.} \end{align} By \eqref{subd-bent-same} and the characterization of the subdifferential in Proposition~\ref{pr:subdiff-char-periodic}, we know that $\xi_i \in \operatorname{\mathcal{D}}(\partial E^{\rm sl}_p(\cdot; \Gamma))$ and therefore the standard result \cite[Proposition~3.56]{Attouch} yields \begin{align*} \frac{\zeta_i - \xi_i}\lambda \to - \partial^0 E^{\rm sl}_p(\xi_i; \Gamma) \qquad \text{in $L^2(\Gamma)$ as $\lambda\to0$.} \end{align*} We can send $\lambda\to0$, and then use \eqref{l-is-cononrest}, \eqref{subd-bent-same} and the ordering \eqref{resol-ordering} to conclude that \begin{align*} \Lambda_p[\psi_1] = -\partial^0 E^{\rm sl}_p(\xi_1; \Gamma) \leq -\partial^0 E^{\rm sl}_p(\xi_2; \Gamma) = \Lambda_p[\psi_2] \qquad \text{a.e. on $K$.} \end{align*} This is the comparison principle for $\Lambda_p$. \end{proof}
The following result is an immediate consequence of Proposition~\ref{pr:comparison Lambda}.
\begin{corollary} \label{co:lambda support func indep} Suppose that $p \in \ensuremath{\mathbb{R}}^n$ with $k := \dim \partial W(p) > 0$ and let $(A_-, A_+) \in \mathcal P^k$ be a $p$-admissible pair. Then the value of $\Lambda_p$ on the facet $A_-^c \cap A_+^c$ is independent of the choice of a $p$-admissible support function, that is, for any two $p$-admissible support functions $\psi, \xi$ of pair $(A_-, A_+)$ we have \begin{align*} \Lambda_p[\psi] = \Lambda_p[\xi] \qquad \text{a.e. on $A_-^c \cap A_+^c$.} \end{align*} \end{corollary}
\section{Viscosity solutions} \label{sec:viscosity solutions}
In this section we introduce viscosity solutions of problem \eqref{P}. For the definition of viscosity solutions we shall use \emph{stratified faceted functions} that rely on the concept of energy stratification that we have developed in Section~\ref{sec:energy-stratification}. Recall that for every $\hat p \in \Rd$ we have introduced the coordinate system $x = {\mathcal T}(x', x'')$ using the rotation ${\mathcal T} = {\mathcal T}_{\hat p}$ from \eqref{rotation}.
\begin{definition} \label{def:strat-faceted-test-function} Let $(\hat x, \hat t) \in \Rd \times \ensuremath{\mathbb{R}}$ and $\hat p \in \Rd$, $V \subset \Rd$ be the subspace parallel to $\operatorname{aff} \partial W(\hat p)$, $U = V^\perp$, $k = \dim V$. We say that a function $\ensuremath{\varphi}(x,t)$ is a \emph{stratified faceted test function at $(\hat x, \hat t)$ with gradient $\hat p$} if \begin{align*} \ensuremath{\varphi}(x,t) = \bar \psi\pth{x' - \hat x'}
+ f\pth{x'' - \hat x''}
+ \hat p \cdot x + g(t), \end{align*} where \begin{itemize} \item $\bar \psi: \ensuremath{\mathbb{R}}^k \to \ensuremath{\mathbb{R}}$ is a support function of a bounded facet
$(A_-, A_+) \in \mathcal P^k$ with
$0 \in \operatorname{int} (A_-^c \cap A_+^c)$
and $\bar \psi \in \operatorname{\mathcal{D}}(\Lambda_{\hat p})$, \item $f \in C^2(\ensuremath{\mathbb{R}}^{n - k})$, $f(0) = 0$ and $\nabla f(0) = 0$, \item $g \in C^1(\ensuremath{\mathbb{R}})$. \end{itemize} \end{definition}
With this notion of test functions, we define viscosity solutions.
\begin{definition} \label{def:visc-solution} An upper semi-continuous function $u: \cl Q \to \ensuremath{\mathbb{R}}$ is a \emph{viscosity subsolution} of \eqref{P} if the following hold: \begin{enumerate}[(i)] \item \emph{(faceted test)} Let $\ensuremath{\varphi}$ be a stratified faceted test function at $(\hat x, \hat t) \in Q$ with gradient $\hat p \in \Rd$ and pair $(A_-, A_+)$. Then if there is $\rho > 0$ such that \begin{align} \label{general-position} u(x + w,t) - \ensuremath{\varphi}(x,t) \leq u(\hat x, \hat t) - \ensuremath{\varphi}(\hat x, \hat t) \end{align} for all \begin{align*} \abs{w'} \leq \rho,\ w'' = 0, \quad \text{and } x' - \hat x' \in \nbd\rho(\facet A),\ \abs{x'' - \hat x''} \leq \rho,\ \abs{t - \hat t} \leq \rho, \end{align*} then there exists $\ensuremath{\delta} > 0$ such that $B_\delta(\hat x') \subset \operatorname{int}(\facet A)$ and \begin{align*} \ensuremath{\varphi}_t(\hat x, \hat t) + F(\hat p, \operatorname*{ess\,inf}_{B_\ensuremath{\delta}(0)} \Lambda_{\hat p}[\bar \psi]) \leq 0. \end{align*} \item \emph{(off-facet test)} Let $\ensuremath{\varphi} \in C^1(\mathcal U)$ where $\mathcal U$ is a neighborhood of some point $(\hat x, \hat t) \in Q$ and suppose that $\dim \partial W(\nabla \ensuremath{\varphi}(\hat x, \hat t)) = 0$. If $u - \ensuremath{\varphi}$ has a local maximum at $(\hat x, \hat t)$ then \begin{align*} \ensuremath{\varphi}_t(\hat x, \hat t) + F(\nabla \ensuremath{\varphi}(\hat x, \hat t), 0) \leq 0. \end{align*} \end{enumerate}
Supersolutions are defined analogously. \end{definition}
If for some $p$ the value of $F(p, \xi)$ does not depend on $\xi$ in the sense below, we can replace the faceted test by a simpler test that does not need an admissible faceted function.
\begin{definition}[Curvature-free type at $p_0$] \label{def:level-set-type} We say that $F$ is of \emph{curvature-free type} at $p_0 \in \Rd$ if we have for any constant $C > 0$ \begin{align*} \lim_{p \to p_0} \sup_{\abs\zeta \leq C} F(p, \zeta) = F(p_0, 0) = \lim_{p \to p_0} \inf_{\abs\zeta \leq C} F(p, \zeta). \end{align*} \end{definition}
\begin{remark} The function $F$ defined in \eqref{geometric F} is of curvature-free type at $p_0 = 0$. \end{remark}
\begin{definition}[Faceted test at curvature-free gradients] \label{def:level-set-test} If $F$ is of curvature-free type at $p_0 = 0$, we replace the faceted test (i) in Definition~\ref{def:visc-solution} at $\hat{p} = p_0 = 0$ by the following test: \begin{enumerate} \item[(i-cf)] Let $g \in C^1(\ensuremath{\mathbb{R}})$, $\ensuremath{\varphi}(x, t) := g(t)$ and suppose that $u - \ensuremath{\varphi}$ has a local maximum at $(\hat x, \hat t)$. Then \begin{align*} g'(\hat t) + F(0, 0) = g'(\hat t) \leq 0. \end{align*} \end{enumerate} \end{definition}
\section{Construction of faceted functions} \label{sec:faceted functions}
To prove the uniqueness of viscosity solutions of \eqref{P}, we need to be able to construct a sufficiently wide class of test functions, the \emph{faceted functions}. In this section we will assume that $W$ is convex, positively one-homogeneous and crystalline. We shall also assume that there exists $\delta > 0$ such that $W(p) \geq \delta \abs p$. The important case for us is $W^{\rm sl}_p$ from Definition~\ref{def:sliced-W}.
The polar function $W^\circ$ of $W$ is defined as \begin{align} \label{polar} W^\circ(x) = \sup \set{x \cdot p: W(p) \leq 1}. \end{align} Clearly \begin{align*} (W^\circ)^\circ = W. \end{align*} We define the Wulff shape corresponding to $W$ as \begin{align*} \operatorname{Wulff}_W := \set{x \in \Rd: W^\circ(x) \leq 1}. \end{align*} Note that the Wulff shape of a one-homogeneous crystalline (polyhedral) $W$ with linear growth is a bounded polyhedron containing the origin in its interior.
We want to establish a proposition similar to \cite[Proposition~2.12]{GGP13AMSA}, but for a crystalline energy:
\begin{proposition} \label{pr:approximate-pair} Let $k = 1$ or $2$, $(A_-, A_+) \in \mathcal P^k$ be a \emph{bounded} pair and let
$0 \leq \rho_1 < \rho_2$. Suppose that $W: \ensuremath{\mathbb{R}}^k \to \ensuremath{\mathbb{R}}$ is a convex, positively one-homogeneous polyhedral function such that there exists $\delta > 0$ with $W(p) \geq \delta |p|$ for $p \in \ensuremath{\mathbb{R}}^k$. Then there exists an \emph{admissible pair} $(G_-, G_+) \in \mathcal P^k$ such that \begin{align} \label{admiss pair approx} \nbd{\rho_1}(A_-, A_+) \preceq (G_-, G_+) \preceq \nbd{\rho_2}(A_-, A_+), \end{align} that is, there exists a support function $\psi$ of pair $(G_-, G_+)$ such that ${CH}_W(\psi; \ensuremath{\mathbb{R}}^k)$ is nonempty. \end{proposition}
We shall use this result in the following form:
\begin{corollary} \label{co:approximate pair sliced} Let $W: \ensuremath{\mathbb{R}}^n \to \ensuremath{\mathbb{R}}$ be a polyhedral convex function finite everywhere. Suppose that $p_0 \in \ensuremath{\mathbb{R}}^n$ such that $\dim \partial W(p_0) = k$ for $k = 1$ or $2$. Then for any bounded pair $(A_-, A_+) \in \mathcal P^k$ and any $0 \leq \rho_1 < \rho_2$ there exists a $p_0$-\emph{admissible pair} $(G_-, G_+)$ satisfying \eqref{admiss pair approx}. \end{corollary}
\begin{proof} Let us take $\xi_0 \in \operatorname{ri} \partial W(p_0)$. The function $\hat W(p) := W^{\rm sl}_{p_0}(p) - \xi_0' \cdot p$ satisfies the assumptions of Proposition~\ref{pr:approximate-pair} by Lemma~\ref{le:linear growth SW}. Therefore there exists a pair $(G_-, G_+) \in \mathcal P^k$, its support function $\psi$ and a Cahn-Hoffman vector field $z \in {CH}_{\hat W}(\psi; \ensuremath{\mathbb{R}}^k)$. It is easy to see that $z + \xi_0 \in CH^{\rm sl}_{p_0}(\psi; \ensuremath{\mathbb{R}}^k)$, and therefore $(G_-, G_+)$ is $p_0$-admissible. \end{proof}
As of now we only know how to construct such admissible facets for dimensions $k = 1$ and $k = 2$.
For the construction of an admissible function we will basically use a signed-distance-like function induced by $W$, and then define a possible Cahn-Hoffman vector field for this function. For a given set $V \subset \ensuremath{\mathbb{R}}^k$ the signed-distance-like function $d_V$ is defined as \begin{align} \label{d_V-def} d_V(x) := \inf_{y\in V} W^\circ(x - y) -\inf_{y\in V^c} W^\circ(y - x), \quad x \in \ensuremath{\mathbb{R}}^k, \end{align} where $W^\circ$ is the polar of $W$ given as \eqref{polar}.
\subsection{One-dimensional admissible facets}
We will give an explicit construction as a proof of Proposition~\ref{pr:approximate-pair} in the one-dimensional case to illustrate the process and hopefully prepare the reader for the construction in the two-dimensional case. \begin{figure}
\caption{Construction of a one-dimensional admissible pair and its support function}
\label{fig:addissible-pair-1d}
\end{figure} The situation is depicted in Figure~\ref{fig:addissible-pair-1d}.
Let $(A_-, A_+) \in \mathcal P^1$ be a bounded pair in $\ensuremath{\mathbb{R}}$. By making $\rho_1$ larger if necessary, we can assume that $0 < \rho_1 < \rho_2$. Let us set $\ensuremath{\varepsilon} := \frac{\rho_2 - \rho_1}3$.
We define the open sets \begin{align*} G_- := \operatorname{int} \cl{\nbd\ensuremath{\varepsilon}\pth{\nbd{-\rho_2}(A_-)}} \qquad \text{and} \qquad G_+ := \operatorname{int} \cl{\nbd{\rho_1 + \ensuremath{\varepsilon}}(A_+)}. \end{align*} Due to the properties of the set neighborhood, we have for all $\eta > 0$ \begin{align} \label{G-pm-eta} \nbd{-\rho_2} (A_-) \subset G_- \subset \nbd{-\rho_2 + \ensuremath{\varepsilon}+ \eta}(A_-), \qquad \nbd{\rho_1}(A_+) \subset G_+ \subset \nbd{\rho_1 + \ensuremath{\varepsilon} + \eta}(A_+). \end{align} In particular, we take the interior of the closure in the definition of $G_\pm$ to regularize the boundary so that $G^c_\pm$ has no isolated points.
By definition $A_-\subset A_+^c$, and therefore Proposition~\ref{pr:nbd-properties} together with \eqref{G-pm-eta} imply that for any $\eta \in (0, 2\ensuremath{\varepsilon})$ \begin{align*} G_- &\subset \nbd{\ensuremath{\varepsilon}+\eta}\pth{\nbd{-\rho_2}(A_-)} \subset \nbd{-\rho_2 + \ensuremath{\varepsilon} + \eta}(A_-)\\ &\subset \nbd{-\rho_2 + \ensuremath{\varepsilon} + \eta}(A_+^c) \subset \nbd{-\ensuremath{\varepsilon} + 2\eta}\pth{\nbd{-\rho_1 - \ensuremath{\varepsilon} - \eta}(A_+^c)}\\ &= \nbd{-\ensuremath{\varepsilon} + 2\eta}\pth{\nbd{\rho_1 + \ensuremath{\varepsilon} + \eta}(A_+)^c}\\ &= \nbd{\ensuremath{\varepsilon} - 2\eta}\pth{\nbd{\rho_1 + \ensuremath{\varepsilon} + \eta}(A_+)}^c \subset \nbd{\ensuremath{\varepsilon} - 2\eta}\pth{G_+}^c. \end{align*} We conclude that \begin{align*} \operatorname{dist}(G_-, G_+) \geq \ensuremath{\varepsilon} > 0. \end{align*} Therefore $(G_-, G_+)$ is an open pair, and due to \eqref{G-pm-eta} \begin{align*} \nbd{\rho_1}(A_-, A_+) \preceq (G_-, G_+) \preceq \nbd{\rho_2}(A_-, A_+). \end{align*} To prove that the pair $(G_-, G_+)$ is bounded, we recall that $(A_-, A_+)$ is a bounded pair, and therefore there exists $R > 0$ such that $B_R^c(0) \subset A_-$ or $B_R^c(0) \subset A_+$. From \eqref{G-pm-eta} we have that $\nbd{-\rho_2}(B_R^c(0)) \subset G_-$ or $\nbd{\rho_1}(B_R^c(0)) \subset G_+$. Therefore $B_{\tilde R}^c(0) \subset G_-$ or $B_{\tilde R}^c(0) \subset G_+$ for $\tilde R = R + \rho_2$, which implies that $(G_-, G_+)$ is bounded.
Since $G_\pm$ are open, we can write the union $G_- \cup G_+$ as at most a countable union of disjoint open intervals. Since the facet $G_-^c \cap G_+^c$ is bounded, and the sets $G_\pm$ have the interior ball property with radius $\ensuremath{\varepsilon}$ by construction, the length of the intervals must be greater than or equal to $2\ensuremath{\varepsilon}$. In particular, there must only be finitely many of them. Since moreover $\operatorname{dist} (G_-, G_+) > 0$, we can find $m \in \ensuremath{\mathbb{N}}$ and $\set{a_i}_{i=0}^m$, $\set{b_i}_{i=0}^m$ such that \begin{align*} -\infty = a_0 < b_0 < a_1 < b_1 < \cdots < a_m < b_m = \infty \end{align*} and \begin{align*} G_- \cup G_+ = \bigcup_{i=0}^m (a_i, b_i). \end{align*} Finally, by construction, \begin{align} \label{def-of-delta} \delta := \frac13 \min \set{\min_{0 \leq i \leq m} b_i - a_i, \min_{1 \leq i \leq m} a_i - b_{i-1}} > 0. \end{align} The facet $G_-^c \cap G_+^c$ is closed and \begin{align*} G_-^c \cap G_+^c = \bigcup_{i=1}^m [b_{i-1}, a_i]. \end{align*}
Let us now introduce the sign function \begin{align*} \sigma(x) := \begin{cases} 1 & x \in \cl{G_+},\\ -1 & x \in \cl{G_-},\\ 0 & \text{otherwise}. \end{cases} \end{align*} This allows us to define the function \begin{align*} \psi(x) := \min\set{\delta, \operatorname{dist}(x, G_+^c)} - \min \set{\delta, \operatorname{dist}(x, G_-^c)}, \end{align*} as a clipped version of the function in Example~\ref{ex:trivial-support-function}, which is again clearly a support function of the pair $(G_-, G_+)$. Moreover, \begin{align*} \psi(x) = \begin{cases} \delta \sigma(x) & x \in [a_i + \delta, b_i - \delta] \text{ for some $i$},\\ 0 & x \in [b_{i-1}, a_i] \text{ for some $i$},\\ (x-a_i)\sigma(x) & x \in (a_i, a_i + \delta) \text{ for some $i$},\\ (b_i -x)\sigma(x) & x \in (b_i - \delta, b_i) \text{ for some $i$}. \end{cases} \end{align*} Therefore the function $\psi$ is differentiable everywhere except at the points $a_i, b_i, a_i + \delta, b_i - \delta$ for $0 \leq i \leq m$. We can evaluate the derivative at the other points as \begin{align*} \psi'(x) = \begin{cases} 0 & x \in (a_i + \delta, b_i -\delta) \cup (b_{i-1}, a_i) \text{ for some $i$},\\ \sigma(x) & x \in (a_i, a_i + \delta) \text{ for some $i$},\\ -\sigma(x) & x \in (b_i - \delta, b_i) \text{ for some $i$}. \end{cases} \end{align*}
In one dimension, the subdifferential of one-homogeneous $W$ can be expressed as \begin{align*} \partial W(p) = \begin{cases} \set{w_-} & p < 0,\\ [w_-, w_+] & p = 0,\\ \set{w_+} & p > 0, \end{cases} \end{align*} for $w_\pm = W'(\pm 1)$, $w_- < 0 < w_+$.
Let us define the continuous Cahn-Hoffman vector field as \begin{align*} z(x) := \begin{cases} W'(\sigma(x)) & x \in (a_i, a_i + \delta) \text{ for some $i$},\\ W'(-\sigma(x)) & x \in (b_i - \delta, b_i) \text{ for some $i$},\\ W'(\sigma(b_0)) & x \leq b_0 - \delta,\\ W'(\sigma(a_m)) & x \geq a_m + \delta,\\ \text{linear} & \text{otherwise}. \end{cases} \end{align*} One can easily see that the function $z$ is Lipschitz continuous on $\ensuremath{\mathbb{R}}$ and $\norm{\nabla z}_\infty \leq \frac{w_+ - w_-}\delta < \infty$ by the definition of $\delta$ in \eqref{def-of-delta}. Therefore $\psi \in \operatorname{\mathcal{D}}(\partial E)$ and the facet $(G_-, G_+)$ is admissible, which finishes the proof of Proposition~\ref{pr:approximate-pair} in the case of $k = 1$.
\subsection{Two-dimensional admissible facets}
In this section we give a proof of Proposition~\ref{pr:approximate-pair} in the two-dimensional case. Without loss of generality we may suppose that $\rho_1 = 0$ and $\rho_2 = \rho > 0$. Let us stress again that we do not assume that the Wulff shape of $W$ is symmetric with respect to the origin.
The proof of Proposition~\ref{pr:approximate-pair} for $k = 2$ uses a rather simple idea of an explicit construction that is unfortunately quite technical. It will be split in several steps: \begin{enumerate}[1.] \item Approximate a general bounded facet by a smooth facet. \item Rotate the smooth facet by a small angle so that the boundary has nonzero curvature at the points where the normal is pointing in the direction of a corner of $W$. \item Flatten the boundary locally at these points. \item Use the Fenchel distance-like function induced by $W$ to construct a support function and a Cahn-Hoffman vector field in the neighborhood of the boundary. \end{enumerate}
We define the set of critical directions, \begin{align*} \mathcal N := \set{p \in S^1: \partial W(p) \text{ is not a singleton}} = \set{p \in S^1: W \text{ is not differentiable at $p$}}, \end{align*}
where $S^1 := \set{p \in \ensuremath{\mathbb{R}}^2: |p| = 1}$ is the unit circle. Since $W$ is polyhedral, $\mathcal N$ is finite. \begin{lemma} \label{le:} $\partial W: p \to 2^{\ensuremath{\mathbb{R}}^2}$ is constant on every connected component of $S^1 \setminus \mathcal N$. Moreover, $\partial W(p)$ is a singleton for every such $p$. \end{lemma}
\begin{proof} This follows from the fact that $W$ is polyhedral. \end{proof}
We will also use some basic results of the convex analysis. In particular, recall the definition of the polar $W^\circ$ in \eqref{polar}. We will for short denote the associate Wulff shape as \begin{align*} \mathcal W := \set{x: W^\circ(x) \leq 1}. \end{align*} This is a polygon in two dimensions, with a finite number of vertices, corresponding to the number of critical directions $\mathcal N$. We have the following basic result:
\begin{lemma} \label{le:wulff-frank-rel} If $p \neq 0$ and $x \in \partial W(p)$ then $W^\circ(x) = 1$ and $x \cdot p = W(p)$. Similarly, if $x \neq 0$ and $p \in \partial W^\circ(x)$ then $W(p) = 1$ and $x \cdot p = W^\circ(x)$. Suppose now that $x \neq 0$ and $p \neq 0$. Then \begin{align*} \frac x{W^\circ(x)} \in \partial W(p) \quad \Leftrightarrow \quad \frac p{W(p)} \in \partial W^\circ(x). \end{align*} \end{lemma}
\subsubsection{Smooth pair approximation}
By the smooth approximation lemma, \cite[Lemma~2.11]{GGP13AMSA}, we can find smooth disjoint open sets $H_-, H_+$ such that \begin{align} \label{A-Hausdorff-approx} \begin{aligned} \nbd{\rho/2}(A_-, A_+) \preceq (H_-, H_+) \preceq \nbd{3\rho/4}(A_-, A_+). \end{aligned} \end{align} We note that $(H_-, H_+)$ is a smooth bounded pair.
We claim that we can choose $H_-, H_+$ in such a way that \begin{align} \label{nonzero curvature} \begin{aligned} \text{the curvature of $\partial H_-$ and $\partial H_+$ at $x$ is nonzero}\\ \text{whenever $\nu_{\partial H_-}(x) \in \mathcal N$ or $-\nu_{\partial H_+}(x) \in \mathcal N$, respectively.} \end{aligned} \end{align} Indeed, let $V$ be $H_-$ or $\operatorname{int} H_+^c$. Since $\partial V$ is smooth and bounded, it is a union of finitely many disjoint closed curves. Each of these curves is a one-dimensional manifold without boundary and the unit outer normal vector map $\nu: \partial V \to S^1$ is smooth. By Sard's theorem we have $\mathcal H^1\pth{\nu\pth{\set{x \in \partial V: d\nu(x) \text{ has rank } < 1}}} = 0$. Note that the curvature $\kappa(x)$ of $\partial V$ at $x \in \partial V$ is zero if and only if the rank of $d\nu(x)$ is zero. Since the set of critical directions $\mathcal N \subset S^1$ is finite, we can find a rotation $R$ of $\ensuremath{\mathbb{R}}^2$ by an arbitrarily small angle such that $R(\mathcal N) \cap \nu(\set{x \in \partial V : \kappa(x) = 0}) = \emptyset$. We therefore rotate the set $V$ by $R^{-1}$ with a sufficiently small such angle so that the rotated set still approximates the original one. Therefore whenever $x \in R^{-1}(\partial V)$ such that $\kappa_{R^{-1}(\partial V)}(x) = 0$, we have $\nu_{R^{-1}(\partial V)}(x) \notin \mathcal N$. We can therefore replace $H_-$ and $H_+$ with the rotated ones by a sufficiently small angle if necessary and then $H_\pm$ satisfy \eqref{nonzero curvature}.
\subsubsection{Flattening of $\partial H_\pm$ in the critical directions}
Let $V$ denote either $H_-$ or $\operatorname{int} H_+^c$ in what follows and let $\nu(x) = \nu_{\partial V}(x)$ be the unit outer normal to $\partial V$ at $x \in \partial V$. We will modify $V$ in the neighborhood of the critical points of its boundary $x \in \partial V$ with $\nu(x) \in \mathcal N$ so that the boundary of the modified set has a flat part of nonzero length with the same normal. Let us denote the set of these critical points by $S$, \begin{align*} S := \set{x \in \partial V: \nu(x) \in \mathcal N}. \end{align*} Note that $S$ is compact since $\nu$ is smooth and $\partial V$ is bounded.
We claim that $S$ is finite. Indeed, suppose that $S$ is infinite. Since $S$ is compact, there is $\hat x \in S$ such that $B_\ensuremath{\varepsilon}(\hat x) \cap S$ is infinite for every $\ensuremath{\varepsilon} > 0$. Since $\mathcal N$ is discrete and $\nu$ is continuous, there exists $\ensuremath{\varepsilon}_0 > 0$ such that $\nu(x) \equiv \nu(\hat x)$ for all $x \in B_{\ensuremath{\varepsilon}_0}(\hat x) \cap S$. But that is a contradiction with $d \nu(\hat x) \neq 0$ from \eqref{nonzero curvature}.
Let us choose $\eta > 0$ such that \begin{align*} \eta < \min \set{\frac 1{40}\operatorname{dist}(\partial H_-,\partial H_+), \frac \rho8,
\min_{\substack{x, y \in S\\x \neq y}} |x - y|}. \end{align*} Since for any $\hat x \in S$ we have $\kappa(\hat x) \neq 0$, by making $\eta$ smaller if necessary, we may also assume that $\partial V \cap B_{20\eta}(\hat x)$ is a graph of a convex or a concave function $g = g_{\hat x}$
in the sense that \begin{align*} V \cap B_{20\eta}(\hat x) = \set{y + \hat x \in B_{20\eta}(\hat x) : y \cdot \nu(\hat x) < g(y \cdot \tau(\hat x))}, \end{align*}
where $\tau(\hat x) \perp \nu(\hat x)$, $|\tau(\hat x)| = 1$. Note that $g(0) = g'(0) = 0$. Since $\kappa(\hat x) \neq 0$, we have $g''(0) \neq 0$ and by Taylor expansion we may also assume that \begin{align*}
\frac 14 |g''(0)|s^2 \leq |g(s)| \leq |g''(0)| s^2, \quad |s| < 20 \eta. \end{align*}
With this set-up, we can for every $\hat x \in S$ find $L_{\hat x} > 0$ such that $\set{s:
|g_{\hat x}(s)| < L_{\hat x}} \times [-L_{\hat x}, L_{\hat x}] \subset B_\eta(0)$. We then define $\hat V$, the set with flattened boundary in the critical directions, as \begin{align*} \hat V := &\pth{V \setminus \bigcup_{\hat x \in S} B_{10 \eta}(\hat x)}\\ &\bigcup_{\substack{\hat x \in S\\g_{\hat x}''(0) > 0}} \set{y + \hat x \in B_{10\eta}(\hat x) : y \cdot \nu(\hat x) < \max(L_{\hat x}, g_{\hat x}(y \cdot \tau(\hat x)))}\\ &\bigcup_{\substack{\hat x \in S\\g_{\hat x}''(0) < 0}} \set{y + \hat x \in B_{10\eta}(\hat x) : y \cdot \nu(\hat x) < \min(-L_{\hat x}, g_{\hat x}(y \cdot \tau(\hat x)))}. \end{align*} Note that $\partial V \subset \nbd\eta(\partial \hat V)$ and $\partial \hat V \subset \nbd\eta(\partial V)$.
We finish our construction of the admissible pair by defining $G_- = \hat V$ when starting with $V = H_-$, and $G_+ = \operatorname{int} \hat V^c$ when starting with $V = \operatorname{int} H_+^c$.
\subsubsection{Construction of the support function and the Cahn-Hoffman vector field}
In this part we shall finally define a candidate for the admissible function with an appropriate Cahn-Hoffman vector field in a small neighborhood of the flattened boundary $\partial \hat V$, where $\hat V = G_-$ or $\hat V = \operatorname{int} G_+^c$.
Let $\mathcal V$ denote the set of vertices of the Wulff shape $\mathcal W$. We define $\mathcal C_0$ to be the family of connected components of $\partial \hat V \setminus \partial V$, and $\mathcal C_r$ to be the family of connected components of $\partial \hat V \cap \partial V$. We also define $\mathcal C = \mathcal C_0 \cup \mathcal C_r$. Every $\Gamma_0 \in \mathcal C_0$ is the flattened part of the boundary $\partial \hat V$, the line segment with a normal vector $\nu_0 \in \mathcal N$. Similarly, every $\Gamma \in \mathcal C_r$ is a connected piece of the original smooth boundary $\partial V$, and by construction there exists a unique vertex $v \in \mathcal V$ of the Wulff shape such that $\set{v} = \partial W(\nu(x))$ for $x \in \Gamma$. We set $\mathcal V(\Gamma) = \set{v}$.
Given $\Gamma \in \mathcal C_0$ with normal $\nu_0$, there exist exactly two distinct vertices $v, w \in \mathcal V$ such that $\set{v, w} \subset \partial W(\nu_0)$. In this case we set $\mathcal V(\Gamma) = \set{v, w}$. There exist exactly two sets $\Gamma', \Gamma'' \in \mathcal C_r$ such that $\mathcal V(\Gamma') = \set{v}$, $\mathcal V(\Gamma'') = \set{w}$, and $\cl \Gamma \cap \Gamma' = \set{x_v}$, $\cl \Gamma \cap \Gamma'' = \set{x_w}$ for some points $x_v, x_w$; see Figure~\ref{fig:local-admissible}. \begin{figure}
\caption{Geometry at a flattened part of the boundary of $\hat V$. The shaded area represents the rescaled Wulff shape touching the flattened part.}
\label{fig:local-admissible}
\end{figure} Since $v, w$ are linearly independent, we have a unique point $c^\Gamma$ at the intersection of $L_v(x_v)$ and $L_w(x_w)$. We have $c^\Gamma + t v = x_v$ and $c^\Gamma + sw = x_w$ for some $t, s \in \ensuremath{\mathbb{R}} \setminus \set0$. However, since $(x_w - x_v) \cdot \nu_0 = 0$, we must have $c^\Gamma \cdot \nu_0 + s w \cdot \nu_0 = c^\Gamma \cdot \nu_0 + t v \cdot \nu_0$. As $v \cdot \nu_0 = w \cdot \nu_0 = W(\nu_0)$, it follows that $t = s$ and we set $\alpha^\Gamma := t$. This induces a coordinate system on $\ensuremath{\mathbb{R}}^2$ with coordinates $x = \xi_v^\Gamma(x) v + \xi_w^\Gamma(x) w + c^\Gamma$ for every $x \in \ensuremath{\mathbb{R}}^2$. We note that \begin{align} \label{Gamma alpha level} \Gamma = \set{x: \xi_v^\Gamma(x) + \xi_w^\Gamma(x) = \alpha^\Gamma,\ \xi_v^\Gamma(x) \xi_w^\Gamma(x) > 0}. \end{align} Clearly $\xi_v^\Gamma(x_v) = \xi_w^\Gamma(x_w) = \alpha^\Gamma$ and $\xi^\Gamma_v(x_w) = \xi^\Gamma_w(x_v) = 0$.
We define the line through a point $x$ in the direction $v$ as \begin{align*} L_v(x) := \set{x + tv: t \in \ensuremath{\mathbb{R}}}, \end{align*} and the cylinder through set $\Gamma$ \begin{align*} L_v(\Gamma) := \set{x + tv: x \in \Gamma,\ t \in \ensuremath{\mathbb{R}}}. \end{align*} The thickness of a cylinder is denoted by \begin{align*} \theta(L_v(\Gamma)) := \sup_{x, y \in \Gamma} \operatorname{dist} \pth{L_v(x), L_v(y)}. \end{align*}
We collect a few basic properties of the relationship between the components $\Gamma \in \mathcal C$ and the associated cylinders. These results follow from the construction of $\hat V$ in the previous section.
\begin{lemma} \label{le:one intersect} Suppose that $\Gamma \in \mathcal C$ and $x, y \in \Gamma$. Let $v \in \mathcal V(\Gamma)$. Then there exists $p \in \partial W^\circ(v)$ with $v \cdot p = 1$ such that $(x-y) \cdot p = 0$. In particular, if $x \in L_v(\Gamma)$ then $L_v(x) \cap \Gamma = \set{y}$ for some $y$, that is, there exists a unique $t \in \ensuremath{\mathbb{R}}$ such that $x - tv \in \Gamma$. \end{lemma}
\begin{proof} Since $\Gamma$ is a smooth curve, by the mean value theorem there exists $\xi \in \Gamma$ such that $(x-y) \cdot \nu(\xi) = 0$. But $p:= \frac{\nu(\xi)}{W(\nu(\xi))} \in \partial W^\circ(v)$ by construction. Then $v \cdot p = 1$ follows from the characterization of the subdifferential of $W^\circ$ in Lemma~\ref{le:wulff-frank-rel}.
Now let $x \in L_v(\Gamma)$. By definition, there exists $t \in \ensuremath{\mathbb{R}}$ such that $x - tv \in \Gamma$. Suppose that $x - sv \in \Gamma$ for $s \in \ensuremath{\mathbb{R}}$. Then from the above there exists $p$ such that $v \cdot p = 1$ and $0 = (x - tv - x + sv) \cdot p = (s - t) v \cdot p = s - t$. We have $s = t$. \end{proof}
\begin{lemma} \label{le:neighbor cylinders} Let $\Gamma \in \mathcal C_0$ and $\Gamma' \in \mathcal C_r$ such that $\operatorname{dist}(\Gamma, \Gamma') = 0$. Then there exist $v$ such that $\set{v} = \mathcal V(\Gamma) \cap \mathcal V(\Gamma')$, and $\xi$ such that $\cl \Gamma \cap \Gamma' = \set{\xi}$. Moreover, $L_v(\Gamma) \cap L_v(\Gamma') = \emptyset$ and $\cl{L_v(\Gamma)} \cap L_v(\Gamma') = L_v(\xi)$. \end{lemma}
\begin{proof} If $\operatorname{dist}(\Gamma, \Gamma') = 0$, then $\Gamma$ must be the flattened part and $\Gamma'$ must be the adjacent smooth part of $\partial \hat V$. By construction, $\mathcal V(\Gamma) \cap \mathcal V(\Gamma') = \set{v}$ for some $v \in \mathcal V$, and $\cl \Gamma \cap \Gamma' = \set{\xi}$ for some $\xi$. In particular, $\cl{L_v(\Gamma)} \cap L_v(\Gamma') \subset L_v(\xi)$. Now suppose that there exist distinct points $x \in \cl\Gamma$, $y \in \Gamma'$ such that $L_v(x) = L_v(y)$. Then by connectedness of $\cl\Gamma$ and $\Gamma'$, we can find such points arbitrarily close to $\xi$. But this is a contradiction with the fact that $L_v(x)$ can intersect both $\Gamma$ and $\Gamma'$ at most once by Lemma~\ref{le:one intersect}, and the line $L_v(x)$ in the direction of $v$ travels from $\hat V$ to $\hat V^c$ at two consecutive points $x, y$, with no transition from $\hat V^c$ to $\hat V$ in between. \end{proof}
\begin{corollary} \label{co:on intersect neighbor} Suppose that $\Gamma \in \mathcal C_r$, $\Gamma', \Gamma'' \in \mathcal C_0$ are the adjacent flat parts, $\operatorname{dist}(\Gamma', \Gamma) = \operatorname{dist}(\Gamma'', \Gamma) = 0$, and $x, y \in \Gamma \cup \Gamma' \cup \Gamma''$. Let $v \in \mathcal V(\Gamma)$. Then there exists $p \in \partial W^\circ(v)$ with $v \cdot p = 1$ such that $(x-y) \cdot p = 0$. In particular, if $x \in L_v(\Gamma \cup \Gamma' \cup \Gamma'')$ then $L_v(x) \cap (\Gamma \cup \Gamma' \cup \Gamma'') = \set{y}$ for some $y$, that is, there exists a unique $t \in \ensuremath{\mathbb{R}}$ such that $x - tv \in \Gamma \cup \Gamma' \cup \Gamma''$. \end{corollary}
\begin{proof} This follows by combining Lemma~\ref{le:neighbor cylinders} and Lemma~\ref{le:one intersect} for the neighboring components $\Gamma, \Gamma', \Gamma''$, since the flat ones have normals $\nu', \nu'' \in \partial W^\circ(v)$. \end{proof}
Given $\mu > 0$, we define the sets $U_\Gamma$ for $\Gamma \in \mathcal C$ by \begin{align*} U_\Gamma := \begin{cases}
\set{x + tv: x \in \Gamma, \ |t| \leq \mu,\ v \in \mathcal V(\Gamma)} & \text{if $\Gamma \in \mathcal C_r$,}\\
\set{x: |\xi_v^\Gamma(x) + \xi_w^\Gamma(x) - \alpha^\Gamma| \leq \mu, \ \xi_v^\Gamma(x) \xi_w^\Gamma(x) > 0} & \text{if $\Gamma \in \mathcal C_0$.} \end{cases} \end{align*} We shall show below in \eqref{partialVcover} that $\set{U_\Gamma}_{\Gamma \in \mathcal C}$ cover a neighborhood of $\partial
\hat V$. Note that if we take $\mu \leq |\alpha^\Gamma|/2$ we must have $\operatorname{sign} \xi_v^\Gamma(x) = \operatorname{sign} \xi_w^\Gamma(x) = \operatorname{sign} \alpha^\Gamma$ on $U_\Gamma$ for $\Gamma \in \mathcal C_0$.
If we choose $\mu >0$ small enough, the sets $U_\Gamma$ are pair-wise disjoint. \begin{lemma} \label{le:UG disjoint} Suppose that $0 < \mu < \min \set{\mu_1, \mu_2}$, where \begin{align*}
\mu_1 := \frac{1}{3\max_{W^\circ(v) \leq 1} |v|} \min_{\substack{\Gamma, \Gamma' \in \mathcal C\\\operatorname{dist}(\Gamma, \Gamma')> 0 }}\operatorname{dist}(\Gamma, \Gamma'),\qquad \mu_2 := \min_{\Gamma \in \mathcal C_0}
\frac{|\alpha^\Gamma|}2. \end{align*} Then $U_\Gamma \cap U_{\Gamma'} = \emptyset$ for all $\Gamma, \Gamma' \in \mathcal C$, $\Gamma \neq \Gamma'$. \end{lemma}
\begin{proof} Suppose that $\operatorname{dist}(\Gamma, \Gamma') > 0$. Then $U_\Gamma \subset \nbd{t}(\Gamma)$ and
$U_{\Gamma'} \subset \nbd{t}(\Gamma')$ with $t = \mu \max_{W^\circ(v) \leq 1} |v|$. Hence $U_\Gamma \cap U_{\Gamma'} = \emptyset$ by $\mu < \mu_1$.
On the other hand, if $\operatorname{dist}(\Gamma, \Gamma') = 0$, then one of the sets, say $\Gamma$, belongs to $\mathcal C_0$, and the other belongs to $\mathcal C_r$. Suppose that $y \in U_\Gamma \cap U_{\Gamma'}$. We will show that this leads to a contradiction. Indeed, set $v \in \mathcal V(\Gamma')$ and note that $U_{\Gamma'} \subset L_v(\Gamma')$. We have $c^\Gamma \in L_v(\Gamma')$. Therefore $y(\lambda) := \lambda y + (1-\lambda) c^\Gamma \in L_v(\Gamma')$ for every $\lambda \in [0,1]$. By Lemma~\ref{le:neighbor cylinders}, we have $\Gamma \cap L_v(y(\lambda)) =\emptyset$ for all $\lambda \in [0,1]$.
Let $t:= \xi_v^\Gamma(y) + \xi_w^\Gamma(y) - \alpha^\Gamma$. Since $y \in U_\Gamma$, we have $|t|
\leq \mu < \mu_2 \leq |\alpha^\Gamma|/2$ and $\xi_v^\Gamma(y) \xi_w^\Gamma(y) > 0$. If $t \alpha^\Gamma \leq 0$, we have $y - t v \in \Gamma$ by \eqref{Gamma alpha level}, and this is a contradiction with $\Gamma \cap L_v(y) = \emptyset$. If $t \alpha^\Gamma > 0$, we set $\lambda := \frac{\alpha^\Gamma}{\alpha^\Gamma + t} \in (0,1)$. A simple computation using \eqref{Gamma alpha level} shows that $y(\lambda) \in \Gamma$, which is a contradiction with $\Gamma \cap L_v(y(\lambda)) = \emptyset$. The conclusion $U_\Gamma \cap U_{\Gamma'} = \emptyset$ follows. \end{proof}
We choose $\mu$ satisfying the assumption in Lemma~\ref{le:UG disjoint}. Then on the pair-wise disjoint collection of sets $\set{U_\Gamma: \Gamma \in \mathcal C}$, we define functions $\psi$ and $z$ by \begin{align*} \psi(x) := \begin{cases} t \text{ such that $x - tv \in \Gamma$, $v \in \mathcal V(\Gamma)$},& x \in U_\Gamma,\ \Gamma \in \mathcal C_r,\\ \xi_v^\Gamma(x) + \xi_w^\Gamma(x) - \alpha^\Gamma, & x \in U_\Gamma,\ \Gamma \in \mathcal C_0, \end{cases} \end{align*} and \begin{align*} z(x) := \begin{cases} v \text{, where $v \in \mathcal V(\Gamma)$}& x \in U_\Gamma,\ \Gamma \in \mathcal C_r,\\ \frac{\xi_v^\Gamma(x) v + \xi_w^\Gamma(x) w}{\xi_v^\Gamma(x) + \xi_w^\Gamma(x)}, \text{ where $v, w \in \mathcal V(\Gamma)$}, & x \in U_\Gamma,\ \Gamma \in \mathcal C_0. \end{cases} \end{align*}
Both $\psi$ and $z$ are well-defined by Lemma~\ref{le:one intersect}. Note that $|\psi| \leq \mu$ on $U_\Gamma$. We can easily see that $\psi$ is differentiable in the interior of $U_\Gamma$ for all $\Gamma \in \mathcal C$ by the inverse function theorem. Moreover, the level set $\set{x: \psi(x) = \psi(y)}$ in a neighborhood of $y \in \operatorname{int} U_\Gamma$ is just a translation of $\Gamma$. Therefore $\nabla \psi(y) = s \nu'$, where $\nu' = \nu^\Gamma$ for $\Gamma \in \mathcal C_0$, or $\nu' = \nu(y - \psi(y) v)$ for $\Gamma \in \mathcal C_r$, with $s = v \cdot \nu' > 0$. In particular, \begin{align*} z(y) \in \partial W(\nabla \psi(y)) \qquad \text{for $y \in \operatorname{int} U_\Gamma$, $\Gamma \in \mathcal C$.} \end{align*}
We now conclude this part by showing that for small $\delta > 0$, the functions $\psi$ and $z$ are well-defined, Lipschitz continuous functions on $\nbd\delta(\partial \hat V)$. We shall use the following two lemmas that we prove first. We set \begin{align*}
K := \max_{W(p)\leq 1} |p| \qquad \text{and} \qquad \delta_\theta = \min_{\Gamma \in \mathcal C} \min_{v \in \mathcal V(\Gamma)} \theta(L_v(\Gamma)), \qquad \text{and} \qquad \delta_\mu := \frac \mu K. \end{align*} Finally, we find $\delta_s > 0$ such that for every $\Gamma \in \mathcal C_0$, $\Gamma' \in \mathcal C_r$ the adjacent component to $\Gamma$, $\operatorname{dist}(\Gamma', \Gamma) = 0$, $v \in \mathcal V(\Gamma')$, $v \neq w \in \mathcal V(\Gamma)$, we have \begin{align} \label{delta sign} \operatorname{dist} (\Gamma' \cap \nbd{\delta_s}(L_v(\Gamma)), L_w(c^\Gamma)) > \delta_s. \end{align} This is possible since $\Gamma' \cap \cl{L_v(\Gamma)} = \set{x_v}$, $\Gamma'$ is smooth (in fact detaching from $L_v(\Gamma)$ linearly), with $\xi_v^\Gamma(x_v) = \alpha^\Gamma \neq 0$, and $L_w(c^\Gamma) = \set{x: \xi_v^\Gamma(x) = 0}$.
\begin{lemma} \label{le:Cr case} Let $x \in \Gamma \in \mathcal C_r$, $v \in \mathcal V(\Gamma)$, and let $\Gamma', \Gamma'' \in \mathcal C_0$ be the neighboring components with $\operatorname{dist}(\Gamma', \Gamma) = \operatorname{dist}(\Gamma'', \Gamma) = 0$. Then for every
$y$ with $|y - x| \leq \min\set{\delta_\theta, \delta_\mu, \delta_s}$, there exists a unique $t(y)$ such that $y - t(y) v \in \Gamma
\cup \Gamma' \cup \Gamma''$. Moreover, $|t(y)| \leq \mu$. Finally, $\operatorname{sign}\xi_v^{\Gamma'}(y) = \operatorname{sign} \alpha^{\Gamma'}$ and $\operatorname{sign} \xi_v^{\Gamma''}(y) = \operatorname{sign} \alpha^{\Gamma''}$. \end{lemma}
\begin{proof}
Using Lemma~\ref{le:neighbor cylinders} and $|x - y| \leq \delta_\theta$, that is, that the distance between $x$ and $y$ is smaller than the width of the cylinders $L_v(\Gamma')$ and $L_v(\Gamma'')$, we are guaranteed that $y \in L_v(\Gamma \cup \Gamma' \cup \Gamma'')$. Therefore there exists a unique $t \in \ensuremath{\mathbb{R}}$ with $y - tv \in \Gamma \cup \Gamma' \cup \Gamma''$. By Corollary~\ref{co:on intersect neighbor}, there exists $p \in \partial W^\circ(v)$ such that \begin{align*} 0 = (x - y + tv) \cdot p = (x - y) \cdot p + t. \end{align*}
By the Cauchy--Schwarz inequality, $|t| \leq K |x - y| \leq \mu$ and the conclusion follows. The sign of $\xi_v^{\Gamma'}(y)$ and $\xi_v^{\Gamma''}(y)$ must match the sign at $\Gamma \cap \cl{\Gamma'}$, $\Gamma \cap \cl{\Gamma''}$, which matches that of $\alpha^{\Gamma'}$, $\alpha^{\Gamma''}$, respectively, since $\delta \leq \delta_s$ and $\delta_s$ satisfies \eqref{delta sign}. \end{proof}
\begin{lemma} \label{le:C0 case} Let $x \in \Gamma \in \mathcal C_0$, $v \in \mathcal V(\Gamma)$, and let $\Gamma', \Gamma'' \in \mathcal C_r$ be the neighboring components with $\operatorname{dist}(\Gamma', \Gamma) = \operatorname{dist}(\Gamma'', \Gamma) = 0$. Let $v \in \mathcal V(\Gamma')$ and $w \in \mathcal V(\Gamma'')$. Then for every
$y$ with $|y - x| \leq \min\set{\delta_\theta, \delta_\mu}$, where $\delta_\mu = \mu/K$, exactly one of the following holds: \begin{enumerate}
\item $\xi_v^\Gamma(y) \xi_w^\Gamma(y) > 0$, $|\xi_v^\Gamma(y) + \xi_w^\Gamma(y) - \alpha^\Gamma| \leq \mu$, or
\item $y \in L_v(\Gamma')$, there exists $t$ such that $y - t v \in \Gamma'$, and $|t| \leq \mu $, or
\item $y \in L_w(\Gamma'')$, there exists $t$ such that $y - t w \in \Gamma''$, and $|t| \leq \mu$. \end{enumerate} \end{lemma}
\begin{proof} Let us set $t = \xi_v^\Gamma(y) + \xi_w^\Gamma(y) - \alpha^\Gamma$. Then $\xi_v^\Gamma(y - tv) + \xi_w^\Gamma(y - tv) - \alpha^\Gamma = 0$ and therefore $(x - y + t v) \cdot \nu_0 =0$, where $\nu_0$ is the normal of $\Gamma$. In particular, $t = (x - y) \cdot \frac{\nu_0}{W(\nu_0)}$ and hence
$|t| \leq K |x - y| \leq \mu$, which implies the estimate in (a).
Since $|y - x| \leq \delta_\mu$, we have $|t| \leq \mu < \mu_2 \leq |\alpha^\Gamma|/2$ and therefore $\xi_v^\Gamma(y) + \xi_w^\Gamma(y)$ has the same sign as $\alpha^\Gamma$. We conclude that at least one of $\xi_v^\Gamma(y)$, $\xi_w^\Gamma(y)$ has the same sign as $\alpha^\Gamma$. Due to Lemma~\ref{le:neighbor cylinders}, $L_v(\Gamma') \subset \set{\xi_w^\Gamma \alpha^\Gamma \leq 0}$ and $L_w(\Gamma'') \subset \set{\xi_v^\Gamma \alpha^\Gamma \leq 0}$. Therefore $y \notin L_v(\Gamma') \cap L_w(\Gamma'')$.
If $\xi_v^\Gamma(y) \xi_w^\Gamma(y) > 0$ then we are at case (a). Otherwise since $|y - x| \leq \delta_\theta$, $y$ must be in exactly one of the cylinders $L_v(\Gamma')$ or $L_w(\Gamma'')$ due to the discussion above.
Suppose therefore $y \in L_v(\Gamma')$. Then there exists a unique $t$ such that $y - t v \in \Gamma'$, and Corollary~\ref{co:on intersect neighbor} implies the estimate $|t| \leq K |y -
x| \leq \mu$ as in Lemma~\ref{le:Cr case}. The case $y \in L_w(\Gamma'')$ can be handled similarly. \end{proof}
We therefore take \begin{align*} 0 < \delta < \min\set{\delta_\theta, \delta_\mu, \delta_s}. \end{align*} With this choice, \begin{align} \label{partialVcover} \nbd\delta(\partial \hat V) \subset \bigcup_{\Gamma \in \mathcal C} U_\Gamma. \end{align} Indeed, let us fix $y \in \nbd\delta(\partial \hat V)$. Then there exists $x \in \partial \hat V$ with
$|x - y| \leq \delta$.
In the case that $x \in \Gamma \in \mathcal C_r$, we apply Lemma~\ref{le:Cr case} to conclude that there is a unique $t$, $|t| \leq K |y-x| \leq K\delta \leq \mu$, such that $y - t v \in \Gamma \cup \Gamma' \cup \Gamma''$ where $v \in \mathcal V(\Gamma)$. If $y - tv \in \Gamma$, then clearly $y \in U_\Gamma$. On the other hand, if $y - tv \in \Gamma'$, we have \begin{align*} 0 = \xi_v^{\Gamma '}(y - tv) + \xi_w^{\Gamma '}(y - tv) - \alpha^{\Gamma'} = \xi_v^{\Gamma '}(y) + \xi_w^{\Gamma '}(y) - \alpha^{\Gamma'} - t. \end{align*} Since also $\operatorname{sign} \xi_v^{\Gamma'}(y) = \operatorname{sign} \alpha^{\Gamma'}$, we conclude that $y \in U_{\Gamma'}$. An analogous argument works if $y - tv \in \Gamma''$.
Now if $x \in \Gamma \in \mathcal C_0$, we apply Lemma~\ref{le:C0 case}, and we argue as above to conclude that $y \in U_{\Gamma}$, $U_{\Gamma'}$ or $U_{\Gamma''}$. Therefore we recover \eqref{partialVcover}.
Now we finally show that $\psi$ and $z$ are Lipschitz on $\nbd\delta(\partial \hat V)$. Since $\psi$ and $z$ are smooth in the interior of $U_\Gamma$, we only need to address the continuity across the transition between $U_\Gamma$, $U_{\Gamma'}$, $\Gamma \in \mathcal C_0$, $\Gamma' \in \mathcal C_r$, with $\operatorname{dist}(\Gamma, \Gamma') = 0$. The function $z$ is clearly Lipschitz across this boundary, since we can alternatively define $z$ in the neighborhood of this boundary using \begin{align*} \zeta(x):= \begin{cases} \xi_w^\Gamma(x), & \xi_w^\Gamma(x) \alpha^\Gamma > 0,\\ 0, & \text{otherwise}. \end{cases} \end{align*} Then we have in the neighborhood of the boundary between $U_\Gamma$ and $U_{\Gamma'}$ that \begin{align*} z(x) = \frac{\xi_v^\Gamma(x) v + \zeta(x) w}{\xi_v^\Gamma(x) + \zeta(x)}, \end{align*} which is clearly a Lipschitz function when $\abs{\xi_v^\Gamma(x)} > \ensuremath{\varepsilon} > 0$, as is the case near the boundary.
Similarly, we can alternatively define $\psi$ in the neighborhood of the boundary between $U_\Gamma$ and $U_{\Gamma'}$ as \begin{align*} \psi(x) = t \quad \text{where $t$ is such that $x - t v \in \Gamma \cup \Gamma'$.} \end{align*} This function is Lipschitz continuous by Corollary~\ref{co:on intersect neighbor}.
\subsubsection{Completion of the proof of Proposition~\ref{pr:approximate-pair}}
We now have two Lipschitz functions $\psi^-, \psi^+$ and Lipschitz continuous vector fields $z^-, z^+$ defined in $\nbd{\delta}(\partial G_-)$ and $\nbd{\delta}(\partial G_+)$, respectively, such that $z^\pm(x) \in \partial W(\nabla \psi^\pm)$. Furthermore, $\partial G_\pm = \set{\psi^\pm = 0}$ almost everywhere. We now have to connect them to produce an admissible support function of the pair $(G_-, G_+)$. We define the constant $\eta = \min (\eta^-, \eta^+) > 0$ by \begin{align*}
\eta^\pm := \frac 12 \min \set{|\psi^\pm(x)|: \delta/2 \leq \operatorname{dist}(x, \partial G_\pm) \leq \delta}. \end{align*} We find smooth cutoff functions $\varphi^\pm \in C^\infty_c$ such that \begin{align*} \text{ $0 \leq \varphi^\pm \leq 1$, $\operatorname{supp} \varphi^\pm \subset \nbd{3\delta/4}(\partial G_\pm)$, $\varphi^\pm = 1$ on $\nbd{\delta/2}(\partial G_\pm)$. } \end{align*} We define the support function of the pair $(G_-, G_+)$ as \begin{align*} \psi(x) := \begin{cases} \eta & x\in G_+ \setminus \nbd\delta(\partial G_+),\\ \min(\eta, \max (\psi^+, 0)) & x\in \nbd\delta(\partial G_+),\\ 0 & x \in G_-^c \cap G_+^c \setminus \nbd\delta(\partial G_- \cup \partial G_+),\\ \max(-\eta, \min (\psi^-, 0)) & x\in \nbd\delta(\partial G_-),\\ -\eta & x\in G_- \setminus \nbd\delta(\partial G_-). \end{cases} \end{align*} It is easy to check that $\psi$ is a Lipschitz support function of $(G_-, G_+)$. Moreover, it is admissible with the Lipschitz Cahn--Hoffman vector field \begin{align*} z(x) := z^-(x) \varphi^-(x) + z^+(x) \varphi^+(x) \end{align*} by Lemma~\ref{le:one-homogeneous-subdiff}.
\section{Comparison principle} \label{sec:comparison principle}
In this section we prove the comparison principle on a spacetime cylinder $Q := \Rd \times (0,T)$ for some $T > 0$.
\begin{theorem}[Comparison principle] \label{th:comparison principle} Let $W: \ensuremath{\mathbb{R}}^n \to \ensuremath{\mathbb{R}}$ be a positively one-homogeneous convex polyhedral function such that the conclusion of Corollary~\ref{co:approximate pair sliced} holds for $1 \leq k \leq n-1$, and let $F$ be of curvature-free type at $p_0 = 0$. Suppose that $u$ and $v$ are a subsolution and a supersolution of \eqref{P} on $\Rd \times [0, T]$ for some $T > 0$, respectively. Moreover, suppose that there exist a compact set $K\subset \Rd$ and constants $c_u \leq c_v$ such that $u \equiv c_u$, $v \equiv c_v$ on $\pth{\Rd \setminus K} \times [0,T]$. Then $u(\cdot, 0) \leq v(\cdot, 0)$ on $\Rd$ implies $u \leq v$ on $\Rd \times [0, T]$. \end{theorem}
We will use the standard doubling-of-variables technique with an additional parameter to enforce a certain facet-like behavior of the functions at a contact point, which will allow us to construct faceted test functions there.
Let us suppose that the comparison theorem does not hold for a given subsolution $u$ and supersolution $v$, that is, suppose that \begin{align} \label{m_0} m_0 := \sup_Q [u - v] > 0. \end{align}
For arbitrary $\zeta \in \Rd$, $\ensuremath{\varepsilon} > 0$ we define \begin{align*} \Phi_{\zeta,\ensuremath{\varepsilon}}(x,t,y,s) := u(x,t) - v(y,s) - \frac{\abs{x-y-\zeta}^2}{2\ensuremath{\varepsilon}}-
S_\ensuremath{\varepsilon}(t,s), \end{align*} where \begin{align} \label{Se} S_\ensuremath{\varepsilon}(t,s) := \frac{\abs{t-s}^2}{2\ensuremath{\varepsilon}} + \frac{\ensuremath{\varepsilon}}{T - t} +\frac\ensuremath{\varepsilon}{T-s}. \end{align}
As in \cite{GG98ARMA}, we define the maximum of $\Phi_{\zeta,\ensuremath{\varepsilon}}$ as \begin{align*} \ell(\zeta, \ensuremath{\varepsilon}) = \max_{\cl Q \times \cl Q} \Phi_{\zeta,\ensuremath{\varepsilon}} \end{align*} and the set of maxima of $\Phi_{\zeta,\ensuremath{\varepsilon}}$ over $\cl Q \times \cl Q$ \begin{align*} \mathcal A(\zeta,\ensuremath{\varepsilon}) := \operatorname*{arg\,max}_{\cl Q \times \cl Q} \Phi_{\zeta,\ensuremath{\varepsilon}}
:= \set{(x,t,y,s) \in \cl Q \times \cl Q :
\Phi_{\zeta,\ensuremath{\varepsilon}}(x,t,y,s) = \ell(\zeta,\ensuremath{\varepsilon})}. \end{align*} Moreover, we define the set of gradients \begin{align*} \mathcal B(\zeta,\ensuremath{\varepsilon}) :=
\set{\frac{x - y -\zeta}{\ensuremath{\varepsilon}} : (x,t,y,s) \in \mathcal A(\zeta,\ensuremath{\varepsilon})}. \end{align*}
\begin{proposition}[{cf. \cite{GG98ARMA}}] \label{pr:maxima-interior} There exists $\ensuremath{\varepsilon}_0 > 0$ such that for all $\ensuremath{\varepsilon} \in (0, \ensuremath{\varepsilon}_0)$ we have \begin{align*} \mathcal A(\zeta,\ensuremath{\varepsilon})\subset Q \times Q \qquad \text{for all $\abs{\zeta} \leq \kappa(\ensuremath{\varepsilon})$}, \end{align*} where $\kappa(\ensuremath{\varepsilon}) := \frac 18 (m_0 \ensuremath{\varepsilon})^{\frac 12}$. \end{proposition}
From now on, we \textbf{fix} $\ensuremath{\varepsilon} \in (0, \ensuremath{\varepsilon}_0)$ so that Proposition~\ref{pr:maxima-interior} holds and we write $\kappa = \kappa(\ensuremath{\varepsilon})$ for simplicity, and drop $\ensuremath{\varepsilon}$ from our notation.
We have the following properties of $\mathcal A(\zeta)$ and $\mathcal B(\zeta)$.
\begin{proposition} \label{pr:max-graph} The graphs of $\mathcal A(\zeta)$ and $\mathcal B(\zeta)$ over $\zeta \in \cl B_\kappa(0)$ are compact. \end{proposition}
\begin{proof} See \cite[Proposition 7.3]{GG98ARMA}. Since $\Phi_\zeta - \ell(\zeta) \leq 0$ by definition of $\ell$, we observe that \begin{align*} \operatorname{graph} \mathcal A(\zeta) &:= \{(\zeta, x, t, y, s) \subset \cl B_\kappa(0) \times \cl Q \times \cl Q:\\ &\qquad\qquad\Phi_\zeta(x,t,y,s) - \ell(\zeta) \geq 0\}, \end{align*} which is closed since $\Phi_\zeta$ is an upper semi-continuous function of $(\zeta, x, t, y, s)$ and $\ell(\zeta)$ is a lower semi-continuous function. $\operatorname{graph} \mathcal B(\zeta)$ is a continuous image of $\operatorname{graph} \mathcal A(\zeta)$ and therefore also compact. \end{proof}
\begin{proposition} \label{pr:ball-of-gradients} With $\kappa = \kappa(\ensuremath{\varepsilon})$ fixed above, there exists a maximal relatively open convex set $\Xi \subset \Rd$ on which $\partial W$ is constant, $\zeta_0 \in \Rd$ and $\lambda > 0$ such that $\abs{\zeta_0} + 2\lambda < \kappa$ and \begin{align*} \mathcal B(\zeta) \cap \Xi \neq \emptyset \qquad \text{for all } \zeta \in B_{2\lambda}(\zeta_0). \end{align*} Moreover, $\operatorname{aff} \Xi \perp \operatorname{aff} \partial W(p)$ for all $p \in \Xi$.
In other words, for every $\zeta \in B_{2\lambda}(\zeta_0)$ there exists a point of maximum $(\hat x, \hat t, \hat y, \hat s) \in \mathcal A(\zeta)$ of $\Phi_\zeta$ such that \begin{align} \label{grad-in-Xi} \frac{\hat x - \hat y - \zeta}{\ensuremath{\varepsilon}} \in \Xi. \end{align} \end{proposition}
\begin{proof} Recall the decomposition of $\Rd$ from Proposition~\ref{pr:feature-decomposition} into relatively open convex sets $\Xi_i$, $i \in \mathcal N$. Moreover, since $\Xi_i$ is relatively open we can find an increasing sequence of compact sets $K_{i,j} \subset \Xi_i$ such that \begin{align*} \Xi_i = \bigcup_{j\in\ensuremath{\mathbb{N}}} K_{i,j}. \end{align*} Let us now define the sets \begin{align*} Z_{i,j} := \set{\zeta \in \cl B_\kappa(0) : K_{i,j} \cap \mathcal B(\zeta) \neq \emptyset}. \end{align*} We observe that $Z_{i,j}$ are compact due to Proposition~\ref{pr:max-graph}. Since \begin{align*} \cl B_\kappa(0) = \bigcup_{i\in\mathcal N} \bigcup_{j\in\ensuremath{\mathbb{N}}} Z_{i,j}, \end{align*} the Baire category theorem implies that there exists $i_0 \in \mathcal N$, $j_0 \in \ensuremath{\mathbb{N}}$ such that $\operatorname{int} Z_{i_0, j_0} \neq \emptyset$. In particular, we can find $\zeta_0$ and $\lambda > 0$ with $B_{2\lambda}(\zeta_0) \subset Z_{i_0,j_0}$, and we set $\Xi = \Xi_{i_0}$. Note that $\Xi$ is maximal by Proposition~\ref{pr:feature-decomposition}. \end{proof}
\subsection{Flatness at a contact point}
We will now use the information about the behavior of $u$ and $v$ at the point of maximum to show that there is enough space to construct faceted test functions. We shall use the Constancy lemma from \cite{GG98ARMA}.
\begin{lemma}[Constancy lemma] \label{le:constancy} Let $1\leq k < N$, $K \subset \ensuremath{\mathbb{R}}^N$ be compact and $G \subset \ensuremath{\mathbb{R}}^k$ be a bounded domain. Denote by $P: \ensuremath{\mathbb{R}}^N \to \ensuremath{\mathbb{R}}^k$ the natural projection $w \mapsto (w_1, \ldots, w_k)$. Assume that $h$ is an upper semi-continuous function and $\phi\in C^2(\ensuremath{\mathbb{R}}^k)$, and define for $w \in K$ and $z \in G$ \begin{align*} h_z(w) &:= h(w) - \phi(Pw - z),\\ H(z) &:= \max_K h_z. \end{align*} If for all $z\in G$ there exists $w \in K$ such that $h_z(w) = H(z)$ and $(\nabla \phi)(P w - z) = 0$ then $H$ is constant on $G$. \end{lemma}
In what follows, we will decompose $\ensuremath{\mathbb{R}}^n$ into two orthogonal subspaces $V$ and $U$, as in Section~\ref{sec:slicing of W}, of dimensions $k = \dim V$ and $n - k = \dim U$. Therefore we will use ${\mathcal T}$, ${\mathcal T}_U$, ${\mathcal T}_V$, and the decomposition $x = {\mathcal T}(x', x'')$ as introduced in \eqref{rotation}, \eqref{rotation'} and \eqref{rotationUV}. \begin{lemma} \label{le:max-constancy} Suppose that there exist $p_0, \zeta_0 \in \Rd$, a subspace $U \subset \Rd$ and $\lambda > 0$ such that $\abs{\zeta_0} + 2\lambda < \kappa$ and for all $\zeta \in B_{2\lambda}(\zeta_0)$ we have \begin{align*} \mathcal B(\zeta) \cap (p_0 + U) \neq \emptyset. \end{align*} Then \begin{align*} \ell(\zeta) - p_0 \cdot \zeta = const \quad \text{for } \zeta \in (\zeta_0 + V) \cap B_{2\lambda}(\zeta_0), \end{align*} where $V := U^\perp$. \end{lemma}
\begin{proof} We apply the constancy lemma, Lemma~\ref{le:constancy}. Set $k = \dim V$ and $N = 2 (n+1)$. We will denote $w = (\xi', \xi'', t, y, s)$ for $\xi, y \in \Rd$, $t, s \in \ensuremath{\mathbb{R}}$, so that the natural projection $P:\ensuremath{\mathbb{R}}^N \to \ensuremath{\mathbb{R}}^k$ is given as $Pw = \xi'$ (for the notation $'$ and $''$ see \eqref{rotation'}). Additionally, let us set \begin{align*} K = \set{(\xi', \xi'', t, y, s) : (\xi + y,t) \in \cl Q, \ (y,s) \in \cl Q} \quad \text{and} \quad G = B^k_{2\lambda}(0). \end{align*} Finally, let us define the functions \begin{align*} h(w) &= u(\xi + y, t) - v(y, s) - p_0' \cdot \xi' - \frac{\abs{\xi'' - \zeta_0''}^2}{2\ensuremath{\varepsilon}} - S(t,s), &&w \in \ensuremath{\mathbb{R}}^N,\\ \phi(\eta) &= \frac{\abs{\eta - \zeta_0'}^2}{2\ensuremath{\varepsilon}} - p_0' \cdot \eta, \qquad &&\eta \in \ensuremath{\mathbb{R}}^k. \end{align*}
The Pythagorean theorem $|\xi' - \zeta_0' -z|^2 + |\xi'' - \zeta_0''|^2 = |\xi - \zeta_0 - {\mathcal T}_V z|^2$, due to the fact that ${\mathcal T}$ is a rotation, yields \begin{align*} h_z(w) := h(w) - \phi(\xi' - z) = \Phi_{\zeta_0 + {\mathcal T}_V z} (\xi + y, y) - p_0' \cdot z. \end{align*} We know, by assumption, that for every $\zeta = \zeta_0 + {\mathcal T}_V z$, $z \in G$, there exists a point of maximum $(x,t,y,s) \in \mathcal A(\zeta)$ of $\Phi_{\zeta}$ such that $\frac{x - y -\zeta}{\ensuremath{\varepsilon}} \in p_0 + U$. This yields $\frac{x' - y' - z -\zeta_0'}{\ensuremath{\varepsilon}} = p_0'$. In particular, \begin{align*} (\nabla \phi)(x' - y' - z) = 0. \end{align*} Thus, by Lemma~\ref{le:constancy}, we infer that \begin{align*} H(z) = \max_K h_z = \ell(\zeta_0 + {\mathcal T}_V z) - p_0' \cdot z \end{align*} is constant for $z \in G$, which is what we wanted to prove since $p_0 \cdot \zeta = p_0' \cdot z + p_0 \cdot \zeta_0$. \end{proof}
The previous lemma has the following important corollary.
\begin{corollary} \label{co:contact-ordering} Suppose that we have $p_0$, $\zeta_0$, $\lambda$, $U$ and $V$ as in Lemma~\ref{le:max-constancy}. Define \begin{align*} \ensuremath{\theta}(x,t,y,s) := u(x,t) - v(y,s) - \frac{\abs{x'' - y''- \zeta_0''}^2}{2\ensuremath{\varepsilon}} - p_0' \cdot (x' - y'-\zeta_0') - S(t,s). \end{align*} Then for any $(\hat x,\hat t,\hat y,\hat s) \in \mathcal A(\zeta_0)$ such that $\frac{\hat x' - \hat y' - \zeta_0'}{\ensuremath{\varepsilon}} = p_0'$ we have \begin{align*} \ensuremath{\theta}(x,t,y,s) \leq \ensuremath{\theta}(\hat x, \hat t, \hat y,\hat s) \quad \text{for } (x,t),(y,s) \in \cl Q, \ \abs{x' - y' - (\hat x' - \hat y')} \leq \lambda. \end{align*} \end{corollary}
\begin{proof} For the sake of clarity, we will drop $t,s, \hat t$ and $\hat s$ from the following formulas. Let us fix $x, y, \hat x, \hat y$ that satisfy the assumptions and set \begin{align} \label{choice-of-zeta} \zeta = \zeta_0 + {\mathcal T}_V(x' - y' - (\hat x' - \hat y')). \end{align}
Since $|\zeta - \zeta_0| \leq \lambda$ and $\zeta \in \zeta_0 + V$, Lemma~\ref{le:max-constancy} implies $\ell(\zeta) - p_0 \cdot \zeta = \ell(\zeta_0) - p_0 \cdot \zeta_0$ and we infer from the definition of $\ell$ \begin{align} \label{Phi-estim} \Phi_\zeta(x,y) \leq \ell(\zeta) = \ell(\zeta_0) + p_0 \cdot (\zeta - \zeta_0) = \Phi_{\zeta_0} (\hat x, \hat y) + p_0 \cdot (\zeta - \zeta_0). \end{align} Note also that $p_0 \cdot (\zeta - \zeta_0) = p_0' \cdot (\zeta' - \zeta_0')$. We express $\ensuremath{\theta}$ in terms of $\Phi_\zeta$ and use \eqref{Phi-estim} to obtain \begin{align*} \ensuremath{\theta}(x,y) &= \Phi_\zeta(x,y) + \frac{\abs{x'-y'-\zeta'}^2}{2\ensuremath{\varepsilon}} - p_0' \cdot (x' - y' - \zeta_0')\\ &\leq \Phi_{\zeta_0}(\hat x,\hat y) + \frac{\abs{x'-y'-\zeta'}^2}{2\ensuremath{\varepsilon}} + p_0' \cdot (\zeta' - \zeta_0' - (x' - y' - \zeta_0'))\\ &= \ensuremath{\theta}(\hat x, \hat y) + \frac{\abs{x'-y'-\zeta'}^2}{2\ensuremath{\varepsilon}} -\frac{\abs{\hat x'-\hat y'-\zeta_0'}^2}{2\ensuremath{\varepsilon}}\\ &\quad + p_0' \cdot (-(x' - y' - \zeta') + (\hat x' - \hat y' - \zeta_0'))\\ &= \ensuremath{\theta}(\hat x, \hat y) + \frac{\abs w^2}{2\ensuremath{\varepsilon}} - \frac{\abs z^2}{2\ensuremath{\varepsilon}} + p_0' \cdot (-w + z), \end{align*} where we set $w = x'-y'-\zeta'$ and $z = \hat x' - \hat y' -\zeta_0'$. We now just have to show that the extra terms cancel out. First, we see that $w - z = 0$ by \eqref{choice-of-zeta}. Furthermore, by the choice of $\hat x,\hat y$ we have $z/\ensuremath{\varepsilon} = p_0'$. Therefore we have, using $\abs{w -z}^2 = \abs w^2 + \abs z^2 - 2w \cdot z$, \begin{align*} \frac{\abs{w}^2}{2\ensuremath{\varepsilon}} - \frac{\abs{z}^2}{2\ensuremath{\varepsilon}} = \frac{\abs{w - z}^2}{2\ensuremath{\varepsilon}} - \frac{\abs{z}^2}{\ensuremath{\varepsilon}} + \frac{w \cdot z}{\ensuremath{\varepsilon}} = 0 -p_0' \cdot z + p_0' \cdot w. 
\end{align*} Therefore $\ensuremath{\theta}(x,t,y,s) \leq \ensuremath{\theta}(\hat x, \hat t, \hat y, \hat s)$, which is what we wanted to prove. \end{proof}
\subsection{Construction of faceted test functions}
We shall use Corollary~\ref{co:contact-ordering} to construct test functions for $u$ and $v$ following the idea from \cite{GGP13AMSA}.
Let us fix $\Xi, \zeta_0 \in \Rd$ and $\lambda>0$ to be a triplet provided by Proposition~\ref{pr:ball-of-gradients}. Then we fix a point of maximum $(\hat x, \hat t, \hat y, \hat s) \in \mathcal A(\zeta_0)$ that satisfies \eqref{grad-in-Xi} with $\zeta = \zeta_0$. We set $p_0 := \frac{\hat x - \hat y - \zeta_0}\ensuremath{\varepsilon} \in \Xi$, $U := \operatorname{span}(\Xi - p_0)$ and $V \subset \Rd$ be the subspace parallel to $\operatorname{aff} \partial W(p_0)$. We have $U = V^\perp$ by Proposition~\ref{pr:feature-decomposition}. It is easy to check that $p_0$, $\zeta_0$, $\lambda$, $U$, $V$ and $(\hat x, \hat t, \hat y, \hat s)$ satisfy the hypothesis of Corollary~\ref{co:contact-ordering}. Let us also set $k = \dim V$ as usual.
Depending on the value $k$ and $p_0$, we split the situation into three cases:
\begin{itemize} \item[Case I]: $k = 0$; \item[Case II]: $k = n$, $p_0 = 0$ and $F$ is of curvature-free type at $p_0 = 0$; \item[Case III]: none of the above. \end{itemize}
We will deal with each case individually in the following three subsections and show that they all lead to a contradiction. Therefore \eqref{m_0} cannot occur and the comparison principle holds.
\subsubsection{Case I} We have $k = 0$. In this case we use the off-facet test in Definition~\ref{def:visc-solution}(ii). Corollary~\ref{co:contact-ordering} in this case reduces to \begin{align} \label{off-facet max order}
u(x, t) - v(y, s) - \frac{|x - y - \zeta_0|^2}{2\ensuremath{\varepsilon}} - S(t,s)
\leq u(\hat x, \hat t) - v(\hat y, \hat s) - \frac{|\hat x - \hat y - \zeta_0|^2}{2\ensuremath{\varepsilon}} - S(\hat t,\hat s) \end{align} for all $(x,t), (y,s) \in \cl Q$. We define the test functions \begin{align*}
\varphi_u(x,t) &:= \frac{|x - \hat y - \zeta_0|^2}{2\ensuremath{\varepsilon}} + S(t,\hat s),\\
\varphi_v(x,t) &:= - \frac{|\hat x - x - \zeta_0|^2}{2\ensuremath{\varepsilon}} - S(\hat t, t). \end{align*} From \eqref{off-facet max order} we deduce that $u - \varphi_u$ has a global maximum at $(\hat x, \hat t)$ and $v - \varphi_v$ has a global minimum at $(\hat y, \hat s)$. Therefore we must have from the definition of viscosity solutions \begin{align*} (\varphi_u)_t(\hat x, \hat t) + F(\nabla \varphi_u(\hat x, \hat t), 0) &\leq 0,\\ (\varphi_v)_t(\hat y, \hat s) + F(\nabla \varphi_v(\hat y, \hat s), 0) &\geq 0. \end{align*} Since $\nabla \varphi_u(\hat x, \hat t) = \nabla \varphi_v(\hat y, \hat s)$, subtracting the second inequality from the first and evaluating the time derivatives yields \begin{align*} 0 \geq (\varphi_u)_t(\hat x, \hat t) - (\varphi_v)_t(\hat y, \hat s) = \frac{\ensuremath{\varepsilon}}{(T - \hat t)^2} + \frac{\ensuremath{\varepsilon}}{(T - \hat s)^2} > 0, \end{align*} a contradiction.
\subsubsection{Case II}
Now $k = n$, or, in other words, $V = \operatorname{aff} \partial W(p_0) = \Rd$. Since we now assume that $p_0 = 0$ and that $F$ is of curvature-free type at $p_0 = 0$, we use Definition~\ref{def:level-set-test}. Then this case is just a minor modification of Case I. Indeed, Corollary~\ref{co:contact-ordering} now reads \begin{align} \label{curvature-free order} u(x, t) - v(y, s) - S(t,s) \leq u(\hat x, \hat t) - v(\hat y, \hat s) - S(\hat t,\hat s) \end{align}
for all $(x, t), (y, s) \in \cl Q$, $|x - y - (\hat x - \hat y)| \leq \lambda$. Thus if we define the test functions \begin{align*} \varphi_u(x,t) := S(t, \hat s), \quad \text{and} \quad \varphi_v(x,t):= - S(\hat t, t), \end{align*} we see from \eqref{curvature-free order} that $u - \varphi_u$ has a local maximum at $(\hat x, \hat t)$, and $v - \varphi_v$ has a local minimum at $(\hat y, \hat s)$. The definition of viscosity solution for the curvature-free type case yields \begin{align*} (\varphi_u)_t(\hat x, \hat t) + F(0, 0) &\leq 0,\\ (\varphi_v)_t(\hat y, \hat s) + F(0, 0) &\geq 0. \end{align*} The contradiction then follows as in Case I.
\subsubsection{Case III}
\begin{figure}
\caption{Settings for the facet construction. The dot-dashed lines represent the boundaries of the constructed pairs $(U_-,U_+)$ and $(V_-, V_+)$.}
\label{fig:facet-construction}
\end{figure}
This is the most involved situation. Since $W$ is positively one-homogeneous, we have $p_0 \perp V$ by Lemma~\ref{le:aff Xi origin} and the orthogonality from Proposition~\ref{pr:feature-decomposition}, and therefore $p_0' = 0$ in what follows. Nevertheless, we keep the terms with $p_0'$ below for completeness, they are necessary when handling a case of general polyhedral $W$. We first reduce the problem to the subspace $V$ by introducing the functions \begin{align*} \begin{aligned} \hat u(w) &:= u({\mathcal T}_V w + \hat x, \hat t) - p_0' \cdot w - u(\hat x, \hat t),\\ \hat v(w) &:= v({\mathcal T}_V w + \hat y, \hat s) - p_0' \cdot w - v(\hat y, \hat s), \end{aligned} &&& w \in \ensuremath{\mathbb{R}}^k. \end{align*} Then we build facets on $\ensuremath{\mathbb{R}}^k$ using the closed sets \begin{align*} \hat U := \set{w \in \ensuremath{\mathbb{R}}^k : \hat u(w) \geq 0}, &&& \hat V := \set{w \in \ensuremath{\mathbb{R}}^k : \hat v(w) \leq 0}. \end{align*} as in \cite{GGP13AMSA}; see Figure~\ref{fig:facet-construction}. Note that these sets were denoted there as $U$ and $V$. This allows us to create test functions for both subsolution and supersolution and arrive at a contradiction as before.
Let us review the construction. For convenience we set \begin{align*}
\xi_u(x'', t) &:= \frac{|x'' - \hat y'' - \zeta_0''|^2}{2\ensuremath{\varepsilon}}
- \frac{|\hat x'' - \hat y'' - \zeta_0''|^2}{2\ensuremath{\varepsilon}} + S(t, \hat s) - S(\hat t, \hat s),\\
\xi_v(y'', s) &:= \frac{|\hat x'' - \hat y'' - \zeta_0''|^2}{2\ensuremath{\varepsilon}}
- \frac{|\hat x'' - y'' - \zeta_0''|^2}{2\ensuremath{\varepsilon}} + S(\hat t, \hat s) - S(\hat t, s). \end{align*} Then \begin{align*} u(x,t) - u(\hat x, \hat t) - p_0' \cdot (x' - \hat x') - \xi_u(x'', t) &\leq 0, &&\text{for } (x,t) \in \cl Q, x' - \hat x' \in \nbd\lambda(\hat V),\\ v(y,s) - v(\hat y, \hat s) - p_0' \cdot (y' - \hat y') - \xi_v(y'', s) &\geq 0, &&\text{for } (y,s) \in \cl Q, y' - \hat y' \in \nbd\lambda(\hat U). \end{align*}
We set $r := \lambda/10$ and introduce the closed sets \begin{align*} X := \cl{(\nbd{r}(\hat U))^c}, \qquad Y:= \cl{(\nbd{r}(\hat V))^c}. \end{align*}
Since $\operatorname{dist}(\hat U, X) = \operatorname{dist}(\hat V, Y) = r$, the semi-continuity of $u$ and $v$ imply that there exists $\delta > 0$ such that \begin{align*} u(x,t) - u(\hat x, \hat t) - p_0' \cdot (x' - \hat x') - \xi_u(x'', t) &< 0,
&&x' - \hat x' \in X, |x'' - \hat x''| \leq \delta, |t - \hat t| \leq \delta,\\ v(y,s) - v(\hat y, \hat s) - p_0' \cdot (y' - \hat y') - \xi_v(y'', s) &> 0,
&&y' - \hat y' \in Y, |y'' - \hat y''| \leq \delta, |s - \hat s| \leq \delta. \end{align*} Note that if $X$ is unbounded, then $u(x, t) = c_u < u(\hat x, \hat t)$ for all $x \notin K$ and therefore we only need to use semi-continuity of $u$ on a compact subset of $X$ to get the $\delta$ above. We can similarly handle the case of unbounded $Y$.
Therefore as in \cite{GGP13AMSA}, we define the pairs \begin{align*} S_u := (\hat U^c, \hat U \setminus \nbd{\lambda-3r}(\hat V)), \qquad S_v := (\hat V^c, \hat V \setminus \nbd{\lambda-3r}(\hat U)). \end{align*} We note that both $S_u$ and $S_v$ are bounded pairs. Indeed, $S_u$ is bounded if $\hat U$ is bounded or $\hat U^c \cup \hat V$ is bounded. Since $u(\hat x, \hat t) - v(\hat y, \hat s) \geq m_0$, we deduce that $u(\hat x, \hat t) > v(\hat y, \hat s)$. Then if $\hat U$ is unbounded, we have $u(\hat x, \hat t) \leq c_u$ and therefore $v(\hat y, \hat s) < u(\hat x, \hat t) \leq c_u \leq c_v$, and so we conclude that $\hat U^c$ and $\hat V$ are both bounded. We can argue similarly for $S_v$.
Since both $S_u$ and $S_v$ are bounded pairs, Corollary~\ref{co:approximate pair sliced} (currently only for $k = 1, 2$) implies that there exist $p_0$-admissible pairs $(U_-, U_+)$ and $(V_-, V_+)$ such that \begin{align*} \nbd{2r}(S_u) &\preceq (U_-, U_+) \preceq \nbd{3r}(S_u),\\ \nbd{2r}(S_v) &\preceq (V_-, V_+) \preceq \nbd{3r}(S_v). \end{align*}
We have the following lemma.
\begin{lemma} \label{le:pair properties} The pair $(U_-, U_+)$ and the pair $(V_-, V_+)$ have the following properties: \begin{enumerate} \item The pairs are strictly ordered in the sense \begin{align} \label{pair order} \nbd r(U_-, U_+) \preceq (V_+, V_-) = - (V_-, V_+). \end{align} \item The origin $0$ lies in the interior of the intersection of the facets, that is, \begin{align*} \cl B_r(0) \subset U_-^c \cap U_+^c \cap V_-^c \cap V_+^c. \end{align*} \item The pairs are in general position with respect to $R_u$ and $R_v$, that is, \begin{align*} \nbd r(R_u) \preceq (U_-, U_+), \qquad \nbd r(R_v) \preceq (V_-, V_+), \end{align*} where \begin{align*} R_u &:= (X, X^c \setminus \nbd\lambda(\hat V)),\\ R_v &:= (Y, Y^c \setminus \nbd\lambda(\hat U)). \end{align*}
\end{enumerate} \end{lemma}
\begin{proof} See \cite[Lemma~4.6]{GGP13AMSA}. \end{proof}
Now we have all that we need to reach a contradiction. Let us define \begin{align*}
\tilde u(x' - \hat x') &:=\sup_{|x'' - \hat x''| \leq \delta}\sup_{|t - \hat t| \leq \delta} \bra{ u(x,t) - u(\hat x, \hat t) - p_0' \cdot (x' - \hat x') - \xi_u(x'', t)},\\
\tilde v(y' - \hat y') &:= \inf_{|y'' - \hat y''| \leq \delta}\inf_{|s - \hat s| \leq \delta} \bra{v(y,s) - v(\hat y, \hat s) - p_0' \cdot (y' - \hat y') - \xi_v(y'', s)}. \end{align*} By the construction above, we have $\tilde u < 0$ on $X$ and $\tilde u \leq 0$ on $X \cup \nbd\lambda(\hat V)$. Similarly, we have $\tilde v > 0$ on $Y$ and $\tilde v \geq 0$ on $Y \cup \nbd\lambda(\hat U)$. Lemma~\ref{le:pair properties}(c) implies that $X \supset \nbd{r}(U_-)$ and $X \cup \nbd\lambda( \hat V) \supset \nbd{r}(U_+^c)$. Therefore for any support function $\psi$ of pair $(U_-, U_+)$ we can by upper semi-continuity of $\tilde u$ find two constants $\alpha,
\beta > 0$ so that $\alpha \psi_+ - \beta \psi_- \geq \tilde u(\cdot - w)$ for all $|w| \leq r/2$ in a neighborhood of the facet $U_-^c \cap U_+^c$. An analogous reasoning applies to $\tilde v$.
Since the pairs $(U_-, U_+)$ and $(V_-, V_+)$ are $p_0$-admissible, there exist faceted functions $\psi_u, \psi_v \in \operatorname{\mathcal{D}}(\Lambda_{p_0})$ that are the support functions of the respective pairs. By applying the observation in the previous paragraph, we can assume that $\tilde u(\cdot - w) \leq \psi_u$ and $\psi_v \leq \tilde v(\cdot -
w)$ for all $|w| \leq r/2$ in a neighborhood of the respective facets $U_-^c \cap U_+^c$ and $V_-^c \cap V_+^c$. Therefore the functions $\varphi_u(x,t) := \psi_u(x') + p_0' \cdot (x' - \hat x') + \xi_u(x'', t)$, $\varphi_v(y,s) := \psi_v(y') + p_0' \cdot (y' - \hat y') + \xi_v(y'', s)$ are test functions for $u$ and $v$, respectively, in the sense of Definition~\ref{def:visc-solution}(i). Due to Lemma~\ref{le:pair properties}(a--b), the comparison principle for the crystalline curvature Proposition~\ref{pr:comparison Lambda} yields \begin{align} \label{essinf sup order} \operatorname*{ess\,inf}_{B_r(0)} \left[ \Lambda_{p_0}[\psi_u] \right] \leq \operatorname*{ess\,sup}_{B_r(0)} \left[ \Lambda_{p_0}[\psi_v] \right]. \end{align}
From the definition of viscosity solutions, namely Definition~\ref{def:visc-solution}(i), we infer \begin{align*} (\xi_u)_t(\hat t) + F\pth{p_0, \operatorname*{ess\,inf}_{B_r(0)} \left[ \Lambda_{p_0}[\psi_u] \right]} &\leq 0,\\ (\xi_v)_t(\hat s) + F\pth{p_0, \operatorname*{ess\,sup}_{B_r(0)} \left[ \Lambda_{p_0}[\psi_v] \right]} &\geq 0. \end{align*} Using \eqref{essinf sup order} and the ellipticity of $F$, we get after subtracting the above two inequalities \begin{align*} 0 < \frac \ensuremath{\varepsilon}{(T - \hat t)^2} + \frac \ensuremath{\varepsilon}{(T - \hat s)^2} + F\pth{p_0, \operatorname*{ess\,inf}_{B_r(0)} \left[ \Lambda_{p_0}[\psi_u] \right]} - F\pth{p_0, \operatorname*{ess\,sup}_{B_r(0)} \left[ \Lambda_{p_0}[\psi_v] \right]} \leq 0, \end{align*} a contradiction.
This finishes the proof of the comparison principle, Theorem~\ref{th:comparison principle}, since we have shown that \eqref{m_0} always yields a contradiction.
\section{Stability} \label{se:stability}
We will show the stability of \eqref{P} under the approximation by parabolic problems \begin{align} \label{regularized-problem} \begin{cases} u_t + F(\nabla u, \operatorname{tr}\bra{(\nabla_p^2 W_m)(\nabla u) \nabla^2 u}) = 0,\\ \at{u}{t=0} = u_0, \end{cases} \end{align} where $W_m$ approximate $W$ as in Section~\ref{sec:resolvent-approximation}. An example of such a sequence $\set{W_m}$ is given in Example~\ref{ex:wm-example}.
The main result of this section is the following stability theorem. We recall the definition of \emph{half-relaxed limits} (semi-continuous limits) \begin{align*}
\operatorname*{\star-limsup}_{m\to\infty} u_m(x,t) &:= \lim_{k \to \infty} \sup_{m > k} \sup_{|y - x| < \frac 1k} \sup_{|t-s| < \frac 1k} u_m(y,s),\\ \operatorname*{\star-liminf}_{m\to\infty} u_m(x,t) &:= -\operatorname*{\star-limsup}_{m\to\infty} \big(-u_m(x,t)\big). \end{align*}
\begin{theorem}[Stability] \label{th:stability quadratic} Let $u_m$ be a locally bounded sequence of viscosity solutions of \eqref{regularized-problem} (without the initial condition). Then $\operatorname*{\star-limsup}_{m\to\infty} u_m$ is a viscosity subsolution of \eqref{P} and $\operatorname*{\star-liminf}_{m\to\infty} u_m$ is a viscosity supersolution of \eqref{P}. \end{theorem}
\noindent \emph{Proof of stability.} We will only show the subsolution part; the proof of the supersolution part is analogous. Let $u = \operatorname*{\star-limsup}_m u_m$. Clearly $u$ is upper semi-continuous. We want to show that $u$ is a subsolution of \eqref{P}.
We have to verify (i)--(ii) of Definition~\ref{def:visc-solution} and (i-cf) of Definition~\ref{def:level-set-test} for curvature-free type $F$.
\subsection{Case (i)} \label{se:stability case (i)} Suppose that $\ensuremath{\varphi}$ is a stratified faceted test function at $(\hat x, \hat t)$ with gradient $\hat p$ and with $\bar \psi$, $f$ and $g$ as in Definition~\ref{def:strat-faceted-test-function}, and suppose that this test function is a test function for $u$ in the sense of Definition~\ref{def:visc-solution}(i), i.e., it satisfies \eqref{general-position} with some $\rho > 0$. Let $(A_-, A_+)$ be the pair supported by $\bar \psi$. We will set $V \subset \Rd$ to be the subspace parallel to $\operatorname{aff} \partial W(\hat p)$, $U=V^\perp$ and $k = \dim V$. We recall that we have the rotated coordinate system $x = {\mathcal T} (x', x'')$ with ${\mathcal T} = {\mathcal T}_{\hat p}$ introduced in \eqref{rotation}.
Let us define the function $\bar u: \ensuremath{\mathbb{R}}^k \to \ensuremath{\mathbb{R}}$ \begin{align*} \bar u(x') := \sup_{\substack{\abs{x''} \leq \rho\\\abs{t - \hat t} \leq \rho}} u(\hat x + x, t) &- u(\hat x, \hat t) - f(x'') - g(t) + g(\hat t) - \hat p \cdot x \end{align*} and the closed subsets of $\ensuremath{\mathbb{R}}^k$ \begin{align*} Y &:= \set{x' \in \ensuremath{\mathbb{R}}^k : \bar u(x') \geq 0}, \qquad\\ Z &:= \set{x'\in O: \bar \psi(x') \leq 0} = A_+^c \cap O, \end{align*} where $O = \nbd\rho(\facet A)$, see Figure~\ref{fig:stability-geometry}. Note that with this definition of $\bar u$, the condition \eqref{general-position} is equivalent to \begin{align} \label{gp-bar} \bar u(y') \leq \bar\psi(x') \qquad \text{for all $x' \in O$, $\abs{y' - x'} \leq \rho$.} \end{align}
\begin{figure}
\caption{Situation at the contact point of $\bar u$ and $\bar \psi$. The thick line denotes the boundary of $N$.}
\label{fig:stability-geometry}
\end{figure}
We immediately have the following ``geometrical'' lemma. Intuitively, since $\bar u$ and $\bar\psi$ are ordered even when shifted by a small distance, we must have that $\bar\psi$ is nonnegative in a neighborhood of the set $Y$ where $\bar u$ is nonnegative, and, analogously, $\bar u$ is nonpositive in a neighborhood of the set $Z$ where $\bar \psi$ is nonpositive.
\begin{lemma}[{cf. \cite[Lemma~5.6]{GGP13JMPA}}] \label{le:u-psi-shift} Suppose that $u$ and $\ensuremath{\varphi}$ satisfy \eqref{general-position} for some $\rho > 0$, $(\hat x, \hat t) \in \Rd$. Then \begin{align*} \bar u(x') \leq 0 \qquad \text{for all $x' \in \nbd\rho(Z)$,} \end{align*} or, more explicitly, \begin{align*} u(x,t) \leq f\pth{x''- \hat x''} + g(t) - g(\hat t) + u(\hat x,\hat t) + \hat p \cdot (x - \hat x) \end{align*} for $x' - \hat x' \in \nbd\rho(Z)$, $\abs{x'' - \hat x''} \leq \rho$, $\abs{t - \hat t} \leq \rho$. Furthermore, we have \begin{align*} \bar \psi(x') \geq 0 \qquad \text{for } x' \in \nbd\rho(Y) \cap O. \end{align*} \end{lemma}
\begin{proof} Let us prove the first statement. If $x' \in \nbd\rho(Z)$ then there exists $z' \in Z \subset O$ such that $\abs{x' - z'} \leq \rho$. Thus \eqref{gp-bar} and the definition of $Z$ imply \begin{align*} \bar u(x') \leq \bar\psi(z') \leq 0, \end{align*} and that is what we wanted to prove.
Similarly, if we suppose that $x' \in \nbd\rho(Y) \cap O$, there exists $y' \in Y$ with $\abs{x' - y'} \leq \rho$. Then \eqref{gp-bar} and the definition of $Y$ imply \begin{align*} \bar\psi(x') \geq \bar u(y') \geq 0. \end{align*} The lemma is proved. \end{proof}
We obtain the following corollary.
\begin{corollary} Suppose that \eqref{gp-bar} holds with $\rho > 0$. Then there exists $\delta$, $0 < \delta \leq \rho/5$, such that $\nbd{4\delta}(N) \subset O$ where \begin{align*} N := \nbd\delta(Z) \cap Y \cap O, \end{align*} and moreover \begin{align*} \cl B^k_\delta(0) \subset A_-^c \cap A_+^c \end{align*} and \begin{align} \label{u-psi-scaled-comp} \bar u(x') \leq \alpha \bar\psi(x' + z') \qquad \text{for all $\alpha > 0$, $x' \in \nbd{3\delta}(N)$, $\abs{z'} \leq \delta$,} \end{align} with strict inequality for $x' \notin N$. \end{corollary}
\begin{proof} By definition, $Z \subset A_+^c$. Moreover, the second result in Lemma~\ref{le:u-psi-shift} is equivalent to \begin{align*} \nbd\rho(Y) \cap O \subset A_-^c. \end{align*} We can therefore estimate \begin{align} \label{N-bound} N = \nbd\delta(Z) \cap Y \cap O \subset \nbd\delta(A_+^c) \cap A_-^c \subset \nbd\delta(\partial A_+) \cup (A_+^c \cap A_-^c). \end{align} Since $A_+$ is open, we have $\partial A_+ \subset A_+^c$. But since $A_- \cap A_+ = \emptyset$ and $A_-$ is also open, we must also have $\partial A_+ \subset A_-^c$. Therefore $\partial A_+$ is in the facet, and by assumption on $O$ we have \begin{align} \label{boundary-in-facet} \partial A_+ \subset A_-^c \cap A_+^c \subset O. \end{align} Since $O$ is open and $A_-^c \cap A_+^c$ is compact, and $0\in \operatorname{int} A_-^c \cap A_+^c$, for $\delta > 0$ small enough we will have \begin{align*} \nbd{5\delta}(A_-^c \cap A_+^c) \subset O \qquad \text{and} \qquad \cl B_\delta^k(0) \subset A_-^c \cap A_+^c. \end{align*} Using \eqref{boundary-in-facet} in \eqref{N-bound}, we obtain \begin{align} \label{N-4delta} \nbd{4\delta}(N) \subset \nbd{5\delta}(A_-^c \cap A_+^c) \subset O. \end{align} Let us now fix $\alpha > 0$ and $\abs{z'} \leq \delta$. Using the definition and \eqref{N-4delta}, we can estimate \begin{align*} \nbd{3\delta}(N) \subset \nbd{4\delta}(Z) \cap \nbd{3\delta}(Y) \cap \nbd{-\delta}(O). \end{align*} In particular, if $x' \in \nbd{3\delta}(N)$ then $x' + z' \in \nbd\rho(Y) \cap O$ and $x' \in \nbd\rho(Z)$. Hence Lemma~\ref{le:u-psi-shift} applies, yielding \begin{align*} \bar u(x') \leq 0 \leq \bar\psi(x' + z'), \end{align*} and therefore \eqref{u-psi-scaled-comp} follows. If $x' \in \nbd{3\delta}(N) \setminus N$, then we must have $x' + z' \in O$ and at least one of the following: \begin{itemize} \item $x' \notin \nbd\delta(Z)$: Thus $x' + z' \in O \setminus Z$ and therefore $\bar\psi(x'+z') > 0$. \item $x' \notin Y$: Thus $\bar u(x') < 0$. 
\end{itemize} We deduce the strict ordering in \eqref{u-psi-scaled-comp} for $x' \notin N$. \end{proof}
The previous corollary has the following important direct consequence.
\begin{lemma}[cf. {\cite[Lemma 5.4]{GGP13JMPA}}] \label{le:strict_order} Suppose that \eqref{gp-bar} is satisfied for some $\rho > 0$. By adding the term $\abs{y}^2$ to $f(y)$ and $\abs{t - \hat t}^2$ to $g(t)$ if necessary, there exists $0 < \delta < \rho/5$ such that for all $\abs{z'} \leq \delta$ and $\alpha > 0$ \begin{align*} u(x,t) - \alpha \bar \psi(x' + z' - \hat x') - f(x'' - \hat x'') - g(t) - \hat p \cdot (x - \hat x) \leq u(\hat x, \hat t) - g(\hat t) \end{align*} whenever \begin{align*} x' - \hat x' \in \nbd{3\delta}(N),\ \abs{x'' - \hat x''} \leq \rho,\ \abs{t - \hat t} \leq \rho, \end{align*} with a \emph{strict} inequality outside of $\set{(x,t): x' - \hat x' \in N,\ x'' = \hat x'',\ t = \hat t}$. \end{lemma}
We shall now proceed with the proof of stability. By Proposition~\ref{pr:curvature-as-min-section}, for $L > 0$ sufficiently large and $\Gamma' = \ensuremath{\mathbb{R}}^k / L \ensuremath{\mathbb{Z}}^k$, we can find a function $\xi \in {\rm Lip}(\Gamma')$ such that $\xi(x') = \bar\psi(x')$ on a neighborhood of the facet $\facet A$, and such that $\xi \in \operatorname{\mathcal{D}}(\partial E^{\rm sl}_{\hat p}(\cdot; \Gamma'))$ and $\Lambda_{\hat p}(\bar \psi) = -\partial^0 E^{\rm sl}_{\hat p}(\xi; \Gamma')$ a.e. on $\facet A$. By making the set $O$ smaller if necessary, we can assume that $\xi = \bar\psi$ on $O$. Let $\delta > 0$ be as in Lemma~\ref{le:strict_order}.
Fix $\alpha > 0$. Since $\nabla f(0) = 0$, we can find $\theta_\alpha > 0$ and $f_\alpha \in {\rm Lip}(\Gamma'')$, $\Gamma'' = \ensuremath{\mathbb{R}}^{n-k}/L \ensuremath{\mathbb{Z}}^{n-k}$, such that $f_\alpha(x'') = f(x'')$ for $\abs{x''} \leq 2\theta$ with $\norm{\nabla f_\alpha}_\infty \leq \alpha \norm{\nabla\xi}_\infty$. Let us define the function \begin{align} \label{def-psi} \psi(x) = \psi_\alpha(x) = \alpha \xi\pth{x'} + f_\alpha\pth{x''}, \qquad x \in \Gamma = {\mathcal T}(\Gamma' \times \Gamma''). \end{align} We see that $\psi \in {\rm Lip}(\Gamma)$ and therefore by Lemma~\ref{le:subdiff-slicing} $\psi \in \operatorname{\mathcal{D}}(\partial E_{\hat p}'(\cdot; \Gamma))$. We can estimate \begin{align} \label{lip-bound-psi-alpha} \norm{\nabla \psi_\alpha}_\infty \leq 2\alpha \norm{\nabla \xi}_\infty. \end{align} In particular, if $\alpha$ is sufficiently small, $\partial E_{\hat p}(\psi_\alpha) = \partial E_{\hat p}'(\psi_\alpha)$ by Lemma~\ref{le:subdiff-homog-relation}.
From now on we fix one such $\alpha$ and we write $\psi = \psi_\alpha$. For given $a >0$ let $\psi_a$ and $\psi_{a,m}$ be the solutions of the resolvent problems in Proposition~\ref{pr:resolvent-problems} for energies $E_W = E_{\hat p}$ and $E_m = E_{W_m(\cdot - \hat p) - W(\hat p)}$, respectively, on $\Gamma$. Note that these energies satisfy all the assumptions of Proposition~\ref{pr:resolvent-problems}.
For given $a > 0$ and $\abs{z'} \leq \delta$, we define the set of maxima \begin{align*} A_{a, z'} := \operatorname*{arg\,max}_{(x,t) \in M_2} \bra{u(x + \hat x, t + \hat t) - \psi_a(x + {\mathcal T}_V z') - \hat p \cdot x - g(t + \hat t)} \end{align*} where $M_s := \set{(x,t): x' \in \nbd{s\delta}(N),\ \abs{x''} \leq s \theta,\ \abs t \leq s\delta}$. Note that $\psi(x + {\mathcal T}_V z') = \alpha \bar \psi(x' + z') + f(x'')$ for $(x,t) \in M_2$, $\abs{z'} \leq \delta$. Due to the uniform convergence $\psi_a \rightrightarrows \psi$ on $\Gamma$ from Proposition~\ref{pr:resolvent-problems}, and the strict ordering of Lemma~\ref{le:strict_order}, we have that there exists $a_0 > 0$, independent of $z'$, such that \begin{align} \label{Az-in-M1} \emptyset \neq A_{a,z'} \subset M_1 \qquad \text{for all $\abs{z'} \leq \delta$, $a < a_0$.} \end{align}
We now fix one such $a < a_0$ and find $\abs{z'} \leq \delta$ such that \begin{align} \label{choice-of-z'} \psi_a({\mathcal T}_V z') - \alpha\bar\psi(z') = \min_{\abs{w'} \leq \delta} \bra{\psi_a({\mathcal T}_V w') - \alpha\bar\psi(w')}. \end{align} As in \cite{GGP13JMPA}, $z'$ is chosen in such a way that Lemma~\ref{le:resolvent-order} below holds.
Due to the uniform convergence $\psi_{a,m} \rightrightarrows \psi_a$ as $m\to\infty$ there exists $(x_a, t_a) \in A_{a,z'}$ and a sequence $(x_{a,m}, t_{a,m})$ (for a subsequence of $m$) of local maxima of \begin{align*} (x,t) \mapsto u_m(x + \hat x,t + \hat t) - \psi_{a,m}(x + {\mathcal T}_V z') - \hat p \cdot x - g(t + \hat t) \end{align*} such that $(x_{a,m}, t_{a,m}) \to (x_a, t_a)$ as $m\to\infty$ (along a subsequence).
Recall the definitions of $h_a$ and $h_{a,m}$ from Proposition~\ref{pr:resolvent-problems}. Since $\psi_{a,m} \in C^{2,\ensuremath{\alpha}}(\Gamma)$ and $u_m$ is a viscosity subsolution of \eqref{regularized-problem}, we must have \begin{align*} &g'(t_{a,m} + \hat t) + F(\nabla \psi_{a,m}(x_{a,m} + {\mathcal T}_V z') + \hat p, h_{a,m}(x_{a,m} + {\mathcal T}_V z'))\\ & \begin{aligned} = g'(t_{a,m} + \hat t) + F\big(&\nabla \psi_{a,m}(x_{a,m} + {\mathcal T}_V z') + \hat p,\\ &\operatorname{tr}\bra{(\nabla_p^2 W_m)(\nabla \psi_{a,m} + \hat p) \nabla^2 \psi_{a,m}}(x_{a,m} + {\mathcal T}_V z')\big) \leq 0. \end{aligned} \end{align*}
By the uniform Lipschitz bound $\norm{\nabla \psi_{a,m}}_\infty \leq \norm{\nabla \psi}_\infty \leq C\alpha$ from \eqref{lip-bound-psi-alpha}, and $h_{a,m} \rightrightarrows h_a$ as $m \to\infty$, we can find a point $p_a \in \Rd$, $\abs{p_a - \hat p} \leq C\alpha$, and send $m\to\infty$ along a \emph{subsequence} to recover \begin{align} \label{subsol ha} g'(t_a + \hat t) &+ F(p_a, h_a(x_a + {\mathcal T}_V z')) \leq 0. \end{align} To estimate $h_a(x_a + {\mathcal T}_V z')$, we prove the following lemma.
\begin{lemma}[{cf. \cite[Lemma~5.5]{GGP13JMPA}}] \label{le:resolvent-order} We have \begin{align*} h_a(x_a + {\mathcal T}_V z') \leq h_a({\mathcal T}_V z') = \min_{\abs{w'}\leq \delta} h_a({\mathcal T}_V w'). \end{align*} \end{lemma}
\begin{proof} We chose $z'$ so that \eqref{choice-of-z'} holds and therefore the equality above holds as well. Therefore we only need to show the inequality. Recalling the definition of $h_a$, we have to show that \begin{align} \label{main-ineq-order} \psi_a(x_a + {\mathcal T}_Vz') - \psi(x_a + {\mathcal T}_V z') \leq \psi_a({\mathcal T}_Vz') - \psi({\mathcal T}_V z'). \end{align} We begin by expressing the second term on the left-hand side using \eqref{def-psi}, which yields \begin{align*} - \psi(x_a + {\mathcal T}_V z') = -\alpha \bar\psi\pth{x_a'+ z'} - f\pth{x_a''}. \end{align*} Since $(x_a, t_a) \in A_{a,z'} \subset M_1$ by \eqref{Az-in-M1}, clearly \begin{align} \label{point-in-neighb} x_a' + z' \in \nbd{3\delta}(Z) \cap \nbd{2\delta}(Y) \cap O \end{align} and therefore $\bar\psi\pth{x_a' + z'} \geq 0$ by Lemma~\ref{le:u-psi-shift}. This implies \begin{align} \label{main-ineq-1} - \psi(x_a + {\mathcal T}_V z') \leq -f\pth{x_a''}. \end{align} For the first term in \eqref{main-ineq-order}, we use the fact that $(x_a, t_a)$ is a point of maximum and therefore \begin{align*} u(x_a + \hat x, t_a + \hat t) - \psi_a(x_a + {\mathcal T}_V z') - \hat p \cdot x_a -g(t_a + \hat t) \geq u(\hat x, \hat t) - \psi_a({\mathcal T}_V z') - g(\hat t). \end{align*} After rearranging the terms, we obtain \begin{align*} \psi_a(x_a + {\mathcal T}_V z') \leq \bra{u(x_a + \hat x, t_a + \hat t) - u(\hat x, \hat t) - \hat p \cdot x_a - g(t_a + \hat t) + g(\hat t)} + \psi_a({\mathcal T}_V z'). \end{align*} We use \eqref{point-in-neighb} again and therefore the first inequality of Lemma~\ref{le:u-psi-shift} allows us to estimate the term in the bracket from above by $f(x_a'')$, yielding \begin{align} \label{main-ineq-2} \psi_a(x_a + {\mathcal T}_V z') \leq \psi_a({\mathcal T}_V z') + f\pth{x_a''}. \end{align} Finally, by the choice of $\ensuremath{\delta}$ we have $z' \in A_-^c \cap A_+^c$ and therefore \begin{align*} \psi({\mathcal T}_V z') = 0. 
\end{align*} Hence using this observation, and taking the sum of \eqref{main-ineq-1} and \eqref{main-ineq-2} we arrive at \eqref{main-ineq-order} and the proof of the lemma is finished. \end{proof}
Then, by the ellipticity of $F$ in \eqref{F ellipticity}, \begin{align*} g'(t_a + \hat t) + F(p_a, \min_{\abs{w'} \leq \delta} h_a({\mathcal T}_V w')) \leq g'(t_a + \hat t) + F(p_a, h_a(x_a + {\mathcal T}_V z')) \leq 0. \end{align*}
We send $a \to 0$ along a subsequence $a_l$ such that $\min h_{a_l} \to \liminf_{a\to 0} \min h_a$ and $p_a \to p_0$ as $l \to\infty$, for some $p_0 \in \ensuremath{\mathbb{R}}^n$, $\abs{p_0 - \hat p} \leq C\alpha$, to obtain \begin{align*} g'(\hat t) + F(p_0, \liminf_{a\to 0} \min_{\abs{w'} \leq \delta} h_a({\mathcal T}_V w')) \leq 0. \end{align*} Now we use Lemma~\ref{le:subdiff-slicing}, in particular the fact that $h_a(x) = \bar h_a(x')$ for some $\bar h_a = (\bar \psi_a - \bar \psi)/a \in {\rm Lip}(\Gamma')$ and that $\bar h_a \to -\partial^0 E^{\rm sl}_{\hat p}(\bar\psi; \Gamma')$ in $L^2(\Gamma')$. Thus, recalling Proposition~\ref{pr:curvature-as-min-section}, \begin{align*} \liminf_{a\to 0} \min_{\abs{w'} \leq \delta} h_a({\mathcal T}_V w') =\liminf_{a\to 0} \min_{\abs{w'} \leq \delta} \bar h_a(w') \leq \operatorname*{ess\,inf}_{B_\delta(0)} -\partial^0 E^{\rm sl}_{\hat p}(\xi; \Gamma') = \operatorname*{ess\,inf}_{B_\delta(0)} \Lambda_{\hat p}[\bar\psi], \end{align*} and ellipticity yields \begin{align*} g'(\hat t) + F(p_0, \operatorname*{ess\,inf}_{B_\delta(0)} \Lambda_{\hat p}[\bar\psi]) \leq 0. \end{align*} Since this holds for any small $\alpha > 0$, the continuity of $F(p,\xi)$ in $p$ and the estimate $\abs{p_0 - \hat p} \leq C \alpha$ yield \begin{align*} g'(\hat t) + F(\hat p, \operatorname*{ess\,inf}_{B_\delta(0)} \Lambda_{\hat p}[\bar\psi]) \leq 0, \end{align*} which we needed to prove.
\subsection{Case (ii)} In this case the test function is also a test function for \eqref{regularized-problem} and therefore the stability follows by the standard viscosity solution argument.
\subsection{Case (curvature-free type)}
In this part we will assume that $F$ is of curvature-free type at $p = 0$ in the sense of Definition~\ref{def:level-set-type}. We need to verify Definition~\ref{def:level-set-test}(i-cf).
Suppose therefore that $\phi(x,t) = g(t)$ on a neighborhood $U$ of a point $(\hat x, \hat t)$ and $u - \phi$ has a local maximum 0 at $(\hat x, \hat t)$. We want to show that $g_t (\hat t) + F(0, 0) \leq 0$.
This can be accomplished by perturbing the test function $\phi(x,t)$ and considering the function \begin{align*} \phi_{m,q}(x,t) = W^\star_{m; A, q}(x - \hat x) + g(t) +\abs{t - \hat t}^2, \end{align*} with $W^\star_{m;A,q}$ given by \cite[Lemma~5.8]{GGP13AMSA}, and with suitable parameters $A, q > 0$.
Let us recall that $W^\star_{m; A,q}$ is the Legendre-Fenchel transform of \begin{align*} W_{m;A,q}(p) := A\pth{W_m(p) + q \psi\pth{\frac pq} - W_m(0)}. \end{align*} Here $\psi : \Rd \to [0, \infty]$ is a lower semi-continuous nonnegative convex function such that $\psi \in C^\infty(B_1(0))$, $\psi(0) = 0$ and $\psi(p) = \infty$ for $\abs p \geq 1$. The semi-continuity then implies $\psi(p) \to \infty$ as $ \abs p \to 1^-$.
The following lemma was proved in \cite{GGP13AMSA}.
\begin{lemma}[c.f. {\cite[Lemma~5.6]{GGP13AMSA}}] \label{le:Wstar-props} For any $m, A, q$ positive, $W^\star_{m;A,q}$ is a strictly convex, nonnegative, $C^2$ function on $\Rd$ and \begin{align*} \abs{\nabla W^\star_{m;A,q}(x)} \leq q, \qquad 0 \leq \mathcal L_m(W^\star_{m;A,q})(x) \leq A^{-1} n, \qquad x \in \Rd, \end{align*} where $\mathcal L_m(u)(x) := \operatorname{tr} \bra{(\nabla^2 W_m) (\nabla u(x)) \nabla^2 u(x)}$ for $u \in C^2(\Rd)$. \end{lemma}
We will add the following modification of \cite[Lemma~5.8]{GGP13AMSA}.
\begin{lemma} \label{le:Wstar-growth} For every $\delta > 0$ there exists $A > 0$ such that for every $q > 0$ there exist $\ensuremath{\varepsilon} > 0$ and $m_0 > 0$ for which \begin{align*} W^\star_{m; A, q}(x) > \ensuremath{\varepsilon}, \qquad \text{for all } x, \abs x \geq \delta, \text{ and } m \geq m_0. \end{align*} \end{lemma}
\begin{proof} Let us define \begin{align} \label{def-mu} \mu := \sup_{\abs p = 1/2} \bra{W(p) + \psi(p)} \in (0, \infty) \end{align} and set for given $\ensuremath{\delta} > 0$ \begin{align*} A := \frac{\delta}{8\mu}. \end{align*} Now we fix $q > 0$ and set \begin{align*} \ensuremath{\varepsilon} := \frac{q\delta}{8}. \end{align*} By the locally uniform convergence of $W_m \to W$, we can find $m_0 > 0$ such that \begin{align} \label{def-m0} \sup_{\abs p = q/2} \abs{W_m(p) - W_m(0) - W(p)} \leq q \mu \qquad \text{for } m \geq m_0. \end{align} Now whenever $\abs{x} \geq \delta$ and $m \geq m_0$, we can take $p = \frac q2 \frac{x}{\abs{x}}$ and estimate, using \eqref{def-m0}, one-homogeneity of $W$, and \eqref{def-mu}, \begin{align*} W^\star_{m; A, q}(x) &\geq x \cdot p - W_{m; A, q} (p)\\ &= \frac{q}{2} \abs{x} - A \pth{W_m(p) + q \psi\pth{\frac pq} - W_m(0)}\\ &\geq \frac{q}{2} \abs{x} - A \pth{W(p) + q \psi\pth{\frac pq} + q\mu}\\ &= \frac{q}{2} \abs{x} - A \pth{qW\pth{\frac pq} + q \psi\pth{\frac pq} + q\mu}\\ &\geq \frac{q}{2} \abs{x} - 2 Aq \mu \geq \frac{q\delta}{4} > \ensuremath{\varepsilon}. \end{align*} \end{proof}
\begin{lemma} \label{le:Wstar-zero} For any $A, q$ positive \begin{align*} W^\star_{m; A, q}(0) \to 0 \qquad \text{as } m \to \infty. \end{align*} \end{lemma} \begin{proof} Since $W_m$ is a decreasing sequence converging to $W$ locally uniformly, we have $W_m \geq \min W = 0$ and $W_m(0) \to W(0) = 0$. As also $\psi \geq 0$, it follows that \begin{align*} 0 \leq W^\star_{m; A, q}(0) \leq A W_m(0) \to 0. \end{align*} \end{proof}
Let us now choose $\ensuremath{\delta} > 0$ small enough so that $Q := \cl B_\delta(\hat x) \times [\hat t - \delta, \hat t + \delta] \subset U$. We have $u - \phi \leq 0$ on $Q$ with equality at $(\hat x, \hat t)$. For this $\delta$ we fix $A > 0$ from Lemma~\ref{le:Wstar-growth}.
Now due to the same lemma for any $q > 0$ we also have $\ensuremath{\varepsilon}, m_0 > 0$ such that \begin{align*} u - \phi_{m, q} < -\ensuremath{\varepsilon} \qquad \text{on } \pth{\partial B_\delta(\hat x)} \times [\hat t - \delta, \hat t + \delta], \text{ for } m \geq m_0. \end{align*} Because $W^\star_{m;A,q} \geq 0$ by Lemma~\ref{le:Wstar-props}, we also have \begin{align*} u - \phi_{m,q} \leq - \delta^2 \qquad \text{on } x \in B_\delta(\hat x), \ t = \hat t \pm \delta, \text{ for all } m. \end{align*} Since $W^\star_{m; A, q}(0) \to 0$ as $m \to \infty$ by Lemma~\ref{le:Wstar-zero} and since $\phi_{m,q}$ is uniformly Lipschitz in $m$ by Lemma~\ref{le:Wstar-props}, we conclude that there must exist a subsequence $m_j$ and a sequence of points $(x_j, t_j)$ such that $u_{m_j} - \phi_{m_j, q}$ has a local maximum at $(x_j, t_j)$, $x_j \in B_\delta(\hat x)$, and, moreover, $t_j \to \hat t$.
Let us now choose $q_k = 1/k$. By the standard diagonalization argument we can find a subsequence $m_k$ such that $u_{m_k} - \phi_{m_k, q_k}$ has a local maximum at a point $(x_k, t_k)$, $x_k \in B_\delta(\hat x)$, and $\abs{t_k - \hat t} \leq 1/k$. Thus we introduce \begin{align*} p_k &:= \nabla \phi_{m_k, q_k}(x_k, t_k) = \nabla W^\star_{m_k; A, q_k}(x_k - \hat x), \text{ and}\\ \xi_k &:= \mathcal L_{m_k}\pth{\phi_{m_k, q_k}(\cdot, t_k)}(x_k) = \mathcal L_{m_k}\pth{W^\star_{m_k; A, q_k}}(x_k - \hat x). \end{align*} By the assumption that $u_{m_k}$ is a subsolution of \eqref{regularized-problem}, we have \begin{align*} g'(t_k) + 2 (t_k - \hat t) + F(p_k, \xi_k) \leq 0. \end{align*} Furthermore, from Lemma~\ref{le:Wstar-props} and the choice of $q_k$ we have the bounds \begin{align*} \abs{p_k} \leq 1/k, \qquad \abs{\xi_k} \leq A^{-1} n \qquad \text{for all } k, \end{align*} where $A$ is independent of $k$.
Since $F$ is of curvature-free type at $p = 0$, Definition~\ref{def:level-set-type}, we finally obtain \begin{align*} g'(\hat t) + F(0, 0) &= g'(\hat t) + \liminf_{p \to 0} \inf_{\abs{\xi} \leq A^{-1} n} F(p, \xi) \\ &\leq \liminf_{k \to \infty} \bra{g'(t_k) + 2(t_k - \hat t) + F(p_k, \xi_k)} \leq 0. \end{align*}
The supersolution case can be handled similarly with a test function \begin{align*} \phi_{m,q}(x,t) = -W^\star_{m; A, q}(-x + \hat x) + g(t) +\abs{t - \hat t}^2. \end{align*} This finishes the proof of stability for the curvature-free test function case.
The proof of Theorem~\ref{th:stability quadratic} is complete.
\subsection{Approximation by linear growth functionals}
In this section we prove the following approximation result:
\begin{theorem} \label{th:linear growth stability} Suppose that $F$ is of curvature-free type at $p_0 = 0$ and that $\set{W_m}_{m\in \ensuremath{\mathbb{N}}} \subset C(\ensuremath{\mathbb{R}}^n) \cap C^2(\ensuremath{\mathbb{R}}^n\setminus\set0)$ are positively one-homogeneous functions with bounded, strictly convex sub-level sets $\set{W_m \leq 1}$ such that $W_m \rightrightarrows W$ uniformly on $\cl B_1(0)$. Let $u_m$ be the unique viscosity solutions of \begin{align*} \left\{ \begin{aligned} u_t + F(\nabla u, \operatorname{div} \nabla_p W_m(\nabla u)) &= 0, && \text{in $\ensuremath{\mathbb{R}}^n \times (0,\infty)$,}\\ u(\cdot, 0) &= u_{0, m}, && \text{in $\ensuremath{\mathbb{R}}^n$}, \end{aligned} \right. \end{align*} where $u_{0, m} \in C(\ensuremath{\mathbb{R}}^n)$ are uniformly bounded. Then \begin{align*} \overline{u} &:= \operatorname*{\star-limsup}_{m\to\infty} u_m, & \underline{u} &:= \operatorname*{\star-liminf}_{m\to\infty} u_m \end{align*} are a viscosity subsolution and a viscosity supersolution of \eqref{P}, respectively. \end{theorem}
\begin{proof} We will follow the proof of Theorem~\ref{th:stability quadratic} with an additional approximation because the solutions $\psi_{a,m}$ of the resolvent problem for the linear growth energy $E_m$ might not be smooth. Let us set for $\delta > 0$ \begin{align*}
W_m^\delta(p) := (W_m * \eta_\delta)(p) + \delta |p|^2, \end{align*} where $\eta_\delta$ is the standard mollifier with radius $\delta$, and let $u_m^\delta$ be the unique viscosity solution of \begin{align} \label{quad approximation} \left\{ \begin{aligned} u_t + F(\nabla u, \operatorname{div} \nabla_p W_m^\delta(\nabla u)) &= 0, && \text{in $\ensuremath{\mathbb{R}}^n \times (0,\infty)$,}\\ u(\cdot, 0) &= u_{0, m}, && \text{in $\ensuremath{\mathbb{R}}^n$}. \end{aligned} \right. \end{align} From the standard theory we have that $u_m^\delta \rightrightarrows u_m$ as $\delta \to 0$ locally uniformly on $\ensuremath{\mathbb{R}}^n \times [0, \infty)$.
Suppose now that $\varphi$ is a stratified test function at $(\hat x, \hat t)$ with gradient $\hat p$, as in the proof in Section~\ref{se:stability case (i)}, Case (i) above, for a subsolution. We proceed as in that proof, but we use an additional perturbation of the test function by solving the resolvent problem for the energy $E_m^\delta := E_{W_m^\delta(\cdot - \hat p) - W(\hat p)}$: we define the unique solution $\psi_{a,m}^\delta \in L^2(\Gamma)$ of \begin{align*} \psi_{a,m}^\delta + a \partial E_m^\delta(\psi_{a,m}^\delta) \ni \psi, \end{align*} where $\psi$ and $\Gamma$ were given in \eqref{def-psi}. Recall that $\psi_{a,m}^\delta \in C^{2,\gamma}(\Gamma)$ by the elliptic regularity.
We can apply Proposition~\ref{pr:resolvent-problems} to $E_m^\delta$ and $E_m$ for fixed $m$ in the limit $\delta \to 0$. We in particular have $\psi_{a,m}^\delta \rightrightarrows \psi_{a,m}$ and $h_{a,m}^\delta \rightrightarrows h_{a,m}$ as $\delta \to 0$ for fixed $a, m$.
Due to the Mosco convergence of $E_m$ to $E_{\hat p}$ in Lemma~\ref{le:lingrowthapproximation}, we also can apply Proposition~\ref{pr:resolvent-problems} to $E_m$ and $E_{\hat p}$ in the limit $m \to \infty$.
We now fix $a$ and $z'$ as in \eqref{choice-of-z'}. Due to the uniform convergence $\psi_{a,m}^\delta \rightrightarrows \psi_{a,m}$ as $\delta \to 0$ and $\psi_{a,m} \rightrightarrows \psi_a$ as $m\to\infty$, there exists $(x_a, t_a) \in A_{a,z'}$ and a sequence $(x_{a,m}, t_{a,m})$ (for a subsequence of $m$) of local maxima of \begin{align*} (x,t) \mapsto u_m(x + \hat x,t + \hat t) - \psi_{a,m}(x + {\mathcal T}_V z') - \hat p \cdot x - g(t + \hat t) \end{align*} such that $(x_{a,m}, t_{a,m}) \to (x_a, t_a)$ as $m\to\infty$ (along a subsequence), and for each $m$ in this subsequence there exist a sequence $(x_{a,m}^\delta, t_{a,m}^\delta)$ (for a subsequence of $\delta$ as $\delta \to 0$) of local maxima of \begin{align*} (x,t) \mapsto u_m^\delta(x + \hat x,t + \hat t) - \psi^\delta_{a,m}(x + {\mathcal T}_V z') - \hat p \cdot x - g(t + \hat t), \end{align*} such that $(x_{a,m}^\delta, t_{a,m}^\delta) \to (x_{a,m}, t_{a,m})$ as $\delta \to 0$ (along a subsequence).
Since $u_m^\delta$ is a viscosity solution of \eqref{quad approximation}, we have \begin{align*} &g'(t^\delta_{a,m} + \hat t) + F(\nabla \psi^\delta_{a,m}(x^\delta_{a,m} + {\mathcal T}_V z') + \hat p, h^\delta_{a,m}(x^\delta_{a,m} + {\mathcal T}_V z'))\\ & \begin{aligned} = g'(t^\delta_{a,m} + \hat t) + F\Big(&\nabla \psi^\delta_{a,m}(x^\delta_{a,m} + {\mathcal T}_V z') + \hat p,\\ &\operatorname{tr}\bra{(\nabla_p^2 W^\delta_m)(\nabla \psi^\delta_{a,m} + \hat p) \nabla^2 \psi^\delta_{a,m}}(x^\delta_{a,m} + {\mathcal T}_V z')\Big) \leq 0. \end{aligned} \end{align*} Sending $\delta \to 0$ along a \emph{subsequence} and using the uniform convergence of $h^\delta_{a,m}
\rightrightarrows h_{a,m}$, we can find $p_{a,m}$ with $|p_{a,m} - \hat p| \leq C \alpha$ such that \begin{align} \label{ham subsol} &g'(t_{a,m} + \hat t) + F(p_{a,m}, h_{a,m}(x_{a,m} + {\mathcal T}_V z')) \leq 0. \end{align} Sending $m \to \infty$ along a \emph{subsequence}, we obtain $p_a$ and \eqref{subsol ha}. Then we finish the proof as in the proof of Theorem~\ref{th:stability quadratic} for Case (i).
Case (ii) as well as the curvature-free case are both straightforward. \end{proof}
\section{Well-posedness} \label{sec:well-posedness}
Once the stability with respect to the approximation of the energy density $W$ is established, we get existence of solutions as in \cite{GGP13JMPA}.
\begin{theorem}[Well-posedness] \label{th:well-posedness} Let $W: \ensuremath{\mathbb{R}}^n \to \ensuremath{\mathbb{R}}$ be a positively one-homogeneous convex polyhedral function such that the conclusion of Corollary~\ref{co:approximate pair sliced} holds for $1 \leq k \leq n-1$, and let $F$ be of curvature-free type at $p_0 = 0$. Then for given $u_0 \in C(\ensuremath{\mathbb{R}}^n)$ such that $u_0 \equiv c$ on $\ensuremath{\mathbb{R}}^n \setminus K$ for some compact $K \subset \ensuremath{\mathbb{R}}^n$ and $c \in \ensuremath{\mathbb{R}}$ there exists a unique viscosity solution of \begin{align} \label{limit problem} \left\{ \begin{aligned} u_t + F(\nabla u, \operatorname{div} \partial W(\nabla u)) &= 0, && \text{in $\ensuremath{\mathbb{R}}^n \times (0, \infty)$},\\ u(\cdot, 0) &= u_0, && \text{in $\ensuremath{\mathbb{R}}^n$.} \end{aligned} \right. \end{align} Moreover, if $u_0$ is Lipschitz, then \begin{align*} \norm{\nabla u(\cdot, t)}_\infty \leq \norm{\nabla u_0}_\infty, \qquad t \geq 0. \end{align*} \end{theorem}
\begin{proof} We follow a standard approximation argument using the stability result from Section~\ref{se:stability}. Let $W_m \in C(\ensuremath{\mathbb{R}}^n) \cap C^2(\ensuremath{\mathbb{R}}^n \setminus \set0)$ be a sequence of convex positively one-homogeneous functions with $\set{W_m \leq 1}$ strictly convex, such that $W_m \rightrightarrows W$ on $\cl B_1(0)$. We can find the unique viscosity solutions $u_m$ of the problem \begin{align*} \left\{ \begin{aligned} u_t + F(\nabla u, \operatorname{div} \nabla_p W_m(\nabla u)) &= 0, && \text{in $\ensuremath{\mathbb{R}}^n \times (0, \infty)$},\\ u(\cdot, 0) &= u_0, && \text{in $\ensuremath{\mathbb{R}}^n$.} \end{aligned} \right. \end{align*} We define the limits \begin{align*} \overline{u} &:= \operatorname*{\star-limsup}_{m\to\infty} u_m, & \underline{u} &:= \operatorname*{\star-liminf}_{m\to\infty} u_m. \end{align*} These limits are well-defined since $u_m$ are uniformly bounded. By the stability result Theorem~\ref{th:linear growth stability}, we see that $\overline u$ is a viscosity subsolution and $\underline u$ is a viscosity supersolution of \eqref{limit problem}.
We need to prove that $\overline u$ and $\underline u$ have the correct initial data. We can compare $u_m$ with translations of barriers \begin{align*} \psi^+_{m; a, b} &:= a (W_m^\circ(x - x_0) - bt)_+, & \psi^-_{m; a, b} &:= -a (-W_m^\circ(-x + x_0) + bt)_-, \end{align*} where $W_m^\circ$ is the polar of $W_m$. The comparison with such barriers shows that $\overline u(\cdot, 0) = \underline u(\cdot, 0) = u_0$, and for every $T > 0$ there exists a compact set $K_T \subset \ensuremath{\mathbb{R}}^n$ such that $\overline u = \underline u = c$ on $(\ensuremath{\mathbb{R}}^n \setminus K_T) \times [0, T]$.
Then the comparison principle Theorem~\ref{th:comparison principle} yields $\overline u \leq \underline u$ and thus $u := \overline u = \underline u$ is the unique solution of \eqref{limit problem}.
The Lipschitz continuity follows from the comparison principle. \end{proof}
We now present the proofs of Theorems~\ref{th:unique existence} and \ref{th:convergence}.
\begin{proof}[Proof of Theorems~\ref{th:unique existence} and \ref{th:convergence}] Find $R > 0$ such that $D_0 \subset B_{R/2}(0)$. Let $F$ and $W$ be as in \eqref{geometric F}. Let $u_0$ be a continuous function with $D_0 = \set{u_0 > 0}$ such that $u_0 = -c$ for some $c > 0$ for $\abs x \geq R$. For instance, take a cutoff of the signed distance function to $\Gamma_0$, $u_0(x) := -\min(\operatorname{dist}(x, D_0), 1) + \operatorname{dist}(x, D_0^c)$. Then there is a unique solution $u$ of \eqref{P} with initial data $u_0$ by Theorem~\ref{th:well-posedness}. This establishes the existence of a level set flow $\set{\Gamma_t}_{t\geq0}$ as $\Gamma_t := \set{x: u(x, t) = 0}$.
We therefore only need to show that the zero level set of $u$ does not depend on $u_0$. For this we simply argue as in \cite[Section~4.1.1]{G06} to show that $\theta \circ u := \theta(u)$ is also a viscosity solution of \eqref{P} for any continuous, nondecreasing $\theta$. Then for any given two continuous level set functions $u_0$, $\tilde u_0$ of $\Gamma_0$ we can find $\theta_1$, $\theta_2 \in C(\ensuremath{\mathbb{R}})$, strictly increasing, such that $\theta_1 \circ u_0 \leq \tilde u_0$ and $\theta_2 \circ \tilde u_0 \leq u_0$. Let $u$, $\tilde u$ be the two unique viscosity solutions of \eqref{P} with initial data $u_0$, $\tilde u_0$, respectively. By the comparison principle Theorem~\ref{th:comparison principle} we get $\theta_1 \circ u \leq \tilde u$ and $\theta_2 \circ \tilde u \leq u$. Since $\theta_1 \circ u$ and $\theta_2 \circ \tilde u$ have the same zero level sets as $u$ and $\tilde u$, respectively, we conclude that the level set flow $\set{\Gamma_t}_{t \geq 0}$ is unique.
The stability result of Theorem~\ref{th:convergence} follows from Theorem~\ref{th:linear growth stability} and the comparison principle Theorem~\ref{th:comparison principle}. \end{proof}
\subsection*{Acknowledgments}
Y. G. is partially supported by Grants-in-Aid for Scientific Research No. 26220702 (Kiban S), No. 23244015 (Kiban A) and No. 25610025 (Houga) of Japan Society for the Promotion of Science (JSPS). N. P. is partially supported by JSPS KAKENHI Grant Number 26800068 (Wakate B).
\begin{bibdiv} \begin{biblist}
\bib{AB}{article}{
author={Amar, M.},
author={Bellettini, G.},
title={A notion of total variation depending on a metric with
discontinuous coefficients},
language={English, with English and French summaries},
journal={Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire},
volume={11},
date={1994},
number={1},
pages={91--133},
issn={0294-1449},
review={\MR{1259102 (97a:49057)}}, }
\bib{ACM}{book}{
author={Andreu-Vaillo, Fuensanta},
author={Caselles, Vicent},
author={Maz{\'o}n, Jos{\'e} M.},
title={Parabolic quasilinear equations minimizing linear growth
functionals},
series={Progress in Mathematics},
volume={223},
publisher={Birkh\"auser Verlag, Basel},
date={2004},
pages={xiv+340},
isbn={3-7643-6619-2},
review={\MR{2033382 (2005c:35002)}},
doi={10.1007/978-3-0348-7928-6}, }
\bib{AG89}{article}{
author={Angenent, Sigurd},
author={Gurtin, Morton E.},
title={Multiphase thermomechanics with interfacial structure. II.\
Evolution of an isothermal interface},
journal={Arch. Rational Mech. Anal.},
volume={108},
date={1989},
number={4},
pages={323--391},
issn={0003-9527},
review={\MR{1013461 (91d:73004)}},
doi={10.1007/BF01041068}, }
\bib{Anzellotti}{article}{
author={Anzellotti, Gabriele},
title={Pairings between measures and bounded functions and compensated
compactness},
journal={Ann. Mat. Pura Appl. (4)},
volume={135},
date={1983},
pages={293--318 (1984)},
issn={0003-4622},
review={\MR{750538 (85m:46042)}},
doi={10.1007/BF01781073}, }
\bib{Attouch}{book}{
author={Attouch, H.},
title={Variational convergence for functions and operators},
series={Applicable Mathematics Series},
publisher={Pitman (Advanced Publishing Program), Boston, MA},
date={1984},
pages={xiv+423},
isbn={0-273-08583-2},
review={\MR{773850 (86f:49002)}}, }
\bib{BCCN06}{article}{
author={Bellettini, Giovanni},
author={Caselles, Vicent},
author={Chambolle, Antonin},
author={Novaga, Matteo},
title={Crystalline mean curvature flow of convex sets},
journal={Arch. Ration. Mech. Anal.},
volume={179},
date={2006},
number={1},
pages={109--152},
issn={0003-9527},
review={\MR{2208291 (2007a:53126)}},
doi={10.1007/s00205-005-0387-0}, }
\bib{BCCN09}{article}{
author={Bellettini, Giovanni},
author={Caselles, Vicent},
author={Chambolle, Antonin},
author={Novaga, Matteo},
title={The volume preserving crystalline mean curvature flow of convex
sets in $\Bbb R^N$},
journal={J. Math. Pures Appl. (9)},
volume={92},
date={2009},
number={5},
pages={499--527},
issn={0021-7824},
review={\MR{2558422 (2011b:53155)}},
doi={10.1016/j.matpur.2009.05.016}, }
\bib{BGN00}{article}{
author={Bellettini, G.},
author={Goglione, R.},
author={Novaga, M.},
title={Approximation to driven motion by crystalline curvature in two
dimensions},
journal={Adv. Math. Sci. Appl.},
volume={10},
date={2000},
number={1},
pages={467--493},
issn={1343-4373},
review={\MR{1769163 (2001i:53109)}}, }
\bib{BN00}{article}{
author={Bellettini, G.},
author={Novaga, M.},
title={Approximation and comparison for nonsmooth anisotropic motion by
mean curvature in ${\bf R}^N$},
journal={Math. Models Methods Appl. Sci.},
volume={10},
date={2000},
number={1},
pages={1--10},
issn={0218-2025},
review={\MR{1749692 (2001a:53106)}},
doi={10.1142/S0218202500000021}, }
\bib{BNP99}{article}{
author={Bellettini, G.},
author={Novaga, M.},
author={Paolini, M.},
title={Facet-breaking for three-dimensional crystals evolving by mean
curvature},
journal={Interfaces Free Bound.},
volume={1},
date={1999},
number={1},
pages={39--55},
issn={1463-9963},
review={\MR{1865105 (2003i:53099)}},
doi={10.4171/IFB/3}, }
\bib{BNP01a}{article}{
author={Bellettini, G.},
author={Novaga, M.},
author={Paolini, M.},
title={On a crystalline variational problem. I. First variation and
global $L^\infty$ regularity},
journal={Arch. Ration. Mech. Anal.},
volume={157},
date={2001},
number={3},
pages={165--191},
issn={0003-9527},
review={\MR{1826964 (2002c:49072a)}},
doi={10.1007/s002050010127}, }
\bib{BNP01b}{article}{
author={Bellettini, G.},
author={Novaga, M.},
author={Paolini, M.},
title={On a crystalline variational problem. II. $BV$ regularity and
structure of minimizers on facets},
journal={Arch. Ration. Mech. Anal.},
volume={157},
date={2001},
number={3},
pages={193--217},
issn={0003-9527},
review={\MR{1826965 (2002c:49072b)}},
doi={10.1007/s002050100126}, }
\bib{BP96}{article}{
author={Bellettini, G.},
author={Paolini, M.},
title={Anisotropic motion by mean curvature in the context of Finsler
geometry},
journal={Hokkaido Math. J.},
volume={25},
date={1996},
number={3},
pages={537--566},
issn={0385-4035},
review={\MR{1416006 (97i:53079)}},
doi={10.14492/hokmj/1351516749}, }
\bib{BouchitteDalMaso}{article}{
author={Bouchitt{\'e}, Guy},
author={Dal Maso, Gianni},
title={Integral representation and relaxation of convex local functionals
on ${\rm BV}(\Omega)$},
journal={Ann. Scuola Norm. Sup. Pisa Cl. Sci. (4)},
volume={20},
date={1993},
number={4},
pages={483--533},
issn={0391-173X},
review={\MR{1267597 (95d:49021)}}, }
\bib{B78}{book}{
author={Brakke, Kenneth A.},
title={The motion of a surface by its mean curvature},
series={Mathematical Notes},
volume={20},
publisher={Princeton University Press, Princeton, N.J.},
date={1978},
pages={i+252},
isbn={0-691-08204-9},
review={\MR{485012 (82c:49035)}}, }
\bib{CasellesChambolle06}{article}{
author={Caselles, Vicent},
author={Chambolle, Antonin},
title={Anisotropic curvature-driven flow of convex sets},
journal={Nonlinear Anal.},
volume={65},
date={2006},
number={8},
pages={1547--1577},
issn={0362-546X},
review={\MR{2248685 (2007d:35143)}},
doi={10.1016/j.na.2005.10.029}, }
\bib{CMP}{article}{
author={Chambolle, Antonin},
author={Morini, Massimiliano},
author={Ponsiglione, Marcello},
title={Existence and uniqueness for a crystalline mean curvature flow},
eprint={http://arxiv.org/abs/1508.03598},
status={preprint}, }
\bib{CGG}{article}{
author={Chen, Yun Gang},
author={Giga, Yoshikazu},
author={Goto, Shun'ichi},
title={Uniqueness and existence of viscosity solutions of generalized
mean curvature flow equations},
journal={J. Differential Geom.},
volume={33},
date={1991},
number={3},
pages={749--786},
issn={0022-040X},
review={\MR{1100211 (93a:35093)}}, }
\bib{CIL}{article}{
author={Crandall, Michael G.},
author={Ishii, Hitoshi},
author={Lions, Pierre-Louis},
title={User's guide to viscosity solutions of second order partial
differential equations},
journal={Bull. Amer. Math. Soc. (N.S.)},
volume={27},
date={1992},
number={1},
pages={1--67},
issn={0273-0979},
review={\MR{1118699 (92j:35050)}},
doi={10.1090/S0273-0979-1992-00266-5}, }
\bib{Evans}{book}{
author={Evans, Lawrence C.},
title={Partial differential equations},
series={Graduate Studies in Mathematics},
volume={19},
edition={2},
publisher={American Mathematical Society, Providence, RI},
date={2010},
pages={xxii+749},
isbn={978-0-8218-4974-3},
review={\MR{2597943 (2011c:35002)}},
doi={10.1090/gsm/019}, }
\bib{ES}{article}{
author={Evans, L. C.},
author={Spruck, J.},
title={Motion of level sets by mean curvature. I},
journal={J. Differential Geom.},
volume={33},
date={1991},
number={3},
pages={635--681},
issn={0022-040X},
review={\MR{1100206 (92h:35097)}}, }
\bib{GiaquintaModicaSoucek}{article}{
author={Giaquinta, M.},
author={Modica, G.},
author={Sou{\v{c}}ek, J.},
title={Functionals with linear growth in the calculus of variations. I,
II},
journal={Comment. Math. Univ. Carolin.},
volume={20},
date={1979},
number={1},
pages={143--156, 157--172},
issn={0010-2628},
review={\MR{526154 (80b:35047)}}, }
\bib{G06}{book}{
author={Giga, Yoshikazu},
title={Surface evolution equations - a level set approach},
series={Monographs in Mathematics},
volume={99},
note={(earlier version: Lipschitz Lecture Notes \textbf{44}, University of Bonn, 2002)},
publisher={Birkh\"auser Verlag, Basel},
date={2006},
pages={xii+264},
isbn={978-3-7643-2430-8},
isbn={3-7643-2430-9},
review={\MR{2238463 (2007j:53071)}}, }
\bib{GG98ARMA}{article}{
author={Giga, Mi-Ho},
author={Giga, Yoshikazu},
title={Evolving graphs by singular weighted curvature},
journal={Arch. Rational Mech. Anal.},
volume={141},
date={1998},
number={2},
pages={117--198},
issn={0003-9527},
review={\MR{1615520 (99j:35118)}}, }
\bib{GG01}{article}{
author={Giga, Mi-Ho},
author={Giga, Yoshikazu},
title={Generalized motion by nonlocal curvature in the plane},
journal={Arch. Ration. Mech. Anal.},
volume={159},
date={2001},
number={4},
pages={295--333},
issn={0003-9527},
review={\MR{1860050 (2002h:53117)}},
doi={10.1007/s002050100154}, }
\bib{GGP13AMSA}{article}{
author={Giga, Mi-Ho},
author={Giga, Yoshikazu},
author={Po{\v{z}}{\'a}r, Norbert},
title={Anisotropic total variation flow of non-divergence type on a
higher dimensional torus},
journal={Adv. Math. Sci. Appl.},
volume={23},
date={2013},
number={1},
pages={235--266},
issn={1343-4373},
isbn={978-4-7625-0665-9},
review={\MR{3155453}}, }
\bib{GGP13JMPA}{article}{
author={Giga, Mi-Ho},
author={Giga, Yoshikazu},
author={Po{\v{z}}{\'a}r, Norbert},
title={Periodic total variation flow of non-divergence type in
$\Bbb{R}^n$},
language={English, with English and French summaries},
journal={J. Math. Pures Appl. (9)},
volume={102},
date={2014},
number={1},
pages={203--233},
issn={0021-7824},
review={\MR{3212254}},
doi={10.1016/j.matpur.2013.11.007}, }
\bib{GG92}{article}{
author={Giga, Yoshikazu},
author={Goto, Shun'ichi},
title={Motion of hypersurfaces and geometric equations},
journal={J. Math. Soc. Japan},
volume={44},
date={1992},
number={1},
pages={99--111},
issn={0025-5645},
review={\MR{1139660 (93b:58025)}},
doi={10.2969/jmsj/04410099}, }
\bib{GOS}{article}{
author={Giga, Yoshikazu},
author={Ohtsuka, Takeshi},
author={Sch{\"a}tzle, Reiner},
title={On a uniform approximation of motion by anisotropic curvature by
the Allen-Cahn equations},
journal={Interfaces Free Bound.},
volume={8},
date={2006},
number={3},
pages={317--348},
issn={1463-9963},
review={\MR{2273232 (2007k:35258)}},
doi={10.4171/IFB/146}, }
\bib{Go}{article}{
author={Goto, Shun'ichi},
title={Generalized motion of hypersurfaces whose growth speed depends
superlinearly on the curvature tensor},
journal={Diff. Integral Eq.},
volume={7},
date={1994},
number={2},
pages={323--343},
issn={0893-4983},
review={\MR{1255892 (94m:35143)}}, }
\bib{Gr89}{article}{
author={Grayson, Matthew A.},
title={A short note on the evolution of a surface by its mean curvature},
journal={Duke Math. J.},
volume={58},
date={1989},
number={3},
pages={555--558},
issn={0012-7094},
review={\MR{1016434 (90h:53010)}},
doi={10.1215/S0012-7094-89-05825-0}, }
\bib{GSS}{article}{
author={Gurtin, M. E.},
author={Soner, H. M.},
author={Souganidis, P. E.},
title={Anisotropic motion of an interface relaxed by the formation of
infinitesimal wrinkles},
journal={J. Differential Equations},
volume={119},
date={1995},
number={1},
pages={54--108},
issn={0022-0396},
review={\MR{1334488 (97a:73013)}},
doi={10.1006/jdeq.1995.1084}, }
\bib{Il93}{article}{
author={Ilmanen, Tom},
title={Convergence of the Allen-Cahn equation to Brakke's motion by mean
curvature},
journal={J. Differential Geom.},
volume={38},
date={1993},
number={2},
pages={417--461},
issn={0022-040X},
review={\MR{1237490 (94h:58051)}}, }
\bib{I96}{article}{
author={Ishii, Hitoshi},
title={Degenerate parabolic PDEs with discontinuities and generalized
evolutions of surfaces},
journal={Adv. Differential Equations},
volume={1},
date={1996},
number={1},
pages={51--72},
issn={1079-9389},
review={\MR{1357954 (97j:35083)}}, }
\bib{IS}{article}{
author={Ishii, Hitoshi},
author={Souganidis, Panagiotis},
title={Generalized motion of noncompact hypersurfaces with velocity
having arbitrary growth on the curvature tensor},
journal={Tohoku Math. J. (2)},
volume={47},
date={1995},
number={2},
pages={227--250},
issn={0040-8735},
review={\MR{1329522 (96e:35069)}},
doi={10.2748/tmj/1178225593}, }
\bib{Moll}{article}{
author={Moll, J. S.},
title={The anisotropic total variation flow},
journal={Math. Ann.},
volume={332},
date={2005},
number={1},
pages={177--218},
issn={0025-5831},
review={\MR{2139257 (2006d:35113)}},
doi={10.1007/s00208-004-0624-0}, }
\bib{OS93}{article}{
author={Ohnuma, Masaki},
author={Sato, Moto-Hiko},
title={Singular degenerate parabolic equations with applications to
geometric evolutions},
journal={Diff. Integral Eq.},
volume={6},
date={1993},
number={6},
pages={1265--1280},
issn={0893-4983},
review={\MR{1235192 (94h:35133)}}, }
\bib{Resetnjak}{article}{
author={Re{\v{s}}etnjak, Ju. G.},
title={The weak convergence of completely additive vector-valued set
functions},
language={Russian},
journal={Sibirsk. Mat. \u Z.},
volume={9},
date={1968},
pages={1386--1394},
issn={0037-4474},
review={\MR{0240274 (39 \#1623)}}, }
\bib{Rockafellar}{book}{
author={Rockafellar, R. Tyrrell},
title={Convex analysis},
series={Princeton Mathematical Series, No. 28},
publisher={Princeton University Press, Princeton, N.J.},
date={1970},
pages={xviii+451},
review={\MR{0274683 (43 \#445)}}, }
\bib{Si83}{book}{
author={Simon, Leon},
title={Lectures on geometric measure theory},
series={Proceedings of the Centre for Mathematical Analysis, Australian
National University},
volume={3},
publisher={Australian National University, Centre for Mathematical
Analysis, Canberra},
date={1983},
pages={vii+272},
isbn={0-86784-429-9},
review={\MR{756417 (87a:49001)}}, }
\bib{TT}{article}{
author={Takasao, K.},
author={Tonegawa, Y.},
title={Existence and regularity of mean curvature flow with transport term in higher dimension},
status={to appear in Math. Annalen},
eprint={http://arxiv.org/abs/1307.6629}, }
\bib{T91}{article}{
author={Taylor, Jean E.},
title={Constructions and conjectures in crystalline nondifferential
geometry},
conference={
title={Differential geometry},
},
book={
title={Proceedings of the Conference on Differential Geometry, Rio de Janeiro},
editor={Lawson, B.},
editor={Tanenblat, K.},
series={Pitman Monogr. Surveys Pure Appl. Math.},
volume={52},
publisher={Longman Sci. Tech., Harlow},
},
date={1991},
pages={321--336},
review={\MR{1173051 (93e:49004)}},
}
\bib{TC98}{article}{
author={Taylor, Jean E.},
author={Cahn, John W.},
title={Diffuse interfaces with sharp corners and facets: phase field
models with strongly anisotropic surfaces},
note={With an appendix by Jason Yunger},
journal={Phys. D},
volume={112},
date={1998},
number={3-4},
pages={381--411},
issn={0167-2789},
review={\MR{1607466 (98i:35185)}},
doi={10.1016/S0167-2789(97)00177-2}, } \end{biblist} \end{bibdiv}
\end{document} | arXiv | {
"id": "1601.01802.tex",
"language_detection_score": 0.6145858764648438,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Quantum key distribution with flawed and leaky sources} \author{Margarida Pereira$^{1}$} \email{mpereira@com.uvigo.es} \author{Marcos Curty$^{1}$, Kiyoshi Tamaki$^{2}$} \affiliation{$^{1}$Escuela de Ingenier$\acute{\textit{\i}}$a de Telecomunicaci$\acute{o}$n, Department of Signal Theory and Communications, University of Vigo, Vigo E-36310, Spain \\ $^{2}$Graduate School of Science and Engineering for Research,University of Toyama, Gofuku 3190, Toyama 930-8555, Japan} \date{\today}
\begin{abstract} In theory, quantum key distribution (QKD) allows secure communications between two parties based on physical laws. However, most of the security proofs of QKD today make unrealistic assumptions and neglect many relevant device imperfections. As a result, they cannot guarantee the security of the practical implementations. Recently, the loss-tolerant protocol (K. Tamaki et al, Phys. Rev. A, 90, 052314, 2014) was proposed to make QKD robust against state preparation flaws. This protocol relies on the emission of qubit systems which, unfortunately, is difficult to achieve in practice. In this work, we remove such qubit assumption and generalise the loss-tolerant protocol to accommodate multiple optical modes in the emitted signals. These multiple optical modes could arise, for example, from Trojan horse attacks and/or device imperfections. Our security proof determines some dominant device parameter regimes needed for achieving secure communication, and therefore it can serve as a guideline to characterise QKD transmitters. Furthermore, we compare our approach with that of Lo and Preskill (H.-K. Lo et al, Quantum Inf. Comput., 7, 431-458, 2007) and identify which method provides the highest secret key generation rate as a function of the device imperfections. Our work constitutes an important step towards the best practical and secure implementation for QKD. \end{abstract}
\maketitle
\section{Introduction} \label{sec:intro} Quantum key distribution (QKD) \cite{gisin,scarani,lo} enables two distant parties, Alice and Bob, to share a common secret key that can be used to encrypt and decrypt messages. In theory, QKD can offer information-theoretic security based on the laws of physics. In practice, however, it does not because typical security proofs of QKD require assumptions that are not actually met by the practical implementations, as they usually ignore many experimental device imperfections. This discrepancy between the theory and the practice of QKD has been evidenced by many quantum hacking attacks, especially by those that exploit flaws in the detectors of QKD systems \cite{lydersen,gerhardt}. Fortunately, the proposal of measurement-device-independent QKD (MDI-QKD) \cite{lo2} can solve all security loopholes in the measurement unit, and therefore Eve cannot take advantage of detector side-channels to learn information about the key. Furthermore, MDI-QKD can be implemented experimentally using standard optical components \cite{rubenok,silva,liu,tang,yin,roberts}. Therefore, to guarantee implementation security we now need to focus on how to secure the source in QKD.
Ideally, the sending devices are single-photon sources and the encoding of the light pulses is executed perfectly, without any state preparation flaw (SPF). However, neither of these two conditions is met experimentally since all devices have inherent deficiencies. The decoy-state method \cite{hwang,lo3,wang2} was proposed to replace single-photon sources with coherent light sources. Also, by using the Gottesman-Lo-L{\"u}tkenhaus-Preskill (GLLP) security analysis \cite{gottesman} the problem with SPFs is fixed. The main drawback of this last approach is that the resulting secret key rate is poor and fragile against channel loss. This is because it assumes the worst case scenario in which Eve could enhance the signals' flaws through channel loss, which significantly decreases the performance of the QKD scheme.
Recently, a protocol that is loss-tolerant to SPFs has been proposed \cite{tamaki} to address the limitation of the GLLP analysis. The loss-tolerant protocol employs only three states and takes into account modulation errors due to an imperfect phase modulator (PM). Remarkably, by using a different phase error estimation technique involving the use of the basis mismatched events, the secret key rate of the loss-tolerant protocol remains almost unchanged even if the SPFs increase. In fact, the maximum transmission distance for a QKD system in fiber so far has been recently achieved using this protocol \cite{boaron}, which shows that the loss-tolerant protocol is highly practical. Its main weakness, however, is the assumption that the single-photon signals sent by Alice are qubits, which is difficult to guarantee in practice. For instance, if Eve conducts a Trojan horse attack (THA) \cite{gisin2,vakhitov,lucamarini,tamaki3,wang} against the source, this assumption can be violated. In a THA, Eve sends bright light into Alice's PM and obtains information about the encoding by measuring the back-reflected light that exits Alice's lab. Moreover, an optical mode of the light pulse emitted by Alice could be dependent on the value of the phase modulation, which means that a sent single-photon pulse might not be a qubit (we call this imperfection the non-qubit assumption). That is, Alice's setting choice information could be encoded in other degrees of freedom of the emitted light, and this spontaneous leakage of information results in a higher-dimensional sending state. \\
This work aims to reduce the big gap between the theory and the practice of QKD by generalising the loss-tolerant protocol such that it can include typical imperfections in the sending device. To be precise, and in contrast to~\cite{tamaki}, here we remove the qubit assumption and include the effect of side-channels by considering the mode dependency of the PM and THAs. Moreover, like in~\cite{tamaki}, we also include in the analysis SPFs in a single-mode qubit subspace. Therefore, our analysis covers dominant imperfections that a source device has, allowing the use of a much wider class of imperfect devices in a secure manner. Our generalised loss-tolerant protocol can be applied to any multi-mode scenario as long as the states of the emitted signals are independently and identically distributed (I.I.D.), namely, this proof does not consider correlations between the sending signals. However, we remark that recent results reported in \cite{mizutani} imply that our analysis could also accommodate correlations between the signals which are independent of Alice's setting choice. Furthermore, we emphasise that the basic idea is rather general, and can be applied to many other QKD protocols like, for instance, the six-state protocol \cite{bruss}, distributed-phase-reference protocols \cite{inoue,takesue,stucki} and MDI-QKD~\cite{lo2}. In simple terms, it is a formalism to estimate the phase error rate of a QKD protocol by evaluating the transmission rates of some virtual states with the help of the state structure of Alice's signals (see Eq. (\ref{eq:multi-mode}) below). We also emphasise that our method does not require a complete characterisation of the side-channels, which significantly simplifies the experiments for characterising the source. Using this formalism, we can quantify the device parameters required to ensure secure communications with flawed and leaky sources.
Additionally, we investigate how Lo-Preskill's security analysis \cite{lo4} behaves in the presence of the same device's imperfections and, by using imperfectly characterised states, we compare it with our generalised loss-tolerant protocol. As a result, we determine which security proof provides a higher secret key rate as a function of the device parameters. These parameters are essential for experimentalists to produce and to calibrate the transmitting devices, and therefore our work can be used as a guideline for securing the source in the presence of multi-mode signals.
This paper is organised as follows. In section \ref{sec:description}, we describe the assumptions that we make in our security proof and introduce the QKD protocol considered. In section \ref{sec:estimation}, we present the security analysis for our generalised loss-tolerant protocol. Then, in section \ref{sec:simulation}, we consider a particular device model to perform the simulations for our analysis and in order to compare it with Lo-Preskill's analysis. Finally, we conclude our paper in section \ref{sec:conclusion} and provide additional explanations and calculations in the Appendixes.
\section{Description of the protocol} \label{sec:description} We shall assume, for simplicity, that Alice's lab has a single-photon source. However, we emphasise that our analysis can also be applied to the case where Alice emits phase-randomised weak coherent pulses. In this latter case, Alice can use the decoy-state method \cite{hwang,lo3,wang2} to estimate all the quantities corresponding to the single-photon pulses which are needed to apply our method. Below we focus on the case where Alice has at her disposal single-photon sources only because the study with phase-randomised weak coherent pulses, together with decoy states, results in an unnecessarily cumbersome analysis. Fig.~\ref{fig:protocol} shows the QKD setup. Next, we describe the assumptions we make on Alice's and Bob's devices.
\subsection{Assumptions on Alice's device} In this work, we consider the asymptotic scenario where Alice sends Bob an infinite number of pulses. Our formalism is valid for any source that emits pulses whose quantum state is of the form \begin{equation} \ket{\Phi_{j\beta}}_{BE} = a_{j\beta} \ket{\phi_{j\beta}}_{BE} + b_{j\beta} \ket{\phi_{j\beta}^\perp}_{BE}, \label{eq:multi-mode} \end{equation}
with $|a_{j\beta}|^2 + |b_{j\beta}|^2 =1$, where $j \in \{0,1\}$ and $\beta \in \{Z,X\}$ are Alice's bit value and basis choices respectively. As in the loss-tolerant analysis introduced in \cite{tamaki}, we consider a three-state protocol where Alice selects $j\beta \in \{0Z,1Z,0X\}$. Furthermore, in Eq. (\ref{eq:multi-mode}), we assume that $\ket{\phi_{j\beta}}_{BE}$ is a pure state in a single-mode qubit space, where $BE$ stands for Bob's and Eve's systems due to a potential THA. For instance, $\ket{\phi_{j\beta}}_{BE}$ could be of the form $\ket{\phi_{j\beta}}_{BE} = \ket{\omega_{j\beta}}_{B}\otimes \ket{\epsilon}_E$, where Eve's system does not depend on $j\beta$ and $\ket{\omega_{j\beta}}_{B}$ is a qubit state. The state $\ket{\phi_{j\beta}^\perp}_{BE}$, on the other hand, corresponds to any state outside of the single mode qubit space, including the state of a side-channel, and it is in a Hilbert space orthogonal to $\ket{\phi_{j\beta}}_{BE}$. We note that the form of the pure state given by Eq.~(\ref{eq:multi-mode}) is the most general I.I.D. state. Indeed, this equation simply decomposes a state in a given Hilbert space into a direct sum of two states in different Hilbert spaces, which can always be done. One of these states is in a qubit space and the other one is in any complementary Hilbert space. That is, any pure state can be written in the form given by Eq.~(\ref{eq:multi-mode}). In addition,
\begin{figure}
\caption{Each single-photon pulse emitted by Alice's source goes through a 50:50 beamsplitter (BS) and is decomposed into the reference and the signal pulses. The reference pulse travels through the longer arm of Alice's Mach-Zehnder interferometer. To perform the encoding, she uses a PM that applies a phase shift to the signal pulse. The two pulses are recombined at the second 50:50 BS, sent through the quantum channel and then received in Bob's lab. On reception, they are split by a 50:50 BS and Bob applies a phase shift on the reference and signal pulses in the upper arm of his Mach-Zehnder interferometer. These pulses then interfere with the pulses that travelled through the shorter arm of the interferometer at the second 50:50 BS. Bob can then detect click events corresponding to photons choosing the shortest arm in Alice's interferometer and the longest one in Bob's, and to the opposite, by using two detectors, D0 and D1, which correspond to obtaining bit value 0 and 1 respectively. }
\label{fig:protocol}
\end{figure}
\noindent we further assume that, like in~\cite{tamaki}, the states $\ket{\phi_{j\beta}}_{BE}$ in Eq.~(\ref{eq:multi-mode}) form a triangle in the Bloch sphere, and we set their $Y$-components to be the same by choosing the $Y$-axis appropriately. This assumption is required to ensure that Alice is sending essentially three different states, rather than one or two states. Importantly, we note that by introducing an ancilla system for Alice to purify the state, our formalism is also valid for a mixed state in a single-mode qubit space, as shown in Appendix \ref{app:security}.
The state structure in Eq. (\ref{eq:multi-mode}) means that the inner product ${\braket{\phi_{j\beta} | \phi_{j'\beta'}^\perp}}{_{BE}}$ for all $j,j',\beta$ and $\beta'$ is always zero. Also, depending on Alice's knowledge about the state given in Eq. (\ref{eq:multi-mode}) she might have to consider the worst-case scenario, {\it i.e.}, ${\braket{\phi_{j\beta}^\perp | \phi_{j'\beta'}^\perp}}{_{BE}} = 0$ for any combination of $(j,\beta)$ and $(j',\beta')$. This means that complete information about $\ket{\phi_{j\beta}^\perp}$ is not required, which significantly simplifies the experiments for characterising the source. On the other hand, if Alice knows some structure of the side-channel she should fully exploit it and lower bound ${\braket{\phi_{j\beta}^\perp | \phi_{j'\beta'}^\perp}}{_{BE}}$. For example, if she knows that the side-channel is associated with the polarisation state of the single-mode qubit then the worst-case scenario does not apply, {\it i.e.}, ${\braket{\phi_{j\beta}^\perp | \phi_{j'\beta'}^\perp}}{_{BE}} \neq 0$, since it is impossible for three states to be orthogonal to each other given the two-dimensionality of polarisation. This way, our formalism can readily take into account the available information.
We remark that, to apply the procedure introduced below we only need to determine the coefficients $a_{j\beta}$ and $b_{j\beta}$, and the qubit state, but it is not necessary to completely characterise the quantum information of the side-channel, $\ket{\phi_{j\beta}^\perp}_{BE}$. That is, our characterisation seems to be rather simple, and there is no need to perform further detailed characterisations. Nonetheless, the better Alice and Bob know the state given in Eq. (\ref{eq:multi-mode}), the better the resulting performance, as explained later in Section \ref{sec:simulation}. An experimental procedure to perform this estimation is out of the scope of this paper hence, we assume that these parameters are given.
Furthermore, our work can accommodate any SPF in the single-mode qubit space, and one could also employ the techniques in \cite{nagamatsu,mizutani}. For example, we may select a case in which the states that Alice prepares can be expressed as \begin{linenomath*} \begin{equation} \frac{1}{\sqrt{2}}\left( \ket{1}_r \ket{v}_s + e^{i(\varphi_A + \delta \varphi_A /\pi)} \ket{v}_r \ket{1}_s \right), \label{eq:model} \end{equation}\end{linenomath*} where $\delta(\ge 0)$ is the deviation of the phase modulation from the intended value $\varphi_A$, and we define $\ket{1}_r \ket{v}_s = \ket{0_Y}$ and $ \ket{v}_r \ket{1}_s = \ket{1_Y}$, where $v$ stands for vacuum, $\ket{1}$ denotes a Fock state with one photon and the subscript $r$ $(s)$ corresponds to the reference (signal) pulse. In the case of the three-state protocol we have that $\varphi_A\in\{0, \pi, \pi/2\}$. Then, by using $\ket{0_Z} = (\ket{0_Y} + \ket{1_Y})/\sqrt{2}$ and $\ket{1_Z} = (- \ket{0_Y} + \ket{1_Y})/\sqrt{2}$, we obtain the following expressions for the three states in the single-mode qubit space: \begin{linenomath*} \begin{equation} \begin{split} &\ket{\omega_{0Z}}_B = \ket{0_Z}, \\ &\ket{\omega_{1Z}}_B = - \sin (\frac{\delta}{2})\ket{0_Z} + \cos (\frac{\delta}{2}) \ket{1_Z},\\ &\ket{\omega_{0X}}_B = \cos (\frac{\pi}{4} + \frac{\delta}{4}) \ket{0_Z} + \sin (\frac{\pi}{4} + \frac{\delta}{4}) \ket{1_Z}. \end{split} \label{eq:alice_states} \end{equation}\end{linenomath*}
Therefore, our formalism can be used, for instance, when the information about Alice's choice of state is leaked and/or the optical mode depends on Alice's selection. This leakage from the source can occur spontaneously or with an active THA.
\subsection{Assumptions on Bob's device} Bob receives the signal and reference pulses from Alice and measures them in a basis selected at random. More precisely, Bob's measurements are defined by the positive-operator valued measures (POVMs) $\{\hat{M}_{0\beta}, \hat{M}_{1\beta},\hat{M}_{f}\}$, where $\hat{M}_{0\beta}$ ($\hat{M}_{1\beta}$) with $\beta \in \{X,Z\}$ corresponds to obtaining the bit value 0 (1) when Bob chooses the basis $\beta$, and $\hat{M}_{f}$ corresponds to an inconclusive outcome. Importantly, $\hat{M}_{f}$ is assumed to be the same for the two bases. This means that the detection efficiencies are independent of Bob's measurement basis choice, which is required to prevent side-channel attacks exploiting channel loss \cite{lydersen,gerhardt}. Note that this assumption is widely used in most security proofs, and one of the simplest ways to circumvent such detector side-channel attacks is to use MDI-QKD, to which our technique also applies (see Appendix \ref{app:mdi}).
\subsection{Actual protocol}
The three-state protocol makes an asymmetric choice of basis, namely, the $Z$ and the $X$ basis are selected with a priori different probabilities for both Alice and Bob. The events when both of them select the $Z$ basis are used for key generation. Also, all announcements between Alice and Bob are done via an authenticated public channel. Next, we describe the steps of the QKD protocol in detail. \\
\begin{enumerate} \item \textbf{Initialisation:} Before running the protocol, Alice and Bob agree on a number $N_{fixed}$ of detected events, on the error correcting codes, and on a set of hash functions to perform privacy amplification. Steps 2--4 of the protocol are repeated until the number of detected events $N$ becomes $N_{fixed}$.
\item \textbf{State preparation:} Alice generates the states given in Eq. (\ref{eq:multi-mode}). First, she selects the basis $\beta \in\{ Z, X\}$ for encoding the states with probabilities $P_{Z_A}$ and $P_{X_A} = 1- P_{Z_A}$ respectively. If the $Z$ basis is selected, she randomly chooses a bit value. Then, Alice prepares the signal and reference pulses following these specifications and sends the pulses to Bob via a quantum channel. Due to a potential THA, the sending states might contain Eve's system $E$ as well.
\item \textbf{Measurement:} Bob measures each incoming signal using the basis $\beta \in \{Z,X\}$ which he selects with probabilities $P_{Z_B}$ and $P_{X_B} = 1 - P_{Z_B}$ respectively.
\item \textbf{Detection announcement:} Bob checks whether the signal in Step 3 is detected or not. For each detected event, $N$ is increased by 1 unit\footnotemark[1]. If $N = N_{fixed}$, Bob announces the detected events, and they proceed to Step 5, otherwise they go back to Step 2.\footnotetext[1]{As proven in \cite{tamaki4}, one can employ this announcement and the setting independent termination condition as long as we use Azuma's inequality \cite{azuma}. Moreover, with Azuma's inequality, Alice and Bob can exchange the basis information for each round of the quantum communication \cite{tamaki4}, which we do not adopt here for simplicity of the discussion.}
\item \textbf{Basis announcement and sifting:} Alice and Bob announce their basis choices for the detected events. Also, they define bit strings associated with the basis matched and mismatched events.
\item \textbf{Parameter estimation:} Alice and Bob announce the bit strings $\vec{s}_{X,0Z}, ~\vec{s}_{X,1Z}$ and $\vec{s}_{X,0X}$ which correspond to the events when Alice sends one of the three possible states and Bob measures it in the $X$ basis. These bit strings are used to estimate the number of bits that need to be removed from the sifted key, which is composed of the $Z$ basis matched events, during privacy amplification.
\item \textbf{Error correction and privacy amplification:} Alice and Bob randomly select an error correcting code from Step 1 to perform error correction on these sifted strings and then they exchange the syndrome information about the $Z$ basis matched events. Then, based on the result of the parameter estimation in Step 6, they perform privacy amplification on the corrected sifted keys. At the end of this step, Alice and Bob obtain the key strings $\vec{k}_{Z_A}$ and $\vec{k}_{Z_B}$, respectively.
\end{enumerate}
\section{Security Analysis} \label{sec:estimation} In order to prove the information-theoretic security of our protocol we use the complementary scenario introduced by Koashi \cite{koashi,koashi2}. For this, we first need to create an equivalent virtual protocol (see Appendix \ref{app:security}) concerning an observable conjugate to the key. The classical and quantum information available to Eve in the actual and virtual protocols are the same and therefore she cannot distinguish and behave differently between them. Hence, by proving the security in the virtual protocol we ensure the security of the actual protocol. Additionally, from the virtual protocol we can determine the phase error rate, which quantifies the amount of information that is leaked to Eve and has to be removed in the privacy amplification step. In this section, we show how this last quantity is estimated by generalising the loss-tolerant method.
\subsection{Secret key rate} As explained before, for simplicity we assume the asymptotic scenario where Alice sends Bob an infinite number of pulses. The asymptotic key rate for the single-photon signals can be expressed as \begin{linenomath*} \begin{equation} R \ge Y_Z [1 - h(e_X) - fh(e_Z)], \label{eq:keyrate} \end{equation}\end{linenomath*} where $Y_Z$ is the yield of the single photons in the $Z$ basis, {\it i.e.}, the joint probability of Alice emitting a single-photon in the $Z$ basis and Bob detecting it with a measurement also in the $Z$ basis. The function $h(x) = -x\log_2 (x) - (1-x) \log_2 (1-x)$ is the binary entropy function and $f$ is the error correction efficiency. The term $e_X$ is the phase error rate and thus, $h(e_X)$ is the cost of performing privacy amplification in order to remove the correlations between the corrected sifted key and Eve. The term $e_Z$ is the bit error rate and $fh(e_Z)$ corresponds to the amount of syndrome information required to make Alice's and Bob's keys the same. The quantities $Y_Z$ and $e_Z$ in Eq. (\ref{eq:keyrate}) can be directly obtained from an implementation of the experiment. Therefore, we are left with the estimation of the phase error rate. We do this below.
\subsection{Estimation of the phase error rate} We assume that Alice prepares the states $\ket{\Phi_{j\beta}}_{BE}$ as defined in Eq. (\ref{eq:multi-mode}). These states take into consideration the non-qubit assumption, a possible THA by Eve, and SPFs. In the virtual protocol (see Appendix \ref{app:security} for further details), Alice prepares the following state in the $Z$ basis: \begin{linenomath*} \begin{equation} \ket{\Psi_{Z}}_{ABE} = \frac{1}{\sqrt{2}} \Big[\ket{0_Z}_A \otimes \big( a_{0Z} \ket{\phi_{0Z}}_{BE}+ b_{0Z} \ket{\phi^{\perp}_{0Z}}_{BE}\big) + \ket{1_Z}_A \otimes \big( a_{1Z} \ket{\phi_{1Z}}_{BE} + b_{1Z} \ket{\phi^{\perp}_{1Z}}_{BE}\big) \Big]. \label{eq:stateZ} \end{equation}\end{linenomath*} We then define the bit error rate as \begin{linenomath*} \begin{equation} e_{Z} = \frac{Y_{0Z,1Z}^{(Z)} + Y_{1Z,0Z}^{(Z)}}{Y_{0Z,0Z}^{(Z)} + Y_{1Z,0Z}^{(Z)} + Y_{0Z,1Z}^{(Z)} + Y_{1Z,1Z}^{(Z)}}, \label{eq:biterror2} \end{equation}\end{linenomath*} where the yields $Y_{sZ,jZ}^{(Z)}$, with $s,j \in \{0,1\}$, are the joint probabilities that Alice prepares the state $\ket{\Psi_{Z}}_{ABE}$, Bob selects the $Z$ basis, and Alice (Bob) obtains the bit value $j$ ($s$) when she (he) measures the system $A$ $(B)$ in the $Z$ basis. Note that, the superscript $(Z)$ in the yields represents the basis used in the state preparation while the subscripts denote the bases employed in the measurements. These yields are directly observed in the experiment. 
Similarly, the phase error rate is defined as \begin{linenomath*} \begin{equation} e_X = \frac{Y_{0X,1X}^{(Z) vir} + Y_{1X,0X}^{(Z) vir}}{Y_{0X,0X}^{(Z) vir} + Y_{1X,0X}^{(Z) vir} + Y_{0X,1X}^{(Z) vir} + Y_{1X,1X}^{(Z) vir}}, \label{eq:ex} \end{equation}\end{linenomath*} where $Y_{sX,jX}^{(Z) vir}$, with $s,j \in \{0,1\}$, is the joint probability that Alice prepares the state $\ket{\Psi_{Z}}_{ABE}$, she and Bob select the $Z$ basis but both use the $X$ basis for their measurements (rather than the selected $Z$ basis), and Alice (Bob) obtains the bit value $j$ ($s$). The phase error rate corresponds to the bit error in the virtual protocol. Also, we have that the denominator of $e_X$ in Eq.~(\ref{eq:ex}) is equal to $Y_{0Z,0Z}^{(Z)} + Y_{1Z,0Z}^{(Z)} + Y_{0Z,1Z}^{(Z)} + Y_{1Z,1Z}^{(Z)}$, since by assumption the probability to obtain an inconclusive outcome associated with the operator $\hat{M}_f$ is the same for both bases for any incoming state. This means that to estimate $e_X$ we only need to calculate the virtual yields $Y_{0X,1X}^{(Z)vir}$ and $Y_{1X,0X}^{(Z)vir}$.
In the virtual protocol, after Alice measures the system $A$ in Eq. (\ref{eq:stateZ}) in the $X$ basis, she sends Bob the (unnormalised) states: \begin{linenomath*}\begin{equation} \hat{\theta}_{BE, jX, vir} = \Tr_A \big[ \dyad{j_X}{j_X}_A \otimes \hat{\mathds{1}}_{BE} \dyad{\Psi_{Z}}{\Psi_{Z}}_{ABE} \big], \label{eq:density_matrix} \end{equation}\end{linenomath*} where $\Tr_A$ is the partial trace over the virtual system $A$. Using Eqs. (\ref{eq:stateZ}) and (\ref{eq:density_matrix}) we can calculate the unnormalised states sent by Alice for $j \in \{0,1\}$ and obtain that $\hat{\theta}_{BE, jX, vir} = \dyad{\psi}{\psi}_{BE, jX, vir}$ with \begin{linenomath*}\begin{equation} \ket{\psi}_{BE,jX,vir} = \frac{1}{2} \Big[ a_{0Z} \ket{\phi_{0Z}}_{BE} + b_{0Z} \ket{\phi^{\perp}_{0Z}}_{BE} + (-1)^j \big(a_{1Z} \ket{\phi_{1Z}}_{BE} + b_{1Z} \ket{\phi^{\perp}_{1Z}}_{BE}\big) \Big]. \label{eq:unstate} \end{equation}\end{linenomath*} Writing Eq. (\ref{eq:unstate}) in terms of the states $\ket{\gamma_{jX}}_{BE}$ and $ \ket{\gamma_{jX}^\perp}_{BE}$, defined below, we have that: \begin{linenomath*}\begin{equation} \begin{split}
\ket{\psi}_{BE, jX, vir} = \frac{1}{2} \bigg[&\sqrt{|a_{0Z}|^2 + (-1)^j \big( a_{0Z}^* a_{1Z}{\braket{\phi_{0Z}|\phi_{1Z}}}_{BE} + a_{0Z} a_{1Z}^* {\braket{\phi_{1Z}|\phi_{0Z}}}_{BE} \big) + |a_{1Z}|^2} ~ \ket{\gamma_{jX}}_{BE} \\
&+ \sqrt{|b_{0Z}|^2 + (-1)^j \big( b_{0Z}^* b_{1Z}{\braket{\phi_{0Z}^\perp | \phi_{1Z}^\perp}}_{BE} + b_{0Z} b_{1Z}^* {\braket{\phi_{1Z}^\perp | \phi_{0Z}^\perp}}_{BE} \big) + |b_{1Z}|^2} ~ \ket{\gamma_{jX}^\perp}_{BE}\bigg], \label{eq:statej} \end{split} \end{equation}\end{linenomath*} where the normalised states $\ket{\gamma_{jX}}_{BE}$ have the form \begin{linenomath*}\begin{equation}
\ket{\gamma_{jX}}_{BE} = \frac{a_{0Z}\ket{\phi_{0Z}}_{BE} + (-1)^j a_{1Z} \ket{\phi_{1Z}}_{BE}}{ \sqrt{|a_{0Z}|^2 + (-1)^j \big(a_{0Z}^* a_{1Z} {\braket{\phi_{0Z} | \phi_{1Z}}}_{BE} + a_{0Z} a_{1Z}^* {\braket{\phi_{1Z}|\phi_{0Z}}}_{BE} \big) + |a_{1Z}|^2}}, \end{equation}\end{linenomath*} and the normalised states $ \ket{\gamma_{jX}^\perp}_{BE}$, which are orthogonal to $\ket{\gamma_{jX}}_{BE} $, are given by \begin{linenomath*}\begin{equation}
\ket{\gamma_{jX}^\perp}_{BE} = \frac{b_{0Z}\ket{\phi_{0Z}^\perp}_{BE} + (-1)^j b_{1Z} \ket{\phi_{1Z}^\perp}_{BE}}{ \sqrt{|b_{0Z}|^2 + (-1)^j \big(b_{0Z}^* b_{1Z} {\braket{\phi_{0Z}^\perp | \phi_{1Z}^\perp}}_{BE} + b_{0Z} b_{1Z}^* {\braket{\phi_{1Z}^\perp | \phi_{0Z}^\perp}}_{BE} \big) + |b_{1Z}|^2}}. \end{equation}\end{linenomath*} Note that, in Eq. (\ref{eq:statej}), we have decomposed $\ket{\psi}_{BE,jX,vir}$ into a single-mode qubit $\ket{\gamma_{jX}}_{BE}$ and a state in any mode orthogonal to it, $ \ket{\gamma_{jX}^\perp}_{BE}$. This decomposition follows the definition provided in Eq. (\ref{eq:multi-mode}), and it is an essential step for our estimation of the phase error rate.
To obtain the yields $Y_{sX,jX}^{(Z)vir}$ we need to calculate \begin{linenomath*}\begin{equation} Y_{sX,jX}^{(Z) vir} = P_{Z_A} P_{Z_B} \Tr[\hat{D}_{sX}\hat{\theta}_{BE, jX, vir} ], \label{eq:yield} \end{equation}\end{linenomath*} where $\hat{D}_{sX} = \sum_k \hat{A}_k^{\dagger} \hat{M}_{sX} \hat{A}_k $ corresponds to Eve's action, represented by the Kraus operators $\hat{A}_k$, as well as Bob's measurement with $\hat{M}_{sX}$ being an element of Bob's POVM. Here, recall the definition of the phase error rate where the $Z$ basis is selected but both Alice and Bob use the $X$ basis for their measurements (rather than the selected $Z$ basis), which is why $P_{Z_A}$ and $P_{Z_B}$ appear in Eq. (\ref{eq:yield}). Moreover, here we assume, for simplicity, that Eve applies the same quantum operation to every signal, which corresponds to a collective attack, but our analysis can be generalised to coherent attacks by considering Azuma's inequality \cite{azuma} (see Appendix \ref{app:security}), which deals with any correlations among the events, {\it i.e.}, the phase error rate pattern. Using Eqs. (\ref{eq:statej})-(\ref{eq:yield}) we obtain the following expression for the yields: \begin{linenomath*}\begin{equation} \begin{split} Y_{sX,jX}^{(Z)vir} = & ~P_{Z_A} P_{Z_B} \bigg( A_j \Tr\Big[\hat{D}_{sX} \dyad{\gamma_{jX}}{\gamma_{jX}}_{BE} \Big] \\ & + \Tr\Big[\hat{D}_{sX}\Big(B_j \dyad{\gamma_{jX}}{\gamma_{jX}^{\bot}}_{BE} + B_j^* \dyad{\gamma_{jX}^{\bot}}{\gamma_{jX}}_{BE} + C_j \dyad{\gamma_{jX}^{\bot}}{\gamma_{jX}^{\bot}}_{BE}\Big)\Big]\bigg), \label{eq:yield1} \end{split} \end{equation}\end{linenomath*} where the coefficients $A_j$, $B_j$, and $C_j$ are defined in Appendix \ref{app:coefficients}, and we omit presenting their explicit expressions here for simplicity. Since the state $\ket{\gamma_{jX}}_{BE}$ in the first term of Eq. 
(\ref{eq:yield1}) is a single-mode qubit state, its density matrix can be expressed as \begin{linenomath*}\begin{equation} \hat{\rho}_{jX} = \dyad{\gamma_{jX}}{\gamma_{jX}}_{BE} = \frac{1}{2} \sum_i P^{jX,vir}_i \hat{\sigma_i}, \end{equation}\end{linenomath*} where $P^{jX,vir}_i$ are the coefficients of the Bloch vector and $\hat{\sigma_i}$, with $i \in \{Id,x,y,z\}$, represent the identity and the three Pauli operators, respectively. Therefore, we have that \begin{linenomath*}\begin{equation}
A_j \Tr\Big[\hat{D}_{sX} \dyad{\gamma_{jX}}{\gamma_{jX}}_{BE} \Big] = A_j \big[P^{jX,vir}_{Id} q_{sX|Id} + P^{jX,vir}_x q_{sX|x} + P^{jX,vir}_y q_{sX|y} + P^{jX,vir}_z q_{sX|z}\big], \end{equation}\end{linenomath*}
where $P^{jX,vir}_i = \Tr \big[\hat{\sigma_i} \dyad{\gamma_{jX}}{\gamma_{jX}}_{BE} \big]$ and $q_{sX|i} = \frac{1}{2} \Tr \big[\hat{D}_{sX} \hat{\sigma}_i \big]$ can be regarded as the transmission rates of the operator $\hat{\sigma}_i$. These can be calculated by solving a system of linear equations with the events from the actual protocol, which we will explain later. Moreover, by choosing the $y$ axis of the Bloch sphere appropriately we can always set $P^{jX,vir}_y =0$ for all the Bloch vectors, since the PM just creates rotations in the $X$-$Z$ plane of the Bloch sphere. Indeed, even if the PM introduces loss depending on Alice's state selection, as long as the three states form a triangle in the Bloch sphere, we can apply such simplification \cite{tamaki}. As already mentioned in Section \ref{sec:description}, we note that any implementation of the loss-tolerant protocol requires that the three states form a triangle in the Bloch sphere.
Furthermore, it is possible to find both lower and upper bounds on the second term of Eq. (\ref{eq:yield1}). In particular, this term can be written as $\Tr[\hat{D}_{sX} N_j]$ where $N_j$ is the matrix $\Big[\begin{smallmatrix} C_j&B_j^*\\ B_j&0 \end{smallmatrix}\Big]$ with eigenvalues \begin{linenomath*}\begin{equation}
\lambda_{max_j} = \frac{C_j + \sqrt{{C_j}^2 + 4|B_j|^2}}{2} ~~~\text{and}~~~\lambda_{min_j} = \frac{C_j - \sqrt{{C_j}^2 + 4|B_j|^2}}{2}. \end{equation}\end{linenomath*} Using the properties of POVMs we have that the operators $\hat{D}_{sX}$ have eigenvalues between 0 and 1, therefore $\Tr[\hat{D}_{sX} N_j]$ is bounded by $\lambda_{min_{j}} \le \Tr[\hat{D}_{sX} N_j] \le \lambda_{max_{j}}$, since $\lambda_{min_{j}}$ is negative.
This means that the virtual yields satisfy: \begin{linenomath*}\begin{equation} \begin{split}
P_{Z_A} P_{Z_B} \Big(A_j \big[q_{sX|Id} + P_x^{jX,vir} ~q_{sX|x} + P_z^{jX,vir} ~ q_{sX|z}\big] + \lambda_{min_{j}}\Big) \le ~Y_{sX,jX}^{(Z)vir} \\
\le P_{Z_A} P_{Z_B} \Big(A_j \big[q_{sX|Id} + P_x^{jX,vir} ~ q_{sX|x} + P_z^{jX,vir}~ q_{sX|z}\big] + \lambda_{max_{j}}\Big). \\ \end{split} \label{eq:coef2} \end{equation}\end{linenomath*}
To find the transmission rates $q_{sX|i}$, the actual events we need to consider are those associated with the yields $Y_{sX,0Z}^{(Z)}$, $Y_{sX,1Z}^{(Z)}$ and $Y_{sX,0X}^{(X)}$. These are defined as $Y_{sX,j\beta}^{(\beta)} = P_{j\beta} P_{X_B} \Tr \big[\hat{D}_{sX} \dyad{\Phi_{j\beta}}{\Phi_{j\beta}}_{BE}\big]$ for $j\beta \in \{ 0Z, 1Z, 0X\}$, where the normalised actual states $\ket{\Phi_{j\beta}}_{BE}$ are defined in Eq. (\ref{eq:multi-mode}). That is, $\ket{\Phi_{j\beta}}_{BE}$ are the states emitted by Alice in the actual protocol when she chooses the bit value $j$ and the basis $\beta$, in the presence of multi-mode signals. Using exactly the same method explained above, we obtain \begin{linenomath*}\begin{equation} \begin{split} Y_{sX,j\beta}^{(\beta)} = & ~ P_{j\beta} P_{X_B}\bigg( E_{j\beta} \Tr\Big[\hat{D}_{sX} \dyad{\phi_{j\beta}}{\phi_{j\beta}}_{BE} \Big] \\ &+ \Tr\Big[\hat{D}_{sX}\Big(F_{j\beta} \dyad{\phi_{j\beta}}{\phi_{j\beta}^{\bot}}_{BE} +F^*_{j\beta} \dyad{\phi_{j\beta}^{\bot}}{\phi_{j\beta}}_{BE} + G_{j\beta} \dyad{\phi_{j\beta}^{\bot}}{\phi_{j\beta}^{\bot}}_{BE}\Big)\Big]\bigg), \end{split} \label{eq:actualY} \end{equation}\end{linenomath*}
where $E_{j\beta} = |a_{j\beta}|^2$, $F_{j\beta} = a_{j\beta}b_{j\beta}^*$, $F^*_{j\beta} = a_{j\beta}^*b_{j\beta}$ and $G_{j\beta} = |b_{j\beta}|^2$. Therefore, we find that the actual yields satisfy \begin{linenomath*}\begin{equation} \begin{split}
P_{j\beta} P_{X_B} \Big(E_{j\beta} ~\big[q_{sX|Id} + P^{j\beta}_x q_{sX|x} + P^{j\beta}_z q_{sX|z}\big] + \lambda_{min_{j\beta}}\Big) \le ~ Y_{sX,j\beta}^{(\beta)} \\
\le P_{j\beta} P_{X_B} \Big(E_{j\beta}~ \big[q_{sX|Id} + P^{j\beta}_x q_{sX|x} + P^{j\beta}_z q_{sX|z}\big] + \lambda_{max_{j\beta}}\Big), \\ \end{split} \label{eq:hello} \end{equation}\end{linenomath*} where \begin{linenomath*}\begin{equation}
\lambda_{max_{j\beta}} = \frac{G_{j\beta} + \sqrt{G_{j\beta}^2 + 4|F_{j\beta}|^2}}{2} ~~~\text{and}~~~ \lambda_{min_{j\beta}} = \frac{G_{j\beta} - \sqrt{G_{j\beta}^2 + 4|F_{j\beta}|^2}}{2}, \label{eq:coef3} \end{equation}\end{linenomath*} are the eigenvalues for the non-qubit part of the actual states and, $P^{j\beta}_{x}$ and $P^{j\beta}_{z}$ are the coefficients of the Bloch vector for the actual states. By substituting $j\beta \in \{ 0Z, 1Z, 0X\}$ in Eqs. (\ref{eq:hello}) and (\ref{eq:coef3}), we obtain a system of three linear inequalities, which can be expressed as \begin{linenomath*}\begin{equation} \begin{split}
\begin{bmatrix} q_{sX|Id},&q_{sX|x},&q_{sX|z}\end{bmatrix} \hat{A} + \begin{bmatrix} \lambda_{min_{0Z}},& \lambda_{min_{1Z}},& \lambda_{min_{0X}} \end{bmatrix} \le \begin{bmatrix} \frac{Y_{sX,0Z}^{(Z)}}{P_{0Z} P_{X_B}}, & \frac{Y_{sX,1Z}^{(Z)}}{P_{1Z} P_{X_B}}, & \frac{Y_{sX,0X}^{(X)}}{P_{0X} P_{X_B}} \end{bmatrix} \\
\le \begin{bmatrix} q_{sX|Id},&q_{sX|x},&q_{sX|z}\end{bmatrix} \hat{A} + \begin{bmatrix} \lambda_{max_{0Z}},&\lambda_{max_{1Z}},&\lambda_{max_{0X}} \end{bmatrix}, \end{split} \label{eq:bounds} \end{equation}\end{linenomath*} where $\hat{A} := (V^T_{0Z}, V^T_{1Z}, V^T_{0X})$ in which $V_{j\beta} = E_{j\beta} (1, P^{j\beta}_x,P^{j\beta}_z)$ and where the superscript $T$ means transpose.
By rearranging Eq. (\ref{eq:bounds}) we obtain the bounds on the transmission rates $q_{sX|Id}$, $q_{sX|x}$, and $q_{sX|z}$ to be \begin{linenomath*}\begin{equation} \begin{split}
\bigg( \begin{bmatrix} \frac{Y_{sX,0Z}^{(Z)}}{P_{0Z} P_{X_B}}, & \frac{Y_{sX,1Z}^{(Z)}}{P_{1Z} P_{X_B}}, & \frac{Y_{sX,0X}^{(X)}}{P_{0X} P_{X_B}}\end{bmatrix} - \begin{bmatrix} \lambda_{max_{0Z}},&\lambda_{max_{1Z}},&\lambda_{max_{0X}} \end{bmatrix} \bigg) \hat{{A}^{-1}} \le \begin{bmatrix} q_{sX|Id},&q_{sX|x}, &q_{sX|z}\end{bmatrix} \\ \le \bigg(\begin{bmatrix} \frac{Y_{sX,0Z}^{(Z)}}{P_{0Z} P_{X_B}}, & \frac{Y_{sX,1Z}^{(Z)}}{P_{1Z} P_{X_B}}, & \frac{Y_{sX,0X}^{(X)}}{P_{0X} P_{X_B}} \end{bmatrix} - \begin{bmatrix} \lambda_{min_{0Z}},& \lambda_{min_{1Z}},& \lambda_{min_{0X}} \end{bmatrix} \bigg) \hat{{A}^{-1}}, \end{split} \label{eq:bounds2} \end{equation}\end{linenomath*} where $\hat{A}^{-1}$ is the inverse of the matrix $\hat{A}$.
By solving Eq. (\ref{eq:bounds2}), we can calculate the transmission rates and then substitute them into Eq. (\ref{eq:coef2}) to find the upper bounds on the virtual yields $Y_{0X,1X}^{(Z)vir}$ and $Y_{1X,0X}^{(Z)vir}$. Finally, by using these upper bounds on the virtual yields and the yields from the actual events we can estimate the phase error rate $e_X$ in Eq. (\ref{eq:ex}).
As already mentioned previously, this technique is quite general and could be applied to many other QKD protocols. As an example, in Appendix \ref{app:mdi} we outline how this analysis could be performed for MDI-QKD.
\section{Simulation of the key rate} \label{sec:simulation}
\subsection{Particular device model} Only for the purpose of the simulation, we now consider a particular device model and a particular THA. In general, to experimentally guarantee that the three states emitted by Alice remain in two dimensions, i.e., in a single-mode qubit, her PM needs to have the same temporal, spectral, spatial and polarisation mode independently of the bit and basis choices. However, due to imperfections in the devices this condition is hard to fulfil. Some counter-measures against these imperfections have been suggested \cite{xu,xu2,jiang,mynbaev}, but they cannot rigorously ensure a single-mode qubit. Therefore, it is crucial to consider how device's flaws can be taken into account in a security proof. This is the aim of our analysis. For simplicity, among many imperfections, we select the polarisation mode as an example of how to use our framework.
A change in polarisation can arise from the imperfect alignment of the laser with the principal axis of the PM and/or when the PM is polarisation dependent, i.e., the state of polarisation of the signals prepared might be different for each encoding phase value. In principle, this could be avoided by using a polarisation beam splitter (PBS) that selects a single polarisation mode. In practice, however, because of the finite extinction ratio of the PBS this is usually not the case. Here, we relax the need for a perfect PBS by considering a polarisation multi-mode scenario. We remark, nonetheless, that our analysis can be applied to any multi-mode scenario. Using our formalism, we can express the states sent by Alice in the scenario considered in an analogous way to Eq. (\ref{eq:multi-mode}): \begin{equation}
\ket{\Omega_{j\beta}}_B = \cos \theta_{j\beta} \ket{\omega_{j\beta}}_{HB}+ \sin \theta_{j\beta} \ket{\omega_{j\beta}}_{VB}, \label{eq:polarisation} \end{equation} for $j\beta \in \{0Z,1Z,0X\}$, where the subscripts $H$ and $V$ refer to the horizontal and vertical polarisation modes, respectively. That is, now the polarisation state of $\ket{\omega_{j\beta}}_B$ depends on Alice's bit and basis choices instead of being the same independently of her encoding. Next, we add the SPF and the THA to this particular device model.
For the states $\ket{\omega_{j\beta}}_{HB}$ and $ \ket{\omega_{j\beta}}_{VB}$ we use the definitions in Eq. (\ref{eq:alice_states}), where they both live in a qubit space. Also, by using Eq. (\ref{eq:alice_states}) these states already include SPFs whenever the parameter $\delta > 0$. As stressed in Section \ref{sec:description}, since in this case we know the form of the states we do not need to consider the worst case scenario but only the inner product $\tensor[_{HB}]{\braket{\omega_{j\beta} | \omega_{j'\beta'}}}{_{VB}}= 0$ for all $j,j',\beta$ and $\beta'$.
Additionally, we consider an active information leakage in our device model. For this, we assume that Eve sends strong light into Alice's PM, which is then back-reflected and exits Alice's lab in the form \begin{equation} \ket{\xi_{j\beta}}_E = C_I \ket{e}_E + C_D \ket{e_{j\beta}}_E. \label{eq:eve_states} \end{equation}
In this expression, $|C_I|^2 + |C_D|^2 = 1$, and $\ket{e}_E$ ($\ket{e_{j\beta}}_E$) represents (represent) the setting independent (dependent) state (states) on Alice's bit and basis choice, where we assume that ${\braket{e | e_{j\beta}}}{_E} = 0$. That is, the state $\ket{e}_E$ ($\ket{e_{j\beta}}_E$) provides Eve with no (some) information about Alice's bit and basis values each given time. Therefore, our model for the THA can be parameterised by only two parameters, $C_I$ and $C_D$, and no further detailed information is needed to apply our analysis. For instance, when we increase isolation on Alice's sending device, the independent component increases and Eve obtains less information about the states being sent. Moreover, in the absence of further information about the states $\ket{e_{j\beta}}_E$, we assume the worst case scenario where these states are orthogonal to each other, i.e., ${\braket{e_{j\beta} | e_{j'\beta'}}}{_E} = 0$ for any $(j,\beta) \neq (j',\beta')$. Clearly, if Alice and Bob know the states $\ket{e_{j\beta}}_E$, this information can be trivially included in the formalism below.
If $\ket{\xi_{j\beta}}_E$ is, for instance, a coherent state, then $\ket{e}_E$ is the vacuum state (i.e., $\ket{e}_E = \ket{v}_E$), $C_I = e^{-\mu/2}$ and $C_D = \sqrt{1 - e^{-\mu}}$, where $\mu$ is the intensity of Eve's back reflected light. In this case, note that the condition ${\braket{e_{j\beta} | e_{j'\beta'}}}{_E} = 0$ is not satisfied since Eve will never be able to perfectly distinguish the states dependent on Alice's encoding. The value of this overlap depends on the isolation of the devices. Below, however, we conservatively assume for simplicity the worst case scenario where this overlap is zero.
Putting Eq. (\ref{eq:polarisation}) and Eq. (\ref{eq:eve_states}) together, Alice's emitted state for the single photon pulses is modelled as \begin{equation} \ket{\Phi_{j\beta}}_{BE} = \ket{\Omega_{j\beta}}_B \otimes \ket{\xi_{j\beta}}_E. \label{eq:psi} \end{equation} By using Eqs. (\ref{eq:alice_states})-(\ref{eq:polarisation})-(\ref{eq:eve_states})-(\ref{eq:psi}) and by assuming that $\ket{\xi_{j\beta}}_E$ are coherent states, we obtain \begin{equation} \begin{split} \ket{\Phi_{j\beta}}_{BE} &= \big( \cos \theta_{j\beta} \ket{\omega_{j\beta}}_{HB} + \sin \theta_{j\beta} \ket{\omega_{j\beta}}_{VB} \big) \otimes \big(C_I \ket{v}_E + C_D \ket{e_{j\beta}}_E \big) \\ & = \cos \theta_{j\beta} C_I \ket{\omega_{j\beta}}_{HB} \ket{v}_E + \cos \theta_{j\beta} C_D \ket{\omega_{j\beta}}_{HB} \ket{e_{j\beta}}_E + \sin \theta_{j\beta} \ket{\omega_{j\beta}}_{VB} \otimes \big(C_I \ket{v}_E + C_D \ket{e_{j\beta}}_E \big). \\ \end{split} \label{eq:fullstate} \end{equation} The first term of Eq. (\ref{eq:fullstate}) has polarisation $H$ and is insensitive to the THA, it corresponds to $a_{j\beta} \ket{\phi_{j\beta}}_{BE}$ in Eq. (\ref{eq:multi-mode}). Similarly, the other terms have either polarisation $V$ and/or are affected by the THA, and together they correspond to $b_{j\beta} \ket{\phi_{j\beta}^\perp}_{BE}$ in Eq. (\ref{eq:multi-mode}). In this case, the unnormalised virtual states given by Eq. (\ref{eq:statej}) have now the form
\begin{equation} \begin{split} \ket{\psi}_{BE, jX, vir} = \frac{1}{2} \Bigg[& C_I \sqrt{\cos \theta_{0Z}^2 - (-1)^j ~2\cos \theta_{0Z} \cos \theta_{1Z} \sin \frac{\delta}{2} + \cos \theta_{1Z}^2} ~ \ket{\gamma_{jX}}_{BE} \\ &+ \sqrt{C_I^2 \bigg(\sin \theta_{0Z}^2 - (-1)^j ~2 \sin \theta_{0Z} \sin \theta_{1Z} \sin \frac{\delta}{2} + \sin \theta_{1Z}^2 \bigg) + 2C_D^2 } ~ \ket{\gamma_{jX}^\perp}_{BE}\Bigg], \label{eq:statejj} \end{split} \end{equation}
where we have used the relationship ${\braket{\omega_{0Z} | \omega_{1Z}}}{_{HB}}={\braket{\omega_{0Z} | \omega_{1Z}}}{_{VB}}= - \sin (\frac{\delta}{2})$. In order to estimate the phase error rate, we need to calculate the transmission rates $q_{sX|Id}$, $q_{sX|x}$ and $q_{sX|z}$ using the actual yields. For this, we use Eq. (\ref{eq:bounds2}) where in this particular example, the matrix $\hat{A}$ is \begin{equation} \hat{A} = \begin{bmatrix} E_{0Z}&E_{1Z}&E_{0X}\\ 0 & -E_{1Z}\sin(\delta) & E_{0X}\sin(\pi/2 + \delta/2) \\ E_{0Z} & -E_{1Z}\cos(\delta) & E_{0X}\cos(\pi/2 + \delta/2) \end{bmatrix}, \end{equation} where $E_{j\beta} = C_I^2 \cos^2 \theta_{j\beta}$. Then, we can find the virtual yields by using Eq. (\ref{eq:coef2}) where, in this example, the coefficients of the Bloch vectors are \begin{equation} \begin{split} & P_x^{jX,vir} = \frac{(-1)^j ~2 \cos \theta_{0Z} \cos \theta_{1Z} \cos \frac{\delta}{2} - 2 \cos \theta_{1Z}^2 \cos \frac{\delta}{2} \sin \frac{\delta}{2}}{\cos \theta_{0Z}^2 - (-1)^j ~2 \cos \theta_{0Z} \cos \theta_{1Z} \sin \frac{\delta}{2} + \cos \theta_{1Z}^2}, \\ & P_z^{jX,vir} = \frac{\cos \theta_{0Z}^2 - (-1)^j ~2 \cos \theta_{0Z} \cos \theta_{1Z} \sin \frac{\delta}{2} + \cos \theta_{1Z}^2 \big(1 - 2 \cos^2 \frac{\delta}{2}\big)}{\cos \theta_{0Z}^2 - (-1)^j ~2 \cos \theta_{0Z} \cos \theta_{1Z} \sin \frac{\delta}{2} + \cos \theta_{1Z}^2}. \\ \end{split} \end{equation} Finally, one can directly use Eq. (\ref{eq:ex}) to estimate the phase error rate $e_X$.
\subsection{Lo-Preskill's analysis with imperfectly characterised states} With the method described above, it is possible to employ the leaky source without compromising the security of the QKD system. Nonetheless, depending on the situation and the particular experimental parameters it might be beneficial to consider another method that in some cases might provide a higher key generation rate. Therefore, it is important to compare our generalised loss-tolerant protocol with an alternative method, say the Lo-Preskill's analysis introduced in \cite{lo4}. In Appendix \ref{app:lopreskill}, we provide a detailed description of this analysis.
To ensure a fair comparison between both protocols we consider the efficient four-state loss-tolerant protocol, where the four states of the BB84 protocol \cite{bennett} are used to run two loss-tolerant protocols simultaneously. That is, when Alice emits the state $\ket{\omega_{0X}}_B$ ($\ket{\omega_{1X}}_B$) she considers that it belongs to the first (second) loss-tolerant protocol, while each of the two protocols is randomly chosen by Alice before sending the pulse. See Eq. (\ref{eq:extrastate}) for the definition of the state $\ket{\omega_{1X}}_B$ in the presence of SPFs. This means that no modifications to the hardware of the standard BB84 protocol are required and therefore, the four-state loss-tolerant protocol is equivalent to the BB84 protocol from an experimental point of view. Furthermore, we consider the same assumptions about the states for both of them. Namely, we employ the decomposition of the state of the single-photon pulses emitted by Alice into a single-mode qubit and any other modes which are orthogonal to the former, i.e., Eq. (\ref{eq:multi-mode}). For the simulations, we apply Lo-Preskill's analysis to the same particular device model described in Section \ref{sec:simulation}.A.
In order to quantify the phase error rate $e_X$ in the Lo-Preskill's analysis we use the following expression \cite{lo4} \begin{equation} e_X \le {e_Z} + 4 \Delta' (1-\Delta') (1 - 2 {e_Z}) + 4 (1 - 2\Delta' ) \sqrt{\Delta' (1 - \Delta') {e_Z} (1 - {e_Z})}, \label{eq:eph_delta} \end{equation} which depends on the bit error rate $e_Z$ and on the imbalance $\Delta'$ of a quantum coin. To find this imbalance, we need to calculate the inner product of Eqs. (\ref{eq:ent1}) and (\ref{eq:ent2}). In turn, these equations depend on the states $\ket{\Phi_{j\beta}}_{BE}$, defined in Eq. (\ref{eq:fullstate}), therefore, their overlaps allow us to find $\Delta'$. Using Eq. (\ref{eq:fullstate}) we can calculate the inner product of these states to be \begin{equation}
{\braket{\Phi_{j\beta} | \Phi_{j'\beta'}}}{_{BE}} = \cos \theta_{j\beta} \cos \theta_{j'\beta'} C_I^2 {\braket{\omega_{j\beta} | \omega_{j'\beta'}}}{_B}, \label{eq:overlap2}
\end{equation} where $j,j' \in \{0,1\}$ and $\beta,\beta' \in \{X,Z\}$.
Using the same definition for the secret key rate given by Eq. (\ref{eq:keyrate}), below we compare both security approaches for the same device model as a function of the device parameters.
\subsection{Results and discussion} We show the results obtained for $R$ as a function of the overall system loss (which includes both the channel attenuation and the loss at Bob's receiver), for different values of $\delta$, $\theta_{j\beta}$ and $\mu$ which correspond to the SPFs, non-qubit assumption and THA, respectively. The angles $\theta_{j\beta}$ are chosen such that they are associated with Alice's encoding of the states $\ket{\omega_{j\beta}}_{B}$. That is, $\theta_{0Z}=0$, $\theta_{1Z}=\pi \hat\theta$ and $\theta_{0X}=\frac{\pi}{2}\hat\theta$ for a certain angle $\hat\theta$. In our simulations, we consider the experimental parameters to be: the dark count rate $p_d = 10^{-7}$, $f = 1.16$ and the fiber loss coefficient $\alpha = 0.2$ dB/km. Moreover, we assume for simplicity that in the loss-tolerant protocol and in the Lo-Preskill's analysis $P_{Z_A} = P_{Z_B} = \frac{1}{2}$. This selection of probabilities might not be ideal but it is sufficient for the purpose of the simulation. By using the channel model described in Appendix \ref{app:channel}, we find that $Y_Z = Y_{0Z,0Z}^{(Z)} + Y_{1Z,0Z}^{(Z)} + Y_{0Z,1Z}^{(Z)} + Y_{1Z,1Z}^{(Z)} = P_{Z_A} P_{Z_B} \big[ 4 (1 - \frac{\eta}{2}) p_d + \eta\big]$ where $\eta$ is the overall transmission efficiency of the system. The bit error rate is then given by \begin{linenomath*}\begin{equation} e_Z = \frac{2\big(1-\frac{\eta}{2}\big) p_d + \frac{\eta}{2} + \frac{\eta}{4}(\cos 2 \delta + \cos \delta)(p_d-1)}{4\big(1-\frac{\eta}{2}\big) p_d + \eta}. \label{eq:biterror} \end{equation}\end{linenomath*}
\subsubsection{Generalised loss-tolerant protocol} In order to evaluate how the different imperfections of the source affect the key generation rate we analyse each of them separately. For the simulations, we select the SPFs according to the experimental results reported in \cite{xu,honjo,li}. There are some works related with the mode dependency \cite{xu,tang2}, but unfortunately they do not directly provide the value of $\hat\theta$. Therefore, we evaluate $\hat\theta$ over a big range and choose the angles $\theta_{j\beta}$ to be: $\theta_{0Z}=0$, $\theta_{1Z}=\pi \hat\theta$ and $\theta_{0X}=\frac{\pi}{2}\hat\theta$ for a certain angle $\hat\theta$. This choice comes from Alice's encoding of the different states, which means that $\theta_{j\beta}$ is associated with the prepared state $\ket{\omega_{j\beta}}_{B}$. Obviously, a better experimental characterisation of the source would be essential to improve the accuracy of the current parameters. Finally, for the intensity of Eve's back reflected light during the THA, we select a range based on the work presented in \cite{lucamarini}.
Furthermore, we consider both the cases when the mode dependency parameter $\theta_{j\beta}$, which is associated with the non-qubit assumption, is independent and dependent on Alice's bit and basis choice. When it is independent (i.e., when $\theta_{j\beta} = \theta$), Eqs. (\ref{eq:fullstate}) and (\ref{eq:statejj}) are simplified and become, respectively \begin{equation} \begin{split} \ket{\Phi_{j\beta}}_{BE} &= \big( \cos \theta \ket{\omega_{j\beta}}_{HB} + \sin \theta \ket{\omega_{j\beta}}_{VB} \big) \otimes \big(C_I \ket{v}_E + C_D \ket{e_{j\beta}}_E \big) \\ & = \cos \theta~ C_I \ket{\omega_{j\beta}}_{HB} \ket{v}_E + \big[ \cos \theta~ C_D \ket{\omega_{j\beta}}_{HB} \ket{e_{j\beta}}_E + \sin \theta \ket{\omega_{j\beta}}_{VB} \otimes \big(C_I \ket{v}_E + C_D \ket{e_{j\beta}}_E \big) \big], \\ \end{split} \label{eq:fullstate2} \end{equation} and \begin{equation} \ket{\psi}_{BE,jX,vir} = C_I \cos \theta \sqrt{2 - (-1)^j ~2\sin \frac{\delta}{2}} ~\ket{\gamma_{jX}}_{BE} + \sqrt{C_I^2 \sin^2 \theta \bigg(2 - (-1)^j~ 2\sin \frac{\delta}{2}\bigg) + 2 C_D^2} ~\ket{\gamma_{jX}^\perp}_{BE}. \end{equation} Using these equations, and following the method described in Section \ref{sec:estimation}, we estimate the phase error rate. When the mode dependency parameter is setting dependent, we use Eqs. (\ref{eq:fullstate}) and (\ref{eq:statejj}) with $\theta_{j\beta}$, for $j\beta \in \{0Z,1Z,0X\}$.
Fig. \ref{fig:lossTolerant}(a) demonstrates that even if $\delta$ increases the key rate stays approximately the same, which means that Eve cannot enhance the flaws of the signals by exploiting channel loss. This is one main advantage of the loss-tolerant protocol \cite{tamaki}.
We consider the case when $\theta$ is independent of Alice's encoding in Fig. \ref{fig:lossTolerant}(b). The perfect scenario, i.e., when $\theta = 0$, implies that the signals prepared by Alice are in the single horizontal mode $H$, as seen in Eq. (\ref{eq:fullstate2}). If $\theta$ increases, the fraction of vertical polarisation also increases and the states sent become progressively more imperfect and vulnerable to a possible attack. For instance, these states form a three-dimensional Hilbert space and hence Eve can perform an unambiguous state discrimination (USD) attack \cite{chefles, dusek}, in which she sends the identified state to Bob only when the USD measurement succeeds. This results in a decrease of the secret key rate as shown in Fig. \ref{fig:lossTolerant}(b). Additionally, our results show that as long as $\theta$ is sufficiently small (for the experimental parameters considered this means $\theta \lessapprox 10^{-7}$) the effect on the secret key rate is small, since it is approximately the same as when $\theta=0$. It is important to note, however, that because we are assuming a setting independent $\theta$ we have the freedom to choose a good polarisation mode. For instance, instead of the definition used above we could have called $\cos \theta \ket{\omega_{jZ}}_{HB}+\sin \theta \ket{\omega_{jZ}}_{VB}$ our mode if Alice had identified $\theta$ exactly, and regarded it as the single-mode qubit, in turn affecting the results shown in Fig. \ref{fig:lossTolerant}(b). This means that Eve would be unable to exploit this flaw and obtain information about the key, and hence the secret key rate would be unaffected. This scenario would be equivalent to $\theta = 0$ and it is not the one considered in Fig. \ref{fig:lossTolerant}(b). For the setting dependent parameter $\theta_{j\beta}$, we obtain a slightly lower key rate as expected, since this scenario provides more information about Alice's encoding to a possible eavesdropper. 
The reason for this difference lies in the orthogonality of the states, which increases when we have a setting dependent $\theta_{j\beta}$.
Finally, Fig. \ref{fig:lossTolerant}(d) shows how the THA affects the generalised loss-tolerant protocol, where $\mu$ quantifies the intensity of Eve's back reflected light. An increase in $\mu$ results in a lower secret key rate since Eve acquires more information about Alice's encoding thus compromising the security of the system. Furthermore, no key can be obtained around $\mu \gtrapprox 10^{-3}$. By comparing Figs. \ref{fig:lossTolerant}(b) and \ref{fig:lossTolerant}(d), it can be seen that there is a relationship between $\mu$ and $\theta$. In particular, the resulting secret key rate coincides when $\mu \sim \theta^2$. For example, the key rate is the same when $\mu = 10^{-6}$ and when $\theta= 10^{-3}$. This relationship can also be obtained analytically from the coefficient of the first term in Eq. (\ref{eq:fullstate2}), by using approximations to the series expansion of $\cos \theta$ and $e^{-\mu}$.
\subsubsection{Comparison between the generalised loss-tolerant protocol and the Lo-Preskill's analysis} In a similar manner, we can evaluate how the secret key generation rate $R$ depends on the device parameters for the Lo-Preskill's analysis. The results and discussion are in Appendix \ref{app:lopreskill}. Below we compare both security proofs and identify which one provides a better $R$ depending on the experimental set-up. This way an experimentalist can choose which method to use for known device parameters, and ensure the security of the generated key between Alice and Bob. Here, we select the SPFs to be either $\delta = 0.063$ or $\delta = 0.126$, and for the mode dependency we choose $\hat\theta = 10^{-3}$ and $\hat\theta = 10^{-5}$. In this comparison we use the setting dependent mode dependency parameter since it corresponds to a more realistic scenario. Finally, for the intensity of Eve's back reflected light during the THA we use $\mu = 10^{-10}$, $\mu = 10^{-7}$ and $\mu = 10^{-4}$. The results are shown in Fig. \ref{fig:comparison1}. Note that, the blue and red dashed lines coincide (for the resolution presented) in all graphs. The reason lies in the value of the variable $\mu$. That is, for $\mu = 10^{-10}$ and $\mu = 10^{-7}$, the Lo-Preskill's analysis results in approximately the same secret key rate (see Appendix \ref{app:lopreskill} for more details).
\begin{figure}
\caption{Asymptotic secret key rate $R$ versus the overall system loss measured in dB for the generalised loss-tolerant protocol for various values of $\delta$, $\theta_{j\beta}$ and $\mu$. When we change the value of one of these parameters the others are kept constant and set to zero, which allows us to observe how each parameter affects the secret key rate. (a) Even if the parameter $\delta$ that characterises the SPFs increases, $R$ stays almost the same. (b) Setting independent $\theta$: As $\theta$ gets larger, the further away we are from the qubit scenario, hence $R$ decreases. (c) Setting dependent $\theta_{j\beta}$: The key generation rate decreases even further due to passive information leakage, in particular, $\hat\theta \gtrapprox 10^{-2}$ no longer provides a positive key generation rate. (d) As the intensity $\mu$ increases Eve obtains more information about the key causing the key rate to decrease.}
\label{fig:lossTolerant}
\end{figure}
By comparing Figs. \ref{fig:comparison1}(a) and \ref{fig:comparison1}(c), or Figs. \ref{fig:comparison1}(b) and \ref{fig:comparison1}(d), we can see how an increase in the parameter $\delta$, which is associated with SPFs, affects both protocols. For the generalised loss-tolerant protocol (LT) the key rate stays approximately the same as expected, since this method is loss tolerant to SPFs. On the other hand, the Lo-Preskill's analysis (LP) is more influenced by SPFs (see also Fig. \ref{fig:extLP} in Appendix \ref{app:lopreskill}). The reason for this difference is that in LP it is assumed the worst case scenario, in which Eve can enhance the basis dependence of the signals by exploiting the channel loss. However, in LT no such assumption is required hence the performance is maintained. This means that LT will typically outperform LP in the presence of high SPFs.
To compare LT and LP as a function of the setting dependent $\theta_{j\beta}$ we can contrast Figs. \ref{fig:comparison1}(a) and \ref{fig:comparison1}(b), or Figs. \ref{fig:comparison1}(c) and \ref{fig:comparison1}(d). The graphs show clear differences due to decreasing the value of $\hat\theta$, especially for the LT case. In Fig. \ref{fig:comparison1}(a) LP reaches a longer distance for any value of $\mu$, but when $\hat\theta = 10^{-5}$ LT gets better, particularly for $\mu = 10^{-10}$ as seen in Fig. \ref{fig:comparison1}(b). Furthermore, Fig. \ref{fig:comparison1}(b) shows that even when there are SPFs, LP can still do better \\
\begin{figure}
\caption{Secret key rate $R$ versus the overall system loss measured in dB for the generalised loss-tolerant protocol (LT) and Lo-Preskill (LP) analysis. The blue and red lines are superimposed in all graphs. (a) The Lo-Preskill's protocol performs better in this scenario because the SPFs are small but $\hat\theta$ is high. (b) For a smaller $\hat\theta$, the loss-tolerant analysis is better when $\mu= 10^{-10}$. (c) The generalised loss-tolerant protocol performs better when $\hat\theta$ is larger even if $\delta$ is high. (d) For large $\delta$ and small $\hat\theta$, the generalised loss-tolerant clearly surpasses the Lo-Preskill's analysis when $\mu= 10^{-10}$ or $\mu= 10^{-7}$.}
\label{fig:comparison1}
\end{figure}
\noindent than LT if the states sent are far from the idealised qubit. This is because the non-qubit assumption affects LT more negatively than LP (see Figs. \ref{fig:lossTolerant}(c) and \ref{fig:extLP}(c)).
When we compare the values of $\mu$ for the LT and LP we can see a similar trend in the secret key rate for all graphs in Fig. \ref{fig:comparison1}. Namely, the difference between the curves when $\mu = 10^{-10}$ (blue) and $\mu = 10^{-4}$ (yellow) is much larger for LT than for LP, which means that the THA is worse for the LT. However, $\mu$ is a parameter that might be easily controlled experimentally by introducing passive countermeasures, such as optical isolators \cite{lucamarini}. Indeed, in \cite{lucamarini} it has been shown, for instance, that a value of $\mu = 10^{-6}$ could be easily achieved in practice. For example, even if Eve sends Alice optical pulses with $10^{20}$ photons, practical combinations of the components of Alice's transmitter could guarantee a total optical isolation of -170 dB, which would be enough to achieve $\mu = 10^{-6}$ \cite{lucamarini}. This means that the LT method may be a better alternative when the SPFs are more dominant and mode dependency is small, since it outperforms the LP analysis in Figs. \ref{fig:comparison1}(b) and \ref{fig:comparison1}(d).
As explained above, the non-qubit assumption and the THA affect the LT more than the LP analysis. This might be because our generalisation of the loss-tolerant protocol is overestimating Eve. When we calculate the bounds for the yields we obtain that the eigenvalues $\lambda_{max}$ and $\lambda_{min}$ depend on the state preparation. However, this is probably too pessimistic because there might be some additional constraints among them, since the space spanned by the states associated to $0Z$ and $1Z$, respectively, is not orthogonal to the one spanned by the virtual states associated to $0X^{vir}$ and $1X^{vir}$. This means that these separate optimisations should not be possible in practice because Eve cannot achieve optimal values for all $\lambda$s. In other words, by improving our characterisation of the states we can improve the performance of the generalised loss-tolerant protocol. This is however beyond the scope of this paper and we leave it for future work. \\
In order to further investigate the differences between the two methods, we determine the parameter regimes where their performance is identical. First, by setting $\hat\theta = 10^{-6}$, we can identify which values of $\delta$ and $\mu$ provide the same key generation rate $R$ for LT and LP. The results are presented in Fig. \ref{fig:comparison2}(a), where the diagram clearly shows which protocol performs better given a certain $\delta$ and $\mu$: above the fitted curve, the LT provides a better performance but below the curve LP is the preferable method. In other words, as the SPFs increase the LT is superior but, as $\mu$ increases LP becomes more suitable.
\begin{figure}
\caption{The fitted line corresponds to those experimental parameters that result in the same key generation rate $R$ for both methods, the generalised loss-tolerant protocol and the Lo-Preskill's analysis. Above the line, the generalised loss-tolerant protocol performs better and below the line, the Lo-Preskill's analysis is the preferred method. The data points were fitted using a shape-preserving interpolant in Matlab. (a) Plot of $\delta$ against $\mu$ for $\hat\theta = 10^{-6}$. (b) Plot of $\delta$ against $\hat\theta$ for $\mu = 10^{-6}$.}
\label{fig:comparison2}
\end{figure}
Similar results are obtained when $\mu = 10^{-6}$. This case is particularly useful since in principle we can control the value of $\mu$ experimentally by the amount of isolation we use in our devices. Again, as SPFs increase the LT becomes better, giving a better estimation of the phase error rate and a better secret key generation rate.
\section{Conclusion} \label{sec:conclusion} Typical security proofs ignore many imperfections of experimental devices thus hindering the security claim of quantum key distribution (QKD). In this work, we have generalised the loss-tolerant QKD protocol to accommodate general imperfections. In particular, our formalism is valid for a general device model with, for instance, state preparation flaws (SPFs), mode dependency and Trojan horse attacks (THAs), which result in passive and active information leakage to an eavesdropper. Using this multi-mode scenario, we have shown that the qubit assumption can be removed from the loss-tolerant protocol without compromising the security of the QKD scheme. We present a formalism that can be used to estimate the phase error rate by finding the transmission rates of some virtual states and assuming the general state structure defined in Eq. (\ref{eq:multi-mode}). Therefore, in principle it can be applied to most QKD protocols. \\
In order to compare our generalised loss-tolerant protocol with other security proofs we have applied the Lo-Preskill's analysis \cite{lo4} to the same device model. In so doing, we have identified which approach delivers a higher secret key rate as a function of the experimental parameters. For example, the results obtained show that Lo-Preskill's method performs better under the non-qubit assumption and the THA but the generalised loss-tolerant protocol is better when there are SPFs. Since the THAs can be controlled using passive countermeasures, such as optical isolators, we have shown that in some cases the generalised loss-tolerant protocol might be the preferable method when the SPFs are more dominant. This way, our work can be used as a guideline to improve current experimental implementations in which multi-mode QKD is unavoidable. Moreover, it highlights the importance of source characterisation for more realistic security proofs.
For completeness, we also note that Ref. \cite{wang3} has recently proposed a computational toolbox that can be used to numerically estimate the phase error rate of a QKD protocol, and such technique could be applied to the scenario considered in this paper. Essentially like the Lo-Preskill's analysis, their technique only requires the knowledge of the inner products between the states emitted by Alice and is mathematically simple, which is a striking difference to previous numerical analyses \cite{coles,winick}. That is, the approach in~\cite{wang3} can also remove the qubit assumption and include side-channels when estimating the phase error rate. There are, however, some relevant differences between that method and our formalism, besides the obvious one, i.e., that our work is an analytical technique. The approach in~\cite{wang3} requires a full characterisation of the side-channels in order to obtain the inner product of the states, while ours does not, resulting in a simpler characterisation of the source. Moreover, in the absence of side-channels, their method is not loss-tolerant in some parameter regimes, while ours is always loss-tolerant, which is essential to guarantee a good performance over long distances. Furthermore, their analysis considers pure states while our method also applies to the mixed-state scenario. Despite these differences, it would be interesting to combine the advantages of both methods to achieve a better implementation security, but we leave this for future works.
\section{Three-state MDI-QKD} \label{app:mdi}
Here, we describe how our generalised loss-tolerant protocol can be applied to MDI-QKD \cite{lo2}. We assume that Alice and Bob prepare three states in the form of Eq. (\ref{eq:multi-mode}), thus including active and passive information leakage. These states are then sent to an untrusted relay Eve which is located between Alice and Bob. Note that, we name the relay Eve (instead of using the typical name of Charles) to simplify the notation, since in this way we can use the subscript E to denote all systems possessed by the eavesdropper. Eve is supposed to perform a Bell state measurement (BSM) and announce the results over a public channel. Alice and Bob keep the data associated with the successful events and discard the rest. Finally, to guarantee correctly correlated bit strings, either Alice or Bob apply a bit flip to part of their data. In this protocol, the $Z$ basis is used to generate the secret key and the $X$ basis for parameter estimation.
As before, we can consider a virtual protocol in which Alice (Bob) prepares the state $\ket{\Psi_Z}_{ACE}$ ($\ket{\Psi_Z}_{BC'E}$) in the $Z$ basis, analogously to Eq. (\ref{eq:stateZ}), where $C$ ($C'$) is the system sent by Alice (Bob) to Eve. For simplicity, the discussion below considers the case where Eve's BSM only projects the incoming pulses into one Bell state: $\ket{\phi^+}$. In this case, the phase error rate is expressed as \begin{equation} e_X = \frac{Y_{0X,1X,\phi^+}^{(ZZ) vir} + Y_{1X,0X,\phi^+}^{(ZZ) vir}}{Y_{0X,0X,\phi^+}^{(ZZ) vir} + Y_{1X,0X,\phi^+}^{(ZZ) vir} + Y_{0X,1X,\phi^+}^{(ZZ) vir} + Y_{1X,1X,\phi^+}^{(ZZ) vir}}, \label{eq:ex_mdi} \end{equation} where $Y_{sX,jX,\phi^+}^{(ZZ) vir}$ with $s,j \in \{0,1\}$, is the joint probability that Alice (Bob) prepares the state $\ket{\Psi_Z}_{ACE}$ ($\ket{\Psi_Z}_{BC'E}$) and Eve declares the outcome $\ket{\phi^+}$, both Alice and Bob select the $Z$ basis but use the $X$ basis for their measurements of systems $A$ and $B$, and Alice (Bob) obtains the bit value $j$ ($s$). In order to find these virtual yields we need to calculate \begin{equation} Y_{sX,jX,\phi^+}^{(ZZ) vir} = P_{Z_A} P_{Z_B} \Tr[\hat{D}_{\phi^+} \hat{\theta}_{CE, jX, vir} \otimes \hat{\theta}_{C'E, sX, vir} ], \label{eq:yield_mdi} \end{equation}
where $\hat{D}_{\phi^+}$ corresponds to the announcement of Eve's outcome $\ket{\phi^+}$, and $\hat{\theta}_{CE, jX, vir}$ and $\hat{\theta}_{C'E, sX, vir}$ are defined similarly to those given by Eq. (\ref{eq:density_matrix}). Following a similar procedure to the one presented in Section \ref{sec:estimation}, we can find the transmission rates to be $q_{\phi^+|ii'} = \frac{1}{4} \Tr [\hat{D}_{\phi^+} \hat{\sigma_i} \otimes \hat{\sigma_{i'}}]$ with $i$, $i' \in \{Id,x,y,z\}$. As in Eq. (\ref{eq:coef2}), we can calculate bounds of the virtual yields in terms of these quantities and their respective Bloch vector coefficients $P_i^{jX,vir}$.
Using the actual events $Y_{sZ,jZ,\phi^+}^{(ZZ)}$, $Y_{0X,jZ,\phi^+}^{(XZ)}$, $Y_{sZ,0X,\phi^+}^{(ZX)}$ and $Y_{0X,0X,\phi^+}^{(XX)}$ we can construct a system of nine linear inequalities. Finally, by employing the same assumption as before, \textit{i.e.}, the three states form a triangle in the Bloch sphere \cite{tamaki}, we can guarantee that these equations are linearly independent. Therefore, one can find bounds on the transmission rates, and consequently estimate the phase error rate.
\section{Security proof against coherent attacks} \label{app:security}
In this section, we present the security proof of our formalism against coherent attacks. For simplicity of the discussion, the main text deals with the case of pure states in a single-mode qubit space, however in this Appendix we consider the general scenario where the states could be mixed states in a single-mode qubit space. For this, we consider a virtual protocol \cite{shor, mayers}. This protocol is equivalent to the actual protocol in the sense that the resulting statistics of the measurements and the secret key rate generated between Alice and Bob are the same. Furthermore, the classical and quantum information available to Eve is equal in both protocols. The security claim follows from the fact that Alice and Bob can choose which protocol to execute and Eve is unable to distinguish between them. Hence, by proving the security of the virtual protocol we prove the security of the actual protocol.
In this work we employ the complementary scenario \cite{koashi, koashi2}, which considers a virtual protocol that uses the complementary observable of the key generation basis. For instance, in the actual protocol Alice and Bob agree on the bit values in the $Z$ basis, while in the virtual protocol, they collaborate to prepare a qubit in an eigenstate of the $X$ basis. In doing so, the security proof basically reduces to the estimation of the phase error rate, which corresponds to the bit error rate that Alice and Bob would have observed if they would have measured the $Z$ basis state in the $X$ basis. Therefore, the aim of the virtual protocol is to estimate the phase error rate. In Section \ref{sec:estimation}, we showed how this can be done by using our formalism and how we can calculate the secret key rate $R$ against collective attacks. Here, we describe in detail the virtual protocol used for the security proof and explain how to accommodate coherent attacks by Eve through the use of Azuma's inequality \cite{azuma}.
\subsection{Virtual protocol}
Here, we consider a more general case than that studied in the main text in which Alice generates a single-mode qubit system $B$, whose states are mixed states, and we show how to define the pure states needed for our security proof. We denote the mixed states by the density matrices $\hat{\rho}_{0Z_B}$, $\hat{\rho}_{1Z_B}$ and $\hat{\rho}_{0X_B}$. These states are diagonalised as \begin{equation} \begin{split} &\hat{\rho}_{jZ_B} = P_{jZ}^{0} \dyad{\phi_{jZ}^{0}}_{B} + P_{jZ}^{1} \dyad{\phi_{jZ}^{1}}_{B}, \\ &\hat{\rho}_{0X_B} = P_{0X}^{0} \dyad{\phi_{0X}^{0}}_{B} + P_{0X}^{1} \dyad{\phi_{0X}^{1}}_{B}, \end{split} \end{equation} where $j \in \{0,1\}$, and $P_{jZ}^0$, $P_{jZ}^1$, $P_{0X}^0$ and $P_{0X}^1$ are probabilities satisfying $P_{jZ}^0 + P_{jZ}^1 = 1$ and $P_{0X}^0 + P_{0X}^1 = 1$. Moreover, $\{\ket{\phi_{jZ}^0}_B, \ket{\phi_{jZ}^1}_B\}$ and $\{\ket{\phi_{0X}^0}_B, \ket{\phi_{0X}^1}_B\}$ are orthonormal bases in the single-mode qubit. The states sent might be mixed due to imperfections in Alice's devices, including a potential entanglement between her devices and Eve's ancilla. This means that in general these mixed states can be purified by introducing Alice's ancilla system $A_1$ and Eve's system $E$, and therefore we have the purifications of $\hat{\rho}_{0Z_B}$, $\hat{\rho}_{1Z_B}$ and $\hat{\rho}_{0X_B}$ as $\ket{\tilde{\psi}_{0Z}}_{A_1BE}$, $\ket{\tilde{\psi}_{1Z}}_{A_1BE}$, and $\ket{\tilde{\psi}_{0X}}_{A_1BE}$, each of which is expressed by \begin{equation} \begin{split} &\ket{\tilde{\psi}_{jZ}}_{A_1BE} = \sqrt{P_{jZ}^{0}} \ket{0_{jZ}}_{A_1E} \ket{\phi_{jZ}^{0}}_{B} + \sqrt{P_{jZ}^{1}} \ket{1_{jZ}}_{A_1E} \ket{\phi_{jZ}^{1}}_{B}, \\ &\ket{\tilde{\psi}_{0X}}_{A_1BE} = \sqrt{P_{0X}^{0}} \ket{0_{0X}}_{A_1E} \ket{\phi_{0X}^{0}}_{B} + \sqrt{P_{0X}^{1}} \ket{1_{0X}}_{A_1E} \ket{\phi_{0X}^{1}}_{B}.\\ \end{split} \end{equation} Here, $\{\ket{0_{jZ}}_{A_1E}, \ket{1_{jZ}}_{A_1E}\}$ and $\{\ket{0_{0X}}_{A_1E}, \ket{1_{0X}}_{A_1E}\}$ are orthonormal bases. 
Now, we define states similar to Eq. (\ref{eq:stateZ}) that include the purification of Alice's state: \begin{equation} \begin{split} &\ket{\tilde{\Psi}_{Z}}_{A_1A_2BE} = \frac{1}{\sqrt{2}} \Big[\ket{0_Z}_{A_2} \ket{\tilde{\psi}_{0Z}}_{A_1BE} + \ket{1_Z}_{A_2} \ket{\tilde{\psi}_{1Z}}_{A_1BE} \Big], \\ &\ket{\tilde{\Psi}_{X}}_{A_1A_2BE} = \ket{0_X}_{A_2} \ket{\tilde{\psi}_{0X}}_{A_1BE}, \\ \end{split} \end{equation} where $A_2$ is Alice's ancilla system used to generate a bit value in the protocol, i.e., it possesses information about Alice's encoding. As explained above, in the security analysis Alice measures $A_2$ in the $X$ basis instead of the $Z$ basis when $\ket{\tilde{\Psi}_{Z}}_{A_1A_2BE}$ is prepared, therefore, it is useful to write this state in the $X$ basis of system $A_2$. By substituting $\ket{0_Z}_{A_2} = \frac{1}{\sqrt{2}} (\ket{0_X}_{A_2} + \ket{1_X}_{A_2})$ and $\ket{1_Z}_{A_2} = \frac{1}{\sqrt{2}} (\ket{0_X}_{A_2} - \ket{1_X}_{A_2})$ we can express $\ket{\tilde{\Psi}_{Z}}_{A_1A_2BE}$ as \begin{equation}
\ket{\tilde{\Psi}_{Z}}_{A_1A_2BE} = \sqrt{\frac{1 + \braket{ \tilde{\psi}_{0Z} | \tilde{\psi}_{1Z}}_{A_1BE}}{2}} \ket{0_X}_{A_2} \ket{\tilde{\psi}_{0X}^{vir}}_{A_1BE} + \sqrt{\frac{1 - \braket{ \tilde{\psi}_{0Z} | \tilde{\psi}_{1Z}}_{A_1BE}}{2}} \ket{1_X}_{A_2} \ket{\tilde{\psi}_{1X}^{vir}}_{A_1BE}, \end{equation} where \begin{equation}
\ket{\tilde{\psi}_{jX}^{vir}}_{A_1BE} = \frac{ \ket{\tilde{\psi}_{0Z}}_{A_1BE} + (-1)^j \ket{\tilde{\psi}_{1Z}}_{A_1BE}}{\sqrt{2 \big(1 + (-1)^j \braket{ \tilde{\psi}_{0Z} | \tilde{\psi}_{1Z}}_{A_1BE}\big)}}. \end{equation}
In the virtual protocol, we consider that Alice sends Bob two virtual states, $\ket{\tilde{\psi}_{jX}^{vir}}_{A_1BE}$, and three actual states, $\ket{\tilde{\psi}_{jZ}}_{A_1BE}$ and $\ket{\tilde{\psi}_{0X}}_{A_1BE}$, which are used to estimate the phase error rate. We have seen that even in the case of mixed states we can define actual and virtual pure states, and these pure states can be directly used in our security proof. Therefore, our formalism is valid for mixed states in a single-mode qubit space.
Next, let us continue to explain the security proof in more detail. The selection of these actual and virtual states can be expressed as \begin{equation} \ket{\varphi}_{SA_1BE} = \sum_{c=1}^5 \sqrt{P(c)} \ket{c}_{S} \ket{\vartheta^{(c)}}_{A_1BE}, \label{eq:prep} \end{equation} where $S$ is the shield system that is kept inside of Alice's lab and the states $\ket{\vartheta^{(c)}}_{A_1BE}$ are \begin{equation} \begin{split} &\ket{\vartheta^{(1)}}_{A_1BE} = \ket{\tilde{\psi}_{0X}^{vir}}_{A_1BE}, \\ &\ket{\vartheta^{(2)}}_{A_1BE} = \ket{\tilde{\psi}_{1X}^{vir}}_{A_1BE}, \\ &\ket{\vartheta^{(3)}}_{A_1BE} = \ket{\tilde{\psi}_{0Z}}_{A_1BE}, \\ &\ket{\vartheta^{(4)}}_{A_1BE} = \ket{\tilde{\psi}_{1Z}}_{A_1BE}, \\ &\ket{\vartheta^{(5)}}_{A_1BE} = \ket{\tilde{\psi}_{0X}}_{A_1BE}, \\ \end{split} \end{equation} with their respective probabilities $P(c)$ \begin{equation} \begin{split}
&P(1) = \frac{{P_{Z_A}}{P_{Z_B}}}{2} \Big(1 + \braket{ \tilde{\psi}_{0Z} | \tilde{\psi}_{1Z}}_{A_1BE} \Big), \\
&P(2) = \frac{{P_{Z_A}}{P_{Z_B}}}{2} \Big(1 - \braket{ \tilde{\psi}_{0Z} | \tilde{\psi}_{1Z}}_{A_1BE} \Big), \\ &P(3) = \frac{{P_{Z_A}}{P_{X_B}}}{2}, \\ &P(4) = \frac{{P_{Z_A}}{P_{X_B}}}{2}, \\ &P(5) = P_{X_A}{P_{Z_B}} + P_{X_A}{P_{X_B}} = P_{X_A}. \\ \end{split} \end{equation} When Bob receives the states he performs a measurement in either the $Z$ or the $X$ basis, and these are defined by the POVMs described in Section \ref{sec:description}. Also, all announcements between Alice and Bob are done via an authenticated public channel. Note that, in the virtual protocol we assume that Alice and Bob are sitting in the same lab so that they can choose the measurement basis, and this is allowed because the quantum and classical information available to Eve is the same between the actual and the virtual protocols. The detailed steps of the virtual protocol are presented below and the logic schematics in Fig. \ref{fig:virtualLT}. \\
\begin{enumerate} \item \textbf{Initialisation:} Before running the protocol, Alice and Bob agree on a number $N_{fixed}$ of rounds, on the error correcting codes, and on a set of hash functions to perform privacy amplification. Steps 2-4 of the protocol are repeated until the number of detected events $N$ becomes $N_{fixed}$.
\item \textbf{State Preparation:} After a potential THA, Alice prepares systems $S$, $A_1$ and $BE$ in the entangled state $\ket{\varphi}_{SA_1BE}$, in Eq. (\ref{eq:prep}), and sends Bob the system $BE$ via a quantum channel.
\item \textbf{QND measurement:} For each incoming system, Bob performs a quantum non-demolition (QND) measurement to determine whether the signals are detected or not. If Bob obtains a detection event he keeps the resulting system and $N$ is increased by 1 unit.
\item \textbf{Detection announcement:} If $N=N_{fixed}$, Bob announces the termination of quantum communication and the detection pattern. Otherwise Alice and Bob return to Step 2 of the protocol.
\item \textbf{Measurement and basis announcement:} For each of the detected events, Alice measures her system $S$ and announces the $Z$ $(X)$ basis when $c = 1,2,3,4 ~ (c = 5)$. Bob announces the $Z$ $(X)$ basis for $c = 1,2 ~(3,4)$, but he always measures in the $X$ basis. For $c =5$, Bob selects the basis $\beta \in\{ Z, X\}$ probabilistically and announces his basis choice. Then, he carries out the measurement on system $BE$ in his selected basis.
\item \textbf{Sifting and announcement:} Alice and Bob define and announce the bit strings $\vec{s}_{X,0Z}$, $\vec{s}_{X,1Z}$ and $\vec{s}_{X,0X}$, which correspond to the events when Alice sends the actual states and Bob performs the $X$ basis measurements. These are the basis mismatched events when $c = 3,4$ and one of the events when $c = 5$, the basis matched event. These strings are used to estimate the phase error rate.
\end{enumerate}
\begin{figure}
\caption{The logical schematics for the virtual protocol, where the notation $X_A/Z_A$, $X_B/Z_B$ corresponds to Alice's and Bob's measurements bases respectively. The virtual states correspond to $c = 1,2$, the actual $Z$ states to $c = 3,4$, and the actual $X$ states to $c =5$. For each click event, Alice measures system $S$ and Bob measures system $BE$. Note that, the selection of $c = 1,2,3,4$ already includes Bob's measurement in the $X$ basis, but when $c = 5$ his measurement basis is chosen probabilistically.}
\label{fig:virtualLT}
\end{figure}
In the virtual protocol, we require that Alice and Bob postpone their measurements until the quantum communication ends, therefore, we assume that Alice and Bob possess quantum memories where they can store their systems. The reason for this deferral comes from the application of Azuma's inequality, which is explained later. In the case of Alice, she only makes her measurement after the termination condition, in Step 5. This is allowed because it does not matter when she performs the measurement since it commutes with Eve's operations and hence it will not affect Alice's statistics. For Bob, we divide his measurement in two steps: a QND measurement, which allows him to know when a detected event occurred, and a measurement to output the bit value with the chosen basis. If the QND measurement results in a detected instance, Bob performs the measurement using the $Z$ or $X$ basis. We are able to delay Bob's measurement choice because the inconclusive outcomes are assumed to be independent of the basis, as explained in Section \ref{sec:description}. The key point in the virtual protocol is as follows: the security of the events when Alice sends the actual $Z$ basis states and Bob obtains a detected event in the actual protocol with the $Z$ basis, can be analysed by imagining that Alice and Bob both employ the $X$ basis to measure respectively the systems $A_2$ and $BE$. This means that when Alice sends a virtual state $(c=1,2)$ Bob's measurement basis is always the $X$ basis.
It is clear that the virtual protocol described here is equivalent to the actual protocol in Section \ref{sec:description}. This is so because the quantum states sent by Alice are the same in both protocols as well as the announcements made by the two parties. For instance, when Alice sends the virtual states they both measure in the $X$ basis but they announce the $Z$ basis (Step 5). In the actual protocol, these events are used for key generation and therefore Alice and Bob also announce the $Z$ basis. This means that the protocols are indistinguishable from Eve's perspective as required. Note that, the virtual protocol does not produce a key, it is merely used for the estimation of the phase error rate.
\subsection{Azuma's inequality and its application to the security proof} In coherent attacks, Eve interacts with all the signals sent by Alice followed by a joint measurement after listening to all the classical information exchanged between Alice and Bob. In this scenario we use Azuma's inequality \cite{azuma}, which takes into account this dependency, and allows us to derive a relation between the expected values and the observed values. Most importantly, once we have the conditional probabilities on all previous measurement outcomes we can find the actual number of events observed.
Azuma's inequality can be applied to a stochastic model as long as a sequence of random variables is a martingale and satisfies the bounded difference conditions (BDC). A martingale is a sequence of random variables $X^{(0)}, X^{(1)},..., X^{(l)}$ for which the expectation $E[\cdot]$ of the next value is equal to the present value in the sequence given that we know all the previous outcomes, i.e., $E [X^{(l+1)} | X^{(0)}, X^{(1)},..., X^{(l)}] = X^{(l)}$ for all $l \ge 0$. This sequence is said to satisfy BDC if there exists $c^{(l)} > 0$ such that $|X^{(l+1)} - X^{(l)}| \le c^{(l)}$ for all $l \ge 0$. For $N$ trials of a variable $X^{(l)}$ with $c^{(l)} = 1$, Azuma's inequality states that \begin{equation}
P \big[|X^{(N)} - X^{(0)}| > N \delta_A \big] \le 2e^{\frac{-N\delta_A^2}{2}}, \end{equation} holds for any $\delta_A \in (0,1)$. Now, for the $l$th trial, we define $X^{(l)}$ as \begin{equation}
X^{(l)} := \Lambda^{(l)} - \sum_{k=1}^{l} P(\zeta_k = 1| \zeta_0, ..., \zeta_{k-1}), \label{eq:variable} \end{equation}
where $\Lambda^{(l)}$ is a random variable representing the actual number of events (that is $\Lambda^{(l)} = \sum_{k=1}^l \zeta_k$ ) observed during the first $l$ trials, $\zeta_k$ is the random variable of interest and it has the value of 0 or 1. Moreover, $P(\zeta_k = 1| \zeta_0, ..., \zeta_{k-1})$ is the conditional probability of obtaining the outcome specified by $\zeta_k = 1$ in the $k$th trial given that the first $k-1$ outcomes are $ \zeta_0, ..., \zeta_{k-1}$. It is possible to show that the sequence of random variables in Eq. (\ref{eq:variable}) is a martingale and satisfies the BDC. Hence, we can apply Azuma's inequality and write \begin{equation}
P \Bigg[\bigg|\Lambda^{(N)} - \sum_{k=1}^{N} P(\zeta_k = 1| \zeta_0, ..., \zeta_{k-1}) \bigg| > N \delta_A \Bigg] \le 2e^{\frac{-N\delta_A^2}{2}}, \end{equation} where we use the definition $X^{(0)} = 0$. This also means that \begin{equation}
\sum_{k=1}^{N} P(\zeta_k = 1| \zeta_0, ..., \zeta_{k-1}) - N \delta_A \le \Lambda^{(N)} \le \sum_{k=1}^{N} P(\zeta_k = 1| \zeta_0, ..., \zeta_{k-1}) + N \delta_A, \end{equation} holds at least with probability $P = 1- 2e^{\frac{-N\delta_A^2}{2}}$. Therefore, \begin{equation}
\Lambda^{(N)} = \sum_{k=1}^{N} P(\zeta_k = 1| \zeta_0, ..., \zeta_{k-1}) + \delta_B, \label{eq:lambda} \end{equation} except for error probability $\epsilon + \hat{\epsilon}$, where the deviation parameter $\delta_B \in [-\Delta,\hat{\Delta}]$. These bounds are defined as $\Delta = f(N,\epsilon)$ and $\hat{\Delta} = f(N,\hat{\epsilon})$ where $f(x,y) = \sqrt{2x \ln(1/y)}$.
Let us now show how we use this inequality in our security proof. In particular, we consider
\begin{equation}
X^{(l)}_{csX} = \Lambda^{(l)}_{csX} - \sum_{k=1}^{l} P(\zeta_{k,csX} = 1| \zeta_{0}, ..., \zeta_{k-1}), \label{eq:variable2} \end{equation}
where $csX = c,s$ for $c = 1,2,3,4$ since Bob's basis choice is already included in these cases, and $csX = c,s,X$ for $c = 5$. In Eq. (\ref{eq:variable2}), $P(\zeta_{k,csX} = 1| \zeta_{0}, ..., \zeta_{k-1})$ is the probability of Alice selecting the state $c$ and Bob observing $s$ ($s,X$) for $s \in \{0,1\}$ when $c = 1,2,3,4$ ($c=5$) in the $k$th trial, conditional on all the previous outcomes from the measurements $ \zeta_0, ..., \zeta_{k-1}$. To obtain this probability we first define \begin{equation} \ket{\tau}_{SA_1BE} = \ket{\varphi_{k-1}}_{SA_1BE}\ket{\varphi_k}_{SA_1BE}\ket{\varphi_r}_{SA_1BE}, \end{equation} to be the state prepared by Alice in an execution of the protocol, where $\ket{\varphi_{k-1}}_{SA_1BE}$, $\ket{\varphi_k}_{SA_1BE}$ and $\ket{\varphi_r}_{SA_1BE}$ correspond to all the systems before the $k$th trial, in the $k$th trial and in the rest of the trials after $k$ (i.e., $r = N-k$), respectively.
Eve's action can be described as $\hat{U}_{BEE'} \ket{\tau}_{SA_1BE} \ket{0}_{E'} = \sum_t \hat{B}_{tB} \ket{\tau}_{SA_1BE} \ket{t}_{E'}$, where $\hat{U}_{BEE'}$ is a unitary transformation acting on systems $BEE'$, $\hat{B}_{tB}$ is the Kraus operator which acts on system $BE$ depending on Eve's measurement outcome $t$, and $\ket{t}_{\{t= 1,2,...\}}$ is an orthonormal basis. Note that, here we use the subscript $E$ to refer to Eve's system originating from a $THA$ and $E'$ corresponds to the additional ancilla system in her hands. Alice and Bob only communicate after performing the measurements so these parameters are independent of the state preparation.
In order to consider Alice's and Bob's measurements previous to the $k$th trial, we define the operator $\hat{\mathcal O}_{k-1,SBE} = \otimes^{k-1}_{\nu=1} \hat{M}_{S_{\nu}{BE}_{\nu}}$, where $\hat{M}_{S_{\nu}{BE}_{\nu}}$ denotes the Kraus operator associated with the $\nu$th measurement outcome of Alice and Bob. Hence, after Eve's interaction, the normalised $k$th state of the system $SBE$ conditioned on the measurement outcomes, $O_{k-1}$, and the detected event can be expressed as \begin{equation}
\hat{\rho}_{k|O_{k-1}}^{SBE} = \frac{\hat{\sigma}_{k|O_{k-1}}^{SBE}}{\Tr (\hat{\sigma}_{k|O_{k-1}}^{SBE})}, \end{equation}
where the state $\hat{\sigma}_{k|O_{k-1}}^{SBE}$ is defined shortly below (see Eq. (\ref{eq:trace})). We know that \begin{equation}
\hat{\sigma}_{k|O_{k-1}}^{SA_1BE}= \sum_t \Tr_{\bar{k}} \Big[ \hat{F}_{BE_k} \hat{\mathcal O}_{k-1,SBE} \hat{B}_{tB} \ket{\tau}_{SA_1BE } \bra{\tau} \hat{B}_{tB}^\dagger \hat{\mathcal O}_{k-1,SBE}^\dagger \hat{F}_{BE_k}^\dagger \Big], \label{eq:sigma} \end{equation} where $\Tr_{\bar{k}}$ is the partial trace over the systems $S$, $A_1$ and $BE$ for all the events that are not in the $k$th trial, and $\hat{F}_{BE_k}$ is Bob's Kraus operator acting on the $k$th system, corresponding to the detected events. This means taking the trace with the basis $\{ \ket{\vec{x}_{k-1}} , \ket{\vec{x}_r}\}$, where $\ket{\vec{x}_{k-1}}$ corresponds to all the systems in the first $k-1$ runs and $\ket{\vec{x}_r}$ to the rest of the systems after $k$. Then, we can rewrite Eq. (\ref{eq:sigma}) as \begin{equation}
\hat{\sigma}_{k|O_{k-1}}^{SBE} = \sum_t \sum_{\vec{x}_{k-1}, \vec{x}_r} \Tr_{A_1}^k \Big[A_{t,BE|O_{k-1}}^{(\vec{x}_{k-1}, \vec{x}_r)} \ket{\varphi_k}_{SA_1BE} \bra{\varphi_k} A_{t,BE|O_{k-1}}^{\dagger(\vec{x}_{k-1}, \vec{x}_r)}\Big], \label{eq:trace} \end{equation}
where $\Tr_{A_1}^k$ is the partial trace over the system $A_1$ in the $k$th trial and $A_{t,BE|O_{k-1}}^{(\vec{x}_{k-1}, \vec{x}_r)}$ is the Kraus operator acting on the $k$th system conditional on all the previous detected events, and it is defined as \begin{equation}
A_{t,BE|O_{k-1}}^{(\vec{x}_{k-1}, \vec{x}_r)} = \bra{\vec{x}_r} \bra{\vec{x}_{k-1}} \hat{F}_{BE_k} \hat{O}_{k-1,SBE} ~\hat{B}_{tB} \ket{\varphi_{k-1}}_{SA_1BE} \ket{\varphi_r}_{SA_1BE}. \end{equation}
By substituting Eq.(\ref{eq:prep}) into Eq.({\ref{eq:trace}}) we get \begin{equation}
\hat{\sigma}_{k|O_{k-1}}^{SBE} = \sum_{c,c'} \sqrt{P(c) P(c')} ~\sum_t \sum_{\vec{x}_{k-1}, \vec{x}_r} \Tr_{A_1}^k \Big[A_{t,BE|O_{k-1}}^{(\vec{x}_{k-1}, \vec{x}_r)} \ket{c}_S\bra{c'} \otimes \ket{\vartheta^{(c)}}_{A_1BE} \bra{\vartheta^{(c')}} A_{t,BE|O_{k-1}}^{\dagger(\vec{x}_{k-1}, \vec{x}_r)}\Big]. \end{equation} It is clear now that this state is dependent on Eve's action as well as on the previous outcomes. Also, note that the partial trace only acts on system $A_1$. The probability that Alice obtains the outcome $c$, Bob selects the $X$ basis and obtains a bit value $s$ conditional on all the previous measurement outcomes is calculated as \begin{equation} \begin{split}
P_{csX|O_{k-1}} &= \frac{P(X \cap c)}{ \Tr(\hat{\sigma}_{k|O_{k-1}}^{SBE})} \sum_t \sum_{\vec{x}_{k-1}, \vec{x}_r} \Tr \Bigg[A_{t,BE|O_{k-1}}^{(\vec{x}_{k-1}, \vec{x}_r)} \Tr_{A_1}^k \Big[ \dyad*{\vartheta^{(c)}}{\vartheta^{(c)}}_{A_1BE} \Big] A_{t,BE|O_{k-1}}^{\dagger(\vec{x}_{k-1}, \vec{x}_r)} \hat{M}_{sX}\Bigg] \\
&= \frac{P(X \cap c)}{\Tr(\hat{\sigma}_{k|O_{k-1}}^{SBE})} \Tr \Bigg[ \hat{D}_{sX|O_{k-1}} \Tr_{A_1}^k \Big[ \dyad*{\vartheta^{(c)}}{\vartheta^{(c)}}_{A_1BE}\Big] \Bigg] \\ \end{split} \end{equation}
where $P(X \cap c) = P(c)$ for $c = 1,2,3,4$ and $P(X \cap c) = P(c)P(X_B)$ for $c =5$. In this expression, $\hat{D}_{sX|O_{k-1}} = \sum_t \sum_{\vec{x}_{k-1}, \vec{x}_r} A_{t,BE|O_{k-1}}^{\dagger(\vec{x}_{k-1}, \vec{x}_r)} \hat{M}_{sX} A_{t,BE|O_{k-1}}^{(\vec{x}_{k-1}, \vec{x}_r)} $ represents Eve's action as well as Bob's measurement. This is independent of $c$, which means that Eve cannot behave differently depending on the state sent. Importantly, the probability $P_{csX|O_{k-1}}$ essentially corresponds to the actual yields $Y_{sX,j\beta}$ in the main text when $c = 3,4,5$. Note that the yields in this Appendix are normalised by the detected events while those in the main text are not. In the finite key size regime, the normalisation according to the detected events results in a better performance, however, in the limit of a large number of pulses, they are essentially the same. As we consider this limit throughout this paper, in the main text we adopt the yields that are not normalised by the detected events for simplicity of explanation. We know $\Lambda_{3sX}$, $\Lambda_{4sX}$ and $\Lambda_{5sX}$ by collecting the corresponding number of events from the actual protocol. Therefore, using Azuma's inequality, i.e., Eq. (\ref{eq:lambda}), we can calculate the conditional probabilities which correspond to the yields $Y_{sX,0Z}^{(Z)}$, $Y_{sX,1Z}^{(Z)}$ and $Y_{sX,0X}^{(X)}$ respectively. From Section \ref{sec:estimation}, we know how these yields are related to the transmission rates, and in turn, how these are related to the virtual yields $Y_{1X,0Z}^{(Z)vir}$ and $Y_{0X,1Z}^{(Z)vir}$. Here, we would like to emphasise that using Eq. (\ref{eq:coef2}) we can calculate these yields, which correspond to the probabilities $P_{11X|O_{k-1}}$ and $P_{20X|O_{k-1}}$ respectively, both of which are conditional on the previous measurement outcomes. 
Using Azuma's inequality again, we can find the number of events, $\Lambda_{11X}$ and $\Lambda_{20X}$, which are the number of phase errors, and this concludes the estimation of the phase error rate.
\section{Coefficients} \label{app:coefficients} In this appendix, we list the coefficients used in the main text. Direct calculations show that the coefficients $A_j$, $B_j$ and $C_j$ for Eqs. (\ref{eq:yield1})-(\ref{eq:coef2}) are given by \begin{linenomath*}\begin{equation} \begin{split}
A_j = & \frac{1}{4} \Big[|a_{0Z}|^2 + (-1)^j \Big( a_{0Z}^* a_{1Z}{\braket{\phi_{0Z}|\phi_{1Z}}}_{BE} + a_{0Z} a_{1Z}^* {\braket{\phi_{1Z}|\phi_{0Z}}}_{BE} \Big) + |a_{1Z}|^2 \Big],\\
B_j = & \frac{1}{4} \sqrt{|a_{0Z}|^2 + (-1)^j \Big(a_{0Z}^* a_{1Z} {\braket{\phi_{0Z}|\phi_{1Z}}}_{BE} + a_{0Z} a_{1Z}^* {\braket{\phi_{1Z}|\phi_{0Z}}}_{BE} \Big) + |a_{1Z}|^2}\\
& \times ~\sqrt{|b_{0Z}|^2 + (-1)^j \Big(b_{0Z}^* b_{1Z} {\braket{\phi_{0Z}^\perp|\phi_{1Z}^\perp}}_{BE} + b_{0Z} b_{1Z}^* {\braket{\phi_{1Z}^\perp|\phi_{0Z}^\perp}}_{BE} \Big) + |b_{1Z}|^2},\\
C_j= &\frac{1}{4} \Big[|b_{0Z}|^2 + (-1)^j \Big( b_{0Z}^* b_{1Z}{\braket{\phi_{0Z}^\perp|\phi_{1Z}^\perp}}_{BE} + b_{0Z} b_{1Z}^*{\braket{\phi_{1Z}^\perp|\phi_{0Z}^\perp}}_{BE} \Big) + |b_{1Z}|^2 \Big].\\ \end{split} \end{equation}\end{linenomath*}
\section{Lo-Preskill's analysis} \label{app:lopreskill} In this section, we outline the security argument of Lo-Preskill's analysis \cite{lo4} and apply it to the particular device model described in the main text.
\subsection{Description of Lo-Preskill's protocol} In order to prove the security of the BB84 protocol \citep{bennett}, where the state of the emitted pulses is not phase randomised, we convert the actual protocol into an entanglement based protocol. In this protocol, the $Z$ basis states are prepared when Alice measures her system $A$ of the entangled state \begin{equation} \ket{\Upsilon_Z}_{ABE} = \frac{1}{\sqrt{2}} \big[\ket{0_Z}_A \otimes \ket{\Phi_{0Z}}_{BE} + \ket{1_Z}_A \otimes \ket{\Phi_{1Z}}_{BE} \big], \label{eq:ent1} \end{equation} with the basis $\{ \ket{0_Z}, \ket{1_Z}\}$. Similarly, she prepares the $X$ basis states by measuring her system $A$ of the following entangled state in the basis $\{ \ket{0_X}, \ket{1_X}\}$ \begin{equation} \ket{\Upsilon_X}_{ABE} = \frac{1}{\sqrt{2}} \big[\ket{0_X}_A\otimes \ket{\Phi_{0X}}_{BE} + \ket{1_X}_A \otimes \ket{\Phi_{1X}}_{BE} \big]. \label{eq:ent2} \end{equation}
These states contain Eve's system $E$ since we are considering a potential THA, and the states $\ket{\Phi_{j\beta}}$ for $j \in \{0,1\}$ and $\beta \in \{Z,X\}$ are defined in Eq. (\ref{eq:multi-mode}). If the state preparation is perfect and the source is completely isolated there is no passive or active leakage of information, which is the case when $\delta = \theta_{j\beta} = \mu = 0$. This means that these states are basis independent, i.e., $\ket{\Upsilon_Z}_{ABE} = \ket{\Upsilon_X}_{ABE}$. When the state preparation is not perfect (at least one of the quantities $\delta$, $\theta_{j\beta}$, $\mu$ is greater than zero), the states $\ket{\Upsilon_Z}_{ABE}$ and $\ket{\Upsilon_X}_{ABE}$ are close to each other but not equal hence, some information related with the basis choice could be leaked from the source. This means that Eve might be able to distinguish the states emitted when the $Z$ basis is chosen from the ones when the $X$ basis is chosen. This basis-dependency can be quantified by considering an equivalent virtual protocol that evaluates the ``balance" of a ``quantum coin" \cite{gottesman}. To investigate this, we consider that Alice prepares the state \begin{equation} \ket{\Gamma}_{CABE} = \frac{1}{\sqrt{2}} \big[\ket{0_Z}_C \otimes \ket{\Upsilon_Z}_{ABE} + \ket{1_Z}_C \otimes \ket{\Upsilon_X}_{ABE} \big], \label{eq:phistate} \end{equation} where $C$ corresponds to the quantum coin. She performs a measurement on the coin with the basis $\{ \ket{0_Z}, \ket{1_Z}\}$ to determine the encoding of the signal. From Koashi's approach \cite{koashi2,koashi3}, we can find the phase errors using the complementary scenario, by applying the ``Bloch sphere bound" \cite{tamaki2} to the quantum coin. 
For this, we consider switching the measurement basis (to measure with the $X$ basis instead of the $Z$ basis) on the quantum coin, thus it is useful to write Eq.(\ref{eq:phistate}) as \begin{equation} \ket{\Gamma}_{CABE} = \frac{1}{2} \big[ \ket{0_X}_C \big(\ket{\Upsilon_Z}_{ABE} + \ket{\Upsilon_X}_{ABE} \big) + \ket{1_X}_C \big(\ket{\Upsilon_Z}_{ABE} - \ket{\Upsilon_X}_{ABE} \big)\big]. \label{eq:imbalance} \end{equation}
For characterising how close these states are, we employ the probability $\Delta$ associated with finding the state $\ket{1_X}_C$ and the outcome $X = 1$. This can be expressed as \begin{equation}
\Delta = \frac{1}{4} \big(2 - \braket{\Upsilon_Z | \Upsilon_X}_{ABE} - \braket{\Upsilon_Z | \Upsilon_X}^*_{ABE} \big), \end{equation}
where $\braket{\Upsilon_Z | \Upsilon_X}^*_{ABE}$ is the complex conjugate of $\braket{\Upsilon_Z | \Upsilon_X}_{ABE}$. These overlaps can be calculated to obtain $\Delta$, which quantifies the basis dependence of Alice's states. If there are no imperfections, the inner product $\braket{\Upsilon_Z | \Upsilon_X}_{ABE} = \braket{\Upsilon_Z | \Upsilon_X}^*_{ABE} = 1$ and therefore the coin imbalance is $\Delta = 0$. Note that, this justifies our choices of states in Eqs. (\ref{eq:ent1}) and (\ref{eq:ent2}). Similarly, when they are completely orthogonal to each other the inner products are zero and $\Delta = \frac{1}{2}$. This means that, the closer these states are to each other, the smaller is the coin imbalance $\Delta$ and therefore, less information is leaked to Eve.
In a QKD scheme, not all the signals sent from the source are detected due to channel loss. Therefore, Eve might take advantage of this flaw by blocking certain signals that are not favourable to her without being detected, enhancing the basis dependence of the signals. To take this into consideration, Lo-Preskill's analysis assumes the worst case scenario, where it maximises the imbalance of the coin, or in other words, maximises the leakage of information to Eve. This means that it assumes that all the signals that do not produce a click on Bob's side are associated with the outcome $X=0$ when measuring the quantum coin. The accommodation of this worst case scenario is reflected by considering an enhancement probability of $\Delta$ such that: $\Delta' = \frac{\Delta}{\min[Y_Z, Y_X]}$ where $Y_Z$ $(Y_X)$ is the single photon yield in the $Z$ $(X)$ basis \cite{lucamarini}.
In order to find the secret key rate $R$ we need to estimate the phase error rate $e_X$, which corresponds to what the bit error rate would have been if Alice and Bob had measured their states in the $X$ basis when the entangled state prepared is $\ket{\Upsilon_Z}_{ABE}$. In the Lo-Preskill's analysis, $e_X$ cannot be calculated directly from the channel model but using $e_Z$ and the coin imbalance $\Delta'$ the phase error rate can be estimated to be \cite{lo4} \begin{equation} e_X \le {e_Z} + 4 \Delta' (1-\Delta') (1 - 2 {e_Z}) + 4 (1 - 2\Delta' ) \sqrt{\Delta' (1 - \Delta') {e_Z} (1 - {e_Z})}. \label{eq:eph_delta} \end{equation} In the ideal scenario, when the states $\ket{\Upsilon_Z}_{ABE}$ and $\ket{\Upsilon_X}_{ABE}$ are close to each other, $\Delta'$ is very small and $e_X \approx e_Z$. By substituting Eq. (\ref{eq:eph_delta}) into Eq. (\ref{eq:keyrate}) and using the same definition for $e_Z$ as in Eq. (\ref{eq:biterror}) we are able to calculate $R$.
\subsection{Actual and virtual protocols} In this section, we describe the actual and virtual QKD protocols considered in Lo-Preskill's security analysis. Note that, all announcements between Alice and Bob are done via an authenticated public channel. \\
\textbf{Actual Protocol} \begin{enumerate} \item \textbf{Initialisation:} Before running the protocol, Alice and Bob agree on a number $N_{fixed}$ of rounds, on the error correcting codes, and on a set of hash functions to perform privacy amplification. Steps 2-4 of the protocol are repeated until the number of detected events $N$ becomes $N_{fixed}$.
\item \textbf{State preparation:} Alice selects the basis $\beta \in\{ Z, X\}$ for encoding the states with probabilities $P_{Z_A}$ and $P_{X_A} = 1- P_{Z_A}$ respectively. For each basis selected, she randomly chooses a bit value and the associated phase. Then, she prepares the signal and reference pulses following these specifications and sends the state to Bob via the quantum channel. Due to a potential THA, the sent states might contain Eve's system.
\item \textbf{Measurement:} Bob measures each incoming signal using the basis $\beta \in \{Z,X\}$ with probabilities $P_{Z_B}$ and $P_{X_B} = 1 - P_{Z_B}$ respectively. For each detected event, $N$ is increased by 1 unit.
\item \textbf{Detection announcement:} If $N=N_{fixed}$, Bob announces the termination of the quantum communication phase and the detection pattern. Otherwise, Alice and Bob return to Step 2 of the protocol.
\item \textbf{Basis announcement and sifting:} For the detected events, Alice selects the basis of the quantum coin namely, the $Z_C$ or $X_C$ basis, with probabilities $P_{Z_{C}}$ and $P_{X_{C}}= 1- P_{Z_{C}}$ respectively. She announces the chosen basis, and if $Z_C$ was selected, she also announces the selected basis in Step 2. Moreover, Bob announces his basis choice. If Alice's and Bob's basis choices disagree they discard the data. Otherwise, they define bit strings associated with the matched events, i.e., when both select the $Z$ or both select the $X$ basis.
\item \textbf{Parameter estimation:} Alice and Bob announce the bit strings in the $X$ basis. They calculate the Hamming weight \texttt{wt}$(\vec{s}_{X_A} \oplus \vec{s}_{X_B})$ to find the number of mismatches between the two bit strings. This is then used to estimate the number of bits that need to be removed from the sifted strings in the $Z$ basis, $\vec{s}_{Z_A}$ and $\vec{s}_{Z_B}$, during privacy amplification.
\item \textbf{Error correction and privacy amplification:} Alice and Bob randomly select an error correcting code from Step 1 to perform error correction on the sifted strings in the $Z$ basis and then they exchange the syndrome information. Then, by choosing a random hash function from Step 1 and based on the result of the parameter estimation in Step 6, they perform privacy amplification on the corrected sifted keys. At the end, Alice and Bob obtain the key $\vec{k}_{Z_A}$ and $\vec{k}_{Z_B}$, respectively.
\end{enumerate}
\textbf{Virtual Protocol} \\
In Lo-Preskill's work, they consider only basis matched events; therefore, in the virtual protocol we need to include a post-selection that will only choose these events. This can be done by flipping a classical coin $C'$ that selects between the basis matched and mismatched events. The probability that $C'=0$ $(1)$ is $P_{matched}$ $(P_{mismatched})$, associated with the basis matched (mismatched) event chosen by Bob. A diagram of the virtual protocol with this post-selection is depicted in Fig. \ref{fig:virtualLP}. Also, all announcements between Alice and Bob are done via an authenticated public channel. The steps of the protocol are as follows:
\begin{enumerate} \item \textbf{Initialisation:} Before running the protocol, Alice and Bob agree on a number $N_{fixed}$ of rounds, on the error correcting codes, and on a set of hash functions to perform privacy amplification. Steps 2-5 of the protocol are repeated until the number of detected events $N$ becomes $N_{fixed}$.
\item \textbf{Probabilistic choice of the basis matched and mismatched events:} Bob measures the classical coin $C'$ and obtains the results ``0" or ``1", which correspond to basis matched and mismatched events, respectively.
\item \textbf{State Preparation:} After a potential THA, Alice prepares systems in the entangled state in Eq. (\ref{eq:phistate}) and sends Bob the system $BE$ via a quantum channel.
\item \textbf{QND measurement:} For each incoming system, Bob performs a quantum non-demolition (QND) measurement to determine whether the signal is detected or not. If Bob obtains a detection event he keeps the resulting system and $N$ is increased by 1 unit.
\item \textbf{Detection announcement:} If $N=N_{fixed}$, Bob announces the termination of the quantum communication phase and the detection pattern. Otherwise Alice and Bob return to Step 2 of the protocol.
\item \textbf{Measurement and basis announcement:} For the detected events, Alice measures the quantum coin with the $Z_C$ basis or the $X_C$ basis, chosen with probabilities $P_{Z_{C}}$ and $P_{X_{C}} = 1- P_{Z_{C}}$ respectively. Then, she announces the chosen basis. If $Z_C$ is selected she also announces the measurement outcome (the $Z$ or the $X$ basis), however, she always measures system $A$ with the $X$ basis. As for Bob, if $C'=0$ $(1)$ in Step 2 he announces the same (opposite) basis that was announced by Alice, however, he always measures his system with the $X$ basis.
\item \textbf{Sifting and announcement:} Alice and Bob announce the bit string in the $X$ basis, which corresponds to the events when $C' = 0$ and $Z_{C} = 1$.
\end{enumerate}
\begin{figure}
\caption{The logical schematics for the virtual protocol. It is the use of a second coin that allows us to post-select only the matched events. When Alice selects the $Z$ basis to measure the coin $C$ she prepares the state $\ket{\Upsilon_Z}$ or $\ket{\Upsilon_X}$, but in the virtual protocol she always measures in the $X$ basis. }
\label{fig:virtualLP}
\end{figure}
In this virtual protocol, Alice's and Bob's measurements are postponed because it is useful to clearly define the outcomes of each run of the protocol. Note that, in Step 6 Alice employs the $X$ basis for the measurement, and this is required for the estimation of the phase error rate. It can be seen that the classical and quantum information available to Eve is the same in both the actual and virtual protocols, and also the key generated is identical therefore, the virtual protocol can be used to prove the security of the QKD scheme.
\subsection{Simulation of the key rate for the Lo-Preskill's analysis with imperfectly characterised states}
In this section, we evaluate Lo-Preskill's analysis for the particular device model described in Section \ref{sec:simulation}. Since it considers the BB84 protocol \cite{bennett}, Alice sends Bob the three states in Eq. (\ref{eq:alice_states}) and another state in the $X$ basis, which we assume to be \begin{equation} \ket{\omega_{1X}}_B = \cos \bigg( \frac{3 \pi}{4} + \frac{3 \delta}{4} \bigg) \ket{0_Z} + \sin \bigg( \frac{3 \pi}{4} + \frac{3 \delta}{4} \bigg) \ket{1_Z}. \label{eq:extrastate} \end{equation} This state is obtained by following the same device model as that given in Eq. (\ref{eq:model}) for $\varphi_A = 3\pi/2$, i.e., the phase modulation is proportional to the chosen phase value (see Section \ref{sec:description} for more details).
In order to estimate the phase error rate we use the coin imbalance $\Delta'$, Eq. (\ref{eq:overlap2}) and Eq. (\ref{eq:eph_delta}). To calculate the secret key rate $R$ we use Eq. (\ref{eq:keyrate}), where the yield of single photons in the $Z$ basis is $Y_Z = P_{Z_A} P_{Z_B} \big[ 4 (1 - \frac{\eta}{2}) p_d + \eta\big]$ for overall transmission efficiency $\eta$ and the bit error rate is defined in Eq. (\ref{eq:biterror}). For simplicity, we assume the probabilities $P_{Z_C} \rightarrow 1$, $P_{Z_A} = P_{Z_B} = \frac{1}{2}$ and for fair comparisons, we use the same channel model used to evaluate the generalised loss-tolerant protocol (see Appendix \ref{app:channel}) and the same experimental parameters. As before, we analyse each of the different source imperfections separately to evaluate how they affect the key generation rate. These are: SPFs parametrised by $\delta$, the phase modulation deviation; the non-qubit assumption where $\theta_{j\beta}$ is the mode dependency; and a THA which depends on the intensity of the back-reflected light $\mu$. The results are shown below.
\begin{figure}
\caption{Asymptotic secret key rate $R$ against the overall system loss measured in dB, for different values of $\delta$, dependent and independent $\theta$, and $\mu$, using the Lo-Preskill's analysis with imperfectly characterised states. The blue and red curves coincide in some graphs. (a) As the SPF $\delta$ increases, $R$ decreases rapidly. (b) As the setting independent $\theta$ gets larger, the component of vertical polarisation increases and $R$ decreases. (c) When $\theta_{j\beta}$ is setting dependent the key rate $R$ decreases even further. (d) As the intensity of Eve's back-reflected light $\mu$ increases, more information is leaked causing $R$ to decrease.}
\label{fig:extLP}
\end{figure}
In Fig. \ref{fig:extLP}(a) we can see how $\delta$ greatly affects the secret key rate $R$ as it increases. The explanation for this lies in the fact that Lo-Preskill's method assumes the worst case scenario, namely, the detected signals are chosen to maximise the imbalance of the coin. This assumption is required since not all signals emitted by Alice are detected, thus Eve can exploit these losses to enhance the basis dependence of the detected signals without being exposed. These results contrast with the generalised loss-tolerant protocol, as shown in Fig. \ref{fig:lossTolerant}(a), where $R$ remains almost the same independently of $\delta$.
Fig. \ref{fig:extLP}(b) evaluates the setting independent $\theta$, that is, when $\theta$ is independent of Alice's encoding. As $\theta$ increases the key rate is approximately the same but for values of $\theta \gtrapprox 10^{-4}$ it starts to decrease. However, when we compare it with Fig. \ref{fig:lossTolerant}(b) we see that it decreases slower than in the generalised loss-tolerant protocol. When we consider a setting dependent $\theta_{j\beta}$ instead, the key rate $R$ deteriorates even further, as shown in Fig. \ref{fig:extLP}(c). This result is expected since Eve is now able to better distinguish the states sent by Alice.
Finally, in Fig. \ref{fig:extLP}(d) we can see how the THA affects the secret key rate. An increase in $\mu$ results in a lower $R$, but it decreases at a slower rate when compared with the generalised loss-tolerant protocol in Fig. \ref{fig:lossTolerant}(d). As observed, no key can be obtained around $\mu \gtrapprox 10^{-3}$ and the secret key rate is roughly the same for values of $\mu \lessapprox 10^{-8}$.
From these results we can conclude that the Lo-Preskill's analysis is highly affected by SPF but it is more resistant to the non-qubit assumption and THA when compared to the generalised loss-tolerant protocol. In order to see this comparison more clearly we refer the reader back to Section \ref{sec:simulation}.
\section{Channel model} \label{app:channel} The three states sent by Alice in Eq. (\ref{eq:alice_states}) can be expressed in terms of creation operators as follows
\begin{equation} \begin{split} & \ket{\omega_{0Z}}_B = \frac{1}{\sqrt{2}} (\hat{a}^{\dagger}_r + \hat{a}^{\dagger}_s ) \ket{v}, \\ & \ket{\omega_{1Z}}_B = \frac{1}{\sqrt{2}} (\hat{a}^{\dagger}_r - e^{i\delta} \hat{a}^{\dagger}_s ) \ket{v}, \\ & \ket{\omega_{0X}}_B = \frac{1}{\sqrt{2}} (\hat{a}^{\dagger}_r + i e^{i\delta/2} \hat{a}^{\dagger}_s )\ket{v}, \\ \end{split} \end{equation}
\noindent where $\hat{a}^{\dagger}_r$ and $\hat{a}^{\dagger}_s$ are creation operators for a photon in the reference and signal pulses respectively. Using these states, we can simulate the QKD protocol as shown in Fig.~\ref{fig:protocol}. We assume that the detectors D0 and D1 have the same detection efficiency and that the dark counts $p_d$ are independent of the incoming signals. The overall transmission efficiency of the system can be expressed as $\eta= \eta_{channel} \eta_{detector}$, where $\eta_{channel} = 10^{-\alpha l/10}$, $\alpha$ in dB/km is the fiber loss coefficient and $l$ is the transmission distance in km. Note that, only half of the pulses interfere in Bob's lab. Furthermore, we do not assume misalignment in the channel. For simplicity, the non-qubit assumption and the THA are not included in this model because we assume that they are not affected by the channel. By neglecting the terms associated with $p_d^2$, the results obtained are
\begin{align*} & Y_{0X,0Z}^{(Z)} \approx ~ P_{0Z} P_{X_B} \bigg[\Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{4} \Big(1 + \sin \frac{\delta}{2}\Big) \Big(1 - \frac{p_d}{2}\Big) + \frac{\eta}{8} \Big(1 - \sin \frac{\delta}{2}\Big)p_d \bigg], \\ & Y_{1X,0Z}^{(Z)} \approx ~ P_{0Z} P_{X_B} \bigg[ \Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{8} \Big(1 + \sin \frac{\delta}{2}\Big) p_d + \frac{\eta}{4} \Big(1 - \sin \frac{\delta}{2} \Big)\Big(1 - \frac{p_d}{2}\Big)\bigg] , \\ & Y_{0Z,0Z}^{(Z)} \approx ~ P_{0Z} P_{Z_B} \bigg[\Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{4} (1 + \cos \delta) \Big(1 - \frac{p_d}{2}\Big) + \frac{\eta}{8} (1 - \cos \delta)p_d \bigg], \\ & Y_{1Z,0Z}^{(Z)} \approx ~ P_{0Z} P_{Z_B} \bigg[ \Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{8} (1 + \cos \delta) p_d + \frac{\eta}{4} (1 - \cos \delta) \Big(1 - \frac{p_d}{2}\Big)\bigg], \\ & Y_{0X,1Z}^{(Z)} \approx ~ P_{1Z} P_{X_B} \bigg[\Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{4} \Big(1 - \sin \frac{3\delta}{2}\Big) \Big(1 - \frac{p_d}{2}\Big) + \frac{\eta}{8} \Big(1 + \sin \frac{3\delta}{2}\Big)p_d \bigg], \stepcounter{equation}\tag{\theequation} \\ & Y_{1X,1Z}^{(Z)} \approx ~ P_{1Z} P_{X_B} \bigg[ \Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{8} \Big(1 - \sin \frac{3\delta}{2}\Big) p_d + \frac{\eta}{4} \Big(1 + \sin \frac{3\delta}{2}\Big) \Big(1 - \frac{p_d}{2}\Big)\bigg] , \\ & Y_{0Z,1Z}^{(Z)} \approx ~ P_{1Z} P_{Z_B} \bigg[\Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{4} (1 - \cos 2\delta) \Big(1 - \frac{p_d}{2}\Big) + \frac{\eta}{8} (1 + \cos 2\delta)p_d \bigg], \\ & Y_{1Z,1Z}^{(Z)} \approx ~ P_{1Z} P_{Z_B} \bigg[ \Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{8} (1 - \cos 2\delta) p_d + \frac{\eta}{4} (1 + \cos 2\delta) \Big(1 - \frac{p_d}{2}\Big)\bigg], \\ & Y_{0X,0X}^{(X)} \approx ~ P_{0X} P_{X_B} \bigg[\Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{4} (1 + \cos \delta) \Big(1 - \frac{p_d}{2}\Big) + \frac{\eta}{8} (1 - \cos \delta)p_d \bigg], \\ & Y_{1X,0X}^{(X)} \approx ~ P_{0X} P_{X_B} \bigg[ 
\Big(1-\frac{\eta}{2}\Big) p_d + \frac{\eta}{8} (1 + \cos \delta) p_d + \frac{\eta}{4} (1 - \cos \delta) \Big(1 - \frac{p_d}{2}\Big)\bigg]. \\ \end{align*}
\end{document} | arXiv | {
"id": "1902.02126.tex",
"language_detection_score": 0.7814838886260986,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Strongly regular configurations}
\author[M.~Abreu, M.~Funk, V.~Kr\v{c}adinac, and D.~Labbate]{Mari\'{e}n Abreu$^1$, Martin Funk$^1$, Vedran Kr\v{c}adinac$^2$, and Domenico Labbate$^1$}
\address{$^1$Dipartimento di Matematica, Informatica ed Economia, Universit\`{a} degli Studi della Basilicata, Viale dell'Ateneo Lucano~10, Potenza, Italy}
\address{$^2$Department of Mathematics, Faculty of Science, University of Zagreb, Bijeni\v{c}ka cesta~30, HR-10000 Zagreb, Croatia}
\email{marien.abreu@unibas.it} \email{martin.funk@unibas.it} \email{vedran.krcadinac@math.hr} \email{domenico.labbate@unibas.it}
\thanks{The third author was supported by the Croatian Science Foundation under the projects $6732$ and $9752$.}
\keywords{combinatorial configuration, strongly regular graph, partial geometry, semipartial geometry}
\subjclass[2010]{05B30, 05E30}
\date{September 12, 2021}
\maketitle
\begin{abstract} We study combinatorial configurations with the associated point and line graphs being strongly regular. Examples not belonging to known classes such as partial geometries and their generalizations or elliptic semiplanes are constructed. Necessary existence conditions are proved and a table of feasible parameters of such configurations with at most $200$ points is presented. Non-existence of some configurations with feasible parameters is proved. \end{abstract}
\section{Introduction}
A \emph{(combinatorial) configuration} is a finite partial linear space with constant point and line degrees. If there are~$v$ points of degree~$r$ and~$b$ lines of degree~$k$, the parameters are written $(v_r,b_k)$. If $v=b$, or equivalently $r=k$, the configuration is called \emph{symmetric} and the parameters are written~$(v_k)$. Throughout the paper we assume $k\ge 3$ and $r\ge 3$.
The \emph{point graph} of a configuration has the $v$ points as vertices, with two vertices being adjacent if the points are collinear. The \emph{line graph} is defined dually: the $b$ lines of the configuration are the vertices, and adjacency is concurrence.\footnote{We will always use the term \emph{line graph} in this sense, and not in the sense of graph theory (the line graph $L(G)$ of a graph $G$).} The point and line graphs are regular of degrees $r(k-1)$ and $k(r-1)$, respectively. A graph is called \emph{strongly regular} with parameters $SRG(n,d,\lambda,\mu)$ if it has~$n$ vertices, is regular of degree~$d$, and every two vertices have $\lambda$ common neighbors if they are adjacent and $\mu$ common neighbors if they are not adjacent. We are interested in configurations with both associated graphs being strongly regular.
A prominent family of such configurations are the \emph{partial geometries} $pg(s,t,\alpha)$, introduced by R.~C.~Bose~\cite{RCB63}. These are configurations with $r=t+1$ and $k=s+1$ such that for every non-incident point-line pair $(P,\ell)$, there are exactly~$\alpha$ points on $\ell$ collinear with $P$. The point graph is a \begin{equation}\label{pgpointgraph} SRG\left(\frac{(s+1)(st+\alpha)}{\alpha},\, s(t+1),\, s-1+t(\alpha-1),\,\alpha(t+1)\right), \end{equation} and the line graph is a \begin{equation}\label{pglinegraph} SRG\left(\frac{(t+1)(st+\alpha)}{\alpha},\, t(s+1),\, t-1+s(\alpha-1),\, \alpha(s+1)\right). \end{equation} Partial geometries include Steiner $2$-designs $pg(s,t,s+1)$ and their duals $pg(s,t,t+1)$, Bruck nets $pg(s,t,t)$~\cite{RHB51, RHB63} and their duals $pg(s,t,s)$ (transversal designs), and generalized quadrangles $pg(s,t,1)$ as special cases.
If $v\neq b$, partial geometries are the only configurations with both associated graphs strongly regular. This follows from \cite[Theorem 1.2]{BHT97}:
\begin{theorem}\label{thmBHT} Let the point graph of a $(v_r,b_k)$ configuration be strongly regular. Then the configuration is a partial geometry or $v\le b$. If $v=b$, then $\det(A+k I)$ is a square, where $A$ is the adjacency matrix of the point graph. \end{theorem}
If $v=b$, there are such configurations that are not partial geometries. The smallest examples are $(10_3)$ configurations with associated graphs $SRG(10,6,3,4)$ (the complement of the Petersen graph). One such configuration is the Desargues configuration, which is a semipartial geometry for $\alpha=2$ and $\mu=4$ (see Section~\ref{sec3} for the definition). There is another such configuration not belonging to the known generalizations of partial geometries such as semipartial geometries~\cite{DT78,DV95,FDC03} and strongly regular $(\alpha,\beta)$-geometries~\cite{HM01}, represented in Figure~\ref{smallest}.
\begin{figure}
\caption{A strongly regular configuration that is not an $(\alpha,\beta)$-geometry.}
\label{smallest}
\end{figure}
In this paper we study combinatorial configurations similar to this one. In Section~\ref{sec2} we give the definition of a strongly regular configuration. The concept unifies known classes such as (semi)partial geometries and elliptic semiplanes~\cite{PD68} with several sporadic examples from the literature (see Remark on page 37 of~\cite{BHT97} and Section 7.2 of~\cite{BKK03}). We focus on strongly regular configurations that are proper and primitive, not belonging to the known classes. We prove two necessary conditions on the parameters of strongly regular configurations, stronger than conditions on the parameters of the associated strongly regular graphs.
In Section~\ref{sec3} we present families of strongly regular configurations. A family associated with Moore graphs and a family constructed from the points and planes of the finite projective space $PG(4,q)$ have the same parameters as semipartial geometries. We prove that there are strongly regular configurations with these parameters that are not semipartial geometries. A third family constructed from finite projective planes has parameters not compatible with semipartial geometries.
Section~\ref{sec4} contains constructions of strongly regular configurations from difference sets in groups. Some of the configurations from the previous section can also be constructed in this way. We perform an exhaustive search in groups of order $v\le 200$ and find three other parameter sets $(v_k;\lambda,\mu)$ for which strongly regular configurations exist. Configurations with a fourth parameter set are constructed in a different manner.
In the final Section~\ref{sec5} we present a table of feasible parameters of strongly regular configurations with $v\le 200$. An on-line version of the table with links to the actual configurations is available on the web page \begin{center} \url{https://web.math.pmf.unizg.hr/~krcko/results/srconf.html} \end{center} We perform complete classifications of configurations with small parameters and prove non-existence for infinitely many feasible parameter sets corresponding to rook graphs.
\section{Definitions and conditions on the parameters}\label{sec2}
In view of the motivation presented in the Introduction, we make the following definition.
\begin{definition}\label{maindef} A symmetric configuration will be called a \emph{strongly regular configuration} with parameters $(v_k;\lambda,\mu)$ if the associated point graph is a $SRG(v,k(k-1),\lambda,\mu)$. \end{definition}
We can prove that the line graph is also strongly regular with the same para\-meters. We use the following lemma from~\cite{AMC81}.
\begin{lemma}\label{cohenlemma} Suppose that the point graph of a $(v_r,b_k)$ configuration is strongly regular with parameters~\eqref{pgpointgraph} corresponding to a partial geometry $pg(s,t,\alpha)$. Then the configuration is a $pg(s,t,\alpha)$. \end{lemma}
\begin{theorem}\label{linegraph} Given a strongly regular $(v_k;\lambda,\mu)$ configuration, the associated line graph is a $SRG(v,k(k-1),\lambda,\mu)$. \end{theorem}
\begin{proof} A graph is strongly regular with parameters $SRG(v,d,\lambda,\mu)$ if and only if its adjacency matrix~$A$ satisfies \begin{equation}\label{adjsrg} A^2= dI+\lambda A+\mu (J-I-A). \end{equation} Here $I$ and $J$ are the $v\times v$ identity matrix and the all-one matrix. Let~$N$ be the incidence matrix of the configuration. Then, $A=N N^t-k I$ and $B=N^t N-k I$ are adjacency matrices of the point and line graphs, respectively. By~\eqref{adjsrg}, we have \begin{equation*} N N^t N N^t +(\mu-\lambda-2k)N N^t+(k(\lambda-\mu+1)+\mu)I - \mu J=0. \end{equation*} If the incidence matrix~$N$ is non-singular, we can multiply by $N^{-1}$ from the left and by $N$ from the right. Using $N^{-1} J = \frac{1}{k} J$ and $J N=k J$, we get \begin{equation*} N^t N N^t N + (\mu-\lambda-2k)N^t N +(k(\lambda-\mu+1)+\mu)I - \mu J=0. \end{equation*} This is equation~\eqref{adjsrg} for the matrix~$B$, and therefore the line graph is also strongly regular with the same parameters.
Now assume that the incidence matrix $N$ is singular. Then the matrix~$N$ has eigenvalue~$0$ and the matrix~$A$ has eigenvalue $-k$. Thus, $k^2-(\mu-\lambda)k+\mu-k(k-1)=0$ holds and~$k$ divides~$\mu$. Denoting $\alpha=\mu/k$, we see that the parameters of the point graph correspond to a partial geometry~\eqref{pgpointgraph}. By Lemma~\ref{cohenlemma}, the configuration is a partial geometry and the line graph is also strongly regular with parameters~\eqref{pglinegraph}. The same argument was used in the proof of \cite[Theorem 1.2]{BHT97}. \end{proof}
We shall call strongly regular configurations with non-singular incidence matrices \emph{proper}. The previous proof shows that improper configurations must be partial geometries. The parameters of a strongly regular configuration are not independent. A necessary condition for the existence of a $SRG(v,k(k-1),\lambda,\mu)$ is \begin{equation*} (v-1-k(k-1))\mu=k(k-1)(k(k-1)-1-\lambda). \end{equation*} From this, $v$ can be expressed from $k$, $\lambda$, and~$\mu$, provided $\mu\neq 0$. There are many other necessary conditions on the parameters of a $SRG(v,k(k-1),\lambda,\mu)$. The adjacency matrix has eigenvalue $k(k-1)$ with multiplicity~$1$ and two more eigenvalues \begin{equation}\label{eigenv} r,s=\frac{1}{2}\left(\lambda-\mu\pm \sqrt{(\lambda - \mu)^2-4(\mu-k(k-1))} \right) \end{equation} with respective multiplicities \begin{equation}\label{multi} f,g = \frac{1}{2} \left(v-1\mp \frac{(r+s)(v-1)+2k(k-1)}{r-s}\right). \end{equation} The multiplicities are integers, giving divisibility conditions on the para\-meters. If $f\neq g$, the eigenvalues $r$, $s$ are also integers. See~\cite{BvM21} for further necessary conditions and~\cite{AEB} for tables of para\-meters of strongly regular graphs with up-to-date information on their existence. The parameters $(v_k;\lambda,\mu)$ of a strongly regular configuration will be considered \emph{feasible} if the associated strongly regular graphs exist or their existence cannot be ruled out. On top of that, we assume two more necessary conditions on the parameters. The first condition follows from \cite[Theorem 1.2]{BHT97}: $\det(A+kI)=\det(N N^t)=(\det N)^2$ is a square. The matrix $A+kI$ has eigenvalues $k^2$, $r+k$, $s+k$ with multiplicities $1$, $f$, $g$ and the determinant can be computed from the parameters.
\begin{proposition}\label{squarecond} If a strongly regular $(v_k;\lambda,\mu)$ configuration exists, then $(r+k)^f (s+k)^g$ is the square of an integer, where $r$, $s$, $f$, $g$ are given by \eqref{eigenv} and \eqref{multi}. \end{proposition}
For example, the condition rules out strongly regular $(28_4;6,4)$ configurations, although $SRG(28,12,6,4)$ graphs exist. Equations \eqref{eigenv} and \eqref{multi} give $r=4$, $s=-2$, $f=7$, $g=20$, and $2^{41}$ is not a square. The second condition follows from a counting argument.
\begin{theorem}\label{cliquecond} If a strongly regular $(v_k;\lambda,\mu)$ configuration exists, then $(v-k)(\lambda+1)\ge k(k-1)^3$. Equality holds if and only if the configuration is a partial geometry. \end{theorem}
\begin{proof} Fix a line $\ell$ and for any point $P$ not on $\ell$, denote by~$\alpha_P$ the number of lines through $P$ concurrent with~$\ell$. Count the number of flags $(P,\ell_1)$ with $\ell_1$ concurrent with $\ell$ in two ways to obtain $$\sum \alpha_P = k(k-1)^2.$$ Similarly, counting triples $(P,\ell_1,\ell_2)$, where $\ell_1\neq\ell_2$ are lines through~$P$ concurrent with $\ell$, gives $$\sum \alpha_P(\alpha_P-1) = k(k-1)(\lambda-(k-2)).$$ The sums are taken over all $P\not\in \ell$. The average $\alpha_P$ is $\alpha= \frac{k(k-1)^2}{v-k}$. Now we can compute \begin{align*} 0 &\le \sum (\alpha_P-\alpha)^2 = \sum \alpha_P(\alpha_P-1) + (1-2\alpha) \sum \alpha_P + (v-k) \alpha^2 = \\
& = k(k-1)(\lambda-k+2) + \left(1-2\,\frac{k(k-1)^2}{v-k}\right)k(k-1)^2 +
\frac{k^2(k-1)^4}{v-k} = \\
& = k(k-1)\left( \lambda+1 - \frac{k(k-1)^3}{v-k}\right). \end{align*} From this we see that $(v-k)(\lambda+1)\ge k(k-1)^3$ holds, with equality if and only if $\alpha_P=\alpha$ for all $P\not\in \ell$, i.e.\ the configuration is a partial geometry. \end{proof}
For example, the parameters $(81_5;1,6)$ do not satisfy Theorem~\ref{cliquecond} and strongly regular configurations with these parameters do not exist, although a $SRG(81,20,1,6)$ graph does. An equivalent form of the inequality $k(\mu-\lambda-1)\le \mu$ follows from Hoffman's bound on the size of cliques in strongly regular graphs; see \cite[Section 1.3]{BCN89}. Theorem~\ref{cliquecond} characterizes proper strongly regular configurations by their parameters.
\begin{corollary} A strongly regular $(v_k;\lambda,\mu)$ configuration that is not a projective plane is proper if and only if $(v-k)(\lambda+1) > k(k-1)^3$. \end{corollary}
Projective planes of order~$n$ are partial geometries $pg(n,n,n+1)$ and satisfy Theorem~\ref{cliquecond} with equality, but have non-singular incidence matrices. The associated point and line graphs are complete. More generally, we now consider the case when the associated graphs are imprimitive, i.e.\ $\mu=0$ or $\mu=k(k-1)$ holds. In the first case the graphs are disjoint unions of complete graphs $m\cdot K_{n^2+n+1}$ and the configuration is a disjoint union of $m$ projective planes of order~$n$. This case can be characterized as strongly regular configurations with collinearity of points being an equivalence relation.
The second imprimitive case $\mu=k(k-1)$ is complementary: non-collinearity of points is an equivalence relation and the associated graphs are complete multipartite. Strongly regular configurations with these properties are known as \emph{elliptic semiplanes}. Dembowski~\cite{PD68} defined a \emph{finite semiplane} as a partial linear space with parallelism of lines and non-collinearity of points being equivalence relations. A semiplane is of \emph{order} $n$ if the largest degree of a point or line is $n+1$. Dembowski proved that the set of all degrees is either $\{n-1,n,n+1\}$, $\{n,n+1\}$, or $\{n+1\}$, and called semiplanes \emph{hyperbolic}, \emph{parabolic}, or \emph{elliptic} accordingly.
Elliptic semiplanes are precisely the strongly regular configurations with $\mu=k(k-1)$. Most known elliptic semiplanes are of the form $\mathcal{P}-B$, where $\mathcal{P}$ is a projective plane of order $n$, and $B$ is a closed Baer subset. The only known exceptions are Baker's semiplane~\cite{RDB78} with parameters $(45_7;39,42)$ and Mathon's semiplane~\cite{RM07} with parameters $(135_{12};129,132)$.
In the sequel we focus on strongly regular configurations that are proper and \emph{primitive}, i.e.\ such that neither collinearity nor non-col\-li\-ne\-arity of points is an equivalence relation. This is equivalent to $0< \mu < k(k-1)$. Table~\ref{fptable} contains all feasible parameters of such configurations with $v\le 200$. We first present constructions of proper and primitive strongly regular configurations.
\section{Families of strongly regular configurations}\label{sec3}
An \emph{$(\alpha,\beta)$-geometry}~\cite{DV94} is a $(v_r,b_k)$ configuration such that for every non-incident point-line pair $(P,\ell)$, there are either $\alpha$ or $\beta$ points on~$\ell$ collinear with~$P$. Thus, a partial geometry is an $(\alpha,\beta)$-geometry with $\alpha=\beta$. If $\alpha\neq \beta$, the point graph is not necessarily strongly regular. Geometries with this additional property are called \emph{strongly regular $(\alpha,\beta)$-geometries} and are studied in~\cite{HM01}. An important special case are the \emph{semipartial geometries}, introduced in~\cite{DT78}. They are $(0,\alpha)$-geometries such that for every pair of non-collinear points, there are exactly~$\mu$ points collinear with both. The parameters are written $(s,t,\alpha,\mu)$, where $r=t+1$ and $k=s+1$ are the point and line degrees, and the point graph is a $$SRG\left(1+\frac{s(t+1)(\mu+t(s+1-\alpha))}{\mu},\,s(t+1),\,s-1+t(\alpha-1),\,\mu\right).$$
Strongly regular $(\alpha,\beta)$-geometries with $v=b$ are strongly regular configurations by Definition~\ref{maindef}. Our introductory example in Figure~\ref{smallest} is not an $(\alpha,\beta)$-geometry, although the parameters correspond to a semipartial geometry. If $\ell$ is the line represented as a circle, there are points~$P$ outside~$\ell$ with $1$, $2$, or $3$ points on~$\ell$ collinear with~$P$. This example is part of a family associated with Moore graphs of diameter two, i.e.\ strongly regular graphs with $\lambda=0$ and $\mu=1$.
Moore graphs have parameters $SRG(k^2+1,k,0,1)$ with $k\in \{2,3,7,57\}$ \cite{HS60}. There is a unique graph for $k=2$ (the pentagon), $k=3$ (the Petersen graph), and $k=7$ (the Hoffman--Singleton graph), while for $k=57$ the existence of such a graph is unknown. The incidence structure with points being vertices of a $SRG(k^2+1,k,0,1)$ and lines being neighborhoods of single vertices is a semipartial geo\-metry with $s=t=\alpha=k-1$ and $\mu=(k-1)^2$~\cite{DT78}. The point graph is the complementary $SRG(k^2+1,k(k-1),k(k-2),(k-1)^2)$. Hence, this incidence structure is a strongly regular $(v_k;\lambda,\mu)$ configuration with $v=k^2+1$, $\lambda=k(k-2)$, and $\mu=(k-1)^2$.
For $k=3$, the semipartial geometry is the Desargues configuration and there is one other $(10_3;3,4)$ configuration given in Figure~\ref{smallest}. For $k=7$, the semipartial geometry has full automorphism group $PSU(3,5) : \mathbb{Z}_2$ of order $252000$ acting flag-transitively. We found $210$ other $(50_7;35,36)$ configurations that are not $(\alpha,\beta)$-geometries. The semipartial geometry and $110$ of the new examples are self-dual and the remaining ones form $50$ dual pairs.
\begin{proposition}\label{src50} There are at least $211$ non-isomorphic $(50_7;35,36)$ configurations, one of which is a semipartial geometry. Orders of their full automorphism groups are given in Table~\ref{table50}. \end{proposition}
\begin{table}[h]
\begin{tabular}{|c c|c c|c c|c c|c c|} \hline
$|\mathop{\mathrm{Aut}}|$ & \#Cf & $|\mathop{\mathrm{Aut}}|$ & \#Cf & $|\mathop{\mathrm{Aut}}|$ & \#Cf & $|\mathop{\mathrm{Aut}}|$ & \#Cf & $|\mathop{\mathrm{Aut}}|$ & \#Cf \\ \hline 252000 & 1 & 120 & 1 & 40 & 1 & 20 & 6 & 6 & 13 \\ 2520 & 1 & 96 & 1 & 36 & 1 & 16 & 3 & 4 & 15 \\ 1440 & 1 & 72 & 1 & 32 & 1 & 12 & 1 & 3 & 18 \\ 720 & 1 & 48 & 1 & 24 & 6 & 10 & 1 & 2 & 46 \\ 240 & 1 & 42 & 1 & 21 & 2 & 8 & 11 & 1 & 76 \\ \hline \end{tabular} \vskip 3mm \caption{Distribution of $(50_7;35,36)$ configurations by order of full automorphism group.}\label{table50} \end{table}
The configurations of Proposition~\ref{src50} are available through the on-line version of Table~\ref{fptable}. They were constructed computationally, by prescribing automorphism groups and switching submatrices of the incidence matrix: $$\left[\begin{array}{c c} 1 & 0\\ 0 & 1\\ \end{array}\right] \longleftrightarrow \left[\begin{array}{c c} 0 & 1\\ 1 & 0\\ \end{array}\right].$$ We used GAP~\cite{GAP4} and our own programs written in C. To check for isomorphism and compute full automorphism groups, we used nauty~\cite{MP14}. The construction method for configurations with prescribed automorphism groups is similar to constructions of quasi-symmetric designs in~\cite{KVK20, VK20} and relies on the clique-finding program Cliquer~\cite{NO03}.
Another family of semipartial geometries is family (g) from~\cite{DT78}, denoted by $LP(n,q)$ in~\cite{DV95,FDC03}. The points of $LP(n,q)$ are lines of the projective space $PG(n,q)$, $n\ge 3$. The lines of $LP(n,q)$ are $2$-planes of $PG(n,q)$ and incidence is inclusion. Then, $LP(n,q)$ is a semipartial geometry with $s=q(q+1)$, $t=\frac{q^{n-1}-1}{q-1}-1$, $\alpha=q+1$, and $\mu=(q+1)^2$. It is a partial geometry if and only if $n=3$. Moreover, $v=b$ holds if and only if $n=4$. Thus, $LP(4,q)$ is a $(v_k;\lambda,\mu)$ configuration with \begin{equation}\label{LPpar} \begin{array}{c} v=(q^2+1)(q^4+q^3+q^2+q+1), \kern 2mm k=q^2+q+1,\\[2mm] \lambda=q^3+2q^2+q-1, \kern 4mm \mu=(q+1)^2. \end{array} \end{equation} It is self-dual and has full automorphism group $P\Gamma L(5,q)$.
We now describe transformations of $LP(4,q)$ into strongly regular configurations that are not semipartial geometries. We refer to them as \emph{polarity transformations}; they are similar to the construction of polarity designs in~\cite{JT09}. Let~$H_0$ be a hyper\-plane of $PG(4,q)$. As a subgeometry, $H_0$ is isomorphic to $PG(3,q)$ and admits a polarity~$\pi$, i.e.\ an inclusion-reversing involution. The polarity permutes the set of projective lines contained in $H_0$ and exchanges the set of points in~$H_0$ with the set of planes in~$H_0$. We modify incidence of the elements of $LP(4,q)$ contained in~$H_0$: a point~$L$ (projective line contained in $H_0$) is incident with a line $p$ (projective plane contained in $H_0$) if $\pi(L)\subseteq p$. For the remaining pairs $(L,p)$, with $L$ or $p$ not contained in $H_0$, incidence remains unaltered. We claim that the new incidence structure $LP(4,q)^\pi$ is a $(v_k;\lambda,\mu)$ configuration with parameters~\eqref{LPpar}.
The point and line degrees clearly remain the same and there is at most one line through every pair of points. The point graphs of $LP(4,q)^\pi$ and $LP(4,q)$ are identical. This follows from the next lemma.
\begin{lemma}\label{coplanarity} Two projective lines of $PG(n,q)$ are coplanar if and only if they intersect. \end{lemma}
If $L_1$ and $L_2$ are projective lines of~$H_0$, then $\pi(L_1)$, $\pi(L_2)$ are contained in a plane~$p$ if and only if $L_1$, $L_2$ intersect in the point $\pi(p)$ and hence, by Lemma~\ref{coplanarity}, are contained in some plane~$p'$. The line graph of $LP(4,q)^\pi$ is changed, but remains strongly regular because of Theorem~\ref{linegraph}.
To see that the new configuration $LP(4,q)^\pi$ is not a semipartial geometry, take a plane $p$ in $H_0$ and a projective line $L$ intersecting~$H_0$ in the point $\pi(p)$. Then, $(L,p)$ is a non-incident point-line pair of $LP(4,q)^\pi$. If $\pi(M)\subseteq p$, then $M$ contains $\pi(p)$ and is coplanar with~$L$, i.e.\ collinear as a point of the configuration. Hence, all $q^2+q+1$ points on $p$ are collinear with $L$, whereas in a semipartial geometry the number is always $0$ or $\alpha=q+1$. The configurations $LP(4,q)$ and $LP(4,q)^\pi$ are therefore not isomorphic. Configurations obtained by transforming $LP(4,q)$ with different polarities are all isomorphic, because the composition of two polarities is an isomorphism.
We define a dual transformation of $LP(4,q)$ in the following manner. Take a point $P_0$ of $PG(4,q)$ and consider the quotient geometry of lines, planes and solids containing $P_0$. It is isomorphic to $PG(3,q)$ and admits a polarity~$\pi'$ permuting the planes through~$P_0$ and exchanging the lines and solids through~$P_0$. We modify incidence in $LP(4,q)$ for projective lines $L$ and planes $p$ through $P_0$: they are incident if $L\subseteq \pi'(p)$. The new configuration $LP(4,q)_{\pi'}$ is isomorphic to the dual of $LP(4,q)^\pi$ and therefore strongly regular with parameters~\eqref{LPpar}, but not a semipartial geometry. The line graphs of $LP(4,q)_{\pi'}$ and $LP(4,q)$ are identical, while the point graph of $LP(4,q)_{\pi'}$ is changed.
A fourth $(v_k;\lambda,\mu)$ configuration is obtained if we take a non-incident point-hyperplane pair $P_0$, $H_0$ of $PG(4,q)$ and apply both transformations. The lines and planes in $H_0$ are different from the lines and planes through~$P_0$, so incidence is changed in disjoint parts of the configuration. The resulting configuration $LP(4,q)_{\pi'}^\pi$ has the same line graph as $LP(4,q)^\pi$ and the same point graph as $LP(4,q)_{\pi'}$ and is self-dual. This proves the following theorem.
\begin{theorem}\label{LPtransformed} For every prime power~$q$, there are at least four strongly regular $(v_k;\lambda,\mu)$ configurations with parameters~\eqref{LPpar}. One of them is the semipartial geometry $LP(4,q)$ and the others are not semipartial geometries. \end{theorem}
We now present an infinite family of strongly regular configurations with parameters different from those of semipartial geometries. The construction works by deleting a suitable subset from a projective plane, similarly to the constructions of elliptic semiplanes.
\begin{theorem}\label{triangle} Let $\mathcal{P}$ be a projective plane of order $n\ge 5$ and $A$, $B$, $C$ be three non-collinear points. By deleting all points on the lines $AB$, $AC$, $BC$ and all lines through the points $A$, $B$, $C$, there remains a strongly regular $(v_k;\lambda,\mu)$ configuration with $v=(n-1)^2$, $k=n-2$, $\lambda=(n-4)^2+1$, and $\mu=(n-3)(n-4)$. This configuration is not an $(\alpha,\beta)$-geometry. \end{theorem}
\begin{proof} The number of points and lines in the remaining configuration is $v=n^2+n+1-3-3(n-1)=(n-1)^2$ and they are of degree $k=n-2$. Let $P$ and $Q$ be two remaining points that are collinear, i.e.\ are not on a line of $\mathcal{P}$ through $A$, $B$ or $C$. Then the points non-collinear with $P$ are the remaining points on the lines $AP$, $BP$, $CP$. There are $3(n-2)$ such points, and as many for $Q$. The points non-collinear with both $P$ and $Q$ are the intersections of one of the lines $AP$, $BP$, $CP$ with one of the lines $AQ$, $BQ$, $CQ$; there are $6$ such points. By inclusion-exclusion, the number of points in the remaining configuration collinear with both $P$ and $Q$ is $\lambda=(n-1)^2-2 -6(n-2)+6=(n-4)^2+1$. If the points~$P$ and~$Q$ are non-collinear, a similar count shows that the number of points in the remaining configuration collinear with both $P$ and $Q$ is $\mu=(n-3)(n-4)$.
Let $(P,\ell)$ be a non-incident point-line pair of the remaining configuration. We now count the points on $\ell$ collinear with $P$. Let $A'$, $B'$, $C'$ be the intersections of $BC$, $AC$, $AB$ with~$\ell$. These are the deleted points of~$\ell$. If the lines $AA'$, $BB'$, $CC'$ are concurrent, $P$ lies on $0$, $1$ or $3$ of these lines. Then there are $n-5$, $n-4$ or $n-2$ points on~$\ell$ collinear with~$P$. In this case, the points $A$, $B$, $C$, $A'$, $B'$, $C'$ and the common point of $AA'$, $BB'$, $CC'$ form a Fano subplane, so this can only occur if $n$ is even or $\mathcal{P}$ is non-Desarguesian. On the other hand, if the lines $AA'$, $BB'$, $CC'$ are not concurrent, $P$ lies on $0$, $1$ or $2$ of these lines and there are $n-5$, $n-4$ or $n-3$ points on~$\ell$ collinear with~$P$. In both cases there are three possibilities for the number of points on~$\ell$ collinear with $P$, so the configuration is not an $(\alpha,\beta)$-geometry. \end{proof}
The associated graphs have parameters $$SRG((n-1)^2,(n-2)(n-3),(n-4)^2+1,(n-3)(n-4)).$$ These are pseudo Latin square graphs $LS_{n-3}(n-1)$, see \cite[Section 8.4.2]{BvM21}. For $n=5$, we get the Shrikhande graph~\cite{SSS59} which is not a Latin square graph. For $n=7$, the graphs have parameters $LS_4(6)$ and are not Latin square graphs because there are no orthogonal Latin squares of order~$6$.
In the smallest case $n=5$, the $(16_3;2,2)$ configuration of Theorem~\ref{triangle} can be extended to a $(16_4;8,12)$ configuration by adding a point to every line. This is a $(4,4)$-net and can be embedded in the projective plane of order~$4$. This is an interesting transformation of the projective plane of order~$5$ into the projective plane of order~$4$, but it does not generalize to $n>5$.
In the Desarguesian projective plane $PG(2,q)$, all triangles $\{A,B,C\}$ are equivalent and Theorem~\ref{triangle} gives just one strongly regular configuration up to isomorphism, being self-dual. The smallest non-De\-sar\-guesian projective planes are of order~$9$: the Hall plane, its dual, and the self-dual Hughes plane. The Hall plane contains six inequivalent triangles and as many non-isomorphic $(64_7;26,30)$ configurations arise from Theorem~\ref{triangle}. These configurations are not self-dual. Of course, they are duals of the configurations derived from the dual Hall plane. The Hughes plane contains $16$ inequivalent triangles. The corresponding configurations are not isomorphic; $10$ are self-dual and there are $3$ dual pairs. Information on the orders of full automorphism groups of these configurations is given in Table~\ref{table64}.
\begin{table}[h]
\begin{tabular}{|c c c|c c c|}
\hline Plane & $|\mathop{\mathrm{Aut}}|$ & \#Cf & Plane & $|\mathop{\mathrm{Aut}}|$ & \#Cf \\ \hline \rule{0mm}{12pt}$PG(2,9)$ & 768 & 1 & Hughes & 144 & 1 \\ \cline{1-3} \rule{0mm}{12pt}Hall & 768 & 1 & & 48 & 1 \\ \rule{0mm}{11pt} & 96 & 2 & & 32 & 1 \\ \rule{0mm}{11pt} & 12 & 2 & & 18 & 1 \\ \rule{0mm}{11pt} & 6 & 1 & & 12 & 3 \\ \cline{1-3} \rule{0mm}{12pt}Dual Hall & 768 & 1 & & 6 & 4 \\ \rule{0mm}{11pt} & 96 & 2 & & 4 & 3 \\ \rule{0mm}{11pt} & 12 & 2 & & 2 & 1 \\ \rule{0mm}{11pt} & 6 & 1 & & 1 & 1 \\ \hline \end{tabular} \vskip 3mm \caption{Distribution of $(64_7;26,30)$ configurations by order of full automorphism group.}\label{table64} \end{table}
Configurations obtained from different projective planes of order $9$ are not isomorphic. Hence, the total number of $(64_7;26,30)$ configurations arising from Theorem~\ref{triangle} is $29$. We could not find any other examples with these parameters. This, together with the uniqueness results of Section~\ref{sec5} (Corollary~\ref{src16} and Proposition~\ref{src36}), seems to suggest that every strongly regular configuration with parameters from Theorem~\ref{triangle} can be uniquely embedded in a projective plane of order~$n$, but we do not have a proof.
\section{Strong deficient difference sets}\label{sec4}
Next we present constructions of strongly regular configurations using difference sets. Let $G$ be a group of order~$v$. A subset $D\subseteq G$ of size~$k$ is a \emph{deficient difference set} if for every $x\in G\setminus\{1\}$, there is at most one pair $(d_1,d_2)\in D\times D$ such that $x=d_1^{-1}d_2$. In short, the left differences $ d_1^{-1}d_2$ must all be distinct. This is equivalent to the right differences $d_1 d_2^{-1}$ being distinct. The elements of~$G$ as points and the \emph{development} $\mathop{\mathrm{dev}} D=\{gD \mid g\in G\}$ as lines form a symmetric $(v_k)$ configuration. The configuration has $G$ as an automorphism group acting regularly on the points and lines~\cite{MF08, MPW87}. In the cyclic case $G=\mathbb{Z}_v$, deficient difference sets are also called \emph{modular Golomb rulers}~\cite{BS21}.
Let $\Delta(D)=\{d_1^{-1}d_2 \mid d_1,d_2\in D, d_1\neq d_2\}$ be the set of left differences of~$D$. This is a subset of $G\setminus \{1\}$ of size $k(k-1)$. For a group element $x\neq 1$, denote by
$n(x)=|\Delta(D)\cap x\Delta(D)|$. Suppose that $n(x)=\lambda$ for every $x\in \Delta(D)$, and $n(x)=\mu$ for every $x\not\in \Delta(D)$. We shall call a subset $D$ with this property a \emph{strong deficient difference set (SDDS)} for $(v_k;\lambda,\mu)$.
\begin{theorem} Let $G$ be a group and $D\subseteq G$ a strong deficient difference set for $(v_k;\lambda,\mu)$. Then, $(G,\mathop{\mathrm{dev}} D)$ is a strongly regular $(v_k;\lambda,\mu)$ configuration with~$G$ as an automorphism group acting regularly on the points and lines. Conversely, any strongly regular $(v_k;\lambda,\mu)$ configuration with an automorphism group~$G$ acting regularly on the points and lines can be obtained from a $SDDS$ in $G$. \end{theorem}
\begin{proof} Two points $x,y\in G$ are collinear if and only if $x^{-1}y\in \Delta(D)$. Let us count the number of points $z\in G\setminus\{x,y\}$ collinear with both $x$ and $y$. This is equivalent to $x^{-1}z\in \Delta(D)$ and $y^{-1}z\in \Delta(D)$, or $z\in x\Delta(D)\cap y\Delta(D)$, or $x^{-1}z \in \Delta(D)\cap x^{-1}y\Delta(D)$. The number of such points $z$ is $\lambda$ if $x^{-1}y\in \Delta(D)$, i.e.\ if $x$ and $y$ are collinear, and $\mu$ otherwise. Hence, the point graph is strongly regular with parameters $SRG(v,k(k-1),\lambda,\mu)$.
Conversely, assume a $(v_k;\lambda,\mu)$ configuration possesses an automorphism group~$G$ acting regularly. Then the points can be identified with the elements of~$G$ and every block is a deficient difference set generating this configuration. The argument above shows that it is in fact a $(v_k;\lambda,\mu)$ SDDS. \end{proof}
Configurations constructed from $PG(2,q)$ by Theorem~\ref{triangle} can be obtained from strong deficient difference sets in the group $G=\mathbb{F}_q^*\times \mathbb{F}_q^*$. Here, $\mathbb{F}_q^*$ denotes the multiplicative group of the finite field $\mathbb{F}_q$, isomorphic to the cyclic group $\mathbb{Z}_{q-1}$. If two of the points $\{A,B,C\}$ are chosen on the ``line at infinity'' and the third point as the ``origin'' $(0,0)$, points of the configuration can be identified with pairs $(x,y)$ with $x,y\in \mathbb{F}_q^*$. Lines are sets of points satisfying equations of the form $y=ax+b$, $a,b\in\mathbb{F}_q^*$. Hence, e.g.\ $D=\{(x,x+1) \mid x\in \mathbb{F}_q^*\setminus \{-1\}\}$ is a SDDS for $(v_k;\lambda,\mu)$ with $v=(q-1)^2$, $k=q-2$, $\lambda=(q-4)^2+1$, and $\mu=(q-3)(q-4)$. The full automorphism group of the configuration is $((\mathbb{F}_q^*\times \mathbb{F}_q^*):\mathop{\mathrm{Aut}}(\mathbb{F}_q)):S_3$, where $\mathop{\mathrm{Aut}}(\mathbb{F}_q)$ are the field automorphisms, and $S_3$ corresponds to collineations of $PG(2,q)$ exchanging vertices of the triangle $\{A,B,C\}$.
The two $(64_7;26,30)$ configurations with full automorphism groups of order $768$ constructed from the Hall plane and its dual (see Table~\ref{table64}) can be obtained from SDDS's in the group $G=Q_8\times Q_8$, where $Q_8=\{\pm 1,\pm i,\pm j,\pm k\}$ is the quaternion group with usual multiplication (e.g.\ $i^2=j^2=k^2=-1$, $ij=k$). The difference set $$D_1=\{(1,1), (i,-k), (j,k), (k,-j), (-i,j), (-j,i), (-k,-i)\}$$ gives the configuration constructed from the Hall plane and $$D_2=\{(1,1), (i,-k), (j,j), (k,-j), (-i,-i), (-j,i), (-k,k)\}$$ gives the dual configuration. The Hall plane of order $9$ and its dual are coordinatized by the quaternionic near-field. The first configuration arises from Theorem~\ref{triangle} when two of the points $\{A,B,C\}$ are chosen on the translation line of the Hall plane, and the second configuration when one of the points is the translation point of the dual Hall plane.
We performed an exhaustive computer search for strong deficient difference sets with parameters corresponding to proper and primitive strongly regular configurations in groups of order $v\le 200$, using the GAP library of small groups~\cite{GAP4}. Apart from the examples just described, we found four other examples not corresponding to Theorem~\ref{triangle}. The configurations constructed from these SDDS's have flag-transitive automorphism groups. Here are their descriptions.
\begin{example}\label{sdds13} SDDS's for $(13_3;2,3)$ exist in the cyclic group $\mathbb{Z}_{13}$. There is one SDDS fixed by the multiplier $3$: $\{7,8,11\}$. The development has full automorphism group $\mathbb{Z}_{13}:\mathbb{Z}_3$ acting flag-transitively. \end{example}
This is the only cyclic strongly regular configuration we found. It can be embedded in the projective plane of order $3$ by adding a point to every line.
\begin{example}\label{sdds96} SDDS's for $(96_5;4,4)$ exist in the groups $\mathbb{Z}_4 \times S_4$, $(\mathbb{Z}_2 \times \mathbb{Z}_2 \times A_4) : \mathbb{Z}_2$, $D_8\times A_4$ and $\mathbb{Z}_2 \times \mathbb{Z}_2 \times S_4$. Here is one SDDS in $\mathbb{Z}_4\times S_4$: $$\{(0,id), (1, (1,4)(2,3)), (1,(1,3,4,2)),
(1, (1,4,3)), (2, (1,2,4)) \}.$$ The developments are all isomorphic and give one self-dual configuration. The full automorphism group is $((\mathbb{Z}_2 \times \mathbb{Z}_2 \times \mathbb{Z}_2 \times \mathbb{Z}_2) : A_6) : \mathbb{Z}_2$ of order $11520$ and acts flag-transitively. \end{example}
The associated graphs have parameters $SRG(96,20,4,4)$. Many such graphs are known, see~\cite{BKK03, GMV06}. The graph with the largest automorphism group of order $138240$ is the point graph of the generalized quadrangle $pg(5,3,1)$. The graph of the $(96_5;4,4)$ configuration has full automorphism group of order $11520$. In~\cite{BKK03}, this graph is denoted by $K''$ and the configuration is mentioned as a ``partial linear space with five points per line and five lines on each point''.
\begin{example}\label{sdds120} SDDS's for $(120_8;28,24)$ exist in the symmetric group $S_5$, e.g. \begin{align*}
\{ & id,\, (1,2,5,3,4),\, (1,3,4,2,5),\, (1,5,3,2,4),\, (1,4)(2,3,5), \\[1mm]
& (1,4,5,2),\, (1,2,4),\, (1,2,5)\}. \end{align*} Up to isomorphism one self-dual strongly regular configuration arises. The full automorphism group is isomorphic to the alternating group~$A_8$ of size $20160$ and acts flag-transitively. \end{example}
This $(120_8;28,24)$ configuration was constructed in~\cite{BHT97} by embedding the $pg(7,8,4)$ of~\cite{DDT80, AMC81} into a Steiner $2$-$(120,8,1)$ design. The $135$ lines of the $pg(7,8,4)$ and the $120$ lines of the configuration cover every pair of the $120$ points exactly once and form a design. The point graphs of the $pg(7,8,4)$ and the $(120_8;28,24)$ configuration are complementary with parameters $SRG(120,63,30,36)$ and $SRG(120,56,28,24)$, respectively.
The $pg(7,8,4)$ is part of an infinite family constructed from the hyperbolic quadric in $PG(4n-1,2)$~\cite{DDT80}. The family is denoted by $PQ^+(2n-1,2)$ and has parameters $pg(2^{2n-1}-1,2^{2n-1},2^{2n-2})$. These parameters fit a hypothetical $(v_k;\lambda,\mu)$ configuration with $$v=2^{2n-1}(2^{2n}-1), k=2^{2n-1}, \lambda=2^{2n-2}(2^{2n-1}-1), \mu=2^{2n-1}(2^{2n-2}-1)$$ to make a $2$-$(v,k,1)$ design, but in \cite[Theorem 2.1]{BHT97} it was proved that this is not possible for $n>2$. Non-isomorphic partial geometries with the same parameters were constructed in~\cite{MS97, DD00} that could possibly be embedded in Steiner $2$-designs.
\begin{example}\label{pg42} SDDS's for $(155_7;17,9)$ exist in the group $G=\mathbb{Z}_{31}:\mathbb{Z}_5$. Let $G$ be represented as permutations of $\mathbb{Z}_{31}$ generated by $f:x\mapsto x+1 \pmod{31}$ and $g:x\mapsto 2x \pmod{31}$. Then, $\{id,f^{12}g^4, f^{15}g, f^{18}, f^{20}g^2,$ $f^{26}g^3, f^{30}\}$ is a SDDS. One self-dual strongly regular configuration arises, isomorphic to the semipartial geometry $LP(4,2)$. The full automorphism group $P\Gamma L(5,2)$ is of order $9999360$ and acts flag-transitively. \end{example}
The configurations obtained from $LP(4,2)$ by polarity transformations cannot be constructed from SDDS because their full automorphism groups are not transitive. The dual pair $LP(4,2)^\pi$ and $LP(4,2)_{\pi'}$ have full automorphism groups of order $322560$ isomorphic to $(\mathbb{Z}_2)^4:P\Gamma L(4,2)$. The group acts in orbits of size $35$, $120$ on the points and $15$, $140$ on the lines of $LP(4,2)^\pi$, and vice versa for $LP(4,2)_{\pi'}$. The self-dual configuration $LP(4,2)^{\pi}_{\pi'}$ has full automorphism group of order $20160$ isomorphic to $P\Gamma L(4,2)$ acting in orbits of size $15$, $35$, $105$.
Our final examples of strongly regular configurations also cannot be obtained from SDDS's. They do not admit automorphism groups acting regularly, although some have flag-transitive automorphism groups.
\begin{example}\label{src63} There are at least four non-isomorphic $(63_6;13,15)$ configurations. Two of them are self-dual with full automorphism groups $PSU(3,3) : \mathbb{Z}_2$ of order $12096$ acting flag-transitively. Furthermore, there is a dual pair with full automorphism groups $(SL(2,3) : \mathbb{Z}_4) : \mathbb{Z}_2$ of order $192$ acting in orbits of size $1$, $6$, $24$, $32$. \end{example}
The two self-dual $(63_6;13,15)$ configurations are related to the smallest generalized hexagon $GH(2,2)$ (see~\cite[Section 5.7]{GR01}). This is a $(63_3)$ configuration with point and line graphs of girth $12$ and diameter $6$. The graphs are distance regular, but not strongly regular. A strongly regular $(63_6;13,15)$ configuration can be constructed similarly as a semipartial geometry from a Moore graph: the new configuration has the same points as $GH(2,2)$, and lines of the new configuration are sets of $6$ points collinear with a given point of $GH(2,2)$. The point graph of this $(63_6)$ configuration is a $SRG(63,30,13,15)$. The other self-dual $(63_6;13,15)$ configuration is constructed in the same way from the dual of $GH(2,2)$. We discovered the dual pair of non-transitive $(63_6;13,15)$ configurations computationally, by prescribing automorphism groups.
\section{A table of feasible parameters}\label{sec5}
In the final section we present a table of feasible parameters of strongly regular configurations with $v\le 200$. A.~E.~Brouwer's table of strongly regular graphs~\cite{AEB} contains $437$ parameter sets $SRG(v,d,\lambda,\mu)$ with $v\le 200$. It is known that strongly regular graphs do not exist in $62$ cases. Among the remaining $375$ cases, we look for those with $d=k(k-1)$ for some $k\ge 3$. This way we get $64$ parameter sets $(v_k;\lambda,\mu)$.
Eleven of the $64$ parameter sets do not satisfy Theorem~\ref{cliquecond}. Six satisfy the theorem with equality and correspond to partial geometries $pg(2,2,1)$, $pg(3,3,1)$, $pg(6,6,4)$, $pg(5,5,2)$, $pg(4,4,1)$, and $pg(5,5,1)$. The $pg(q,q,1)$ with $q=2,3,4,5$ are the classical generalized quadrangles $W(q)$ and their duals, see~\cite{PT09}. Two non-isomorphic $pg(5,5,2)$'s are known~\cite{vLS81, CST21, VK20b}, whereas the existence of a $pg(6,6,4)$ is open. Six of the remaining $47$ parameter sets are eliminated by Proposition~\ref{squarecond}.
\begin{table} \begin{center}
\begin{tabular}{|c c c c l|} \hline No. & $(v_k;\lambda,\mu)$ & \#Cf & \#SCf & Comments\\ \hline \rule{0mm}{4.5mm}1 & $(10_{3};3,4)$ & \textbf{2} & \textbf{2} & \\ \rule{0mm}{4.5mm}2 & $(13_{3};2,3)$ & \textbf{1} & \textbf{1} & Proposition~\ref{src13}\\ \rule{0mm}{4.5mm}3 & $(16_{3};2,2)$ & \textbf{1} & \textbf{1} & Proposition~\ref{src16}\\ \rule{0mm}{4.5mm}4 & $(25_{4};5,6)$ & \textbf{0} & \textbf{0} & Proposition~\ref{non25-4}\\ \rule{0mm}{4.5mm}5 & $(36_{5};10,12)$ & \textbf{1} & \textbf{1} & Proposition~\ref{src36}\\ \rule{0mm}{4.5mm}6 & $(41_{5};9,10)$ & ? & ? & \\ \rule{0mm}{4.5mm}7 & $(45_{4};3,3)$ & \textbf{0} & \textbf{0} & Proposition~\ref{non45-4}\\ \rule{0mm}{4.5mm}8 & $(49_{4};5,2)$ & \textbf{0} & \textbf{0} & Corollary~\ref{norook} \\ \rule{0mm}{4.5mm}9 & $(49_{6};17,20)$ & 1 & 1 & Theorem~\ref{triangle}\\ \rule{0mm}{4.5mm}10 & $(50_{7};35,36)$ & 211 & 111 & Proposition~\ref{src50}\\ \rule{0mm}{4.5mm}11 & $(61_{6};14,15)$ & ? & ? & \\ \rule{0mm}{4.5mm}12 & $(63_{6};13,15)$ & 4 & 2 & Example~\ref{src63}\\ \rule{0mm}{4.5mm}13 & $(64_{7};26,30)$ & 29 & 11 & Theorem~\ref{triangle}\\ \rule{0mm}{4.5mm}14 & $(81_{8};37,42)$ & ? & ? & \\ \rule{0mm}{4.5mm}15 & $(85_{6};11,10)$ & ? & ? & \\ \rule{0mm}{4.5mm}16 & $(85_{7};20,21)$ & ? & ? & \\ \rule{0mm}{4.5mm}17 & $(96_{5};4,4)$ & 1 & 1 & Example~\ref{sdds96}\\ \rule{0mm}{4.5mm}18 & $(99_{7};21,15)$ & ? & ? & \\ \rule{0mm}{4.5mm}19 & $(100_{9};50,56)$ & 1 & 1 & Theorem~\ref{triangle}\\ \rule{0mm}{4.5mm}20 & $(105_{9};51,45)$ & ? & ? & \\ \rule{0mm}{4.5mm}21 & $(113_{8};27,28)$ & ? & ? & \\ \rule{0mm}{4.5mm}22 & $(120_{8};28,24)$ & 1 & 1 & Example~\ref{sdds120}\\ \rule{0mm}{4.5mm}23 & $(121_{5};9,2)$ & \textbf{0} & \textbf{0} & Corollary~\ref{norook} \\ \rule{0mm}{4.5mm}24 & $(121_{6};11,6)$ & ? & ? & \\ \rule{0mm}{4.5mm}25 & $(121_{9};43,42)$ & ? & ? & \\ \rule{0mm}{4.5mm}26 & $(121_{10};65,72)$ & ? & ? & \\ \rule{0mm}{4.5mm}27 & $(125_{9};45,36)$ & ? & ? & \\ \rule{0mm}{4.5mm}28 & $(136_{6};15,4)$ & ? & ? 
& \\ \rule{0mm}{4.5mm}29 & $(136_{9};36,40)$ & ? & ? & \\ \rule{0mm}{4.5mm}30 & $(144_{11};82,90)$ & 1 & 1 & Theorem~\ref{triangle}\\ \hline \end{tabular} \end{center} \vskip 3mm \caption{Feasible parameters of proper primitive strongly regular configurations.}\label{fptable} \end{table}
\addtocounter{table}{-1}
\begin{table} \begin{center}
\begin{tabular}{|c c c c l|} \hline No. & $(v_k;\lambda,\mu)$ & \#Cf & \#SCf & Comments\\ \hline \rule{0mm}{4.5mm}31 & $(145_{9};35,36)$ & ? & ? & \\ \rule{0mm}{4.5mm}32 & $(153_{8};19,21)$ & ? & ? & \\ \rule{0mm}{4.5mm}33 & $(155_{7};17,9)$ & 4 & 2 & Theorem~\ref{LPtransformed}\\ \rule{0mm}{4.5mm}34 & $(169_{9};31,30)$ & ? & ? & \\ \rule{0mm}{4.5mm}35 & $(169_{12};101,110)$ & ? & ? & \\ \rule{0mm}{4.5mm}36 & $(171_{11};73,66)$ & ? & ? & \\ \rule{0mm}{4.5mm}37 & $(175_{6};5,5)$ & ? & ? & \\ \rule{0mm}{4.5mm}38 & $(181_{10};44,45)$ & ? & ? & \\ \rule{0mm}{4.5mm}39 & $(196_{10};40,42)$ & ? & ? & \\ \rule{0mm}{4.5mm}40 & $(196_{13};122,132)$ & ? & ? & \\ \rule{0mm}{4.5mm}41 & $(196_{13};125,120)$ & ? & ? & \\ \hline \end{tabular} \end{center} \vskip 3mm \caption{Feasible parameters of proper primitive strongly regular configurations (continued).} \end{table}
Thus, there are $41$ feasible parameter sets $(v_k;\lambda,\mu)$ of proper and primitive strongly regular configurations with $v\le 200$. The para\-meters are listed in Table~\ref{fptable} along with information on the numbers of strongly regular configurations (\#Cf) and self-dual strongly regular configurations (\#SCf) up to isomorphism. A number in \textbf{boldface} indicates that this is the exact number, otherwise it is a lower bound.
In the smallest case $(10_3;3,4)$, there are altogether ten combinatorial $(10_3)$ configurations denoted by $(10_3)_i$, $i=1,\ldots,10$ in \cite[Section~2.2]{BG09}. Two of them are strongly regular: the Desargues configuration~$(10_3)_1$ and the configuration $(10_3)_4$ depicted in Figure~\ref{smallest}. Interestingly, $(10_3)_4$ is the only one of the ten $(10_3)$ configurations that cannot be drawn with straight lines, i.e.\ that is not a \emph{geometric configuration} (see~\cite{BG09, PS13}). In the next two cases $(13_3;2,3)$ and $(16_3;2,2)$, the total numbers of $(13_3)$ and $(16_3)$ configurations are also known: $2036$~\cite{HG90} and $3\,004\,881$~\cite{BBP00}, respectively. Since the number of combinatorial $(v_k)$ configurations grows rapidly with~$v$, a better approach to classifying strongly regular configurations is through the associated graphs.
Suppose that a strongly regular graph $\Gamma$ with parameters $SRG(v,k(k-1),\lambda,\mu)$ is the point graph of a $(v_k;\lambda,\mu)$ configuration. Every line of the configuration gives a clique of size~$k$ in~$\Gamma$. Thus, there must be~$v$ such cliques with every pair of them intersecting in at most one point. Given the graph~$\Gamma$, we define the \emph{clique graph} $\mathcal{C}(\Gamma)$ with vertices being $k$-cliques in~$\Gamma$. Vertices of $\mathcal{C}(\Gamma)$ are adjacent if the cliques intersect in at most one point. The task is to find the cliques of size $v$ in $\mathcal{C}(\Gamma)$.
Up to isomorphism, there is a unique graph $SRG(13,6,2,3)$, the Paley graph. The vertices of $\Gamma$ are elements of the finite field $\mathbb{F}_{13}$ with two vertices being adjacent if their difference is a quadratic residue. Using Cliquer~\cite{NO03}, we found $26$ cliques of size $3$ in $\Gamma$. The clique graph $\mathcal{C}(\Gamma)$ has $26$ vertices and $286$ edges. Using Cliquer once more, we found exactly two cliques of size $13$ in $\mathcal{C}(\Gamma)$, corresponding to isomorphic $(13_3;2,3)$ configurations. This proves that the cyclic configuration constructed in Example~\ref{sdds13} is unique.
\begin{proposition}\label{src13} There is one $(13_3;2,3)$ configuration up to isomorphism. \end{proposition}
There are two graphs with parameters $SRG(16,6,2,2)$. One of them is the Shrikhande graph~\cite{SSS59} with full automorphism group of order $192$. Similarly as for the previous parameters, we found $32$ cliques of size~$3$ in~$\Gamma$ and two cliques of size~$16$ in~$\mathcal{C}(\Gamma)$, corresponding to isomorphic $(16_3;2,2)$ configurations.
The other $SRG(16,6,2,2)$ has full automorphism group of order $1152$. This is the $4\times 4$ \emph{rook graph}, sometimes also called the \emph{lattice graph} or \emph{grid graph}. Vertices of the $n\times n$ rook graph $R_n$ are pairs $(x,y)$ with $x,y\in\{1,\ldots,n\}$. Two vertices $(x_1,y_1)$, $(x_2,y_2)$ are adjacent if $x_1=x_2$ or $y_1=y_2$ holds. The graph $R_n$ is strongly regular with parameters $SRG(n^2,2(n-1),n-2,2)$ and has $2n$ maximal cliques of size $n$, being sets of vertices with a fixed coordinate. Any clique of size at least $2$ is contained in exactly one of these maximal cliques. If $R_n$ is the point graph of a $(v_k;\lambda,\mu)$ configuration, then $2(n-1)=k(k-1)$ holds. This is equivalent to $n=\binom{k}{2}+1$ and the configuration would have parameters \begin{equation}\label{rookparam} v=\left( \binom{k}{2}+1\right)^2,\kern 3mm \lambda=\binom{k}{2}-1,\kern 3mm \mu=2. \end{equation} We now prove that this cannot occur.
\begin{theorem}\label{thmrook} The $n\times n$ rook graph is not the point graph of a strongly regular configuration. \end{theorem}
\begin{proof} Lines of the configuration would give a set $\mathcal{C}$ of $v=n^2$ cliques of size $k$ in $R_n$, pairwise intersecting in at most one vertex. A maximal clique of size $n=\binom{k}{2}+1$ contains no more than $\frac{n(n-1)}{k(k-1)}$ of the cliques in~$\mathcal{C}$, because each of the $n(n-1)$ pairs of distinct vertices is contained in at most one $k$-clique, and a $k$-clique covers $k(k-1)$ pairs. Therefore, $\mathcal{C}$ is not larger than $2n\cdot \frac{n(n-1)}{k(k-1)}$. This is equal to $v=n^2$, and therefore the cliques of $\mathcal{C}$ contained in a given $n$-clique cover every pair of its $n$ vertices exactly once. In this way we get a Steiner $2$-$(n,k,1)$ design. If $r=\frac{n-1}{k-1}$ is the replication number of the design, Fisher's inequality $r\ge k$ gives $n-1\ge k(k-1)$, a contradiction with $n=\binom{k}{2}+1$. \end{proof}
Together with the discussion about the Shrikhande graph, this proves that the strongly regular configuration constructed from $PG(2,5)$ by Theorem~\ref{triangle} is unique.
\begin{corollary}\label{src16} There is one $(16_3;2,2)$ configuration up to isomorphism. \end{corollary}
Furthermore, Theorem~\ref{thmrook} eliminates infinitely many feasible parameter sets of strongly regular configurations.
\begin{corollary}\label{norook} Strongly regular configurations with parameters~\eqref{rookparam} do not exist for $k>3$. \end{corollary}
\begin{proof} In~\cite{SSS59}, Shrikhande proved that for $n>4$ the only strongly regular graph with parameters $SRG(n^2,2(n-1),n-2,2)$ is the $n\times n$ rook graph. \end{proof}
We can eliminate two more parameter sets $(v_k;\lambda,\mu)$ and prove uniqueness for another computationally, when all $SRG(v,k(k-1),\lambda,\mu)$ graphs are known.
\begin{proposition}\label{non25-4} Strongly regular $(25_4;5,6)$ configurations do not exist. \end{proposition}
\begin{proof} Up to isomorphism, there are exactly $15$ strongly regular graphs with parameters $SRG(25,12,5,6)$~\cite{AP73, MR73}. The adjacency matrices are available on E.~Spence's web page~\cite{ES}. Cliquer found from $73$ to $90$ cliques of size $4$ in these graphs, but none of the corresponding clique graphs $\mathcal{C}(\Gamma)$ contain a clique of size $25$. \end{proof}
\begin{proposition}\label{src36} There is one $(36_5;10,12)$ configuration up to isomorphism. \end{proposition}
\begin{proof} There are exactly $32\,548$ graphs $SRG(36,20,10,12)$~\cite{MS01}. Adjacency matrices of the complementary graphs are available on the web page~\cite{ES}. Using Cliquer, we found that the $SRG(36,20,10,12)$ graphs~$\Gamma$ contain from $132$ to $336$ cliques of size $5$. Only one of the corresponding clique graphs $\mathcal{C}(\Gamma)$ contains a clique of size~$36$. This happens when the complementary graph $\overline{\Gamma}$ with parameters $SRG(36,15,6,6)$ is the graph constructed from the cyclic Latin square of order~$6$. Two strongly regular configurations arise, both isomorphic to the configuration constructed from $PG(2,7)$ by Theorem~\ref{triangle}. \end{proof}
\begin{proposition}\label{non45-4} Strongly regular $(45_4;3,3)$ configurations do not exist. \end{proposition}
\begin{proof} There are $78$ graphs $SRG(45,12,3,3)$~\cite{CDS06}. Adjacency matrices are available on~\cite{ES}. The graphs contain from $12$ to $135$ cliques of size~$4$ and the corresponding clique graphs do not contain cliques of size $45$. \end{proof}
It is also known that graphs with parameters $SRG(50,42,35,36)$ are unique, i.e.\ isomorphic to the complement of the Hoffman-Singleton graph. This graph has $2\,708\,150$ cliques of size $7$ and we could not classify all cliques of size $50$ in $\mathcal{C}(\Gamma)$. There may be other $(50_7;35,36)$ configurations apart from the $211$ examples of Proposition~\ref{src50}.
\end{document} | arXiv | {
"id": "2104.04880.tex",
"language_detection_score": 0.7346770763397217,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\maketitle
\begin{abstract} We prove that metric measure spaces obtained as limits of closed Riemannian manifolds with Ricci curvature satisfying a uniform Kato bound are rectifiable. In the case of a non-collapsing assumption and a strong Kato bound, we additionally show that for any $\upalpha \in (0,1)$ the regular part of the space lies in an open set with the structure of a $\mathcal C^\upalpha$-manifold. \end{abstract}
\tableofcontents
\section{Introduction}
In this paper, we establish new geometric and analytic properties of Kato limit spaces, i.e.~measured Gromov-Hausdorff limits of closed Riemannian manifolds with Ricci curvature satisfying a uniform Kato bound. Our work continues the study began in \cite{CMT} where we introduced these spaces.
For a closed Riemannian manifold $(M^n,g)$ of dimension $n \ge 2$, define \begin{equation*}
\ensuremath{\mbox{k}_t(M^n,g)} := \sup_{x \in M}\int_0^t\int_M H(s,x,y)\ensuremath{\mbox{Ric}_{\mbox{\tiny{-}}}}(y) \mathop{}\!\mathrm{d} \nu_g(y) \mathop{}\!\mathrm{d} s \end{equation*} for any $t>0$, where $H$ is the heat kernel of $M$, $\nu_g$ is the Riemannian volume measure and $\ensuremath{\mbox{Ric}_{\mbox{\tiny{-}}}} : M \to \mathbb{R}_+$ is the lowest non-negative function such that for any $x \in M$, \[ \ensuremath{\mbox{Ric}}_x \ge - \ensuremath{\mbox{Ric}_{\mbox{\tiny{-}}}}(x)g_x. \] Equivalently, $\ensuremath{\mbox{Ric}_{\mbox{\tiny{-}}}}$ is the negative part of the smallest eigenvalue of the Ricci tensor.
For the whole article, we keep a positive number $T$ and a function \mbox{$f: (0,T] \to \mathbb{R}_+$} fixed, so that $f$ is non-decreasing and \begin{equation}\label{eq:f}\lim_{t \to 0} f(t)=0 \quad \text{and} \quad f(T) \leq \frac{1}{16n} \cdot\end{equation} We let $\mathcal{K}(n,f)$ be the set of isometry classes of $n$-dimensional closed Riemannian manifolds $(M^n,g)$ satisfying the Kato bound \begin{equation}\label{eq:Kato}\tag{K} \mathrm{k}_t(M^n,g)\leq f(t), \qquad \forall t \in (0,T]. \end{equation} This bound is implied, for instance, by a lower bound on the Ricci curvature, or by a suitable uniform $L^p$ estimate on $\ensuremath{\mbox{Ric}_{\mbox{\tiny{-}}}}$ \cite{RoseStollmann}.
For $c>0$ fixed throughout the article, let $\mathcal{K}_\mathfrak{m}(n,f,c)$ be the set of quadruples $(M^n,\mathsf{d}_g,\mu,o)$ where $(M^n,g) \in \mathcal{K}(n,f)$, $o \in M$, $\mathsf{d}_g$ is the Riemannian distance associated with $g$ and $\mu$ is a multiple of $\nu_g$ satisfying \[ c \le \mu(B_{\sqrt{T}}(o)) \le c^{-1}. \] As proved in \cite{C16,CMT}, elements in $\mathcal{K}(n,f)$ satisfy a uniform doubling condition. As a consequence, Gromov's precompactness theorem ensures that the set $\mathcal{K}_\mathfrak{m}(n,f,c)$ is precompact in the pointed measured Gromov-Hausdorff topology. We call Kato limit space any element in the closure $\overline{\cK_\meas(n,f,c)}$ with respect to this topology. Observe that Ricci limit spaces, that is limits of manifolds with a uniform Ricci lower bound \cite{ChCo97,CheegerColdingII,CheegerColdingIII,CheegerPisa}, are Kato limit spaces.
Our first result is the rectifiability of Kato limit spaces. This was shown for Ricci limit spaces in \cite[Theorem 5.7]{CheegerColdingIII}.
\begin{theorem}\label{th:main1} Let $(X,\mathsf{d},\mu,o)$ be a Kato limit space. Then $(X,\mathsf{d},\mu)$ is rectifiable as a metric measure space, in the sense that there exists a countable collection $\{(k_i,V_i,\phi_i)\}_i$ where $\{V_i\}$ are Borel subsets covering $X$ up to a $\mu$-negligible set, $\{k_i\}$ are positive integers, and $\phi_i : V_i \to \mathbb{R}^{k_i}$ is a bi-Lipschitz map such that $(\phi_i)_\#(\mu \measrestr V_i) \ll \mathcal H^{k_i}$ for any $i$, where $\mathcal H^{k_i}$ is the $k_i$-dimensional Hausdorff measure. \end{theorem}
Consider now the non-collapsing case, that is there exists $v>0$ such that for some $o \in M$ \begin{equation}\label{eq:NC}\tag{NC} \nu_g(B_{\sqrt{T}}(o)) \geq v T^{\frac{n}{2}}. \end{equation} Assume that $f$ additionally satisfies \begin{equation}\label{eq:SK}\tag{SK} \int_{0}^T \frac{\sqrt{f(t)}}{t}\mathop{}\!\mathrm{d} t < +\infty. \end{equation} In this case, we say that $(M^n,g) \in \mathcal{K}(n,f)$ satisfies a strong Kato bound. Let $\mathcal{K}(n,f,v)$ be the set of isometry classes of pointed closed $n$-dimensional manifolds $(M^n,g,o)$ satisfying a strong Kato bound and the non-collapsing assumption. We call non-collapsed strong Kato limit space any element in the closure $\overline{\cK(n,f,v)}$ with respect to the pointed Gromov-Hausdorff topology. Notice that we do not need to consider measured Gromov-Hausdorff topology, because, thanks to the volume continuity proved in \cite[Theorem 7.1]{CMT}, Riemannian volumes converge to the $n$-dimensional Hausdorff measure.
Our second main result is the bi-Hölder regularity of the regular set of non-collapsed strong Kato limit spaces. This was proved for non-collapsed Ricci limit spaces in \cite[Theorem 5.14]{ChCo97}.
\begin{theorem}\label{th:Holderreg} Let $(X,\mathsf{d},o)$ be a non-collapsed strong Kato limit space. Then for any $\upalpha\in (0,1)$ the regular set \[ \cR:=\{ x \in X : (\mathbb{R}^n,\mathsf{d}_e,0) \in \Tan(X,x)\} \] is contained in an open $\mathcal C^\upalpha$ manifold $\mathcal U_\upalpha \subset X$. Here $\mathsf{d}_e$ is the Euclidean distance and $\Tan(X,x)$ is the set of metric tangent cones of $X$ at $x$, see Definition \ref{D:tangent}. \end{theorem}
In \cite[Theorem 6.2]{CMT} we also showed that non-collapsed strong Kato limit spaces admit a stratification. By combining this with volume continuity and arguments from \cite[Theorem 6.1]{ChCo97} (see also \cite[Theorem 10.22]{CheegerPisa}), we then prove that the singular set $\mathcal{S} := X \setminus \cR$ of any $(X,\mathsf{d}, o) \in \overline{\cK(n,f,v)}$ has codimension two. For the sake of completeness, we provide a proof in the Appendix.
Our proofs of Theorem \ref{th:main1} and Theorem \ref{th:Holderreg} strongly rely on the existence of splitting maps on Kato limit spaces. These are harmonic maps with a suitable \mbox{$W^{2,2}$-estimate} which realize a Gromov-Hausdorff approximation between a small ball around $x$ and a Euclidean ball of same radius. In Section 3, we give conditions for the existence of such maps, and establish some of their properties, relying on the analysis performed in \cite{CMT}.
In order to prove Theorem \ref{th:main1}, we start by observing that almost splitting maps exist around any point $x$ of a Kato limit space admitting a Euclidean tangent cone. After that, by means of a suitable propagation property of these maps, we adapt arguments from \cite{BPS} which built upon \cite{GigliPas} to provide a proof of the rectifiability of $\RCD(K,N)$ spaces \cite{MondinoNaber} via almost splitting maps. Let us point out that, unlike the uniform lower Ricci bound considered in \cite{ChCo97}, the Kato bound \eqref{eq:Kato} does not provide a directionally restricted relative volume comparison on the limit space, so that the proof of rectifiability by Cheeger and Colding, based on a suitable control on the volume deformation of pseudo-cubes through pseudo-translations, does not carry over.
We do not know whether the dimensions $k_i$ in Theorem \ref{th:main1} are all equal to a constant. This has been proved true by Colding and Naber for Ricci limit spaces \cite{ColdingNaber}, but the arguments used there seem hardly reproducible in the context of a Kato bound; a more conceivable approach may be the one used by Brué and Semola in the $\RCD(K,N)$ framework \cite{BrueSemola}.
To prove Theorem \ref{th:Holderreg}, a key tool is the following almost monotone quantity, which we introduced in \cite{CMT} to get information on the infinitesimal geometry of non-collapsed strong Kato limits. For $X \in \overline{\cK(n,f,v)}$, $x \in X$, $t>0$, consider \[ \uptheta(t,x) := (4\pi t)^{n/2} H(t,x,x) \] where $H$ is the heat kernel of $X$. In case $(M^n,g)$ is a Riemannian manifold with non-negative Ricci curvature, the Li-Yau inequality implies that the function \mbox{$t \mapsto \uptheta(t,x) \in [1, +\infty[$} is non-decreasing for all $x \in M$. When $(M^n,g)$ satisfies a strong Kato bound, we showed in \cite{CMT} that this function is almost non-decreasing everywhere. In particular, its limit as $t$ goes to zero is well-defined, not less than one, and coincides with the inverse of the volume density at $x$. In the present paper, we prove that under \eqref{eq:SK} the regular set of $X$ is given by points where the limit of $\uptheta$ equals one: \[ \cR=\{ x \in X : \lim_{t\to 0}\uptheta(t,x) = 1\}. \]
We also establish that if $\uptheta(t,x)$ is close enough to 1 for some $t>0$ and $x \in X$, then any ball centered around $x$ with small radius is Gromov-Hausdorff close to a Euclidean ball with same radius. More precisely, we prove the following Reifenberg regularity statement.
\begin{theorem}\label{thm:ReifregX} Assume that \eqref{eq:SK} holds. Then for any $\ensuremath{\varepsilon}>0$ there exists $\delta>0$ depending on $n$, $f$ and $\ensuremath{\varepsilon}$ such that for any $(X,\mathsf{d},o)\in \overline{\cK(n,f,v)}$, if $x \in X$ and $t \in (0,\delta T)$ satisfy \begin{equation} \label{eq:assum} \uptheta(t,x) \le 1+\delta \end{equation} then for any $y\in B_{\sqrt{t}}(x)$ and $s\in (0,\sqrt{t}]$, $$\dGH\left( B_{s}(y),\bB^n_{s}\right)\le \ensuremath{\varepsilon} s,$$ where $\mathbb{B}_s^n$ is the Euclidean ball of radius $s$ centered at $0\in \mathbb{R}^n$. \end{theorem}
In addition to the almost monotonicity of $\uptheta$ and the appropriate Li-Yau inequality for Kato limit spaces (see Proposition \ref{lem:LY}), a salient ingredient in our proof of Theorem \ref{thm:ReifregX} is the heat kernel rigidity result obtained in \cite{CT}, which allows for a suitable contradiction argument.
From Theorem \ref{thm:ReifregX} we could immediately appeal to the intrinsic Reifenberg theorem of Cheeger and Colding \cite[Theorem A.1.1]{ChCo97} and get the conclusion of Theorem \ref{th:Holderreg}. We prefer to provide an explicit construction of a bi-Hölder homeomorphism obtained from almost splitting maps through a Transformation Theorem, in the spirit of \cite{CJN}. One key new point in our approach is an almost-rigidity statement implying that for sufficiently small $\delta$, if a point $x$ in a non-collapsed strong Kato limit space satisfies $$\uptheta(t,x)<1+\delta,$$ then an almost splitting map realizing a GH-isometry exists from $B_{\sqrt{t}}(x)$ to a Euclidean ball of radius $\sqrt{t}$. We next prove a Transformation Theorem that eventually provides a better regularity on such harmonic maps: these are bi-Hölder homeomorphisms. The proof of the Transformation Theorem is a direct one and uses some results of \cite{CMT} about convergence of harmonic functions together with the refinements that we develop in Section 3.
\noindent \textbf{Acknowledgments:} The authors are partially supported by the ANR grant ANR-17-CE40-0034: CCEM. The first and third authors thank the Centre Henri Lebesgue ANR-11-LABX-0020-01 for creating an attractive mathematical environment. The first author is also partially supported by the ANR grant ANR-18-CE40-0012: RAGE.
\section{Preliminaries}
In a metric space $(X, \mathsf{d})$ we denote by $B_r(x)$ the open ball of radius $r$ centered at $x \in X$. Letting $B=B_r(x)$, for any $\lambda>0$ we denote by $\lambda B$ the re-scaled ball centered at $x$ of radius $\lambda r$. We call metric measure space any triple $(X,\mathsf{d},\mu)$ where $(X,\mathsf{d})$ is a geodesic and proper metric space and $\mu$ is a fully supported Borel measure such that $\mu(B_r(x))$ is strictly positive and finite for any $x \in X$ and $r>0$.
The Cheeger energy of $(X,\mathsf{d},\mu)$ is the map ${\sf Ch}\colon \Lip_c(X)\rightarrow \mathbb{R}_+$ defined by $${\sf Ch}(f)=\int_X \text{lip}^2(f)\mathop{}\!\mathrm{d}\mu,$$
where $\text{lip}(f)$ denotes the local Lipschitz constant of $f$. Following \cite{GigliMAMS,Gigli} we say that $(X,\mathsf{d},\mu)$ is infinitesimally Hilbertian if ${\sf Ch}$ is quadratic, in which case the closure of ${\sf Ch}$, still denoted by ${\sf Ch}$, is a Dirichlet form with domain denoted by $H^{1,2}(X,\mathsf{d},\mu)$. We write $L$ for the associated non-positive, self-adjoint operator and $\{e^{-tL}\}_{t>0}$ for the Markov semi-group generated by $L$. For any $f\in H^{1,2}(X,\mathsf{d},\mu)$ there exists a unique $| df| \in L^2(X,\mu)$ called minimal relaxed slope of $f$ such that
$${\sf Ch}(f)=\int_X |df|^2\mathop{}\!\mathrm{d}\mu.$$ Moreover, ${\sf Ch}$ is strongly local and regular, and its carré du champ is given by \begin{align*}
\mathop{}\!\mathrm{d} \Gamma(u,v) = \frac{1}{4} (|d(u+v)|^2 - |d(u-v)|^2) \mathop{}\!\mathrm{d} \mu =: \langle d u ,d v \rangle \mathop{}\!\mathrm{d} \mu \end{align*} for any $u,v \in H^{1,2}(X,\mathsf{d},\mu)$. For any open set $\Omega \subset X$ we also set \[ H^{1,2}_{loc}(\Omega,\mathsf{d},\mu):=\{f \in L^2_{loc}(\Omega,\mu) \, : \, \phi f \in H^{1,2}(X,\mathsf{d},\mu) \,\, \text{for any $\phi \in \Lip_c(\Omega)$} \}. \] We say that $f \in H^{1,2}_{loc}(\Omega,\mathsf{d},\mu)$ is harmonic in $\Omega$ if for any $\phi \in \Lip_c(\Omega)$, \[ \int_{\Omega} \langle d f, d \phi \rangle \mathop{}\!\mathrm{d} \mu = 0. \]
If $(M^n,g)$ is a smooth and connected Riemannian manifold, the Cheeger energy of $(M,\mathsf{d}_g,\nu_g)$ coincides with its usual Dirichlet energy. We often implicitly identify a Riemannian manifold $(M^n,g)$ with its isometry class or with the metric measure space $(M,\mathsf{d}_g,\nu_g)$.
For any positive integer $k$, we denote by $\mathbb{B}_r^k$ the Euclidean ball of radius $r$ centered at the origin of $\mathbb{R}^k$, and we write $\mathbb{B}_r^k(p)=p+\mathbb{B}_r^k$ for any $p \in \mathbb{R}^k$.
\subsection{Notions of convergence} We assume the reader to be familiar with the various notions of Gromov-Hausdorff convergence; we refer to \cite[Section 11]{HKST}, for instance, if this is not the case. We simply recall that a map \mbox{$\phi : (X,\mathsf{d}_X) \to (Y,\mathsf{d}_Y)$} is called an $\ensuremath{\varepsilon}$-GH isometry if $|\mathsf{d}_X(x,x') - \mathsf{d}_Y(\phi(x),\phi(x'))|\le \ensuremath{\varepsilon}$ for any $x,x' \in X$ and for any $y \in Y$ there exists $x \in X$ such that $\mathsf{d}_Y(\phi(x),y)\le \ensuremath{\varepsilon}$. If $\{(X_\alpha,\mathsf{d}_\alpha,o_\alpha)\}, (X,\mathsf{d},o)$ are pointed metric spaces such that ${(X_\alpha,\mathsf{d}_\alpha,o_\alpha)\to (X,\mathsf{d},o)}$ in the pointed Gromov-Hausdorff topology, we denote by $x_\alpha \in X_\alpha \to x \in X$ a convergent sequence of points, following \cite[Characterization 1]{CMT} and the definition soon after.
\subsubsection{Tangent cones} Let us recall the classical definitions of tangent cones.
\begin{D}\label{D:tangent} \begin{enumerate} \item Let $(X, \mathsf{d})$ be a metric space. For any $x \in X$, we call metric tangent cone of $(X,\mathsf{d})$ at $x$ any pointed metric space $(Y,\mathsf{d}_Y,x)$ obtained as a limit point in the pointed Gromov-Hausdorff topology of the family of rescalings $\{(X,r^{-1}\mathsf{d},x)\}_{r>0}$ as $r\downarrow 0$. We denote by $\Tan(X,x)$ the set of metric tangent cones of $(X,\mathsf{d})$ at $x$. \item Let $(X, \mathsf{d},\mu)$ be a metric measure space. For any $x \in \supp \mu$, we call metric measured tangent cone of $(X,\mathsf{d},\mu)$ at $x$ any pointed metric measure space $(Y,\mathsf{d}_Y,\mu_Y,x)$ obtained as a limit point in the pointed measured Gromov-Hausdorff topology of the family of rescalings $\{(X,r^{-1}\mathsf{d},\mu(B_r(x))^{-1}\mu, x)\}_{r>0}$ as $r\downarrow 0$. We denote by $\Tan_\mathfrak{m}(X,x)$ the set of metric measured tangent cones of $(X,\mathsf{d}, \mu)$ at $x$. \end{enumerate} \end{D}
We are especially interested in tangent cones which split off a Euclidean factor. Let us recall the definition.
\begin{D} Let $k$ be a positive integer. \begin{enumerate} \item We say that a pointed metric space $(X, \mathsf{d},x)$ splits off an $\mathbb{R}^k$ factor if there exists a pointed metric space $(Z,\mathsf{d}_Z,z)$ and an isometry $\phi : X \to \mathbb{R}^k \times Z$ such that $\phi(x)=(0,z)$.
\item We say that a pointed metric measure space $(X, \mathsf{d},\mu,o)$ splits off an $\mathbb{R}^k$ factor if there exists a pointed metric measure space $(Z,\mathsf{d}_Z,\mu_Z,z)$ and an isometry $\phi : X \to \mathbb{R}^k \times Z$ such that $\phi(x)=(0,z)$ and $\phi_\# \mu = \mathcal{H}^k \otimes \mu_Z$. \end{enumerate} Here and in the sequel the space $\mathbb{R}^k \times Z$ is implicitly equipped with the classical Pythagorean product distance. \end{D}
\subsubsection{Convergence of functions}
Let us recall now some notions of convergence for functions defined on varying spaces. We refer to \cite[Section 1.4]{CMT} and the references therein for a more exhaustive presentation. \begin{D} Let $\{(X_\alpha, \mathsf{d}_\alpha, \mu_\alpha, o_\alpha)\}_\alpha, (X,\mathsf{d},\mu,o)$ be infinitesimally Hilbertian metric measure spaces such that $(X_\alpha, \mathsf{d}_\alpha, \mu_\alpha, o_\alpha) \to (X,\mathsf{d},\mu,o)$ in the pointed measured Gromov-Hausdorff topology.
\begin{enumerate} \item Let $\varphi_\alpha \in \mathcal C_c(X_\alpha)$ for any $\alpha$ and $ \varphi\in \mathcal C_c(X)$ be given. We say that $\left\{\varphi_\alpha\right\}$ converges uniformly on compact sets to $\varphi$ if $\varphi_\alpha(x_\alpha) \to \varphi(x)$ whenever $x_\alpha \in X_\alpha \to x \in X$; we write $\varphi_\alpha\stackrel{\mathcal C_c}{\longrightarrow} \varphi $ if this convergence holds.
\item Let $f_\alpha \in L^2(X_\alpha,\mu_\alpha)$ for any $\alpha$ and $f \in L^2(X,\mu)$ be given. \begin{itemize}
\item We say that $\{f_\alpha\}$ converges to $f$ weakly in $L^2$ if $\sup_\alpha \|f_\alpha\|_{L^2}<+\infty$ and $$\int_{X_\alpha}\varphi_\alpha f_\alpha\mathop{}\!\mathrm{d}\mu_\alpha\to \int_{X}\varphi f\mathop{}\!\mathrm{d}\mu$$ whenever $\varphi_\alpha\stackrel{\mathcal C_c}{\longrightarrow} \varphi $; we write $f_\alpha \stackrel{L^2}{\rightharpoonup}f$ if this convergence holds.
\item We say that $\{f_\alpha\}$ converges to $f$ strongly in $L^2$ if $f_\alpha \stackrel{L^2}{\rightharpoonup}f$ and $\lim_\alpha \|f_\alpha\|_{L^2}= \|f\|_{L^2}$; we write $f_\alpha \stackrel{L^2}{\to}f$ if this convergence holds. \end{itemize}
\item Let $f_\alpha \in H^{1,2}(X_\alpha,\mathsf{d}_\alpha,\mu_\alpha)$ for any $\alpha$ and $f \in H^{1,2}(X,\mathsf{d},\mu)$ be given. \begin{itemize} \item We say that $\{f_\alpha\}$ converges to $f$ weakly in energy if $f_\alpha \stackrel{L^2}{\rightharpoonup}f$ and $\sup_\alpha {\sf Ch}_\alpha(f_\alpha)<+\infty$; we write $f_\alpha \stackrel{\mathrm{E}}{\rightharpoonup}f$ if this convergence holds. \item We say that $\{f_\alpha\}$ converges to $f$ strongly in energy if $f_\alpha \stackrel{\mathrm{E}}{\rightharpoonup}f$ and $\lim_\alpha {\sf Ch}_\alpha(f_\alpha)={\sf Ch}(f)$; we write $f_\alpha \stackrel{\mathrm{E}}{\to}f$ if this convergence holds. \end{itemize}
\end{enumerate} \end{D} \subsection{Kato bound and Kato limits} Recall that $T$, $f$ are fixed and satisfy \eqref{eq:f}. The following has been proved in \cite[Proposition 2.3]{CMT}.
\begin{prop}\label{eq:PI_manifolds} There exists $\upkappa\ge 1$ and $\uplambda>0$ depending only on $n$ such that any $(M^n,g) \in \mathcal{K}(n,f)$ satisfies \begin{itemize} \item[1.] a uniform volume estimate: for any $x\in M$ and $0<s<r\le \sqrt{T}$, \begin{equation}\label{eq:volestim}\frac{\nu_g(B_r(x))}{\nu_g(B_s(x))}\le \upkappa \left(\frac{r}{s}\right)^{e^2n},\end{equation} \item[2.] a uniform local Poincaré inequality: for any ball $B\subset M$ with radius $r\le \sqrt{T}$ and any $\varphi\in \mathcal C^1(B)$,
\begin{equation}\label{eq:Poincaré}\int_B \left(\varphi-\fint_B \varphi\mathop{}\!\mathrm{d} \nu_g\right)^2 \mathop{}\!\mathrm{d} \nu_g\le \uplambda r^2 \int_B |d\varphi|^2 \mathop{}\!\mathrm{d} \nu_g.\end{equation} \end{itemize} \end{prop}
\begin{rem} Note that \eqref{eq:volestim} implies a so-called doubling condition: \begin{equation}\label{eq:doubling} \nu_g(B_{2r}(x)) \le A(n) \nu_g(B_r(x)) \end{equation} for any $x \in X$ and $r \in(0, \sqrt{T}/2]$, where $A(n):=\upkappa 2^{e^2n}$. We shall often use the following consequence of the doubling condition: for any $\lambda\in(0,1)$ there exists a constant $C(n,\lambda)\ge 1$ such that for any ball $B\subset M$ and any locally integrable ${\phi : B \to \mathbb{R}}$, \begin{equation}\label{eq:cor_doublement}
\fint_{\lambda B}|\phi| \mathop{}\!\mathrm{d} \nu_g \leq C(n,\lambda) \fint_{B}|\phi| \mathop{}\!\mathrm{d} \nu_g. \end{equation} \end{rem}
The next proposition collects estimates on the heat kernel of $(M^n,g) \in \mathcal{K}(n,f)$.
\begin{prop}\label{Prop:heatKe} There exists a constant $\upgamma\ge 1$ depending only on $n$ such that for any $(M^n,g) \in \mathcal{K}(n,f)$, for all $x,y\in M$ and $t\in (0,T)$, \begin{enumerate}[i)] \item $\displaystyle \frac{\upgamma^{-1}}{\nu_g(B_{\sqrt{t}}(x))}e^{-\upgamma\frac{\mathsf{d}^2(x,y)}{t}}\le H(t,x,y)\le \frac{\upgamma}{\nu_g(B_{\sqrt{t}}(x))}e^{-\frac{\mathsf{d}^2(x,y)}{5t}},$
\item $\displaystyle \left|\frac{\partial}{\partial t} H(t,x,y)\right|\le \frac{\upgamma}{t \nu_g(B_{\sqrt{t}}(x))}e^{- \frac{\mathsf{d}^2(x,y)}{5t}},$
\item $\displaystyle \left|d_x H(t,x,y)\right|\le \frac{\upgamma}{\sqrt{t}\nu_g(B_{\sqrt{t}}(x))}e^{-\frac{\mathsf{d}^2(x,y)}{5t}}.$ \end{enumerate} \end{prop} \begin{proof} The first estimate i) was established in \cite{C16}, see also \cite[Proposition 3.3]{CMT}. The second estimate ii) is a consequence of i), see e.g.~\cite[Corollary 3.1]{GriderHeat}. The third estimate iii) is a consequence of the Li-Yau inequality \cite[Proposition 3.3]{C16}:
$$e^{-2}\left|d_x H(t,x,y)\right|^2\le \frac{e^2n}{2t} H^2(t,x,y)+H(t,x,y)\left|\frac{\partial}{\partial t} H(t,x,y)\right|,$$ together with i) and ii). \end{proof}
Let us now recall a couple of results from \cite{CMT} about Kato limit spaces.
\begin{prop} Any $(X,\mathsf{d},\mu,o) \in \overline{\cK_\meas(n,f,c)}$ is an infinitesimally Hilbertian space satisfying the doubling condition \eqref{eq:doubling} and the local Poincaré inequality \eqref{eq:Poincaré}. Moreover, for any $x \in X$, any $(Y,\mathsf{d}_Y,\mu_Y,x) \in \Tan_\mathfrak{m}(X,x)$ is a pointed $\RCD(0,n)$ space. \end{prop}
Metric measure spaces satisfying an $\RCD(0,n)$ bound have, in a synthetic sense, non-negative Ricci curvature and dimension less than $n$. We refer to \cite{GigliLectureNotes} for a survey about their properties.
From \cite{CMT}, we also know that the following hold.
\begin{prop} \label{prop:HKconv} Let $\{(M^n_\alpha, \mathsf{d}_\alpha, \mu_\alpha,o_\alpha)\} \subset \cK_\mathfrak{m}(n,f,c)$ be converging to $(X,\mathsf{d}, \mu,o)\in \overline{\cK_\mathfrak{m}(n,f,c)}$ in the pointed measured Gromov-Hausdorff topology. Let $H_\alpha$ be the heat kernel of $(M_\alpha^n,g_\alpha)$ for any $\alpha$. Then $X$ admits a locally Lipschitz heat kernel, that is to say a map $H:(0,+\infty)\times X \times X \to (0,+\infty)$ such that $$\left(e^{-tL}f\right)(x)=\int_X H(t,x,y)f(y)\mathop{}\!\mathrm{d}\mu(y)$$for any $f\in L^2(X,\mu)$, any $t>0$ and $\mu$-a.e.~$x\in X$. Moreover, $H$ satisfies the three estimates in Proposition \ref{Prop:heatKe}. Furthermore, the following convergence results hold. \begin{itemize} \item For any $t>0$ and $x_\alpha\in M_\alpha\to x\in X$, $y_\alpha\in M_\alpha\to y\in X$, \begin{equation}\label{CvHeat} H_\alpha(t,x_\alpha,y_\alpha) \to H(t,x,y) \quad \text{and} \quad \frac{\partial}{\partial t}H_\alpha(t,x_\alpha,y_\alpha) \to \frac{\partial}{\partial t}H(t,x,y). \end{equation} \item For any $t>0$ and $x_\alpha \in M_\alpha \to x \in X$, \begin{equation}\label{L2CvHeat} H_\alpha(t,x_\alpha,\cdot) \stackrel{L^2}{\rightarrow} H(t,x,\cdot). \end{equation} \end{itemize} \end{prop}
As an important consequence, we derive in the next statement a Li-Yau inequality for Kato limit spaces.
\begin{prop}\label{lem:LY} Consider $(X,\mathsf{d}, \mu,o)\in \overline{\cK_\mathfrak{m}(n,f,c)}$. Set $\gamma(t)=\exp\left(8\sqrt{nf(t)}\right)$ for any $t\in(0,T]$. Then for any $x\in X$ and $t\in(0,T]$, the Li-Yau inequality \begin{equation}\label{LYX}
\gamma^{-1}(t)\left| d H(t,x,\cdot )\right|^2-H(t,x,\cdot)\frac{\partial}{\partial t}H(t,x,\cdot)\le \frac{n\gamma(t)}{2t} H^2(t,x,\cdot)\end{equation} holds $\mu$-a.e.~on $X$.\end{prop} \proof Let $\{(M^n_\alpha, \mathsf{d}_\alpha, \mu_\alpha,o_\alpha)\} \subset \cK_\mathfrak{m}(n,f,c)$ be converging to $(X,\mathsf{d}, \mu,o)$ in the pointed measured Gromov-Hausdorff topology. By \cite[Proposition 3.3]{C16}, for any $x,y\in M_\alpha$ and $t\in(0,T]$, \begin{equation}\label{eq:LYXa}
\gamma^{-1}(t)\left|d_yH_\alpha(t,x,y)\right|^2-H_\alpha(t,x,y)\frac{\partial}{\partial t}H_\alpha(t,x,y)\le \frac{n\gamma(t)}{2t} H_\alpha^2(t,x,y). \end{equation} Take $x_\alpha\in M_\alpha\to x\in X$ and set $u_\alpha(y)=H_\alpha(t,x_\alpha,y)$ for any $y \in M_\alpha$ and any $\alpha$. The $L^2$ heat kernel convergence \eqref{L2CvHeat} yields $$u_\alpha\stackrel{L^2}{\rightarrow} u:=H(t,x,\cdot).$$ Moreover, the semi-group property implies \begin{equation}\label{eq:semigroup}
\int_{M_\alpha} |du_\alpha|^2\mathop{}\!\mathrm{d}\nu_{g_\alpha}=\int_{M_\alpha} u_\alpha\Delta_{g_\alpha}u_\alpha\mathop{}\!\mathrm{d}\nu_{g_\alpha}=-\frac{1}{2}\frac{\partial}{\partial t}H_\alpha(2t,x_\alpha,x_\alpha)=-\frac{\partial H_\alpha}{\partial t}(2t,x_\alpha,x_\alpha), \end{equation}
hence by Proposition \ref{Prop:heatKe}.ii) the sequence $\{u_\alpha\}$ is bounded in energy, hence $u_\alpha\stackrel{E}{\rightharpoonup} u$ by definition. Since the semi-group property also implies \eqref{eq:semigroup} with $u$, $H$ and $x$ in place of $u_\alpha$, $H_\alpha$ and $x_\alpha$ respectively, the convergence \eqref{CvHeat} yields $\lim_{\alpha} \| du_\alpha\|_{L^2}={\sf Ch}(u),$ hence by definition $u_\alpha\stackrel{E}{\rightarrow} u.$ Proposition \ref{Prop:heatKe}.iii) implies that the sequence $\{|du_\alpha|\}$ is locally bounded in $L^\infty$ hence
with \cite[Proposition E.7]{CMT} we can conclude that
$$|du_\alpha|\stackrel{L^2}{\rightharpoonup} |du|.$$ This convergence, together with \eqref{CvHeat} and \eqref{eq:LYXa}, implies \eqref{LYX}. \endproof
\begin{rem}\label{rem:mieux} If there exists $\tau \in (0,T]$ such that $$\lim_{\alpha} \mbox{k}_\tau(M_\alpha,g_\alpha)=0,$$ then for any $x\in X$ and $t\in (0,\tau]$, the Li-Yau inequality
$$\left|dH(t,x,\cdot)\right|^2-H(t,x,\cdot)\frac{\partial}{\partial t}H(t,x,\cdot)\le \frac{n}{2t} H^2(t,x,\cdot)$$ holds $\mu$-a.e.~on $X$. \end{rem}
\section{Almost splitting maps and consequences of GH-closeness on functions}
In this section, we define $(k,\ensuremath{\varepsilon})$-splitting maps on Kato limits and prove some relevant properties. Such maps were introduced in \cite{ChCo96,C97,ChCo97} for the study of Ricci limit spaces and extensively used later in the study of limit spaces and $\RCD(K,N)$ spaces, see for instance \cite{CheegerNaber,CJN,Bamler,BPS}.
From now on, for any positive integer $k$, we let $\mathcal M_k(\mathbb{R})$ be the space of $k\times k$ matrices with real entries, $\mathcal{S}_k(\mathbb{R})\subset \mathcal M_k(\mathbb{R})$ be the subspace made of symmetric matrices, and we denote by $\|\cdot\|$ the matrix norm induced by the Euclidean norm $|\cdot|$, meaning that $\|A\|^2:=\sup\{ ^t(A \xi)A \xi \, : \, \text{$\xi \in \mathbb{R}^k$ such that $^t\xi \xi=1$}\}$ for any $A \in \mathcal M_k(\mathbb{R})$. Then the following holds.
\begin{lemma}\label{lem:GS} Assume that $A \in \mathcal{S}_k(\mathbb{R})$ is positive definite. Then there exists a unique lower triangular matrix $T \in \mathcal M_k(\mathbb{R})$ such that \begin{equation}\label{eq:prox1}T A {}^t T = \mathrm{Id}_k.\end{equation} Moreover, if there exists $\ensuremath{\varepsilon} \in (0,1/2)$ such that $A \in \mathcal{S}_k(\mathbb{R})$ satisfies \begin{equation}\label{eq:prox}
\|A-\mathrm{Id}_k\|< \ensuremath{\varepsilon}, \end{equation} then for some $C_k$ depending only on $k$, the matrix $T$ satisfies \begin{equation}\label{eq:prox3}
\| T-\mathrm{Id}_k\| < C_k \ensuremath{\varepsilon}. \end{equation}\end{lemma}
\begin{rem} The matrix ${}^tT$ is obtained by applying the Gram-Schmidt process. \end{rem}
\subsection{Almost splitting maps} For any infinitesimally Hilbertian metric measure space $(X,\mathsf{d},\mu)$, whenever a map $u=(u_1,\ldots,u_k) : B \to \mathbb{R}^k$ satisfies $u_i \in H^{1,2}(B,\mathsf{d},\mu)$ for any $i \in \{1,\ldots,k\}$ we define the Gram matrix map of $u$ as the $\mathcal{S}_k(\mathbb{R})$-valued map \[ G_u:=[ G_{i,j}] \qquad \text{where $G_{i,j} := \langle d u_i, d u_j\rangle$ for any $1\le i,j \le k$,} \]
and we set $|d G_u|^2:=\sum_{1\le i,j \le k} | d G_{i,j}|^2$. Note that if $T$ is a lower triangular $k \times k$ matrix and $\tilde{u}:=T\circ u$, then \begin{equation}\label{eq:gramtilde} G_{\tilde u} = T G_u {}^tT \qquad \text{$\mu$-a.e.~in $B$.} \end{equation}
\begin{D} Let $(X,\mathsf{d},\mu,o) \in \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$. Let $B \subset X$ be a ball of radius $r>0$, $k \in \{1,\ldots,n\}$ and $\ensuremath{\varepsilon}>0$. \begin{enumerate}
\item We call $(k,\ensuremath{\varepsilon})$-splitting of $B$ any harmonic map $u : B \to \mathbb{R}^k$ such that $\|du\|_{L^\infty(B)}\le 2$ and \begin{equation}\label{eq:split}
\fint_B \|G_u - \mathrm{Id}_k\| \mathop{}\!\mathrm{d} \mu < \ensuremath{\varepsilon}. \end{equation} \item We say that a $(k,\ensuremath{\varepsilon})$-splitting $u$ of $B$ is reinforced if
\[ \fint_B (\|G_u - \mathrm{Id}_k\| + r^2 |d G_u|^2) \mathop{}\!\mathrm{d} \mu < \ensuremath{\varepsilon}.\] \item We say that a (possibly reinforced) $(k,\ensuremath{\varepsilon})$-splitting $u$ of $B$ is balanced if \[ \fint_B G_u \mathop{}\!\mathrm{d} \mu = \mathrm{Id}_k. \] \end{enumerate} \end{D} \begin{rem}
Assumption $\|du\|_{L^\infty(B)}\le 2$ implies
\[ \sup_{1 \le i,j \le k} |G_{i,j}(y)|\le 4 \qquad \text{for $\mu$-a.e.~$y \in B$}.\] \end{rem}
\begin{rem}\label{rem:GSsplitting}
Condition \eqref{eq:split} implies that the symmetric matrix $A_u=\fint_{B}G_u\mathop{}\!\mathrm{d} \mu$ is \mbox{$\ensuremath{\varepsilon}$-close} to the identity $\mathrm{Id}_k$. As a consequence of Lemma \ref{lem:GS} applied with $A=A_u$, for any $\ensuremath{\varepsilon} \in (0,1/2)$ and any \mbox{$(k,\ensuremath{\varepsilon})$-splitting} $u: B \to \mathbb{R}^k$ there exists a lower triangular matrix $T$ with $\|T\|\leq 1+C_k\ensuremath{\varepsilon}$ such that the map \mbox{$\tilde u = T \circ u: B\to \mathbb{R}^k$} satisfies
\begin{equation}\label{eq:GSsplitting}\fint_{B} G_{\tilde u}\mathop{}\!\mathrm{d} \mu= \mathrm{Id}_k \qquad \text{and} \qquad \fint_{B} \|G_{\tilde u}- \mathrm{Id}_k \|\mathop{}\!\mathrm{d} \mu < (1+C_k\ensuremath{\varepsilon})^2\ensuremath{\varepsilon}.\end{equation}
\end{rem}
\begin{rem}\label{rem:reinforced} The definition of reinforced splitting is just a technical convenience. Indeed, by means of Bochner's formula and of the Hessian bound given in \cite[Proposition 3.5]{CMT}, one can prove that any splitting on a Riemannian manifold with a Kato bound is a reinforced splitting on a ball with smaller radius, and then show that this property for manifolds with a uniform Kato bound is stable under pointed measured Gromov-Hausdorff convergence. This implies, in particular, that if $u$ is a reinforced splitting of a ball $B$ in a Kato limit space, then the coefficients of the Gram matrix map $G_u$ all belong to $H^{1,2}_{loc}(B,\mathsf{d},\mu)$. \end{rem}
The next result provides an improvement of the local Lipschitz constant for splittings.
\begin{prop} \label{Prop:LipMieux}
Let $(M^n,g)$ be a closed Riemannian manifold, $B \subset M$ a ball of radius $r>0$, $k \in \{1,\ldots,n\}$, $\eta \in (0,1)$, $L>1$ and $u:B\to \mathbb{R}^k$ a harmonic map such that $\|du\|_{L^\infty(B)}\le L$. Let $G_u$ be the Gram matrix map of $u$. Assume that there exists $\delta \in (0,1/16n]$ such that
$$\mathrm{k}_{r^2}(M^n,g) < \delta, \quad \fint_{B}\| G_u -\mathrm{Id}_k\| \mathop{}\!\mathrm{d} \nu_g < \delta.$$
Then there exists $C(n,\eta,L)>0$ such that $\|du\|_{L^\infty(\eta B)}\le 1+C(n,\eta,L)\delta$. \end{prop}
\begin{proof}
In the proof of \cite[Proposition 7.5]{CMT}, use the gradient bound iii) in Proposition \ref{Prop:heatKe} to get $II \le C \delta$ instead of $II \le C \delta^{1/2}$. Apply the resulting statement to any function $u_\xi := \langle \xi,u\rangle$ with $\xi \in \mathbb{R}^k$ satisfying $|\xi|=1$, and conclude by taking $\xi=du/|du|$ pointwise. \end{proof}
\subsection{GH-closeness and harmonic functions} In the setting of uniform lower Ricci bounds, existence of almost splitting maps is closely related to mGH-closeness of a ball to a Euclidean ball. We show below that the same relation actually holds for Kato limit spaces.
Throughout this subsection, we let $k \in \{1,\ldots,n\}$ be fixed. We denote by $\|\cdot\|_1$ the $L_{1,1}$ matrix norm, namely $\|M\|_1=\sum_{i,j=1}^k |m_{i,j}|$ for any $M \in \mathcal M_k(\mathbb{R})$. Note that $\|\cdot\|\leq \|\cdot\|_1$.
\begin{theorem}\label{MetaThm} For all $\ensuremath{\varepsilon}, \eta,\lambda \in (0,1)$ such that $\lambda < \eta$ there exists $\nu$ depending only on $\ensuremath{\varepsilon}, \eta, \lambda,n, f,c$ such that if $(X,\mathsf{d}, \mu, o), (X', \mathsf{d}', \mu',o') \in \overline{\cK_\mathfrak{m}(n,f,c)}$, $x \in X$, $x' \in X'$ and $r \in (0, \sqrt{T}],$ are such that $$\dmGH(B_r(x),B_r(x'))< \nu r,$$
if $h: B_r(x) \to \mathbb{R}^k$ is a harmonic function satisfying $\|dh\|_{L^\infty(B_r(x))}\le L$ for some $L>1$, then there exists a harmonic function $h': B_{\eta r}(x') \to \mathbb{R}^k$ satisfying $\|dh'\|_{L^\infty(B_{\eta r}(x'))}\le L C(n,\eta)$ for some $C(n,\eta)\geq 1$ and: \begin{enumerate}
\item $\|h'\circ \Phi - h\|_{L^{\infty}(B_{\eta r}(x))}< \ensuremath{\varepsilon} r,$ where $\Phi$ is a $(\nu r)$-GH isometry between $B_r(x)$ and $B_r(x')$; \item for all $s \in [\lambda r,\eta r]$
$$\left\| \fint_{B_s(x)} G_h \mathop{}\!\mathrm{d}\mu - \fint_{B_s(x')}G_{h'}\mathop{}\!\mathrm{d}\mu' \right\| < \ensuremath{\varepsilon},$$ \item for all $A\in \mathcal M_k(\mathbb{R})$ and $s \in [\lambda r,\eta r]$, \[
\left| \fint_{B_{s}(x)}\|G_h-A\|_1 \mathop{}\!\mathrm{d} \mu - \fint_{B_{s}(x')}\|G_{h'}-A\|_1\mathop{}\!\mathrm{d} \mu'\right| \le \ensuremath{\varepsilon}. \]
\end{enumerate} \end{theorem}
The previous result is a consequence of the analysis made in \cite[Appendix A]{CMT}. For the sake of completeness, we provide a proof in Appendix \ref{app:meta}.
Theorem \ref{MetaThm} has the following direct consequence about existence of reinforced almost splittings.
\begin{prop} \label{prop:ExiSplit} For any $\ensuremath{\varepsilon}, \eta \in (0,1)$ there exists $\delta >0$ depending on $n,f,c, \ensuremath{\varepsilon}$ and $\eta$ such that if $(X,\mathsf{d}, \mu,o)\in \overline{\cK_\meas(n,f,c)}$, $x\in X$ and $r \in (0,\sqrt{T}]$ satisfy $$\dmGH(B_r(x),\mathbb{B}_r^k)< \delta r,$$ then there exists a reinforced $(k,\ensuremath{\varepsilon})$-splitting of $B_{\eta r}(x)$. \end{prop}
Moreover, Theorem \ref{MetaThm} implies that almost splittings are GH-isometries under the appropriate assumptions.
\begin{prop} \label{prop:GHisometry} For any $\ensuremath{\varepsilon}, \eta \in (0,1)$ there exist $\delta >0$ depending on $n, f, c, \ensuremath{\varepsilon}$ and $\eta$ and a constant $C(n,\eta)>0$, such that for all $(X,\mathsf{d}, \mu, o) \in \overline{\cK_\meas(n,f,c)}$, if $u: B_r(x) \to \mathbb{R}^k$ is a $(k,\ensuremath{\varepsilon})$-splitting and $$\dmGH(B_r(x), \mathbb{B}^k_r) < \delta r,$$ then $u$ is a $\left(C(n,\eta)\sqrt{\ensuremath{\varepsilon}}r\right)$-GH isometry between $B_{\eta r}(x)$ and $\mathbb{B}_{\eta r}^k(u(x))$. \end{prop}
The proof of this proposition relies on the following Euclidean result. \begin{lemma} \label{lem:harmGH} If $v\colon \mathbb{B}^k \to \mathbb{R}^k$ is a harmonic map such that
$$\fint_{\mathbb{B}^k} \| G_v-\mathrm{Id}_k\|\le \ensuremath{\varepsilon},$$ then $v\colon \mathbb{B}^k_{\eta} \to \mathbb{R}^k$ is a $\left(C(n,\eta)\sqrt{\ensuremath{\varepsilon}}\right)$-GH isometry between $\mathbb{B}^k_{\eta}$ and $\mathbb{B}_{\eta }^k(v(0))$. \end{lemma}
\begin{proof} We will assume that $\eta\ge 1/2$. Consider a cut-off function $\chi$ equal to $1$ on $\frac{1+\eta}{2}\mathbb{B}^k$ and vanishing outside $\frac{3+\eta}{4}\mathbb{B}^k$, with
$$\|\Delta \chi\|_{L^\infty}\le C(k,\eta).$$ By the Bochner formula we have:
$$|\Hess v|^2+\frac12 \Delta( G_v-\mathrm{Id}_k)=0,$$ then multiplying by $\chi$ and integrating by parts lead to the estimate:
$$\int_{\frac{1+\eta}{2}\mathbb{B}^k} |\Hess v|^2\le C(k,\eta) \int_{\mathbb{B}^k} \left\| G_v-\mathrm{Id}_k\right\|\le C(k,\eta) \ensuremath{\varepsilon}. $$
Using classical elliptic estimates, we obtain a $\mathcal C^2$ estimate on $v$:
$$\|\Hess v\|_{L^{\infty}(\eta\mathbb{B}^k)}\le C(k,\eta)\sqrt{\ensuremath{\varepsilon}}.$$ With Taylor's formula, we get that for any $x\in \eta\mathbb{B}^k$,
$$|v(x)-v(0)-dv(0)(x)|\le C(k,\eta)\sqrt{\ensuremath{\varepsilon}} \, \, \, \, \text{ and } \, \, \, \, |dv(0)-dv(x)|\le C(k,\eta)\sqrt{\ensuremath{\varepsilon}}.$$ But we also have
$$\fint_{\eta\mathbb{B}^k} \| G_v-\mathrm{Id}_k\|\le \eta^{-k}\fint_{\mathbb{B}^k} \| G_v-\mathrm{Id}_k\|\le 2^k \ensuremath{\varepsilon}.$$ Hence we find a point $x_o\in\eta \mathbb{B}^k$ such that
$$ \| G_v(x_o)-\mathrm{Id}_k\|\le 2^k \ensuremath{\varepsilon}.$$ Using the polar decomposition of $dv(x_o)$ we obtain a linear isometry $g\in \text{O}(k)$ such that
$$|dv(x_o)-g|\le C(k) \ensuremath{\varepsilon}.$$ Introducing the affine isometry $\iota:=v(0)+g$ we get that for any $x\in \eta\mathbb{B}^k$,
$$|v(x)-\iota(x)|\le C(k,\eta)\sqrt{\ensuremath{\varepsilon}}.$$ Setting $C'(n,\eta)= \max_{1\le k \le n} C(k,\eta)$ eventually leads to the desired result. \end{proof} \begin{proof}[Proof of Proposition \ref{prop:GHisometry}] We let $\ensuremath{\varepsilon}, \eta \in (0,1)$. We will assume that $\eta\ge 1/2$. With Theorem \ref{MetaThm}, we find some $\delta(n,\ensuremath{\varepsilon},\eta,f,c)>0$ such that if $(X,\mathsf{d}, \mu, o) \in \overline{\cK_\meas(n,f,c)}$, if $u: B_r(x) \to \mathbb{R}^k$ is a $(k,\ensuremath{\varepsilon})$-splitting and $$\dmGH(B_r(x), \mathbb{B}^k_r) < \delta r,$$ then there is some harmonic map $$v\colon \mathbb{B}^k_{(1+\eta)\frac{r}{2}}\to \mathbb{R}^k$$ and some $\delta r$-GH isometry $\Phi\colon B_r(x)\to\mathbb{B}^k_{r}$ such that \begin{equation}\label{approxGH}
\|v\circ\Phi-u\|_{L^\infty(B_{(1+\eta)\frac{r}{2}}(x))}\le \ensuremath{\varepsilon} r\end{equation} and
$$\left|\fint_{B_{(1+\eta)\frac{r}{2}}(x)} \|G_u-\mathrm{Id}_k\|_1\mathop{}\!\mathrm{d}\mu-\fint_{\mathbb{B}^k_{(1+\eta)\frac{r}{2}}} \|G_v-\mathrm{Id}_k\|_1\right|\le \ensuremath{\varepsilon}.$$
Observe that the doubling condition and the equivalence of the norms $\|\cdot \|$ and $\|\cdot\|_1$ yield
$$\fint_{B_{(1+\eta)\frac{r}{2}}(x)} \|G_u-\mathrm{Id}_k\|_1\mathop{}\!\mathrm{d}\mu\le A(n) \fint_{B_{r}(x)} \|G_u-\mathrm{Id}_k\|_1\mathop{}\!\mathrm{d}\mu\le C(n)\ensuremath{\varepsilon}$$
for some $C(n)$ only depending on $n$. Since $\|\cdot \|\leq \|\cdot \|_1$, we get
$$\fint_{\mathbb{B}^k_{(1+\eta)\frac{r}{2}}} \|G_v-\mathrm{Id}_k\|\le (1+C(n))\ensuremath{\varepsilon}.$$ Hence according to the previous lemma, we know that $v$ is a $\left(C(n,\eta)\sqrt{\ensuremath{\varepsilon}}r\right)$-GH isometry between $\mathbb{B}^k_{\eta r}$ and itself. Using \eqref{approxGH}, we obtain the desired conclusion about the restriction of $u$ to $B_{\eta r}(x)$.
\end{proof}
\subsection{Propagation of reinforced almost splittings} The next result is an important propagation property of reinforced splittings.
\begin{prop}[Propagation of reinforced splittings]\label{prop:propagation} Consider $(X,\mathsf{d},\mu,o) \in \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$. There exists $C>0$ depending only on $n$ such that for any $k \in \{1,\ldots,n\}$ and $\ensuremath{\varepsilon}\in(0,1)$, if $u$ is a reinforced $(k,\ensuremath{\varepsilon})$-splitting of a ball $B_r(x) \subset X$ with $r \in (0,\sqrt{T})$, then there exists a Borel set $\Omega_\ensuremath{\varepsilon} \subset B_{r/2}(x)$ such that:
\begin{enumerate}
\item[(A)]\label{A} $\mu(B_{r/2}(x) \backslash \Omega_\ensuremath{\varepsilon}) \le C\sqrt{\ensuremath{\varepsilon}}\mu(B_{r/2}(x))$,
\item[(B)]\label{B} the restriction of $u$ to $B_s(y)$ is a reinforced $(k,\sqrt{\ensuremath{\varepsilon}})$-splitting for any $y \in \Omega_\ensuremath{\varepsilon}$ and $s \in (0,r/2)$,
\item[(C)]\label{C} for $\mu$-a.e.~$y \in \Omega_\ensuremath{\varepsilon}$, for any $\xi \in \mathbb{R}^k$, \begin{equation}\label{eq:bilipGram}
(1-\sqrt{\ensuremath{\varepsilon}})|\xi|^2\le {}^t \xi G_u(y)\xi\le (1+\sqrt{\ensuremath{\varepsilon}})|\xi|^2, \end{equation}
\item[(D)]\label{D} any $y \in \Omega_\ensuremath{\varepsilon}$ is such that any $(Y,\mathsf{d}_Y,\mu_Y,y) \in \mathrm{Tan}_\mathfrak{m}(X,y)$ splits off a factor $\mathbb{R}^k$.
\end{enumerate}
\end{prop}
\begin{proof}Let $x \in X$ and $r \in (0,\sqrt{T})$. Assume that $u:B_r(x)\to \mathbb{R}^k$ is a reinforced $(k,\ensuremath{\varepsilon})$-splitting. Set \[\Omega_\ensuremath{\varepsilon}:=\{y \in B_{r/2}(x)\, : \, M_{r/2}v(y) \le \sqrt{\ensuremath{\varepsilon}}\}\]
where \[v:=\|G_u - \mathrm{Id}_k\| + r^2|d G_u|^2\]
and
\[
M_{r/2}v(y):= \sup_{s \in (0,r/2)} \fint_{B_s(y)} v \mathop{}\!\mathrm{d} \mu.
\] The definition of $\Omega_\ensuremath{\varepsilon}$ is made so that (B) is satisfied. Let us prove (A). For any $y \in B_{r/2}(x)\backslash \Omega_{\ensuremath{\varepsilon}}$ there exists $s_y \in (0,r/2)$ such that $\mu(B_{s_{y}}(y)) < (\sqrt{\ensuremath{\varepsilon}})^{-1} \int_{B_{s_y}(y)} v \mathop{}\!\mathrm{d} \mu$. By the Vitali covering lemma, there exists a countable family of points $\{y_i\} \subset B_{r/2}(x)\backslash \Omega_\ensuremath{\varepsilon}$ such that the balls $\{B_{s_{y_i}}(y_i)\}$ are pairwise disjoint and $B_{r/2}(x)\backslash \Omega_\ensuremath{\varepsilon} \subset \bigcup_i B_{5s_{y_i}}(y_i)$. Then, with a constant $C$ depending only on $n$ which may change from line to line,
\begin{align*}
\mu(B_{r/2}(x)\backslash \Omega_\ensuremath{\varepsilon}) & \le \sum_i \mu(B_{5s_{y_i}}(y_i)) \le C \sum_i \mu(B_{s_{y_i}}(y_i)) < C \frac{1}{\sqrt{\ensuremath{\varepsilon}}} \sum_i \int_{B_{s_{y_i}}(y_i)} v \mathop{}\!\mathrm{d} \mu\\
&\le C \frac{1}{\sqrt{\ensuremath{\varepsilon}}} \int_{B_r(x)} v \mathop{}\!\mathrm{d} \mu \le C \sqrt{\ensuremath{\varepsilon}} \mu(B_r(x)) \le C \sqrt{\ensuremath{\varepsilon}} \mu(B_{r/2}(x))
\end{align*} where we have used the doubling condition to get the second and the last inequalities, and the fact that $u$ is a reinforced $(k,\ensuremath{\varepsilon})$-splitting of $B_r(x)$ to get the fifth one. This shows (A).
Let us prove (C). It follows from the Lebesgue differentiation theorem for doubling metric measure spaces (see e.g. \cite{Heinonen}) that the set of Lebesgue points of $v$ has full measure in $\Omega_\ensuremath{\varepsilon}$. At any Lebesgue point $y \in \Omega_\ensuremath{\varepsilon}$ of $v$ we know that \[
\|G_u(y) - \mathrm{Id}_k\| \le v(y) = \lim\limits_{s \downarrow 0} \fint_{B_s(y)} v \mathop{}\!\mathrm{d} \mu \le M_{r/2}v(y) \le \sqrt{\ensuremath{\varepsilon}}, \] which yields \eqref{eq:bilipGram}.
We are left with proving (D) namely that for any $y \in \Omega_\ensuremath{\varepsilon}$, any $(Y,\mathsf{d}_Y,\mu_Y,y) \in \mathrm{Tan}_\mathfrak{m}(X,y)$ splits off a factor $\mathbb{R}^k$. To this aim, we are going to build a harmonic map $\tilde{u}_\infty : Y \to \mathbb{R}^k$ such that $G_{\tilde{u}_\infty}(z)=\mathrm{Id}_k$ for $\mu_Y$-a.e.~$z \in Y$.
For any $s \in (0,r/2)$, set \[ \overline{G}_s := \fint_{B_s(y)} G_u \mathop{}\!\mathrm{d} \mu. \] Following a classical argument (see \cite[(4.21)]{Cheeger}, for instance) involving Hölder's inequality, the doubling condition, and the $\uplambda$-Poincaré inequality, \begin{align*}
\| \overline{G}_s - \overline{G}_{s/2}\| & \le \fint_{B_{s/2}(y)} \|G_u - \overline{G}_s\| \mathop{}\!\mathrm{d} \mu\\
& \le A(n) \fint_{B_{s}(y)} \|G_u - \overline{G}_s\| \mathop{}\!\mathrm{d} \mu\\
& \le A(n) \left( \fint_{B_{s}(y)} \|G_u - \overline{G}_s\|^2 \mathop{}\!\mathrm{d} \mu \right)^{1/2}\\
& \le A(n) \uplambda s \left( \fint_{B_{s}(y)} |d G_u|^2 \mathop{}\!\mathrm{d} \mu \right)^{1/2} \le A(n) \uplambda s \frac{\ensuremath{\varepsilon}^{1/4}}{r} \cdot \end{align*} This shows that $\{\overline{G}_s\}_{0<s<r/2}$ is a Cauchy sequence, hence it admits a limit $\overline{G}$ as $s \downarrow 0$. Since \[
\|\overline{G} - \mathrm{Id}_k\| = \lim\limits_{s \to 0} \|\overline{G}_s - \mathrm{Id}_k\| \le \lim\limits_{s \to 0} \fint_{B_s(y)}\|G_u - \mathrm{Id}_k\| \le \sqrt{\ensuremath{\varepsilon}}, \]
we know from Remark \ref{rem:GSsplitting} that there exists a lower triangular $k\times k$ matrix $T$ such that $T\overline{G} {}^t T = \mathrm{Id}_k$ and $\|T\|\le C(n)$ for some generic constant $C(n)$ only depending on $n$. Moreover, for any $s \in (0,r/2)$, the previous computation yields \[
\fint_{B_s(y)} \|G_u - \overline{G}_s\| \mathop{}\!\mathrm{d} \mu \le A(n)\uplambda s \frac{\ensuremath{\varepsilon}^{1/4}}{r}, \] and a telescopic argument gives \[
\|\overline{G}_s - \overline{G}\| \le C(n) s\frac{\ensuremath{\varepsilon}^{1/4}}{r}, \] hence $\tilde{u}:=T\circ u$ satisfies \begin{equation}\label{eq:control}
\fint_{B_s(y)} \|G_{\tilde{u}} - \mathrm{Id}_k\| \le C(n) s\frac{\ensuremath{\varepsilon}^{1/4}}{r}\, \cdot \end{equation}
Now we let $\{s_\alpha\} \subset (0,+\infty)$ be such that $s_\alpha \downarrow 0$ and $\{(X,\mathsf{d}_\alpha := s_\alpha^{-1} \mathsf{d}, \mu_\alpha := \mu(B_{s_\alpha}(y))^{-1} \mu, y)\}$ converges to $(Y,\mathsf{d}_Y,\mu_Y,y)$ in the pointed measured Gromov-Hausdorff topology. Then the maps \[ u_\alpha :=\frac{1}{s_\alpha} (u-u(y)) : B_{r/2s_\alpha}^{\mathsf{d}_\alpha}(y) \to \mathbb{R}^k \] are all harmonic and locally $2$-Lipschitz. By \cite[Proposition E.10]{CMT}, up to extracting a subsequence we may assume that $\{u_\alpha\}$ converges uniformly on compact sets and locally strongly in energy to some harmonic map \[ \tilde{u}_\infty : Y \to \mathbb{R}^k. \] Then the local strong convergence in energy and \eqref{eq:control} imply that for any $R>0$, \begin{align*}
\fint_{B_R^{\mathsf{d}_{Y}}(y)} \|G_{\tilde{u}_\infty}-\mathrm{Id}_k\| \mathop{}\!\mathrm{d} \mu_Y & = \lim\limits_\alpha \fint_{B_{R}^{\mathsf{d}_\alpha}(y)} \|G_{u_\alpha} - \mathrm{Id}_k\| \\
& = \lim\limits_\alpha \fint_{B_{Rs_\alpha}(y)} \|G_u - \mathrm{Id}_k\| = 0. \end{align*} Since $(Y,\mathsf{d}_Y,\mu_Y)$ is an $\RCD(0,n)$ space, the Functional Splitting Lemma \cite[Lemma 1.21]{AntoBrueSemola} then yields the conclusion.
\end{proof}
\begin{rem}\label{rk:prop} The choice of $r/2$ in the previous proof is arbitrary: we can replace it with $\sigma r$ for $\sigma \in (0,1)$ and get the same result. \end{rem}
\section{Rectifiability of Kato limits}
Let us begin this section with recalling the definitions of bi-Lipschitz map and bi-Lipschitz chart.
\begin{D} Let $(X,\mathsf{d})$ be a metric space, $k$ a positive integer, and $\ensuremath{\varepsilon} \in (0,1)$. We say that a map $\phi:X\to\mathbb{R}^k$ is: \begin{enumerate}
\item bi-Lipschitz onto its image if there exists $C\ge 1$ such that $C^{-1} \mathsf{d}(x,y) \le |\phi(x)-\phi(y)|\le C\mathsf{d}(x,y)$ for any $x,y \in X$,
\item $(1+\ensuremath{\varepsilon})$-bi-Lipschitz onto its image if $(1+\ensuremath{\varepsilon})^{-1} \mathsf{d}(x,y) \le |\phi(x)-\phi(y)|\le (1+\ensuremath{\varepsilon}) \mathsf{d}(x,y)$ for any $x,y \in X$. \end{enumerate} Moreover, we call $(1+\ensuremath{\varepsilon})$-bi-Lipschitz chart from $X$ to $\mathbb{R}^k$ any couple $(V,\phi)$ where $V$ is a Borel set of $X$ and $\phi : V \to \mathbb{R}^k$ is a $(1+\ensuremath{\varepsilon})$-bi-Lipschitz map onto its image. \end{D}
We now provide a definition of rectifiability for metric measure spaces which is a natural variant of the one introduced in \cite[Definition 5.3]{CheegerColdingIII} and which has notably been used in the setting of $\RCD(K,N)$ spaces \cite{DePhilippisMarcheseRindler,KellMondino,GigliPas}.
\begin{D} We say that a metric measure space $(X,\mathsf{d},\mu)$ is rectifiable if there exists a countable collection $\{(k_i,V_i,\phi_i)\}_i$ where $\{V_i\}$ are Borel subsets covering $X$ up to a $\mu$-negligible set, $\{k_i\}$ are positive integers, and $\phi_i : V_i \to \mathbb{R}^{k_i}$ is a bi-Lipschitz map such that $(\phi_i)_\#(\mu \measrestr V_i) \ll \mathcal{H}^{k_i}$ for any $i$. \end{D}
According to this definition, our goal in this section is to prove that Kato limit spaces are rectifiable. Actually, we prove a more precise result which involves the so-called $k$-regular sets.
\begin{D} For any $k \in \{1,\ldots,n\}$, we define the $k$-regular set of a space $(X,\mathsf{d},\mu,o) \in \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$ as \[ \cR_k := \{ x \in X : \Tan_\mathfrak{m}(X,x)=\{(\mathbb{R}^k,\mathsf{d}_e,\mathcal{H}^k,0)\}\}. \] \end{D}
Our main result in this section is the following.
\begin{theorem}\label{th:mainrect} Let $(X,\mathsf{d},\mu,o) \in \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$. Then the following hold. \begin{enumerate} \item[\normalfont(A)]\label{A} Up to a negligible set, the space $X$ coincides with the union of its $k$-regular sets: \begin{equation}\label{eq:essentialdecompo}\mu\left( X \backslash \bigcup_{k = 1}^n \mathcal{R}_k\right) = 0.\end{equation} \item[\normalfont(B)]\label{B} For any $k \in \{1,\ldots,n\}$ and $\ensuremath{\varepsilon} \in (0,1)$, there exists a countable family of $(1+\ensuremath{\varepsilon})$-bi-Lipschitz charts $\{(V_i^\ensuremath{\varepsilon},\phi_i^\ensuremath{\varepsilon})\}$ from $X$ to $\mathbb{R}^k$ such that \[\mu\left(\cR_k \backslash \bigcup_{i} V_i^\ensuremath{\varepsilon}\right)=0\] and $(\phi_i^\ensuremath{\varepsilon})_\#(\mu \measrestr V_i^\ensuremath{\varepsilon}) \ll \mathcal{H}^k$ for any $i$. \end{enumerate} \end{theorem}
We call \eqref{eq:essentialdecompo} the essential decomposition of $X$. Rectifiability of Kato limit spaces as stated in Theorem \ref{th:main1} is then an obvious corollary of Theorem \ref{th:mainrect}.
The rest of this section is devoted to proving Theorem \ref{th:mainrect}. Our proof is inspired by \cite{GigliPas,BPS} but contains some simplifications over the arguments presented there. To keep the notations short, we write $Y \in \Tan_\mathfrak{m}(X,x)$ instead of $(Y,\mathsf{d}_Y,\mu_Y,x) \in \Tan_{\mathfrak{m}}(X,x)$.
\subsection{Essential decomposition}
In this subsection, we prove (A) in Theorem \ref{th:mainrect}.
\begin{proof}[Proof of $\mathrm{(A)}$ in Theorem \ref{th:mainrect}] First observe that the doubling condition implies the iterated tangent property \cite{LeDonne,GigliMondinoRajala}, meaning that there exists a Borel set $E$ such that $\mu(X\backslash E)=0$ and for any $x\in E$, any $Y \in \Tan_\mathfrak{m}(X,x)$ and any $y \in Y$, it holds \begin{equation}\label{eq:iterated} \Tan_\mathfrak{m}(Y,y) \subset \Tan_\mathfrak{m}(X,x). \end{equation} Take $x \in E$ and assume that for some $l \in \{0,\ldots,n\}$ there exists a pointed $\RCD(0,n-l)$ space $Z$ such that $\mathbb{R}^l \times Z \in \Tan_\mathfrak{m}(X,x)$. If $Z$ is not reduced to a singleton, Gigli's splitting theorem \cite{GigliSplitting} ensures that there exists $z \in Z$ such that any $Z_z \in \Tan_\mathfrak{m}(Z,z)$ splits off an $\mathbb{R}$ factor, so that \eqref{eq:iterated} implies that there exists a pointed $\RCD(0,n-l-1)$ space $Z'$ such that $\mathbb{R}^{l+1} \times Z' \in \Tan_\mathfrak{m}(X,x)$. Then \[ \mathbb{R}^{d(x)} \in \Tan_\mathfrak{m}(X,x) \] where \begin{align*} d(x) & :=\max\{ 1 \le l \le n \, : \, \, \text{there exists a pointed $\RCD(0,n)$ space $Z$}\\ & \qquad \qquad \qquad \qquad \quad \qquad \qquad \qquad\text{such that $\mathbb{R}^l \times Z \in \Tan_\mathfrak{m}(X,x)$}\}. \end{align*} Setting \begin{align*} i(x) & :=\min\{ 1 \le l \le n \, : \, \text{there exists a pointed $\RCD(0,n)$ space $Z$}\\ & \qquad \qquad \qquad \qquad \quad \text{which splits off no $\mathbb{R}$ such that $\mathbb{R}^l \times Z \in \Tan_\mathfrak{m}(X,x)$}\}, \end{align*} we obtain (A) in Theorem \ref{th:mainrect} as a consequence of \begin{equation}\label{eq:1'} i(x) = d(x) \qquad \text{for $\mu$-a.e.~$x \in E$.} \end{equation} Let us prove \eqref{eq:1'} by contradiction, assuming \[ \mu(\{x \in E : i(x) < d(x)\}) >0. 
\] Set \[ \mathfrak{J}_k :=\{x \in E \, : \, d(x)=k\, \,\text{and} \,\,i(x)<k\} \] for any $1\le k \le n$, and note that these sets are measurable as can be proved following the arguments of \cite[Lemma 6.1]{MondinoNaber}. Since \[ \{x \in E : i(x) < d(x)\} = \bigcup_{1\le k \le n} \mathfrak{J}_k \] there exists $k \in \{1,\ldots,n\}$ such that \[ \mu(\mathfrak{J}_k)>0. \] Then $\mathfrak{J}_k$ admits a point with density $1$, that is to say a point $x \in \mathfrak{J}_k$ such that \begin{equation}\label{eq:1} \lim\limits_{r \downarrow 0} \frac{\mu(B_r(x) \cap \mathfrak{J}_k)}{\mu(B_r(x))} = 1. \end{equation} Since $\mathbb{R}^{k} \in \Tan_\mathfrak{m}(X,x)$, there exist two infinitesimal sequences $\{\ensuremath{\varepsilon}_i\}$ and $\{r_i\}$ such that for any $i$ there exists a $(k,\ensuremath{\varepsilon}_i)$-splitting $u_i$ of $B_{2r_i}(x)$. By propagation of splittings given in Proposition \ref{prop:propagation}, for any $i$ there exists a Borel set $\Omega_i \subset B_{r_i}(x)$ such that \begin{equation}\label{eq:density}
\frac{\mu(B_{r_i}(x)\backslash \Omega_i)}{\mu(B_{r_i}(x))} \le C \sqrt{\ensuremath{\varepsilon}_i} \end{equation} and for any $y \in \Omega_i$ any $Y \in \Tan_\mathfrak{m}(X,y)$ splits off a factor $\mathbb{R}^{k}$. As a consequence $i(y) \ge k$. This yields $\Omega_i \cap \mathfrak{J}_k = \emptyset$ and \eqref{eq:density} implies \begin{equation*} \lim\limits_{i \to \infty} \frac{\mu(\Omega_i)}{\mu(B_{r_i}(x))} = 1, \end{equation*} hence we get a contradiction with \eqref{eq:1}. \end{proof}
\subsection{Rectifiability of the regular sets: our key result}
In this subsection, with a view to proving (B) in Theorem \ref{th:mainrect}, we establish the next key technical proposition, where we make use of the almost $k$-regular sets $(\cR_k)_{\delta,r}\subset X$, defined as \[ (\cR_k)_{\delta,r}:=\left\{x \in X \, :\, \dmGH(B_s(x), \mathbb{B}^k_s)\le \delta s \, \, \, \, \text{for any $s\in (0,r]$}\right\} \] for any $\delta,r>0$. Note that each $(\cR_k)_{\delta,r}$ is a closed set. We also define $$(\cR_k)_{\delta}:=\bigcup_{r>0}(\cR_k)_{\delta,r} \subset \left\{x \in X \, : \,
\dmGH(B^Y_1(x), \mathbb{B}^k_1)\le \delta\, \, \, \, \text{for any $Y \in \mathrm{Tan}_\mathfrak{m}(X,x)$} \right\}$$ for any $\delta >0$, and we point out that for any $0<\delta'<\delta$, \[ (\cR_k)_{\delta} \supset \left\{x \in X \, : \,
\dmGH(B^Y_1(x), \mathbb{B}^k_1)\le \delta'\, \, \, \, \text{for any $Y \in \mathrm{Tan}_\mathfrak{m}(X,x)$} \right\}.
\] Moreover, we have
$$\cR_k=\bigcap_{\delta}(\cR_k)_{\delta}.$$
\begin{prop}\label{prop:bilip} Let $(X,\mathsf{d},\mu,o) \in \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$, $k \in \{1,\ldots,n\}$ and $\ensuremath{\varepsilon} \in (0,1/2)$ be given. Then there exists $\delta>0$ such that for any $x \in (\cR_k)_{\delta,16r}$ with $r\le \sqrt{T}/16$ and any $s\in (0,r]$ there exist a function $u\colon B_{2s}(x)\rightarrow \mathbb{R}^k$ and a Borel set $V \subset B_{s}(x)$ such that: \begin{enumerate}[i)] \item $u$ is a $(k,\ensuremath{\varepsilon})$-splitting of $B_{2s}(x)$; \item $\mu(B_{s}(x) \backslash V) \le \ensuremath{\varepsilon} \mu(B_{s}(x))$; \item $u$ is an $(\ensuremath{\varepsilon}\,\sigma)$-GH isometry between $B_{\sigma}(y)$ and $u(y)+\mathbb{B}_{\sigma}^k$ for any $y\in V\cap(\cR_k)_{\delta,16r}$ and any $\sigma\le s/2$; \item $u$ is $(1+\ensuremath{\varepsilon})$-bi-Lipschitz on $V\cap(\cR_k)_{\delta,16r}$; \item $u_\#\left(\mathbf{1}_{V\cap(\cR_k)_{\delta,16r}}\mathop{}\!\mathrm{d}\mu\right)\ll \mathcal H^k$. \end{enumerate} \end{prop}
In the proof of the last point of this proposition, we use a fundamental result of De Philippis and Rindler \cite[Corollary 1.12]{DePhiRind} which requires the terminology of currents. For the interested reader, we refer to \cite{Federer} or \cite{Simon}.
Roughly speaking a current in $\mathbb{R}^k$ is a differential form whose coefficients are distributions. To be more precise, let $d$ be a positive integer. A $d$-dimensional current $T$ on $\mathbb{R}^k$ is a continuous linear map $$T\colon \mathcal C_0^\infty\left(\mathbb{R}^k,\Lambda^d(\mathbb{R}^k)^*\right)\rightarrow \mathbb{R}.$$ The differential of a $d$-dimensional current $T$ is the $(d-1)$-dimensional current $dT$ defined by $$dT(\omega):=T(d\omega)$$ for any $\omega \in \mathcal C_0^\infty\left(\mathbb{R}^k,\Lambda^{d-1}(\mathbb{R}^k)^*\right)$. Here we consider only currents with finite mass, that is to say differential forms whose coefficents are finite Radon measures. Any current with finite mass admits a canonical decomposition \begin{equation} \label{decomposition}
T(\cdot)= \int_{\mathbb{R}^k} \langle \cdot, \vec T \rangle \mathop{}\!\mathrm{d} \|T\| \end{equation}
where $\|T\|$ is a Radon measure and $\vec T$ is a $\|T\|$-integrable unitary vector field. In this regard, we shall make use of the following easy lemma, whose proof is omitted for brevity.
\begin{lemma}\label{lem:courants}
Let $\nu$ be a Radon measure on $\mathbb{R}^d$ and $\vec V$ a square $\nu$-integrable vector field such that $|\vec V (x)|>0$ for $\nu$-a.e.~$x \in \mathbb{R}^k$. Let $T$ be the one-dimensional current on $\mathbb{R}^k$ defined by \[ T(\omega) = \int_{\mathbb{R}^k} \langle \vec\omega, \vec V \rangle \mathop{}\!\mathrm{d} \nu \]
for any $\omega \in \mathcal C_0^\infty\left(\mathbb{R}^k,\Lambda^1(\mathbb{R}^k)^*\right)$. Then $\|T\|$ is absolutely continuous with respect to $\nu$ with density $|\vec V|$ and $\vec T(x) = \vec V(x)/|\vec V(x)|$ for $\nu$-a.e.~$x \in \mathbb{R}^k$. \end{lemma}
A current $T$ with finite mass such that $dT$ has finite mass too is called a normal current. We recall the result by De Philippis and Rindler that we shall use \cite[Corollary 1.12]{DePhiRind}.
\begin{theorem}\label{th:DePhiRin}
Let $\nu$ be a Radon measure on $\mathbb{R}^k$, and let $\{T_i\}_{1\le i \le k}$ be normal one-dimensional currents on $\mathbb{R}^k$ such that $\nu \ll \|T_i\|$ for any $i$, and the vectors $\{\vec T_i (x)\}_{1\le i \le k}$ are independent for $\nu$-a.e.~$x \in \mathbb{R}^k$. Then $\nu \ll \mathcal H^k$. \end{theorem}
We are now in a position to prove Proposition \ref{prop:bilip}.
\proof We first prove the first three assertions which are direct consequences of the propagation property of splittings we established in Section 3. Let us set \[ \tau(n):=\min\left\{ 1, \frac14 C^{-1}(n,1/2), (A(n)\sqrt{C'(n)})^{-1}\right\} \] where $A(n)$ is given by the doubling condition \eqref{eq:doubling}, $C(n,1/2)$ is given by Proposition \ref{prop:GHisometry}, and $C'(n)$ is given by Proposition \ref{prop:propagation}. According to Proposition \ref{prop:GHisometry}, there is some $\delta_1$ such that when $y\in (\cR_k)_{\delta_1,16r}$, $\sigma\in (0,4r]$ and $v\colon B_{4\sigma}(y)\rightarrow \mathbb{R}^k$ is a $(k,[\tau(n)\ensuremath{\varepsilon}]^2)$-splitting of $B_{4\sigma}(y)$, then $v$ is an $(\ensuremath{\varepsilon}\,\sigma)$-GH isometry between $B_{2\sigma}(y)$ and $v(y)+\mathbb{B}_{2\sigma}^k$.
According to Proposition \ref{prop:ExiSplit}, there is a $\delta\le \delta_1$ such that if $x\in (\cR_k)_{\delta,16r}$ and $s\le r$ then there is $u\colon B_{8s}(x)\rightarrow \mathbb{R}^k$ a reinforced $(k,[\tau(n)\ensuremath{\varepsilon}]^4)$-splitting of $B_{8s}(x)$.
Now let $x\in (\cR_k)_{\delta,16r}$ and let $s\in (0,r]$ and $u\colon B_{8s}(x)\rightarrow \mathbb{R}^k$ be a reinforced $(k,[\tau(n)\ensuremath{\varepsilon}]^4)$-splitting of $B_{8s}(x)$. With Proposition \ref{prop:propagation}, we find $\Omega\subset B_{4s}(x)$ such that $$\mu(B_{4s}(x) \backslash \Omega) \le C'(n)\tau^2(n)\ensuremath{\varepsilon}^2\,\mu(B_{4s}(x))$$ and such that for any $y\in \Omega$ and any $\sigma\le s$, $u$ is a $(k,[\tau(n)\ensuremath{\varepsilon}]^2)$-splitting of $B_{4\sigma}(y)$. If furthermore $y\in (\cR_k)_{\delta,16r}$, then $u$ is an $(\ensuremath{\varepsilon}\,\sigma)$-GH isometry between $B_{2\sigma}(y)$ and $u(y)+\mathbb{B}_{2\sigma}^k$.
We set $V:=\Omega\cap B_s(x)$. Then \begin{align*}\mu(B_{s}(x) \backslash V)& \le \mu(B_{4s}(x) \backslash \Omega)\\ &\le C'(n)\tau^2(n)\ensuremath{\varepsilon}^2\,\mu(B_{4s}(x))\\ &\le A^2(n)C'(n)\tau^2(n)\ensuremath{\varepsilon}^2\,\mu(B_{s}(x))\\ &\le \ensuremath{\varepsilon}\mu(B_{s}(x)).\end{align*}
The fourth assertion is a consequence of the third one. Indeed, if $y,z\in V\cap(\cR_k)_{\delta,16r}$, define $2\sigma:=\mathsf{d}(y,z)\le 2s$. Then, since $u$ is an $(\ensuremath{\varepsilon}\,\sigma)$-GH isometry between $B_{2\sigma}(y)$ and $u(y)+\mathbb{B}_{2\sigma}^k$, we get
$$\left| |u(y)-u(z)|-\mathsf{d}(y,z)\right|\le \ensuremath{\varepsilon} \sigma=\ensuremath{\varepsilon} \frac{\mathsf{d}(y,z)}{2}$$ from which follows the desired result.
In order to prove the last point we only need to show that if $K$ is a compact subset of $V\cap(\cR_k)_{\delta,16r} \subset B_s(x)$ with $\mu(K)>0$ then $$u_\#\left(\mathbf{1}_{K}\mathop{}\!\mathrm{d}\mu\right)\ll \mathcal H^k.$$
\textbf{Step 1.} To prepare the application of Theorem \ref{th:DePhiRin}, let us introduce a series of Radon measures and discuss some properties of these measures. Set $B:=B_{2s}(x)$. Choose $\{\chi_\ell\} \subset \Lip_c(B,[0,1])$ such that $\chi_\ell \downarrow \mathbf{1}_K$: for instance, for any $\ell$ we may choose $\chi_\ell(\cdot):=\big(1-\ell \mathsf{d}(K,\cdot)\big)_+$, which has support $K_\ell=\left\{ \mathsf{d}(K,\cdot)\le \frac1\ell\right\}$. For convenience we also set $\chi_\infty:=\mathbf{1}_K$. We define the following Radon measures on $\mathbb{R}^k$: $$\nu_{i,j}^\ell:=u_\#\left(\chi_\ell \Gamma(u_i,u_j)\right)\qquad \text{and}\qquad \nu^\ell:=u_\#\left(\chi_\ell \mu\right)$$ for $i,j\in \{1,\dots,k\}$ and $\ell\in \ensuremath{\mathbb N}\cup\{\infty\}$ and we also set $$\nu:=u_\#\left(\mathbf{1}_B\mu\right).$$ The coefficients \[ \frac{\mathop{}\!\mathrm{d}\Gamma(u_i,u_j)}{\mathop{}\!\mathrm{d} \mu} = \langle d u_i, d u_j \rangle, \qquad i,j\in \{1,\dots,k\}, \] of the Gram matrix map of $u=(u_1,\ldots,u_k)$ are bounded Borel functions, hence there exist bounded Borel functions $\rho_{i,j}^\ell$ such that for any $i,j\in \{1,\dots,k\}$ and $\ell\in \ensuremath{\mathbb N}\cup\{\infty\}$, $$\mathop{}\!\mathrm{d}\nu_{i,j}^\ell=\rho_{i,j}^\ell \mathop{}\!\mathrm{d}\nu^\ell.$$ There are also bounded Borel functions $J^\ell$ on $B$ such that $$\mathop{}\!\mathrm{d}\nu^\ell=J^\ell \mathop{}\!\mathrm{d}\nu$$ and $J^{\ell+1}\le J^{\ell}\le 1$ for any $\ell \in \mathbb{N} \cup \{\infty\}$. Moreover,
$$\|J^\ell-J^\infty\|_{L^1(\mathop{}\!\mathrm{d}\nu)}=\int_{\mathbb{R}^k} (J^{\ell}- J^\infty)\mathop{}\!\mathrm{d}\nu=\int_B (\chi_\ell-\chi_\infty)\mathop{}\!\mathrm{d}\mu\le \mu(K_\ell\setminus K) \to 0$$ so that \begin{equation}\label{eq:yieldsalso}
\lim_{\ell\to+\infty} \|J^\ell-J^\infty\|_{L^1(\mathop{}\!\mathrm{d}\nu)}=0.\end{equation}
\textbf{Step 2.} For any $\ell \in \mathbb{N} \cup \{\infty\}$, let $\lambda^\ell$ be the lowest eigenvalue of the symmetric matrix $\left(\rho_{i,j}^\ell\right)$. Our goal is now to establish
\begin{equation}\label{convergence2}\lim_{\ell\to+\infty}
\|\lambda^\ell- \lambda^\infty\|_{L^1(\mathop{}\!\mathrm{d}\nu^\infty)}=0\end{equation} and for $\nu^\infty$-a.e. $p\in \mathbb{R}^k$,
\begin{equation}\label{ineq} \lambda^\infty(p)\ge 1-\epsilon. \end{equation}
For any $\xi=(\xi_1,\dots,\xi_k)\in \mathbb{R}^k$ such that $^t\xi \xi=1$ and any $\ell \in \mathbb{N} \cup \{\infty\}$, we introduce
$$\rho_{\xi}^\ell:=\sum_{i,j}\xi_i\xi_j\rho_{i,j}^\ell.$$ Setting $$u_\xi:=\langle \xi,u\rangle$$ we have
$$ \rho_{\xi}^\ell = \frac{\mathop{}\!\mathrm{d} u_\#\left(\chi_\ell \Gamma(u_\xi,u_\xi)\right)}{\mathop{}\!\mathrm{d} \nu^\ell}.$$ In particular, $\{ \rho_{\xi}^\ell(p) \}_{\ell}$ is a non-negative, non-increasing sequence for $\nu^\infty$-a.e.~$p \in \mathbb{R}^k$. Arguing as we did to get \eqref{eq:yieldsalso} yields
$$\lim_{\ell\to+\infty} \|J^\ell\rho_{\xi}^\ell-J^\infty\rho_{\xi}^\infty \|_{L^1(\mathop{}\!\mathrm{d}\nu)}=0.$$ Since \begin{align*}
\|\rho_{\xi}^\ell- \rho_{\xi}^\infty\|_{L^1(\mathop{}\!\mathrm{d}\nu^\infty)}&=\int_{\mathbb{R}^k} (\rho_{\xi}^\ell- \rho_{\xi}^\infty) J^\infty\mathop{}\!\mathrm{d}\nu\\ &=\int_{\mathbb{R}^k} (J^\ell\rho_{\xi}^\ell- J^\infty\rho_{\xi}^\infty)\mathop{}\!\mathrm{d}\nu-\int_{\mathbb{R}^k} (J^\ell- J^\infty)\rho_{\xi}^\ell\mathop{}\!\mathrm{d}\nu \end{align*} we also get
$$\lim_{\ell\to+\infty}\|\rho_{\xi}^\ell- \rho_{\xi}^\infty\|_{L^1(\mathop{}\!\mathrm{d}\nu^\infty)}=0.$$ Using that $\xi\mapsto \rho_{\xi}^\ell$ is quadratic, by polarization we deduce that for any $i,j$,
$$\lim_{\ell\to+\infty}\|\rho_{i,j}^\ell- \rho_{i,j}^\infty\|_{L^1(\mathop{}\!\mathrm{d}\nu^\infty)}=0.$$ Up to extraction of a subsequence we can assume that there exists a set $C$ of full $\nu^\infty$ measure such that for any $i,j\in \{1,\dots,k\}$ and $p\in C$, $$ \lim_{\ell\to+\infty}\rho_{i,j}^\ell(p)= \rho_{i,j}^\infty(p).$$ Then for $\nu^\infty$-a.e. $p\in \mathbb{R}^k$, \begin{equation}\label{convergence1} \lim_{\ell\to+\infty}\lambda^\ell(p)= \lambda^\infty(p)\end{equation}
and thus we get \eqref{convergence2}.
For $\nu^\infty$-a.e. $p\in u(K)$, we have $$ \rho_\xi^\infty(p)=\lim_{\sigma\to 0} \frac{\int_{K\cap u^{-1}\left(\bB^k_\sigma(p)\right)} \mathop{}\!\mathrm{d}\Gamma(u_\xi,u_\xi)}{\mu\left(K\cap u^{-1}\left(\bB^k_\sigma(p)\right)\right)}\, \cdot$$ Since $\mu$-a.e.~on $B$ we have \[ \frac{\mathop{}\!\mathrm{d} \Gamma(u_\xi,u_\xi)}{\mathop{}\!\mathrm{d} \mu}={}^t \xi G_u\xi, \] from \eqref{eq:bilipGram} in Proposition \ref{prop:propagation} we get $\mu$-a.e.~on $K$: \[ 1-\ensuremath{\varepsilon} \le \frac{\mathop{}\!\mathrm{d} \Gamma(u_\xi,u_\xi)}{\mathop{}\!\mathrm{d} \mu} \le 1+\ensuremath{\varepsilon}. \] Thus for $\nu^\infty$-a.e.~$p\in u(K),$
\begin{equation}\label{ine} 1-\epsilon \le \rho_\xi^\infty(p)\le 1+\epsilon,\end{equation} from which follows \eqref{ineq}.
\textbf{Step 3.} Recall that our final goal is to prove that $\nu^\infty \ll \mathcal H^k$. To this aim, we will apply Theorem \ref{th:DePhiRin} for any finite $\ell $ to the currents $$T_i^\ell=\sum_{j=1}^k \nu_{i,j}^\ell dx_j=\sum_{j=1}^k \rho_{i,j}^\ell\nu^\ell dx_j.$$ These are indeed normal currents as for $\psi\in \mathcal C^\infty_0(\mathbb{R}^k)$, \begin{align*} dT_i^\ell(\psi)&=\sum_{j=1}^k\int_B \chi_\ell\, \frac{\partial \psi}{\partial x_j}\circ u\,\mathop{}\!\mathrm{d}\Gamma(u_i,u_j)\\ &=\int_B \chi_\ell \,\mathop{}\!\mathrm{d}\Gamma(\psi\circ u,u_i)\text{ using the chain rule}\\ &=-\int_B\psi\circ u \,\mathop{}\!\mathrm{d}\Gamma(\chi_\ell,u_i) \text{ by the fact that }u_i \text{ is harmonic,} \end{align*} hence $$dT_i^\ell=-u_\#\left(\Gamma(\chi_\ell,u_i)\right)$$ is a finite Radon measure. Moreover, by Lemma \ref{lem:courants}, the decomposition (\ref{decomposition}) of $T_i^\ell$ is given by $$\vec T_i^\ell=\left(\rho_i^\ell \right)^{-1}\left(\rho_{i,1}^\ell,\dots, \rho_{i,k}^\ell\right)$$ with
$$\rho_i^\ell=\left(\sum_{j=1}^k \left(\rho_{i,j}^\ell\right)^2\, \right)^{\frac12} \qquad \text{and} \qquad \|T_i^\ell\|=\rho_i^\ell \nu^\ell.$$ Notice that $\rho_i^\ell\ge \rho_{i,i}^\ell$ hence
$$\rho_{i,i}^\infty \nu^\infty= \nu_{i,i}^\infty \ll\nu_{i,i}^\ell=\rho_{i,i}^\ell \nu^\ell\ll \|T_i^\ell\|,$$ and inequality (\ref{ine}) implies that $\nu^\infty$-a.e.~$\rho_{i,i}^\infty \ge 1-\sqrt{\ensuremath{\varepsilon}}$ so that
$$\nu^\infty\ll \|T_i^\ell\|.$$ We remark that for any $\xi=(\xi_1,\dots,\xi_k)\in \mathbb{R}^k$ unitary it holds $$\left\langle\, \left(\sum_{i=1}^k \xi_i \rho_i^\ell\vec T_i^\ell\right),\xi\right\rangle=\rho_\xi^\ell.$$ We set $$\mathcal B_\ell :=\{p\in \mathbb{R}^k : \lambda^\ell(p)\le (1-\ensuremath{\varepsilon})/2\}.$$ Since $\rho_i^\ell$ are bounded functions, we deduce that if $p\in \mathbb{R}^k\setminus \mathcal B_\ell$ then $ \vec T_1^\ell(p),\dots, \vec T_k^\ell(p)$ is a basis of $\mathbb{R}^k$. Applying Theorem \ref{th:DePhiRin} we get $$\mathbf{1}_{ \mathbb{R}^k\setminus \mathcal B_\ell}\nu^\infty\ll \mathcal H^k.$$ But the convergence \eqref{convergence2} and the lower bound \eqref{ineq} yield $$\lim_{\ell\to \infty} \nu^\infty\left(\mathcal B_\ell\right)=0,$$ hence we get $\nu^\infty\ll \mathcal H^k.$ \endproof
\subsection{Rectifiability of the regular sets: end of the proof}
To get (B) in Theorem \ref{th:mainrect} from Proposition \ref{prop:bilip}, we use the following definition, introduced in \cite{BPS}. \begin{D} Let $(X,\mathsf{d},\mu)$ be a metric measure space, $k$ a positive integer and $\ensuremath{\varepsilon} \in (0,1)$. We call $(\mu,k,\ensuremath{\varepsilon})$-rectifiable any Borel set $\Omega \subset X$ for which there exists a countable family of $(1+\ensuremath{\varepsilon})$-bi-Lipschitz charts $\{(V_i^\ensuremath{\varepsilon},\phi_i^\ensuremath{\varepsilon})\}$ from $X$ to $\mathbb{R}^k$ such that $\mu(\Omega \backslash \bigcup_{i} V_i^\ensuremath{\varepsilon})=0$. \end{D}
According to the previous definition, we are left with establishing the following.
\begin{prop}\label{prop:rect} Let $(X,\mathsf{d},\mu,o) \in \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$, $k \in \{1,\ldots,n\}$ and $\ensuremath{\varepsilon}\in(0,1)$. Then $\cR_k$ is $(\mu,k,\ensuremath{\varepsilon})$-rectifiable. \end{prop}
To this aim, we prove a lemma which is a consequence of our key Proposition \ref{prop:bilip}.
\begin{lemma}\label{prop:realrect} Let $(X,\mathsf{d},\mu,o) \in \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$ and $k \in \{1,\ldots,n\}$. Then for any $p \in X$, $R>0$ and $\ensuremath{\varepsilon}\in (0,1)$, there exists a $(\mu,k,\ensuremath{\varepsilon})$-rectifiable set $\Omega_\ensuremath{\varepsilon} \subset \mathcal{R}_k \cap B_R(p)$ such that $\mu([\mathcal{R}_k \cap B_R(p)] \backslash \Omega_\ensuremath{\varepsilon}) \le \ensuremath{\varepsilon}$. \end{lemma}
\begin{proof} Let $(X,\mathsf{d},\mu,o)\in \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$, $k \in \{1,\ldots,n\}$, $p \in X$, $R>0$ and $\ensuremath{\varepsilon}>0$ be given. Set $\ensuremath{\varepsilon}':=\ensuremath{\varepsilon}/\mu(\mathcal{R}_k \cap B_R(p))$. Let $\delta>0$ be given by Proposition \ref{prop:bilip} applied to $\ensuremath{\varepsilon}'$. For any $x \in \cR_k$ there exists $r(x)>0$ such that $x \in (\mathcal{R}_k)_{\delta,16r(x)}$. Apply the Vitali covering lemma for doubling metric measure spaces \cite[Theorem 1.6]{Heinonen} to the set $\mathcal{R}_k \cap B_R(p)$ and the collection of balls $A:=\{B_{r}(x)\}_{x \in \mathcal{R}_k \cap B_R(p), 0<r\le r(x)}$. Then there exist countably many pairwise disjoint balls $\{B_{r_{x_i}}(x_i)\} \subset A$ such that $\mu([\mathcal{R}_k \cap B_R(p)]\backslash \cup_i B_{r_{x_i}}(x_i)) =0$. By Proposition \ref{prop:bilip} for any $i$ there exists a Borel set $V_i \subset B_{r_{x_i}}(x_i)$ which is the domain of a bi-Lipschitz chart and such that $\mu(B_{r_{x_i}}(x_i) \backslash V_i) \le \ensuremath{\varepsilon}' \mu(B_{r_{x_i}}(x_i) )$. Set $\Omega_\ensuremath{\varepsilon}=\cup_i V_i$. Then $\Omega_\ensuremath{\varepsilon}$ is the union of domains of bi-Lipschitz charts, so it is obviously $(\mu,k,\ensuremath{\varepsilon})$-rectifiable. Moreover, \begin{align*} \mu([\mathcal{R}_k \cap B_R(p)]\backslash \Omega_\ensuremath{\varepsilon}) & \le \mu(\cup_i B_{r_{x_i}}(x_i) \backslash V_i) = \sum_i \mu( B_{r_{x_i}}(x_i) \backslash V_i)\\ & \le \ensuremath{\varepsilon}' \sum_i \mu(B_{r_{x_i}}(x_i)) \le \ensuremath{\varepsilon}' \mu (\mathcal{R}_k \cap B_R(p)) = \ensuremath{\varepsilon}. \qedhere \end{align*} \end{proof}
We are now in a position to prove Proposition \ref{prop:rect}, and conclude that (B) in Theorem \ref{th:mainrect} is established.
\begin{proof}[Proof of Proposition \ref{prop:rect}] From the previous lemma, for any $i \in \mathbb{N}\backslash \{0\}$ there exists a Borel set $\Omega_{\ensuremath{\varepsilon},i} \subset \mathcal{R}_k \cap B_i(p)$ which is $(\mu,k,2^{-i}\ensuremath{\varepsilon})$-rectifiable and such that $\mu([\mathcal{R}_k \cap B_i(p)] \backslash \Omega_{\ensuremath{\varepsilon},i}) \le 2^{-i}\ensuremath{\varepsilon}$. We set $\Omega_{\ensuremath{\varepsilon}}:=\bigcup_i \Omega_{\ensuremath{\varepsilon}, i}$. Then \[ \mu(\mathcal{R}_k \backslash \Omega_{\ensuremath{\varepsilon}}) \le \lim\limits_{i \to +\infty} \mu([\mathcal{R}_k \cap B_i(p)] \backslash \Omega_{\ensuremath{\varepsilon}}) \le \lim\limits_{i \to +\infty} \mu([\mathcal{R}_k \cap B_i(p)] \backslash \Omega_{\ensuremath{\varepsilon},i}) = 0. \] Since for any $i$ there exist countably many $(1+\ensuremath{\varepsilon})$-bi-Lipschitz charts $\{(V_{i,j}^\ensuremath{\varepsilon},\phi_{i,j}^\ensuremath{\varepsilon})\}_j$ such that $\mu(\Omega_{\ensuremath{\varepsilon},i} \backslash \bigcup_{j} V_{i,j}^\ensuremath{\varepsilon})=0$, we get that $\Omega_\ensuremath{\varepsilon}$ (and then $\mathcal{R}_k$) is $(\mu,k,\ensuremath{\varepsilon})$-rectifiable. \end{proof}
\section{Regularity of non-collapsed strong Kato limits} \label{sec:Reifenberg}
This section is devoted to the structure and regularity of non-collapsed strong Kato limits. We start by recalling some properties of such spaces, then show an almost rigidity result that leads to the Reifenberg regularity stated in Theorem \ref{thm:ReifregX}. In the second part of this section, we prove a Transformation Theorem which, together with Theorem \ref{thm:ReifregX} and the results of Section 3, implies Theorem \ref{th:Holderreg}.
\subsection{Non-collapsed strong Kato limits and almost monotone quantity} Recall that a manifold $(M^n,g) \in \cK(n,f)$ satisfies a strong Kato bound if the function $f$ is such that \begin{equation} \tag{SK} \label{eq:SK} \Lambda := \int_0^T \frac{\sqrt{f(s)}}{s} \mathop{}\!\mathrm{d} s< \infty. \end{equation} Under assumption \eqref{eq:SK}, the volume bound \eqref{eq:volestim} given by Proposition \ref{eq:PI_manifolds} upgrades into the following, as proved in \cite{CMT}.
\begin{prop} \label{prop:VB} Let $(M^n,g) \in \cK(n,f)$ with $f$ satisfying \eqref{eq:SK}. Then there exists $C=C(n, \Lambda)>0$ such that for any $0< s \leq r \leq \sqrt{T}$ we have $$\frac{\nu_g(B_r(x))}{\nu_g(B_s(x))}\leq C\left( \frac rs\right)^n.$$ \end{prop}
For $v>0$, $(M^n,g,o)$ belongs to $\cK(n,f,v)$ if $f$ satisfies \eqref{eq:SK} and moreover $\nu_g(B_{\sqrt{T}}(o))\geq vT^{\frac{n}{2}}$. Non-collapsed strong Kato limits are elements of the closure $\overline{\cK(n,f,v)}$ with respect to Gromov-Hausdorff topology. As proved in \cite[Theorem 7.1]{CMT}, volume continuity holds for non-collapsed strong Kato limits.
\begin{theorem} \label{thm:volCont} Let $\{(M_\alpha, g_\alpha, o_\alpha)\} \subset \cK(n,f,v)$ be a sequence converging in the pointed Gromov-Hausdorff topology to $(X,\mathsf{d},o) \in \overline{\cK(n,f,v)}$. Then $(M_\alpha, g_\alpha, \nu_{g_\alpha}, o_\alpha)$ converges to $(X,\mathsf{d}, \mathcal H^n,o)$ in the pointed measured Gromov-Hausdorff topology. \end{theorem}
As a consequence, in this setting the results of Section 3.2 can be revisited. More precisely, if in Theorem \ref{MetaThm}, Propositions \ref{prop:ExiSplit} and \ref{prop:GHisometry}, we replace Kato limits by non-collapsed strong Kato limits, we can assume closeness of balls in the Gromov-Hausdorff topology instead of the measured Gromov-Hausdorff topology. Note that in this case the quantities $\nu$ and $\delta$ also depend on the volume bound $v>0$.
Now let $(X,\mathsf{d},o)\in \overline{\cK(n,f,v)}$ and let $H:\mathbb{R}_+\times X\times X \to \mathbb{R}_+$ be its heat kernel. For any $t>0$ and $x \in X$ we consider $$\uptheta(t,x)=(4\pi t)^{\frac n2}H(t,x,x).$$ As we recalled in the introduction, in \cite{CMT} we showed that the map $t\mapsto \uptheta(t,x)$ is almost non-decreasing for all $x \in X$. More precisely, define for any $t \in (0,T]$ $$\upphi(t):=\int_0^t \frac{\sqrt{f(s)}}{s} \mathop{}\!\mathrm{d} s,$$ which is finite by \eqref{eq:SK}. Thanks to the Li-Yau inequality given by Proposition \ref{lem:LY}, we get the following (see also \cite[Corollaries 5.12 and 5.13]{CMT}).
\begin{prop}\label{monotonetheta} Let $(X,\mathsf{d},o)\in \overline{\cK(n,f,v)}$ with $f$ satisfying \eqref{eq:SK}. There is a constant $c_n>0$ depending only on $n$ such that for any $x \in X$ the function $$t\in (0,T)\mapsto e^{c_n \upphi(t)}\uptheta(t,x)$$ is non-decreasing and such that for any $t\in (0,T),$ $$e^{c_n \upphi(t)}\uptheta(t,x)\ge 1.$$ In particular, the limit $\vartheta(x)=\lim_{t\to 0}\uptheta(t,x)$ is well defined and satisfies $\vartheta(x)\ge 1$. \end{prop}
\begin{rem} In \cite{CMT} we also showed that for all $x \in X$, $\vartheta(x)$ is the inverse of the volume density: $\vartheta(x)^{-1}=\displaystyle\lim_{r \to 0}(\mathcal H^n(B_r(x))/\omega_n r^n),$ where $\omega_n$ is the volume of the Euclidean unit ball. \end{rem}
One consequence of \cite{CMT} is that the regular set coincides with the set of points where $\vartheta$ is equal to 1, as we show below.
\begin{prop}\label{prop:cR} Let $(X,\mathsf{d},o)\in \overline{\cK(n,f,v)}$ with $f$ satisfying \eqref{eq:SK}. Then \begin{equation*} \cR=\{x \in X : \Tan(X,x)=\{(\mathbb{R}^n,\mathsf{d}_e,0)\}\}=\{x \in X : \vartheta(x)=1\}. \end{equation*} \end{prop}
\begin{proof} The first equality is a direct consequence of \cite[Theorem 6.2(iii)]{CMT} and of volume continuity as recalled in Theorem \ref{thm:volCont}. As for the second, \cite[Theorem 7.2]{CMT} ensures that if $(\mathbb{R}^n,\mathsf{d}_e,0)$ is a tangent cone at $x \in X$, then $\vartheta(x)=1$, so that $$\cR \subset \{ x \in X : \vartheta(x)=1\}.$$ To prove the converse inclusion, consider $x \in X$ such that $\vartheta(x)=1$. The proof of \cite[Proposition 6.3]{CMT} ensures that $\vartheta$ is upper semi-continuous. We then have $$1 \leq \liminf_{y \to x} \vartheta(y) \leq \limsup_{y \to x} \vartheta(y) \leq \vartheta(x)=1,$$ so that $\vartheta$ is continuous at $x$. The proof of \cite[Theorem 6.2(iii)]{CMT} then implies that all tangent cones at $x$ are Euclidean, thus $x \in \cR$. \end{proof}
For a manifold $(M^n,g)$ satisfying a strong Kato bound, an upper bound on $\uptheta$ at some point $x$ implies a lower bound on the volume of $B_{\sqrt{T}}(x)$.
\begin{lemma}\label{prop:thetanC} Assume that $(M^n,g)$ is a closed manifold in $\cK(n,f)$ with $f$ satisfying \eqref{eq:SK}. There is a constant $v(n)>0$ such that if at some $x\in M$ and $t\le T$ we have $$\uptheta(t,x)\le 2,$$ then $\nu_g\left(B_{\sqrt{t}}(x)\right)\ge v(n) t^{\frac{n}{2}}$. \end{lemma}
\proof Thanks to the heat kernel estimates given by Proposition \ref{Prop:heatKe}, we get $$ \frac{t^{\frac n2}}{C_n\nu_g(B_{\sqrt{t}}(x))} \le \uptheta(t,x)\le 2,$$ which immediately gives the desired lower bound. \endproof
We are also going to use the following lemma.
\begin{lemma} \label{lem:thetaSmallKato} Let $(M^n,g) \in \cK(n,f)$ for $f$ satisfying \eqref{eq:SK}. For any $\delta \in (0,1)$ there exists $\nu>0$ depending on $\delta, f$ such that if for some $t \in (0, T]$ we have $\mathrm{k}_t(M^n,g)<\nu,$ then for all $x \in M$ and $s \in (0,t]$ we have $\uptheta(s,x)\leq \uptheta(t,x)(1+\delta)$. \end{lemma}
\proof Assume $\mathrm{k}_t(M,g)<\nu$ and let $c_n$ be the constant appearing in Proposition \ref{monotonetheta}. Observe that for any $a \in (0,t)$ we can write $$\int_0^t \frac{\sqrt{\mathrm{k}_\tau(M^n,g)}}{\tau}\mathop{}\!\mathrm{d} \tau \leq \int_0^a \frac{\sqrt{f(\tau)}}{\tau}\mathop{}\!\mathrm{d} \tau + \sqrt{\nu}\log\left(\frac{T}{a}\right).$$ We can choose $a$ depending on $f$ and $\delta$ such that the first addend in the previous inequality is smaller than $\log(1+\delta)/(2c_n)$. Then we can choose $\nu$ depending on $a$ and $\delta$, thus on $f$ and $\delta$, such that the second addend is also smaller than $\log(1+\delta)/(2c_n)$. By Proposition \ref{monotonetheta}, we then know that for all $x\in M$ and $s \in (0,t]$ $$\uptheta(s,x)\leq \uptheta(t,x)\exp\left(c_n\int_s^t \frac{\sqrt{\mathrm{k}_\tau(M^n,g)}}{\tau}\mathop{}\!\mathrm{d} \tau\right)\leq \uptheta(t,x)(1+\delta).$$\endproof
\begin{rem} \label{rem:thetaSmallKato} The same argument as in the previous proof implies that for a sequence $\{(M_\ell, g_\ell, o_\ell)\}\subset \cK(n,f,v)$ converging to $(X,\mathsf{d}, o)\in \overline{\cK(n,f,v)}$ such that $\lim_\ell \mathrm{k}_t(M_\ell,g_\ell)=0$ for some $t \in (0,T]$, we have that for all $x \in X$ the map $s \mapsto \uptheta(s,x)$ is monotone non-decreasing and satisfies $\uptheta(s,x)\geq 1$ for all $s\in (0,t]$. \end{rem}
\subsection{Almost rigidity} This subsection is devoted to proving the following almost rigidity for $\uptheta$, which will be the key result to obtain our Reifenberg regularity statement, namely~Theorem \ref{thm:ReifregX}.
\begin{theorem}\label{almostrigidity} For any $\ensuremath{\varepsilon}>0$ and $A>0$ there exists $\delta>0$ depending only on $f,n,\varepsilon$ and $A$ such that if $(M^n,g)\in\cK(n,f)$, $x\in M$ and $t\le T$ satisfy $$\mathrm{k}_t(M,g)\le \delta \quad \text{ and } \quad \uptheta(t,x)\le 1+\delta,$$ then $$\dGH\left( B_{A\sqrt{t}}(x),\bB^n_{A\sqrt{t}}\right)< \ensuremath{\varepsilon} A\sqrt{t}.$$ \end{theorem} In order to prove Theorem \ref{almostrigidity}, we are going to use a contradiction argument, that we sketch here before giving the detailed proof. We will construct a contradicting sequence for which a ball of radius $1$ stays uniformly far from the unit Euclidean ball. Thanks to Lemma \ref{prop:thetanC} such sequence is non-collapsing. Then up to extracting a sub-sequence, we obtain a limit $(X,\mathsf{d},x) \in \overline{\cK(n,f,v)}$ such that $B_1(x)$ is at a positive distance from the unit Euclidean ball. We then aim to show that the limit space $(X,\mathsf{d})$ is isometric to the Euclidean space. For that, we use the heat kernel rigidity shown in \cite{CT}. More precisely, for a non-collapsed strong Kato limit $(X,\mathsf{d}, x)\in \overline{\cK(n,f,v)}$ we define $$\mathbb{P}(t,x,y)=\frac{e^{-\frac{\mathsf{d}^2(x,y)}{4t}}}{(4\pi t)^{\frac{n}{2}}}.$$ If for all $x,y \in X$ and $t >0$ we have $H(t,x,y)=\mathbb{P}(t,x,y)$, then \cite[Theorem 1.1]{CT} ensures that $(X,\mathsf{d})$ is isometric to the Euclidean space. In order to show that $H$ coincides with $\mathbb{P}$, we will rely on the Li-Yau inequality proven in Proposition \ref{lem:LY} and on the fact that, thanks to Remark \ref{rem:thetaSmallKato}, $\uptheta$ is monotone non-decreasing.
\begin{proof} We assume by contradiction that the statement is false. Then there exist $\ensuremath{\varepsilon}, A>0$ such that if we consider the sequence $\delta_\ell=\ell^{-1},$ $\ell \in \ensuremath{\mathbb N}$, we find $t_\ell \leq T$, $(M_\ell, g_\ell)\in \cK(n,f)$ and $x_\ell \in M_\ell$ such that $$\mathrm{k}_{t_\ell}(M_\ell,g_\ell)\leq \delta_\ell \quad \text{ and } \quad \uptheta(t_\ell,x_\ell) \leq 1+ \delta_\ell,$$ but \begin{equation} \label{eq:pfAR1} \dGH(B_{A\sqrt{t_\ell}}(x_\ell), \mathbb{B}^n_{A\sqrt{t_\ell}})> \ensuremath{\varepsilon} A\sqrt{t_\ell}. \end{equation} Observe that if we define $\tilde{ f}(s)= f(sT)$ for all $s\in [0,1]$ and $\tilde g_\ell=t_\ell^{-1}g_\ell$ for any $\ell$, then the rescaling properties of $\mathrm{k}_t$ and of the heat kernel imply that each $(M_\ell, \tilde g_\ell)$ belongs to $\cK(n,\tilde{ f})$ and $$\mathrm{k}_1(M_\ell, \tilde g_\ell)=\mathrm{k}_{t_\ell}(M_\ell,g_\ell)\leq \delta_\ell, \quad \tilde \uptheta(1,x_\ell)=\uptheta(t_\ell, x_\ell)\leq 1+\delta_\ell.$$ Then up to rescaling we can assume that $t_\ell=1$ for all $\ell \in \ensuremath{\mathbb N}$.
By Lemma \ref{prop:thetanC}, we also know that there exists $v=v(n)>0$ such that for any $\ell$, $$\nu_{g_\ell}(B_1(x_\ell))\geq v, $$ so that each $(M_\ell, g_\ell, x_\ell)$ belongs to $\cK(n, f, v)$. Up to extracting a subsequence, $\{(M_\ell, g_\ell, x_\ell)\}$ converges in the pointed Gromov-Hausdorff topology to $(X,\mathsf{d},x) \in \overline{\cK(n,f,v)}$. Moreover, convergence of the heat kernel given in Proposition \ref{prop:HKconv} ensures that $$\uptheta(1,x)=\lim_\ell \uptheta(1,x_\ell)\leq 1.$$ Thanks to Remark \ref{rem:thetaSmallKato}, we also know that $t \mapsto \uptheta(t,x)$ is monotone non-decreasing and larger than one. We then get for all $s \in (0,1]$, \begin{equation}\label{eq:=1} \uptheta(s,x)=\uptheta(1,x)=1. \end{equation}
\noindent Because of \eqref{eq:pfAR1}, we also have \begin{equation} \label{eq:pfAR2} \dGH(B_A(x),\mathbb{B}^n_A)>\ensuremath{\varepsilon} A. \end{equation}
With our setting in place, we aim to prove that the heat kernel of $X$ satisfies \begin{equation} \label{euclHK} H=\mathbb{P} \end{equation} on $\mathbb{R}_+ \times X \times X$. In order to do so, we fix $x \in X$ and we introduce the function $$\Phi : \mathbb{R}_+ \times X \ni (t,y) \mapsto (4\pi t)^{\frac n2} H^2(t/2,x,y).$$
\noindent \underline{\textbf{Step 1.}} We show that $\Phi$ satisfies \begin{equation} \label{weak_upper} 4\frac{\partial}{\partial t}\left(\int_X \varphi \Phi \mathop{}\!\mathrm{d}\mathcal H^n\right)+\int_X \langle d \varphi, d \Phi \rangle \mathop{}\!\mathrm{d} \mathcal H^n\ge 0 \end{equation} for any non-negative $\varphi \in \mathcal C_c(X)\cap \mathcal D({\sf Ch})$. To this aim, we first observe that \begin{align*} & \phantom{=} \phantom{=} 4\frac{\partial}{\partial t}\left(\int_X \varphi \Phi \mathop{}\!\mathrm{d}\mathcal H^n\right)\\ & = \int_X \varphi(y)\left( \frac{2n}{t} \Phi(t,y) + 4(4\pi t)^{\frac n2} H(t/2,x,y)\frac{\partial H}{\partial t}(t/2,x,y) \right) \mathop{}\!\mathrm{d}\mathcal H^n(y). \end{align*} Then we use the definitions of $L$, $H$ and $\Phi$ to get \begin{align*} & \phantom{=} \phantom{=} \int_X \langle d \varphi, d \Phi \rangle \mathop{}\!\mathrm{d} \mathcal H^n = \int_X \varphi L\Phi \mathop{}\!\mathrm{d} \mathcal H^n\\
&= 2(4\pi t)^{\frac n2} \int_X \varphi(y) \left( H(t/2,x,y)L_yH(t/2,x,y) - |d_yH(t/2,x,y)|^2 \right) \mathop{}\!\mathrm{d} \mathcal H^n(y)\\
&= - 2(4\pi t)^{\frac n2} \int_X \varphi(y) \left( H(t/2,x,y)\frac{\partial H}{\partial t}(t/2,x,y) + |d_yH(t/2,x,y)|^2 \right) \mathop{}\!\mathrm{d} \mathcal H^n(y). \end{align*} Adding these two identities yields $$4\frac{\partial}{\partial t}\left(\int_X \varphi \Phi \mathop{}\!\mathrm{d}\mathcal H^n\right)+\int_X \langle d \varphi, d \Phi \rangle \mathop{}\!\mathrm{d} \mathcal H^n= 2(4\pi t)^{\frac n2} \int_X\varphi Z \mathop{}\!\mathrm{d}\mathcal H^n,$$
where $Z$ is defined by $$Z(t,y)=\frac{n}{t}H^2(t/2,x,y)+H(t/2,x,y)\frac{\partial H}{\partial t}(t/2,x,y)-|d_yH(t/2,x,y)|^2$$ for any $t\in \mathbb{R}_+$ and $y \in X$. Since $(X, \mathsf{d},o)$ is the limit of manifolds $\{(M_\ell,g_\ell)\}$ such that $\mathrm{k}_1(M_\ell,g_\ell) \to 0$ as $\ell$ goes to infinity, the Li-Yau inequality given by Remark \ref{rem:mieux} holds. Then $Z\geq 0$, which concludes the proof of \eqref{weak_upper}.
\noindent \underline{\textbf{Step 2.}} We show that for any $t>0$ and $y \in X$, \begin{equation} \label{eq:pfAR4} H(t,x,y)=\mathbb{P}(t,x,y). \end{equation} First observe that by definition of $L$, inequality \eqref{weak_upper} is equivalent to $\Phi$ satisfying \begin{equation} \label{upper} \left( 4\frac{\partial}{\partial t}+L\right) \Phi \geq 0 \end{equation} in a weak sense. The Gaussian estimate given in Proposition \ref{Prop:heatKe} implies that for any $t>0$, $$\lim_{\mathsf{d}(x,y)\to \infty}\Phi(t,y)=0.$$ Moreover, the fact that $\mathcal H^n(B_{1}(x))\geq v$ and the volume bound given in Proposition \ref{prop:VB} imply that for any $y \in X \backslash \{x\}$, $$\lim_{t \to 0}\Phi(t,y)=0.$$ By the semi-group law and \eqref{eq:=1}, we know that for any $s\in (0,1]$, $$\int_X \Phi(s,y)\mathop{}\!\mathrm{d}\mathcal H^n(y)=\uptheta(s,x)= 1.$$ As a consequence we get $$\lim_{t\to 0}\Phi(t,y)=\delta_x(y).$$ By inequality \eqref{upper} and the maximum principle, we get that for all $t\in (0,1]$ and $y \in X$, $$\Phi(t,y)\geq H(t/4, x,y).$$ But we also have, for all $t \in (0,1]$, $$1=\int_X \Phi(t,y) \mathop{}\!\mathrm{d}\mathcal{H}^n(y)=\int_X H(t/4, x,y)\mathop{}\!\mathrm{d}\mathcal{H}^n(y),$$
then we obtain, for all $t \in (0,1]$ and $y \in X$, \begin{equation} \label{eq:PhiH} \Phi(t,y)=H(t/4, x,y). \end{equation} We now introduce $$U(t,x,y)=-4t\log((4\pi t)^{\frac n2}H(t,x,y)).$$ By Varadhan's formula, we know $$\lim_{\sigma \to 0} U(\sigma, x,y)=-\mathsf{d}^2(x,y).$$ Because of \eqref{eq:PhiH}, a simple computation shows that for any $s \in (0,1]$ we have \begin{equation*} U(s/4,x,y) = U(s/2, x,y). \end{equation*} As a consequence, for all $s\in (0,1]$, $$U(s/2,x,y)=\lim_{\sigma \to 0} U(\sigma,x,y)=-\mathsf{d}^2(x,y).$$ This shows that for all $t \in (0,1/2]$ and $y \in X$ $$H(t,x,y)=\mathbb{P}(t,x,y).$$ Both expressions in this equality are analytic in $t$, hence we get \eqref{eq:pfAR4} for any $t>0$.
\noindent \underline{\textbf{Step 3.}} We obtain \eqref{euclHK} and conclude. Equality \eqref{eq:pfAR4} implies in particular that $\uptheta(t,x)=1$ for all $t>0$ and not only for $t\in (0,1]$. By using the estimate on the derivatives of the heat kernel given in the last point of Proposition \ref{Prop:heatKe}, non-collapsing and the volume bound of Proposition \ref{prop:VB}, we get that there exists a constant $C>0$ such that for any $t>0$ and $z \in X$,
$$|\uptheta(t,x)-\uptheta(t,z)|\leq \frac{C}{\sqrt{t}}\mathsf{d}(x,z).$$ Then for any $z \in X$, $$\lim_{t\to +\infty}\uptheta(t,z)=1.$$ Since by Remark \ref{rem:thetaSmallKato} the map $t \mapsto \uptheta(t,z)$ is monotone non-decreasing and bounded below by one, it must be constantly equal to one. Arguing as in the previous step, the fact that $\uptheta(t,z)=1$ for any $z \in X$ and $t>0$ leads to \eqref{euclHK}. Then by \cite[Theorem 1.1]{CT}, the strong Kato limit $(X,\mathsf{d})$ is isometric to the Euclidean space $(\mathbb{R}^n, \mathsf{d}_e)$, which contradicts inequality \eqref{eq:pfAR2}. \end{proof}
\begin{rem} Theorem \ref{almostrigidity} can be also proven by using \cite[Corollary 1.7]{DPG16}, that is rigidity in Bishop-Gromov inequality for non-collapsed $\RCD(0,n)$ spaces. We chose to provide a self-contained proof independent of $\RCD$ theory. \end{rem}
\subsection{Consequences of almost rigidity} As an immediate consequence of Theorem \ref{almostrigidity} and of the convergence of heat kernels given by Proposition \ref{prop:HKconv} we obtain the following.
\begin{cor}\label{cor:propuptheta} Assume that $f$ satisfies \eqref{eq:SK}. For any $\delta>0$, there is some $\nu>0$ depending only on $f,n$ and $\delta$ such that if $(M^n,g)\in\cK(n,f)$, $x\in M$ and $t\le T$ satisfy $$\mathrm{k}_t(M,g)\le \nu \quad \text{ and } \quad\uptheta(t,x)\le 1+\nu,$$ then for any $y\in B_{\sqrt{t}}(x)$ we have $\uptheta(t,y)\le 1+\delta.$ \end{cor}
By combining Corollary \ref{cor:propuptheta}, the almost monotonicity of $\uptheta$ (Lemma \ref{lem:thetaSmallKato}) with Theorem \ref{almostrigidity}, we get a Reifenberg regularity result for manifolds satisfying a strong Kato bound.
\begin{cor}\label{thm:ReifregM} Assume that $f$ satisfies \eqref{eq:SK}. For any $\ensuremath{\varepsilon}>0$, there exists $\nu>0$ depending only on $f,n, \ensuremath{\varepsilon}$ such that if $(M^n,g)\in\cK(n,f)$, $x\in M$ and $t\le T$ satisfy $$\mathrm{k}_t(M,g)\le \nu \quad \text{ and } \quad\uptheta(t,x)\le 1+\nu$$ then for any $y\in B_{\sqrt{t}}(x)$ and $s\in (0,\sqrt{t})\colon$ $$\dGH\left( B_{s}(y),\bB^n_{s}\right)\le \ensuremath{\varepsilon} s.$$ \end{cor}
The Reifenberg regularity for non-collapsed strong Kato limits given in Theorem \ref{thm:ReifregX} is then a direct consequence of Corollary \ref{thm:ReifregM}.
We point out a corollary of the almost rigidity statement Theorem \ref{almostrigidity} and of Proposition \ref{prop:ExiSplit} that we use later to obtain Hölder regularity of the regular set of a non-collapsed strong Kato limit.
\begin{cor} \label{cor:exiSplGH} Let $v>0$ and $f$ be a function satisfying \eqref{eq:SK}. For any $\ensuremath{\varepsilon}>0$ there exists $\delta>0$ depending only on $f,n,\ensuremath{\varepsilon}$ such that if $(M^n,g) \in \cK(n,f,v)$, $x \in M$ and $t \leq T$ satisfy $$\mathrm{k}_t(M^n,g)\leq \delta \quad \text{ and } \quad \uptheta(t,x)\leq 1+\delta,$$ then there exists an $(n,\ensuremath{\varepsilon})$-splitting $u: B_{\sqrt{t}}(x)\to \mathbb{R}^n$. \end{cor}
\subsection{Transformation theorem} In order to obtain a quantitative version of Theorem \ref{thm:ReifregX}, we need to prove the following Transformation theorem.
\begin{theorem}[Transformation Theorem] \label{thm:Transformation} Let $f$ satisfy \eqref{eq:SK} and $v>0$. There exist a constant $\gamma_n>0$ and $\ensuremath{\varepsilon}_0 \in (0,1)$ depending on $n,f$ such that for all $\ensuremath{\varepsilon} \in (0,\ensuremath{\varepsilon}_0]$ there exists $\delta>0$ depending on $\ensuremath{\varepsilon}, n, f$ and $v$ such that if $(M^n,g)\in \mathcal{K}(n,f)$, $x\in M$ and $r \in (0,\sqrt{T}]$ satisfy \begin{enumerate} \item[\emph{i)}] $\nu_g(B_r(x))\geq vr^n$; \item[\emph{ii)}] $\mathrm{k}_{r^2}(M^n,g) \leq \delta;$ \item[\emph{iii)}] for any $s\in (0,r]$, $\dGH(B_{s}(x), \mathbb{B}_{s}^n) \leq \delta s$; \end{enumerate}
and if $u: B_{r}(x)\to \mathbb{R}^n$ is an $(n,\delta)$-splitting, then for all $s \in (0,r]$ there exists an $n \times n$ lower triangular matrix $T_s$ such that $\|T_s\| \leq (1+\ensuremath{\varepsilon}) (r/s)^{\gamma_n \ensuremath{\varepsilon}}$ and the map $\tilde u= T_s \circ u$ is an $(n, \ensuremath{\varepsilon})$-splitting on $B_s(x)$. \end{theorem}
\begin{rem} Thanks to Lemma \ref{prop:thetanC}, we can reformulate the previous theorem replacing the non-collapsing assumption i) by $\uptheta(r^2,x)\leq 2$. In this case the choice of $\delta$ will not depend on $v$. \end{rem}
We obtain Theorem \ref{thm:Transformation} as a consequence of the following proposition.
\begin{prop} \label{prop:transf1} Let $(M,g)\in \mathcal{K}(n,f)$. Then there exist $C_n>0$ and \mbox{$\ensuremath{\varepsilon}_0, \lambda \in (0,1)$} depending only on $n$ such that for all $\ensuremath{\varepsilon} \in (0,\ensuremath{\varepsilon}_0]$ there exists $\delta>0$ depending on $n,f, \ensuremath{\varepsilon}$ such that if for some $r \in (0, \sqrt{T}]$ we have \[ \mbox{k}_{r^2}(M^n,g) \leq \delta, \] a ball $B\subset M$ of radius $r$ satisfies \[ \dGH(B, \mathbb{B}_{r}^n) \leq \delta r, \]
and $u$ is a balanced $(n,\ensuremath{\varepsilon})$-splitting of $B$, then there exists a lower triangular matrix $T$ of size $n$ such that $\|T-\mathrm{Id}_n\| \leq C_n\ensuremath{\varepsilon}$ and the map $\tilde u := T \circ u_{| \lambda B}$ is a balanced $(n,\ensuremath{\varepsilon})$-splitting of $\lambda B$. \end{prop}
We postpone the proof of Proposition \ref{prop:transf1} and first give a proof of Theorem \ref{thm:Transformation}.
\begin{proof}[Proof of Theorem \ref{thm:Transformation} given Proposition \ref{prop:transf1}] Let $\ensuremath{\varepsilon}_0,\lambda$ be as in Proposition \ref{prop:transf1}, and let $\ensuremath{\varepsilon} \in (0, \ensuremath{\varepsilon}_0]$. Consider $\eta \in (0,1]$ to be chosen later depending on $n$ and let $\delta=\delta(n, f,\eta \ensuremath{\varepsilon})$ be the quantity given by Proposition \ref{prop:transf1}. Assume that $$\mathrm{k}_{r^2}(M^n,g)\leq \delta \quad \text{and, for all } s \in (0, r], \quad \dGH(B_s(x), \mathbb{B}^n_s)\leq \delta s.$$ Consider an $(n,\eta\ensuremath{\varepsilon})$-splitting $u: B_r(x) \to \mathbb{R}^n$ and $s \in (0,r]$.
First assume $s \in (\lambda r, r]$. Since $\lambda$ only depends on $n$, then \eqref{eq:cor_doublement} with $\phi=\|G_u-\mathrm{Id}_n\|$ implies
$$\fint_{B_s(x)}\|G_u-\mathrm{Id}_n\|\mathop{}\!\mathrm{d}\nu_g < C(n)\eta \ensuremath{\varepsilon}.$$
If $C(n) \ensuremath{\varepsilon}_0<1/2$, Remark \ref{rem:GSsplitting} implies the existence of a lower triangular matrix $T_s$ such that $\|T_s\|\leq 1+C(n)\eta \ensuremath{\varepsilon}$ and $T_s\circ u : B_s(x) \to \mathbb{R}^n$ is a balanced $(n,(1+C(n)\eta \ensuremath{\varepsilon})^2 \eta \ensuremath{\varepsilon})$-splitting. There is no loss of generality in assuming that $\ensuremath{\varepsilon}_0$ is smaller than $1/(4C(n))$, so we assume this. Assume also that \[ \eta \le \frac{16}{25}\, \cdot \] Then $T_s \circ u$ is a balanced $(n,\ensuremath{\varepsilon})$-splitting.
Now assume that there exists some positive integer $\ell$ such that $\lambda^{-\ell}s \in (\lambda r, r]$. Thanks to assumption iii), we can apply Proposition \ref{prop:transf1} iteratively to get existence of lower triangular matrices $T_0,\ldots, T_\ell$ such that $\tilde{u} := T_\ell \circ \ldots \circ T_0 \circ u : B_s(x) \to \mathbb{R}^n$ is a balanced $(n,\ensuremath{\varepsilon})$-splitting and \[
\|T_j\| \le (1+ C(n) \eta \ensuremath{\varepsilon}) \] for any $j \in \{0,\ldots,\ell\}$. Set $T:=T_\ell \circ \ldots \circ T_0$. Then \[
\|T\| \le (1+ C(n) \eta \ensuremath{\varepsilon})^{\ell+1}. \] Since $\lambda^{-\ell}s\le r$ implies $\ell \le \frac{\ln(r/s)}{\ln(1/\lambda)}$, we get \[ (1+ C(n) \eta \ensuremath{\varepsilon})^\ell \le (r/s)^{\frac{\ln(1+ C(n) \eta \ensuremath{\varepsilon})}{\ln(1/\lambda)}} \le (r/s)^{\frac{C(n) \ensuremath{\varepsilon}}{\ln(1/\lambda)}}. \] Then we set $$\gamma_n:=\frac{C(n)}{\ln(1/\lambda)}\qquad \text{and} \qquad \eta := \min\left\{\frac{16}{25},\frac{1}{C(n)}\right\}$$
to get $\|T\|\leq (1+\ensuremath{\varepsilon})(r/s)^{\gamma_n\ensuremath{\varepsilon}}$. This concludes the proof. \end{proof}
\begin{rem} We point out that, unlike the proof of \cite[Proposition 7.7]{CJN}, which relies on a contradiction argument, we provide a direct proof of the Transformation Theorem. \end{rem}
It remains to prove Proposition \ref{prop:transf1}. In order to do so, we need the following property of harmonic maps on $\mathbb{R}^n$. \begin{prop} \label{OscRn} Let $h : \mathbb{B}^n \to \mathbb{R}^k$ be a harmonic function and set
$$\Lambda:= \fint_{\mathbb{B}^n}\|G_h-\mathrm{Id}_k\|_1\mathop{}\!\mathrm{d} x.$$ Then there exists a constant $C>0$ depending only on $n$ such that for all $r \in (0,1/2)$ \begin{equation} \label{eq:harmBr}
\fint_{\mathbb{B}^n_r}\|G_h-\mbox{\small{$\fint_{\mathbb{B}^n_r}G_h$}}\|_1\mathop{}\!\mathrm{d} x \leq C\Lambda r. \end{equation} \end{prop}
\begin{proof} For the sake of brevity, we show an analogous statement in the case $k=1$: consider a harmonic function $h: \mathbb{B}^n \to \mathbb{R}$ and set
$$\Lambda_c=\fint_{\mathbb{B}^n} ||dh|^2-c|\mathop{}\!\mathrm{d} x.$$ Then we show that there exists $C>0$ only depending on $n$ such that for all $r \in (0,1/2)$ we have \begin{equation} \label{eq:Osc1}
\fint_{\mathbb{B}^n_r} \left||dh|^2-\fint_{\mathbb{B}^n_r}|dh|^2\right|\mathop{}\!\mathrm{d} x\leq C\Lambda_c r. \end{equation} By arguing as in Lemma \ref{lem:harmGH}, we obtain the following Hessian bound: \begin{equation} \label{eq:harm1}
\|\Hess h\|_{L^\infty(\frac 58\mathbb{B}^n)}\le C_n\sqrt{\Lambda_c}. \end{equation} Now we write $$h=\ell+\beta,$$ where $\ell$ is the linear part of $h$, namely $\ell(\cdot)=h(0)+dh(0)(\cdot)$, so that $\beta(0)=0$ and $d\beta(0)=0.$ We also have $$\Hess h=\Hess \beta,$$ then from \eqref{eq:harm1} we get, for any $x \in \mathbb{B}^n_{\frac 58}$, \begin{equation} \label{eq:harm2}
|d\beta|(x)\leq C_n\sqrt{\Lambda_c}\, |x|. \end{equation} Using that the coefficients of $dh$ are harmonic and $d\beta(0)=0$, we obtain
$$\fint_{\mathbb{B}^n} dh = d\ell \quad \text{ and } \quad |d\ell|\le \fint_{\mathbb{B}^n}|dh|.$$ Moreover, for any $r\in (0,1)$ the mean value of $\langle d\ell,d\beta\rangle$ over $\mathbb{B}^n_r$ is equal to its value at $0$, thus it is equal to zero. We then get for any $r\in (0,1)$ \begin{equation*}
\fint_{r\mathbb{B}^n} |dh|^2=|d\ell|^2+\fint_{r\mathbb{B}^n} |d\beta|^2 \end{equation*} so that \begin{equation} \label{eq:harm3}
\fint_{r\mathbb{B}^n}\left| |dh|^2-\left(\fint_{r\mathbb{B}^n} |dh|^2\right)\right|\le 2 \fint_{r\mathbb{B}^n}|d\beta|^2+2 \fint_{r\mathbb{B}^n}|\langle d\ell,d\beta\rangle|. \end{equation} By \eqref{eq:harm2}, the first term in the right-hand side is smaller than $C_n\Lambda_c r^2$. As for the second term, we use
$$2\langle d\ell,d\beta\rangle=|dh|^2-|d\ell|^2-|d\beta|^2$$ to get \begin{equation*}
2 \fint_{r\mathbb{B}^n}|\langle d\ell,d\beta\rangle|\le 2 \fint_{r\mathbb{B}^n}|d\beta|^2+\fint_{r\mathbb{B}^n}\left| |dh|^2-\left(\fint_{r\mathbb{B}^n} |dh|^2\right)\right| \end{equation*} for any $r\in (0,1)$. Choosing $r=5/8$ gives
$$\fint_{\frac58\mathbb{B}^n}|\langle d\ell,d\beta\rangle|\le C_n \Lambda_c.$$ Since $\langle d\ell, d\beta \rangle$ is harmonic, elliptic estimates imply the following gradient estimate
$$\| d\langle d\ell, d\beta \rangle \|_{L^{\infty}(\frac 12\mathbb{B}^n)} \leq C_n\fint_{\frac 58\mathbb{B}^n}|\langle d\ell,d\beta\rangle|\le C_n \Lambda_c.$$ Then by using that $\langle d\ell,d\beta\rangle(0)$ vanishes we get for any $x \in \frac 12\mathbb{B}^n$
$$ |\langle d\ell,d\beta\rangle|(x)\le C_n \Lambda_c |x|.$$ As a consequence, for any $r\in (0, 1/2)$ the second term in \eqref{eq:harm3} is bounded above by $C_n\Lambda_c r$. We then get the desired inequality
$$\fint_{\mathbb{B}^n_r}\left| |dh|^2-\left(\fint_{\mathbb{B}^n_r} |dh|^2\right)\right| \leq C_n\Lambda_c(r^2+r) \leq C_n\Lambda_c r,$$ for any $r\in (0, 1/2)$. \end{proof}
We can now prove Proposition \ref{prop:transf1}.
\begin{proof}[Proof of Proposition \ref{prop:transf1}] Up to rescaling the distance by a factor $r^{-1}$, we can assume that $r$ is equal to 1.
Let $\ensuremath{\varepsilon}_0, \kappa \in (0,1)$ and $\lambda \in (0,1/4)$ be constants to be chosen later, depending only on the dimension $n$. In what follows we write $C(n)$ for a generic constant which depends only on the dimension $n$ and whose value may change from line to line.
Take $\ensuremath{\varepsilon} \in (0,\ensuremath{\varepsilon}_0]$ and let $u$ be a balanced $(n,\ensuremath{\varepsilon})$-splitting of a ball $B\subset M$ with radius $1$. We assume that $(M,g)\in \mathcal{K}(n,f)$ and for some $\delta \in (0,1/16n)$, \[ \mbox{k}_{1}(M^n,g) \leq \delta \quad \text{ and } \quad \dGH(B, \mathbb{B}_{1}^n) \leq \delta . \]
By Proposition \ref{Prop:LipMieux}, we have
\begin{equation}\label{Lipu}
\sup_{\frac{3}{4}B}|du| \leq \left(1+C(n)\delta\right).
\end{equation}
If $\delta\le \nu(n,f,v,\kappa\ensuremath{\varepsilon},1/2,\lambda)$
then by Theorem \ref{MetaThm}, there exists a harmonic map \mbox{$h: \frac 12 \mathbb{B}^n \to \mathbb{R}^n$} such that $\|dh\|_{L^\infty(\frac 12 \mathbb{B}^n)}\le 2C(n)$ and \begin{equation} \label{eq:pf5.12_1.5}
\left|\fint_{\frac12 B} \|G_u - \mathrm{Id}_n\, \|_1 \mathop{}\!\mathrm{d}\nu_g - \fint_{\frac12\mathbb{B}^n}\|G_h-\mathrm{Id}_n\, \|_1\mathop{}\!\mathrm{d} x \right| <\kappa \ensuremath{\varepsilon} , \end{equation} \begin{equation}\label{eq:pf5.12_2}
\left|\fint_{\lambda B} \|G_u - \overline{G_h}\, \|_1 \mathop{}\!\mathrm{d}\nu_g - \fint_{\lambda\mathbb{B}^n}\|G_h-\overline{G_h}\, \|_1\mathop{}\!\mathrm{d} x \right| <\kappa \ensuremath{\varepsilon} , \end{equation} where we have set $ \overline{G_h}=\fint_{\lambda\mathbb{B}^n} G_h,$ and we introduce similarly $ \overline{G_u}= \fint_{\lambda B} G_u \mathop{}\!\mathrm{d}\nu_g$.
We now have that \begin{align*}
\fint_{\lambda B} \|G_u - \overline{G_u}\, \| \mathop{}\!\mathrm{d}\nu_g&\le 2 \fint_{\lambda B} \|G_u - \overline{G_h}\, \| \mathop{}\!\mathrm{d}\nu_g\\
&\le 2 \fint_{\lambda B} \|G_u - \overline{G_h}\, \|_1 \mathop{}\!\mathrm{d}\nu_g\\
&\le 2 \fint_{\lambda\mathbb{B}^n}\|G_h-\overline{G_h}\, \|_1\mathop{}\!\mathrm{d} x +2\kappa \ensuremath{\varepsilon},\end{align*}
where we have used \eqref{eq:pf5.12_2} and $\|\cdot\|\le \|\cdot\|_1$.
But using Proposition \ref{OscRn} and then estimate \eqref{eq:pf5.12_1.5}, one gets that
\begin{align*}\fint_{\lambda\mathbb{B}^n}\|G_h-\overline{G_h}\, \|_1\mathop{}\!\mathrm{d} x&\le C(n)\lambda \fint_{\frac12\mathbb{B}^n}\|G_h-\mathrm{Id}_n\, \|_1\mathop{}\!\mathrm{d} x\\
& \le C(n)\lambda\left(\kappa\ensuremath{\varepsilon}+\fint_{\frac12 B} \|G_u - \mathrm{Id}_n\, \|_1 \mathop{}\!\mathrm{d}\nu_g\right)\\
& \le C(n)\lambda\left(\kappa\ensuremath{\varepsilon}+C(n)\ensuremath{\varepsilon}\right),
\end{align*}
where in the last inequality, we have used \eqref{eq:cor_doublement} and $\|\cdot\|_1\le C(n) \|\cdot\|$. Gathering all the estimates, we get that
$$\fint_{\lambda B} \|G_u - \overline{G_u}\, \| \mathop{}\!\mathrm{d}\nu_g\le C(n)\left(\kappa+\lambda\right)\ensuremath{\varepsilon}.$$ Again \eqref{eq:cor_doublement} implies that
$$\|\overline{G_u}-\mathrm{Id}_n\|\le \fint_{\lambda B} \|G_u-\mathrm{Id}_n\| \mathop{}\!\mathrm{d}\nu_g\le C(n,\lambda) \fint_{ B} \|G_u-\mathrm{Id}_n\| \mathop{}\!\mathrm{d}\nu_g\le C(n,\lambda)\ensuremath{\varepsilon}.$$ If $\ensuremath{\varepsilon}\le \frac{1}{4C(n,\lambda)}$, then by Lemma \ref{lem:GS} there exists a lower triangular matrix $T$ such that \begin{equation}\label{eq:balanced_0}
T \fint_{\lambda B} G_u \mathop{}\!\mathrm{d} \nu_g {}^t T =\mathrm{Id}_n, \quad \| T\| \leq 1+C(n)C(n,\lambda) \ensuremath{\varepsilon}. \end{equation} Then the map $\tilde u = Tu: \lambda B \to \mathbb{R}^n$ satisfies \begin{equation*} \fint_{\lambda B} G_{\tilde u}\mathop{}\!\mathrm{d}\nu_g =\mathrm{Id}_n, \end{equation*} \begin{equation} \label{eq:pf5.12_0}
\fint_{\lambda B} \| G_{\tilde u} -\mathrm{Id}_n\|\mathop{}\!\mathrm{d}\nu_g \leq \|T\|^2 \fint_{\lambda B} \left \| G_u - \fint_{\lambda B} G_u \mathop{}\!\mathrm{d}\nu_g \right \| \mathop{}\!\mathrm{d} \nu_g\le \|T\|^2\,C(n)\left(\kappa+\lambda\right)\ensuremath{\varepsilon}, \end{equation} and \begin{equation} \label{eq:pf5.12_00}
\sup_{\lambda B} |d\tilde u|\le \|T\|\, (1+C(n)\delta). \end{equation}
We now make the following choices:
$$\kappa=\lambda=\frac{1}{8C(n)}\text{ and } \ensuremath{\varepsilon}_0= \frac{1}{4C(n)C(n,\lambda)}$$ and assume that $$\delta=\min\left\{\frac{1}{3C(n)}\,;\, \nu(n,f,v,\ensuremath{\varepsilon},\kappa,\lambda)\right\}$$
so that
\begin{itemize}
\item $\|T\|\le 1+C_n\ensuremath{\varepsilon} \leq \frac43\le 2 $ by \eqref{eq:balanced_0} and the fact that $\ensuremath{\varepsilon}\le \ensuremath{\varepsilon}_0$,
\item $\sup_{\lambda B} |d\tilde u|\le \frac43 (1+C(n)\delta)\le \frac43\frac32=2$ by \eqref{eq:pf5.12_00}, \item $\tilde u$ is a balanced $(n,\ensuremath{\varepsilon})$-splitting of $\lambda B$ by \eqref{eq:pf5.12_0}. \end{itemize} This concludes the proof. \end{proof}
\subsection{Hölder regularity} We conclude this section by observing that, under suitable assumptions, the results of the previous sections lead to the following Hölder regularity of almost splitting maps. \begin{theorem} \label{thm:strongReifenberg1} Assume that $f$ satisfies \eqref{eq:SK}. There exists $\ensuremath{\varepsilon}_0\in (0,1)$ depending only on $f,n$ such that for all $\ensuremath{\varepsilon}\in (0,\ensuremath{\varepsilon}_0]$ and for any $\eta \in (0,1)$, there exists $\delta >0$ depending only on $f,n, \ensuremath{\varepsilon}, \eta$ such that if $(M^n,g) \in \mathcal{K}(n,f)$, $x \in M$ and $t \in (0,\sqrt{T}]$ satisfy $$\mathrm{k}_t(M^n,g) \leq \delta, \quad \uptheta(t,x) \leq 1+\delta,$$ then any $(n,\delta)$-splitting $u: B_{\sqrt{t}}(x) \to \mathbb{R}^n$, with $u(x)=0$, is a diffeomorphism from $B_{(1-\eta)\sqrt{t}}(x)$ onto its image. Moreover, $u$ satisfies for all $y,z \in B_{(1-\eta)\sqrt{t}}(x)$ \begin{equation} \label{eq:strongReif}
(1-\ensuremath{\varepsilon})\frac{\mathsf{d}_g(y,z)^{1+\ensuremath{\varepsilon}}}{(\sqrt{t})^{\ensuremath{\varepsilon}}} \leq |u(y)-u(z)| \leq (1+\ensuremath{\varepsilon}) \mathsf{d}_g(y,z), \end{equation} and we have $\mathbb{B}_{(1-2\eta)\sqrt{t}}^n\subset u(B_{(1-\eta)\sqrt{t}}(x)) \subset \mathbb{B}_{(1-\eta/2)\sqrt{t}}^n.$ \end{theorem}
As in the proof of \cite[Theorem 7.10]{CJN}, Theorem \ref{thm:strongReifenberg1} follows from the Reifenberg regularity given in Corollary \ref{thm:ReifregM}, Proposition \ref{prop:GHisometry} and the Transformation Theorem \ref{thm:Transformation}. We then refer to \cite{CJN} for the details of the proof.
Theorem \ref{thm:strongReifenberg1} clearly passes to the limit to give an analog statement on non-collapsed strong Kato limits. Now recall that Corollary \ref{cor:exiSplGH} states that if $\uptheta(t,x)$ is close enough to $1$, then there exists an $(n,\ensuremath{\varepsilon})$-splitting on a ball around $x$. As a consequence, we obtain:
\begin{cor} Assume that $f$ satisfies \eqref{eq:SK}. Let $(X,\mathsf{d},o)\in \overline{\mathcal{K}(n,f,v)}$. For any $\upalpha \in (0,1)$ there exists $\delta$ depending on $\upalpha, n$ and $f$ such that for any $x \in X$ satisfying $\vartheta(x) < 1+\delta$ there exist $r \in (0,\sqrt{T})$ and a homeomorphism $u: B_r(x) \to u(B_r(x))\subset \mathbb{R}^n$ such that for all $y,z \in B_r(x)$ we have
$$\upalpha r^{1-\frac{1}{\upalpha}} \mathsf{d}(y,z)^{\frac{1}{\upalpha}} \leq |u(y)-u(z)| \leq \frac{1}{\upalpha}\mathsf{d}(y,z)^{\upalpha} r^{1-\upalpha}.$$ \end{cor}
Theorem \ref{thm:ReifregX} is then a consequence of this latter result and of a simple covering argument.
\appendix \section*{Appendix} \renewcommand{\thesubsection}{\Alph{subsection}}
\makeatletter \renewcommand{\thetheorem}{\Alph{subsection}.\arabic{theorem}} \@addtoreset{theorem}{subsection} \makeatother
\subsection{Codimension 2}
In this section we prove the following.
\begin{theorem} \label{thm:codim2} Assume that \eqref{eq:SK} holds. Let $(X,\mathsf{d},o) \in \overline{\mathcal{K}(n,f,v)}$. Then the singular set $\mathcal{S} := X \setminus \cR$ has Hausdorff dimension at most $n-2$. \end{theorem}
Consider $(X,\mathsf{d},o) \in \overline{\mathcal{K}(n,f,v)}$. From \cite[Theorem 6.2]{CMT}, we know that the singular set $\mathcal{S}$ admits a filtration \[ \mathcal{S}^0 \subset \ldots \subset \mathcal{S}^{n-1} = \mathcal{S} \] where \[ \mathcal{S}^k:=\{x \in X : \mathbb{R}^\ell\times Z\in \Tan(X,x)\, \Rightarrow \,\ell\le k \} \] for any $k \in \{0,\ldots,n-1\}$. Moreover, the Hausdorff dimension of each $\mathcal{S}^k$ is at most $k$. Thus we are left with proving $\mathcal{S}^{n-1}=\mathcal{S}^{n-2}$.
Let us explain why the latter follows from proving that $\mathbb{R}_+\times \mathbb{R}^{n-1}$ cannot be a tangent cone of $X$ at any $x \in X$. In \cite[Theorem A]{CMT} we proved that any metric measure tangent cone of $X$ is an $\RCD(0,n)$ metric measure cone. As a consequence, if $X_x=Z\times \mathbb{R}^{n-1}$ is a tangent cone of $X$ at $x$, since $X$ has Hausdorff dimension at most $n$, then $Z$ is an $\RCD(0,1)$ metric measure cone over some finite set $F$. If $\#F\ge 2$ then $Z$ has at least two ends and as a consequence splits so that necessarily $Z=\mathbb{R}$. Therefore, we have $\#F=1$ and then $Z=\mathbb{R}_+$, and this is what we aim to prove impossible.
We prove this by contradiction. Assume that there exists $x \in X$ admitting a metric tangent cone isometric to $\mathbb{R}^+ \times \mathbb{R}^{n-1}$. Then there exist pointed closed Riemannian manifolds $\{(M_\alpha,g_\alpha,o_\alpha)\}$ and positive numbers $\{\ensuremath{\varepsilon}_\alpha\}$ such that $\ensuremath{\varepsilon}_\alpha\downarrow 0$, $$ (M_\alpha,\mathsf{d}_{g_\alpha},o_\alpha) \stackrel{\pGH}{\longrightarrow} (\mathbb R}\def\ensuremath{\mathbb N}{\mathbb N}\def\bS{\mathbb S}\def\bB{\mathbb B}\def\bC{\mathbb C_+\times \mathbb R}\def\ensuremath{\mathbb N}{\mathbb N}\def\bS{\mathbb S}\def\bB{\mathbb B}\def\bC{\mathbb C^{n-1},\mathsf{d}_e,0)$$ and $$\mbox{k}_t(M_\alpha,g_\alpha)\le f(\ensuremath{\varepsilon}_\alpha t)$$ for any $\alpha$ and any $t\in (0,1/\ensuremath{\varepsilon}_\alpha]$. Set $$\tau \bB^n_+:=\left\{(x_1,\dots,x_n)\in \bB^n_\tau : \ x_1\ge 0\right\}$$ for any $\tau>0$. By arguing as in the proof of \cite[Theorem 7.4]{CMT}, we get harmonic maps $$\Psi_\alpha=(h_2^\alpha,\dots,h_n^\alpha)\colon B_2(o_\alpha)\rightarrow \mathbb R}\def\ensuremath{\mathbb N}{\mathbb N}\def\bS{\mathbb S}\def\bB{\mathbb B}\def\bC{\mathbb C^{n-1}$$ which converge uniformly to $(x_2,\dots,x_n)\colon 2\bB^n_+\rightarrow \mathbb R}\def\ensuremath{\mathbb N}{\mathbb N}\def\bS{\mathbb S}\def\bB{\mathbb B}\def\bC{\mathbb C^{n-1}$ and such that for any $\alpha$, \begin{enumerate}[i)]
\item $\| d \Psi_\alpha \|_{L^\infty(B_2(o_\alpha))} \le 1+\ensuremath{\varepsilon}_\alpha$,
\item$\displaystyle \fint_{B_{2}(o_\alpha)}\left\| G_{\Psi_\alpha} - \mbox{Id}_{n-1}\right\| \mathop{}\!\mathrm{d} \nu_{g_\alpha}\leq \ensuremath{\varepsilon}_\alpha,$
\item $\displaystyle \fint_{B_{2}(o_\alpha)}| d G_{\Psi_\alpha}|^2 \mathop{}\!\mathrm{d} \nu_{g_\alpha}\leq \ensuremath{\varepsilon}_\alpha.$ \end{enumerate} From \cite[Proposition A.1]{CMT}, we get existence of uniformly Lipschitz functions $f_1^\alpha\in \mathcal C^\infty(B_{2}(o_\alpha))$ which converge uniformly to $x_1\colon 2\bB^n_+\rightarrow \mathbb R}\def\ensuremath{\mathbb N}{\mathbb N}\def\bS{\mathbb S}\def\bB{\mathbb B}\def\bC{\mathbb C$. With no loss of generality, we may assume that $$\Phi^\alpha : =(f_1^\alpha,h_2^\alpha,\dots,h_n^\alpha)\colon B_2(o_\alpha)\rightarrow 2\bB^n_+$$ is an $\ensuremath{\varepsilon}_\alpha$-GH isometry. We are going to modify each $f_1^\alpha$ into a suitable $h_1^\alpha$. To this aim, we consider a convergent sequence $p_\alpha\in B_{1}(o_\alpha) \to p=(1/2,0,\dots,0)$. Up to working with $\Phi^\alpha$ modified by an additive constant, we can assume that $$\Phi^\alpha(p_\alpha)=p,$$ and up to considering large enough $\alpha$ only, we can assume that $$B_{3/8}(p_\alpha)\subset B_1(o_\alpha).$$ For any $\alpha$ let $\tilde f_1^\alpha$ be the harmonic replacement of $f_1^\alpha$ on $B_{3/8}(p_\alpha)$. Then the sequence $\{\tilde f_1^\alpha\}$ is uniformly bounded in energy and in $L^\infty$, and any of its weak sub-limit in energy is equal to $x_1$ on $2\bB^+\setminus B_{3/8}(p)$ and is harmonic on $B_{3/8}(p)$, hence it is equal to $x_1$. Using the energy characterization of harmonic functions and the semicontinuity of the energy, this implies $$\tilde f_1^\alpha\stackrel{E}{\to} x_1.$$ Moreover, the gradient estimate \cite[Lemma 3.6]{CMT} implies that the convergence is uniform on $B_{5/16}(p_\alpha)$.
For any $\alpha$ let $\chi_\alpha$ be the smooth cut-off function on $M_\alpha$ such that $\chi_\alpha=1$ on $B_{9/32}(p_\alpha)$ and $\chi_\alpha=0$ on $M_\alpha\setminus B_{5/16}(p_\alpha)$ with $\Lip\chi_\alpha\le 64$. Up to extraction of a subsequence, we may assume that $\{\chi_\alpha\}$ converges uniformly to a similar cut-off function on $\mathbb R}\def\ensuremath{\mathbb N}{\mathbb N}\def\bS{\mathbb S}\def\bB{\mathbb B}\def\bC{\mathbb C_+\times \mathbb R}\def\ensuremath{\mathbb N}{\mathbb N}\def\bS{\mathbb S}\def\bB{\mathbb B}\def\bC{\mathbb C^{n-1}$. For any $\alpha $ set $$h_1^\alpha:=\chi_\alpha \tilde f_1^\alpha+(1-\chi_\alpha)f_1^\alpha;$$ then $h_1^\alpha$ is smooth on $B_2(p_\alpha)$ and harmonic on $B_{9/32}(p_\alpha)$. Furthermore, the sequence $\{h_1^\alpha\}$ converges uniformly to $x_1$ on $B_1(p_\alpha)$, and the maps $$h_\alpha:=(h_1^\alpha,h_2^\alpha,\dots,h_n^\alpha)\colon B_1(o_\alpha)\rightarrow \bB_+^n$$ are $\ensuremath{\varepsilon}_\alpha$-GH isometries which converge uniformly to the identity function. Moreover, \begin{enumerate}[i)]
\item $\| d h_\alpha \|_{L^\infty(B_{17/64}(p_\alpha))} \le 1+\ensuremath{\varepsilon}_\alpha$,
\item$\displaystyle \fint_{B_{17/64}(p_\alpha)}\left\| G_{h_\alpha} - \mbox{Id}_{n}\right\| \mathop{}\!\mathrm{d} \nu_{g_\alpha}\leq \ensuremath{\varepsilon}_\alpha,$
\item $\displaystyle \fint_{B_{17/64}(p_\alpha)}| d G_{h_\alpha}|^2 \mathop{}\!\mathrm{d} \nu_{g_\alpha}\leq \ensuremath{\varepsilon}_\alpha.$ \end{enumerate}
Let $\{\tau_\alpha\}, \{\rho_\alpha\} \subset (0,1)$ be such that $\tau_\alpha\uparrow 1$, $\rho_\alpha\uparrow 1/4$, and for any $\alpha$, $\tau_\alpha^2$ is a regular value of $|h_\alpha|^2$
and $\rho_\alpha^2$ is a regular value of $|h_1^\alpha-1/2|^2+|\Psi_\alpha|^2$. For a given $\alpha$, set $$ \Omega_\alpha:= h_\alpha^{-1}(\mathbb{B}_{\tau_\alpha}^n) \qquad \text{and} \qquad \mathcal U_\alpha:=h_\alpha^{-1}(\mathbb{B}_{\rho_\alpha}^n(p)). $$ Since $h_\alpha(\Omega_\alpha) \subset \bB^n_+$, we know that $h_\alpha\colon \Omega_\alpha\rightarrow \bB^n_{\tau_\alpha}$ is not surjective. Moreover, $h_\alpha( \partial\Omega_\alpha)\subset \partial\bB^n_{\tau_\alpha}$. Thus for any regular value $x\in \tau_\alpha\bB^n_+$ of $h_\alpha$,
\begin{equation}\label{eq:even}
\# (h_\alpha^{-1}(\{x\}) \cap \Omega_{\alpha}) \in 2\ensuremath{\mathbb N}.
\end{equation}
Let us now consider a sequence $q_\alpha\in \mathcal U_\alpha \to p$ such that each $h_\alpha(q_\alpha)$ is a regular value of $h_\alpha$. As each $h_\alpha$ is an $\ensuremath{\varepsilon}_\alpha$-GH isometry, for any $q \in \Omega_\alpha$: $$h_\alpha(q)=h_\alpha(q_\alpha)\Longrightarrow \mathsf{d}_\alpha(q,q_\alpha)\le \ensuremath{\varepsilon}_\alpha.$$ Hence for large enough $\alpha:$
$$\left\{q\in \Omega_{\alpha} : h_\alpha(q)=h_\alpha(q_\alpha)\right\}\subset \mathcal U_\alpha.$$
But the analysis done in the proof of \cite[Theorem 7.4]{CMT} shows that
\begin{itemize}
\item if $\mathcal U_\alpha$ is orientable, then the degree of $h_\alpha\colon \mathcal U_\alpha\rightarrow \bB_{\rho_\alpha}(p)$ is $\pm 1$,
\item if $\mathcal U_\alpha$ is not orientable and if $\pi_\alpha:\tilde{\mathcal U}_\alpha\rightarrow \mathcal U_\alpha$ is the 2-fold orientation cover, then the degree of $h_\alpha\circ \pi_\alpha\colon \tilde{\mathcal U}_\alpha\rightarrow \bB_{\rho_\alpha}(p)$ is $\pm 2$.
\end{itemize}
In any case we get $$\# \left\{q\in \Omega_{\alpha} : h_\alpha(q)=h_\alpha(q_\alpha)\right\} \in 2 \mathbb{N} +1,$$ which contradicts \eqref{eq:even}.
\subsection{Proof of Theorem \ref{MetaThm}}\label{app:meta} In this section, we obtain Theorem \ref{MetaThm} as a consequence of a contradiction argument and the following result.
\begin{theorem}\label{preMetaThm}
Let $\{(M_\alpha, \mathsf{d}_{g_\alpha}, \mu_{\alpha}, o_\alpha)\} \subset \mathcal{K}_\mathfrak{m}(n,f,c)$ be converging to $(X,\mathsf{d},\mu,o)$ in the pointed measured Gromov-Hausdorff topology. For some $r \in (0, \sqrt{T}]$, assume that there exists a harmonic function \mbox{$h: B_r(o) \to \mathbb{R}^k$} such that \mbox{$h(o)=0$} and $\|dh\|_{L^\infty(B_r(o))}\le L$ for some $L >1$. Let $\eta\in (0,1)$ be given. Then there exist $C(n,\eta)\ge 1$ and \mbox{$h_\alpha : B_{\eta r}(o_\alpha) \to \mathbb{R}^k$} harmonic with $\|dh_\alpha\|_{L^\infty(B_{\eta r}(o_\alpha))}\le LC(n,\eta)$ and $h_\alpha(o_\alpha)=0$ for any $\alpha$, such that \begin{enumerate} \item for all $s \in (0,\eta r]$ \begin{equation} \label{eq:average} \fint_{B_s(o_\alpha)} G_{h_\alpha} \mathop{}\!\mathrm{d}\mu_{\alpha} \to \fint_{B_s(o)} G_h \mathop{}\!\mathrm{d} \mu, \end{equation} \item for all $s \in (0,\eta r]$ and $A \in \mathcal{M}_k(\mathbb{R})$ \begin{equation} \label{eq:matrix}
\fint_{B_s(o_\alpha)}\|G_{h_\alpha}-A\| \mathop{}\!\mathrm{d} \mu_{\alpha} \to \fint_{B_s(o)}\|G_h-A\|\mathop{}\!\mathrm{d} \mu. \end{equation} \end{enumerate} \end{theorem}
Before proving it, we need a preliminary lemma.
\begin{lemma}\label{lem:prep_to_meta} Let $\{(X_\alpha, \mathsf{d}_\alpha, \mu_\alpha, o_\alpha)\}_{\alpha\in \mathbb{N} \cup \{\infty\}} \subset \overline{\mathcal{K}_\mathfrak{m}(n,f,c)}$ be such that $$(X_\alpha, \mathsf{d}_\alpha, \mu_\alpha, o_\alpha) \to (X_\infty,\mathsf{d}_\infty,\mu_\infty,o_\infty)$$ in the pointed measured Gromov-Hausdorff topology. Consider $r \in (0,\sqrt{T})$. For any $\alpha$, let $u_\alpha, v_\alpha \in H^{1,2}(B_r(o_\alpha),\mathsf{d}_\alpha,\mu_\alpha)$ be such that \begin{enumerate} \item $u_\alpha \stackrel{L^2(B_r)}{\to} u_\infty$ and $v_\alpha \stackrel{L^2(B_r)}{\to} v_\infty$, \item\label{assum} $\sup_{\alpha\in \mathbb{N}}\left( \int_{B_r(o_\alpha)} \mathop{}\!\mathrm{d} \Gamma(u_\alpha) \mathop{}\!\mathrm{d} \mu_{\alpha} \, , \, \int_{B_r(o_\alpha)} \mathop{}\!\mathrm{d} \Gamma(v_\alpha) \mathop{}\!\mathrm{d} \mu_{\alpha} \right) < +\infty$. \end{enumerate} Then for any $s \in (0,r]$, \begin{equation}\label{eq:onr}
\fint_{B_s(o_\alpha)} |u_\alpha^2 - v_\alpha^2|\mathop{}\!\mathrm{d} \mu_\alpha \to \fint_{B_s(o_\infty)} |u_\infty^2 - v_\infty^2|\mathop{}\!\mathrm{d} \mu_\infty. \end{equation} \end{lemma}
\begin{proof} For any $\gamma \in (0,1)$ and $\alpha \in \mathbb{N} \cup\{\infty\}$, set \[ u_{\alpha,\gamma}(\cdot) := \fint_{B_\gamma(\cdot)} u_\alpha \mathop{}\!\mathrm{d} \mu_\alpha \, , \qquad v_{\alpha,\gamma}(\cdot) := \fint_{B_\gamma(\cdot)} v_\alpha \mathop{}\!\mathrm{d} \mu_\alpha. \] Acting as in the proof of \cite[Proposition E.1]{CMT}, it is enough to consider the case $s \in (0,r)$ only.
We first claim that there exists $C_0>0$ such that for any $\gamma \in (0,r-s)$, \begin{equation}\label{eq:C1}
\sup_{\alpha \in \mathbb{N} \cup\{\infty\}} \left| \fint_{B_{s}(o_\alpha)} |u_\alpha^2 - v_\alpha^2| - |u_{\alpha,\gamma}^2 - v_{\alpha,\gamma}^2| \mathop{}\!\mathrm{d} \mu_\alpha \right| \le C_0 \gamma. \end{equation} Indeed, \begin{align*}
\left| \fint_{B_{s}(o_\alpha)} |u_\alpha^2 - v_\alpha^2| - |u_{\alpha,\gamma}^2 - v_{\alpha,\gamma}^2| \mathop{}\!\mathrm{d} \mu_\alpha \right| & \le \fint_{B_{s}(o_\alpha)} |u_\alpha^2 - u_{\alpha,\gamma}^2| \mathop{}\!\mathrm{d} \mu_\alpha\\
& + \fint_{B_{s}(o_\alpha)} |v_\alpha^2 - v_{\alpha,\gamma}^2| \mathop{}\!\mathrm{d} \mu_\alpha. \end{align*} Boundedness in $L^2$ of the averaging operator on doubling spaces (see e.g.~\cite[Theorem 3.5]{Aldaz}) yields the existence of $C_1>0$ such that \[
\|u_{\alpha,\gamma}\|_{L^2(B_s(o_\alpha))} \le C_1 \|u_\alpha\|_{L^2(B_r(o_\alpha))}. \] Moreover, the $L^2$ strong convergence of $\{u_\alpha\}$ to $u_\infty$ gives $C_2>0$ such that \[
\sup_{\alpha\in \mathbb{N} \cup \{\infty\}} \|u_\alpha\|_{L^2(B_r(o_\alpha))} \le C_2. \] Finally, the $L^2$ pseudo-Poincaré inequality \cite{CoulhonSC1993} and assumption \eqref{assum} yield the existence of $C_3>0$ such that \[
\left( \fint_{B_{s}(o_\alpha)} |u_\alpha - u_{\alpha,\gamma}|^2 \mathop{}\!\mathrm{d} \mu_\alpha\right)^{1/2} \le C_3 \gamma. \] Then \begin{align*}
\fint_{B_s(o_\alpha)} |u_\alpha^2 - u_{\alpha,\gamma}^2| \mathop{}\!\mathrm{d} \mu_\alpha & \le \left( \fint_{B_s(o_\alpha)} |u_\alpha - u_{\alpha,\gamma}|^2 \mathop{}\!\mathrm{d} \mu_\alpha\right)^{1/2}\left( \fint_{B_s(o_\alpha)} |u_\alpha + u_{\alpha,\gamma}|^2 \mathop{}\!\mathrm{d} \mu_\alpha\right)^{1/2}\\ & \le (1+C_1)C_2 C_3 \gamma. \end{align*} This and the symmetry between $u$ and $v$ eventually leads to \eqref{eq:C1}.
We now claim that for any given $\ensuremath{\varepsilon}>0$ and $\gamma \in (0,1)$, we can choose $\alpha\in \mathbb{N}$ large enough to ensure \begin{equation}\label{eq:C2}
\left| \fint_{B_s(o_\alpha)} |u_{\alpha,\gamma}^2 - v_{\alpha,\gamma}^2| \mathop{}\!\mathrm{d} \mu_\alpha - \fint_{B_s(o_\infty)} |u_{\infty,\gamma}^2 - v_{\infty,\gamma}^2| \mathop{}\!\mathrm{d} \mu_\infty \right| \le \frac{\ensuremath{\varepsilon}}{3}\, \cdot \end{equation} The Hölder inequality and a consequence of the doubling condition (see e.g.~\cite[Proposition 1.2, (v)]{CMT}) imply that $\{u_{\alpha,\gamma}\}$ and $\{v_{\alpha,\gamma}\}$ are equicontinuous on the balls $B_{(s+r)/2}(o_\alpha)$ for any fixed $\gamma \in (0,1)$. Then $u_{\alpha,\gamma} \to u_{\infty,\gamma}$ and $v_{\alpha,\gamma} \to v_{\infty,\gamma}$ uniformly on $B_s$. This yields \eqref{eq:C2}.
To conclude, take $\ensuremath{\varepsilon}>0$, choose $\gamma= \ensuremath{\varepsilon} /(3C_0)$ and then choose $\alpha$ such that \eqref{eq:C2} holds. Then the triangle inequality, \eqref{eq:C1} and \eqref{eq:C2} yield \eqref{eq:onr}. \end{proof}
\begin{rem} \label{rem:prep_to_meta} The previous proof may be easily adapted to show that for any $a \in \mathbb{R}$, \[
\fint_{B_{r}(o_\alpha)} |u_\alpha^2 - v_\alpha^2-a|\mathop{}\!\mathrm{d} \mu_\alpha \to \fint_{B_{r}(o)} |u_\infty^2 - v_\infty^2-a|\mathop{}\!\mathrm{d} \mu. \] \end{rem}
We are now in a position to prove Theorem \ref{preMetaThm} and conclude.
\begin{proof}
We start by treating the case $k=1$. Consider $\eta'=\eta^{1/2}$ and $\eta''=\eta^{1/3}$ so that $\eta <\eta' < \eta''< 1$. Then \cite[Proposition E.11]{CMT} ensures the existence of harmonic functions $h_\alpha : B_{\eta'' r}(o_\alpha) \to \mathbb R}\def\ensuremath{\mathbb N}{\mathbb N}\def\bS{\mathbb S}\def\bB{\mathbb B}\def\bC{\mathbb C$ uniformly converging to $h|_{B_{\eta''r}(o)}$ on $B_{\eta''r}(o)$ and such that for all $s \in (0, \eta''r]$ \begin{equation} \label{eq:conv_carré}
\fint_{B_s(o_\alpha)}|dh_\alpha|^2\mathop{}\!\mathrm{d}\mu_\alpha \to \fint_{B_s(o)}|dh|^2\mathop{}\!\mathrm{d}\mu. \end{equation}
By replacing $h_\alpha$ by $h_\alpha - h_\alpha(o_\alpha)$ we can assume that $h_\alpha(o_\alpha)=0$ for all $\alpha$. Moreover, the convergence of $|dh_\alpha|$ given by \eqref{eq:conv_carré} and the fact that $\|dh\|_{L^\infty(B_r(o))}\le L$ imply that for any large enough $\alpha$
$$\fint_{B_s(o_\alpha)}|dh_\alpha|^2\mathop{}\!\mathrm{d}\mu_\alpha \leq 2L^2.$$
We can then apply \cite[Lemma 3.6]{CMT} to get existence of $C(n,\eta)\ge 1$ such that $ \|dh_\alpha\|_{L^\infty(B_{\eta' r}(o_\alpha))}\le L C(n,\eta)$. Now consider $s \in (0,\eta r]$. The previous local Lipschitz bound and the Hessian estimate of \cite[Proposition 3.5]{CMT} yield the uniform Hessian bound \begin{equation} \label{eq:HessBd}
\sup_\alpha \fint_{B_{\eta r}(o_\alpha)} |\nabla dh_\alpha|^2\mathop{}\!\mathrm{d} \mu_\alpha \leq \frac{C(n,\eta,L)}{r^2} \, \cdot \end{equation}
We are then in a position to apply \cite[Proposition E.7]{CMT} and get $L^2(B_{\eta r})$ strong convergence of $\{|dh_\alpha|\}$ to $|dh|$. Then $\{u_\alpha=|dh_\alpha|\}$ and $\{v_\alpha=0\}$ satisfy the assumptions of Lemma \ref{lem:prep_to_meta}. We apply it and use Remark \ref{rem:prep_to_meta} to obtain that for all $a\in \mathbb{R}$ and $s\in (0,\eta r]$
$$\fint_{B_s(o_\alpha)}||dh_\alpha|^2-a|\mathop{}\!\mathrm{d}\mu_\alpha\to \fint_{B_s(o)}||dh|^2-a|\mathop{}\!\mathrm{d}\mu.$$
We consider now the case $k>1$. Observe that for all $i,j=1, \ldots, k$ we have
$$(G_{h_\alpha})_{i,j}=\langle (dh_\alpha)_i,(dh_\alpha)_j\rangle = \frac{1}{4}(|d((h_\alpha)_i+(h_\alpha)_j)|^2- |d((h_\alpha)_i-(h_\alpha)_j)|^2).$$ Set \begin{align*}
f_\alpha = \frac 12 |d((h_\alpha)_i+(h_\alpha)_j)| , &\quad \ g_\alpha= \frac 12 |d((h_\alpha)_i-(h_\alpha)_j)|,\\ f = \frac 12|d(h_i+h_j)|, & \quad g=\frac 12 |d(h_i-h_j)|. \end{align*} The sequences $\{f_\alpha\}$ and $\{g_\alpha\}$ satisfy the assumptions of Lemma \ref{lem:prep_to_meta}. This immediately yields \eqref{eq:average}. Moreover, if we consider $A \in \mathcal{M}_k(\mathbb{R})$ with components $a_{i,j}$, by arguing as above we get for all $i,j=1, \ldots, k$
$$\fint_{B_s(o_\alpha)} |f_\alpha^2-g_\alpha^2-a_{i,j}| \mathop{}\!\mathrm{d} \mu_\alpha \to \fint_{B_s(o)} |f^2-g^2-a_{i,j}|\mathop{}\!\mathrm{d}\mu, $$ which is equivalent to
$$\fint_{B_s(o_\alpha)} | (G_{h_\alpha})_{i,j} - a_{i,j}|\mathop{}\!\mathrm{d} \mu_\alpha \to \fint_{B_s(o)}|(G_h)_{i,j}-a_{i,j}|\mathop{}\!\mathrm{d}\mu.$$ This shows \eqref{eq:matrix}.
\end{proof}
\end{document} | arXiv | {
"id": "2205.01956.tex",
"language_detection_score": 0.5360331535339355,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{\textbf{A new boundary of the mapping class group}\footnote{The work was partially supported by NSFC, No: 11771456.}} \author{\textbf{Lixin $\mbox{Liu}^{1}$ and Yaozhong $\mbox{Shi}^{2,}$}\footnote{Corresponding author.} \\
{\normalsize $^{1}$Sun Yat-sen University, School of Mathematics} \\{\normalsize 510275, Guangzhou, P. R. China; mcsllx@mail.sysu.edu.cn} \\
{\normalsize $^{2}$Sun Yat-sen University, School of Mathematics} \\{\normalsize 510275, Guangzhou, P. R. China; shiyaozhong314@126.com}} \date{} \maketitle \begin{abstract} Based on the action of the mapping class group on the space of measured foliations, we construct a new boundary of the mapping class group and study the structure of this boundary. As an application, for any point in Teichm\"uller space, we consider the orbit of this point under the action of the mapping class group and describe the closure of this orbit in the Thurston compactification and the Gardiner-Masur compactification of Teichm\"uller space. We also construct some new points in the Gardiner-Masur boundary of Teichm\"{u}ller space. \end{abstract} \noindent \textit{Keywords}: Mapping class group, Measured foliation, Teichm\"uller space \\\noindent \textit{MSC (2010)}: 30F60, 32G15, 57M99 \section{Introduction} In order to study the structure of a group $G$, it is natural to equip $G$ with a boundary. For example, considering the Cayley graph of a finitely generated group and rescaling the lengths of the edges by a summable function, we obtain a compact completion of the graph under this new metric. The boundary of this completion is the Floyd boundary of $G$ (see \cite{Floyd}). Besides, a probability measure $\nu$ on $G$ determines a random walk on $G$. There is also a boundary determined by this random walk, which is called the Poisson boundary. The Poisson boundary is strongly related to the harmonic functions corresponding to the random walk (see \cite{Kai}).
Let $S$ be an oriented surface of genus $g$ with $n$ punctures. We assume that $N=3g-3+n\geq1$. In this paper, we study a special group: the mapping class group $Mod(S)$ of $S$. $Mod(S)$ acts on two spaces: $\mathcal{T}(S)$ and $\mathcal{MF}$, where $\mathcal{T}(S)$ is the Teichm\"uller space of $S$ and $\mathcal{MF}$ is the space of measured foliations on $S$. Let $\mathcal{PMF}=\mathcal{MF}-\{0\}/R_{+}$ be the space of projective measured foliations. Based on the action of $Mod(S)$ on $\mathcal{T}(S)$, $\mathcal{MF}$ and $\mathcal{PMF}$, we can study the structure of $Mod(S)$. The most important result in the study of mapping class group may be the Nielsen-Thurston classification theorem, which states that every $f\in Mod(S)$ is one of three special types: periodic, reducible, or pseudo-Anosov. The structure of subgroups of $Mod(S)$ was also studied by the action of $Mod(S)$ on $\mathcal{T}(S)$ and $\mathcal{MF}$ (see \cite{MCP}, \cite{Ivanov}, etc.).
Different boundaries of $Mod(S)$ were studied by various people (see \cite{Masur-Minsky}, \cite{K-M}, \cite{Ham}, etc.). In particular, Kaimanovich and Masur (see \cite{K-M}) studied the Poisson boundary of $Mod(S)$. They proved that under some natural conditions, the Poisson boundary of $Mod(S)$ is $\mathcal{PMF}$ equipped with a unique measure. In order to obtain their main result, they considered the action of $Mod(S)$ on $\mathcal{PMF}$ and analyzed the asymptotic behaviour of the action of an infinite sequence $\{f_n\}_{n=1}^{\infty}$ on $\mathcal{PMF}$ (see Subsection 1.5 in \cite{K-M}). Inspired by their idea, we study the asymptotic behaviour of $\{f_n\}_{n=1}^{\infty}$ on $\mathcal{MF}$. In the special case that $f_n=f^{n}$ for a fixed $f\in Mod(S)$, we know that \\(1) when $f$ is a Dehn Twist determined by a simple closed curve $\alpha$, $\lim_{n\rightarrow\infty}\frac{1}{n}f^{n}(F)=i(\alpha,F)\alpha$ for any $F\in\mathcal{MF}$; \\(2) when $f$ is a pseudo-Anosov map with $\lambda>1$, $f(F^{u})=\lambda F^{u}$, $f(F^{s})=\lambda^{-1}F^{s}$ and $i(F^{u},F^{s})=1$, $\lim_{n\rightarrow\infty}\lambda^{-n}f^{n}(F)=i(F^{s},F)F^{u}$ for any $F\in\mathcal{MF}$.
A natural generalization of these two classical results is \begin{prob}\label{problem0} Is the action of $Mod(S)$ on $\mathcal{MF}$ ``projectively precompact"? That is, for any sequence $f_n\in Mod(S)$, are there a subsequence $f_{n_k}$ and a sequence of positive numbers $t_k$ such that $t_kf_{n_k}:\mathcal{MF}\rightarrow\mathcal{MF}$ converges to some map $f_0:\mathcal{MF}\rightarrow\mathcal{MF}$? \end{prob} Note that it is necessary to take a subsequence $f_{n_k}$ and a positive scalar $t_k$ in Problem \ref{problem0}, since without these two operations, a generic sequence $f_n$ is not convergent.
We settle Problem \ref{problem0} by embedding $Mod(S)$ into an appropriate space and constructing a new boundary of $Mod(S)$. For this, we need some notations (see Section \ref{sec2}). Let $\Omega(\mathcal{MF})$ be the set of all homogeneous measurable functions from $\mathcal{MF}$ to $\mathcal{MF}$. Note that $R_{+}$ acts on $\Omega(\mathcal{MF})$ by multiplication. Let $P\Omega(\mathcal{MF})=\Omega(\mathcal{MF})-\{0\}/R_{+}$ be the projective space of $\Omega(\mathcal{MF})$. Endow $\Omega(\mathcal{MF})$ with the topology of pointwise convergence and $P\Omega(\mathcal{MF})$ with the quotient topology. Considering the action of $Mod(S)$ on $\mathcal{MF}$, there is a natural map $I:Mod(S)\rightarrow P\Omega(\mathcal{MF})$ (see Section \ref{sec2}). Up to a finite normal subgroup $ker(I)$, we can identify $Mod(S)$ with its image $I(Mod(S))$, which is denoted by $E$ for simplicity. Thus $Mod(S)$ is nearly embedded into $P\Omega(\mathcal{MF})$ and $P\Omega(\mathcal{MF})$ is the appropriate space for settling Problem \ref{problem0}.
With these notations, we prove that the closure $Cl(E)$ of $E=I(Mod(S))$ in $P\Omega(\mathcal{MF})$ is metrizable and compact (Theorem \ref{main}). Note that this result answers Problem \ref{problem0} completely: by the definition of $P\Omega(\mathcal{MF})$, identifying $Mod(S)$ with $E=I(Mod(S))$, the compactness of $Cl(E)$ means that for any sequence $f_n\in Mod(S)$, there are a subsequence $f_{n_k}$ and a sequence of positive numbers $t_k$ such that $t_kf_{n_k}:\mathcal{MF}\rightarrow\mathcal{MF}$ converges to a map $f_0:\mathcal{MF}\rightarrow\mathcal{MF}$.
Identifying $Mod(S)$ with $E=I(Mod(S))$, $\partial E=Cl(E)-E$ is a boundary of $Mod(S)$. For the structure of $\partial E$, we prove (see Section \ref{sec3}) \begin{itemize}
\item In $Cl(E)$, $E$ is discrete and $\partial E$ is closed (Proposition \ref{prop1}).
\item The operations of multiplication and inverse on $Mod(S)$ extend continuously to $Cl(E)$ (Proposition \ref{prop2}, \ref{prop3}, \ref{prop4}). But $Cl(E)$ is not indeed a group (Remark \ref{rema2}).
\item Any point $p\in \partial E$ can be represented as $[\sum_{i=1}^{k}i(E_i,\cdot)F_i]$, where $\{F_i\}\,(\{E_i\})$ are disjoint measured foliations (Theorem \ref{main2}).
\item Some special points of $\partial E$ are constructed (Proposition \ref{prop5}, \ref{prop6}, \ref{prop7}). In particular, $\partial E\neq\emptyset$. \end{itemize}
We also consider the actions of $Mod(S)$ on the Thurston compactification $\mathcal{T}^{Th}(S)$ and the Gardiner-Masur compactification $\mathcal{T}^{GM}(S)$ of $\mathcal{T}(S)$. The Thurston boundary is precisely $\mathcal{PMF}$; while the structure of the Gardiner-Masur boundary $GM$ is complex. See \cite{GM}, \cite{Miya-1}, \cite{Miya-2} and \cite{Walsh2} for more details on the Gardiner-Masur boundary. $Mod(S)$ acts on $\mathcal{T}^{Th}(S)$ and $\mathcal{T}^{GM}(S)$ naturally. Considering the actions of $Mod(S)$ on $\mathcal{T}^{Th}(S)$ and $\mathcal{T}^{GM}(S)$, we have two maps $$\Pi_{Th}:Mod(S)\times \mathcal{T}^{Th}(S)\rightarrow\mathcal{T}^{Th}(S),\,(f,p)\mapsto f(p)$$ and $$\Pi_{GM}:Mod(S)\times \mathcal{T}^{GM}(S)\rightarrow\mathcal{T}^{GM}(S),\,(f,p)\mapsto f(p).$$ If we endow $Mod(S)$ with the discrete topology, then $\Pi_{Th}$ and $\Pi_{GM}$ are both continuous. Since $Cl(E)=E\bigcup \partial E$ is a completion of $Mod(S)$ in some sense, we extend the domains of $\Pi_{Th}$ and $\Pi_{GM}$ to $Cl(E)\times \mathcal{T}^{Th}(S)$ and $Cl(E)\times \mathcal{T}^{GM}(S)$, respectively (see Theorem \ref{main5} and Remark \ref{rema5}).
As an application, we prove Theorem \ref{main3}, which answers the following problem: \begin{prob}\label{problem1} For any $x_0$ in $\mathcal{T}(S)$, considering the orbit $\Gamma(x_0)$ of $x_0$ under the action of $Mod(S)$, how to describe the closure of $\Gamma (x_0)$ in $\mathcal{T}^{Th}(S)$ or $\mathcal{T}^{GM}(S)$? \end{prob}
Besides, using the new boundary $\partial E$, we construct some new points in the Gardiner-Masur boundary of $\mathcal{T}(S)$ (see Remark \ref{rema7}).
The new boundary $\partial E$ is related to a special boundary of $Mod(S)$. For a base point $x\in\mathcal{T}(S)$, identifying $Mod(S)$ with the orbit $\Gamma(x)$ of $x$ by a map $Mod(S)\ni f\mapsto f(x)\in\Gamma(x)$ and then taking boundary in $\mathcal{T}^{Th}(S)$, we get a boundary of $Mod(S)$ depending upon the base point $x$. Note that this boundary is indeed the whole $\mathcal{PMF}$ (see Theorem \ref{main3}). Thus we may call it the ``Thurston boundary" of $Mod(S)$ with base point $x$. And then the new boundary $\partial E$ covers each ``Thurston boundary" of $Mod(S)$ (see Remark \ref{rema6}).
It may be interesting to study the relations between the new boundary $\partial E$ of $Mod(S)$ and some known boundaries of $Mod(S)$, such as the Floyd boundary, the Poisson boundary, etc. We will study these relations in future work. Besides, Hamenst\"adt introduced a new boundary of $Mod(S)$ (see Section 8 in \cite{Ham2}). It is also interesting to compare our new boundary with Hamenst\"adt's boundary.
We may consider a more general space than $\mathcal{MF}$, that is, the space of geodesic currents (see \cite{Bonahon}). Note that the space of geodesic currents includes $\mathcal{MF}$ and $\mathcal{T}(S)$. Since the construction of the new boundary $\partial E$ is based on the action of $Mod(S)$ on $\mathcal{MF}$ and $Mod(S)$ also acts continuously on the space of geodesic currents, it is natural to ask the following interesting problem: \begin{prob}\label{problem2} If we replace the space of measured foliations by the space of geodesic currents, does the construction of the new boundary work? \end{prob}
This paper is organized as follows.
In Section \ref{sec1}, we introduce background materials on measured foliations, Teichm\"{u}ller space and the action of the mapping class group. In Section \ref{sec2}, we construct the new boundary of $Mod(S)$. In Section \ref{sec3}, we study the structure of the new boundary. In Section \ref{sec4}, we give some applications of our new boundary.
\section{Preliminaries} \label{sec1} \subsection{Measured foliations} Let $\mathcal{S}=\mathcal{S}(S)$ be the set of isotopy classes of essential simple closed curves on $S$. For any $\alpha,\beta$ in $\mathcal{S}$, denote by $i(\alpha,\beta)$ the geometric intersection number between $\alpha$ and $\beta$.
Let $R_{\geq 0}=\{x\in R:x\geq 0\}$ and $R_{+}=\{x\in R:x>0\}$. Let $R_{\geq 0}^{\mathcal{S}}$ be the space of non-negative functions on $\mathcal{S}$ endowed with the topology of pointwise convergence. Denote the set of weighted simple closed curves by $R_{+}\times \mathcal{S}=\{t\cdot\alpha :t>0,\alpha\in \mathcal{S}\}$. It is known that \begin{equation*} i_{*}:R_{+}\times \mathcal{S}\rightarrow R_{\geq 0}^{\mathcal{S}}, \end{equation*} \begin{equation*} t\cdot \alpha\mapsto t\cdot i(\alpha,\cdot) \end{equation*} is injective and induces a topology on $R_{+}\times\mathcal{S}$. With this topology, $i_{*}$ is an embedding.
The closure of $i_{*}(R_{+}\times\mathcal{S})$ in $R_{\geq 0}^{\mathcal{S}}$ is called the space of measured foliations on $S$, which is denoted by $\mathcal{MF}$. $R_{+}$ acts on $R_{\geq 0}^{\mathcal{S}}$ by multiplication. Denote $R_{\geq 0}^{\mathcal{S}}-\{0\}/R_{+}$ by $PR_{\geq 0}^{\mathcal{S}}$ and $\mathcal{MF}-\{0\}/R_{+}$ by $\mathcal{PMF}$. $\mathcal{PMF}$ is called the space of projective measured foliations. For $F\in\mathcal{MF}-\{0\}$, denote $[F]\in\mathcal{PMF}$ to be the projective class of $F$. Note that $\mathcal{S}$ is embedded in $PR_{\geq 0}^{\mathcal{S}}$, and the closure of $\mathcal{S}$ in $PR_{\geq 0}^{\mathcal{S}}$ is $\mathcal{PMF}$. It is well known that $\mathcal{MF}$ is homeomorphic to $R^{6g-6+2n}$ and $\mathcal{PMF}$ is homeomorphic to $S^{6g-7+2n}$ (see \cite{FLP}).
For two weighted simple closed curves $t\alpha,s\beta\in R_{+}\times\mathcal{S}$, define their intersection number by the homogeneous equation $i(t\alpha,s\beta)=tsi(\alpha,\beta)$. Then the intersection number function $i$ extends continuously to $i:\mathcal{MF}\times\mathcal{MF}\rightarrow R_{\geq 0}$.
Any $F\in\mathcal{MF}-\{0\}$ is represented by a singular foliation with a transverse measure $\mu$ in the sense that for any simple closed curve $\alpha$, \begin{equation*} i(F,\alpha)=\inf_{\alpha^{'}}\int_{\alpha^{'}}d\mu, \end{equation*} where the infimum is over all simple closed curves $\alpha^{'}$ homotopic to $\alpha$.
Besides, we need the definition of ergodic decomposition of a measured foliation. A saddle connection of a foliation is a leaf connecting two singularities (not necessarily distinct). The critical graph of a foliation is defined to be the union of all saddle connections. The complement of the critical graph contains finitely many connected components. Each connected component is either a cylinder swept out by closed leaves or a so-called minimal component (each leaf is dense). On every minimal component $D$, there exists a finite set of ergodic transverse measures $\mu_1,...,\mu_n$ such that any transverse measure $\mu$ on $D$ can be written as $\mu=\sum_{j=1}^{n}f_j\mu_j$ for some non-negative coefficients $\{f_j\}$. An indecomposable component of a measured foliation is either a cylinder with a positive weight or a minimal component $D$ with an ergodic measure $\mu_j$. A measured foliation is called indecomposable if it contains only one indecomposable component. With these notations, any measured foliation $F$ can be uniquely represented as \begin{equation*} F=\sum_{i=1}^{k}F_i, \end{equation*} where each $F_i$ is an indecomposable measured foliation such that $i(F_i,F_j)=0$ and $[F_i]\neq[F_j]$ for $i\neq j$. We call this the ergodic decomposition of $F$.
Finitely many simple closed curves $\alpha_1,\alpha_2,...,\alpha_k$ fill up $S$ if for any $F\in \mathcal{MF}-\{0\}$, \begin{equation*} \sum_{i=1}^{k}i(\alpha_i,F)>0. \end{equation*}
We need the following result (Lemma 6.3, \cite{Walsh1}). \begin{lemm} \label{lemm0} Let $\{F_i:i=0,1,2,...,k\}$ be some projectively distinct indecomposable measured foliations such that $i(F_i,F_j)=0\,(i\neq j)$. Then for any $\epsilon>0$, there exists a simple closed curve $\alpha$ such that \begin{equation*} i(F_i,\alpha)<\epsilon i(F_0,\alpha),\,i=1,2,...,k. \end{equation*} \end{lemm}
\subsection{Teichm\"{u}ller space and its compactifications} Let $\mathcal{T}(S)$ be the Teichm\"{u}ller space of $S$. There are two equivalent definitions of $\mathcal{T}(S)$: the set of isotopy classes of hyperbolic metrics on $S$ and the set of isotopy classes of conformal structures on $S$. For two hyperbolic metrics $m_1,m_2$ of finite area on $S$, $m_1$ is equivalent to $m_2$ if there exists an orientation-preserving homeomorphism $f:S\rightarrow S$ isotopic to the identity map such that $f_{*}m_1=m_2$, where $f_{*}m_1$ is the push-forward of $m_1$ by $f$. $\mathcal{T}(S)$ is defined to be the set of equivalence classes of hyperbolic metrics of finite area on $S$. For two conformal structures $\mu_1,\mu_2$ on $S$, $\mu_1$ is equivalent to $\mu_2$ if there exists an orientation-preserving homeomorphism $f:S\rightarrow S$ isotopic to the identity map such that $f_{*}\mu_1=\mu_2$, where $f_{*}\mu_1$ is the push-forward of $\mu_1$ by $f$. $\mathcal{T}(S)$ is also defined to be the set of equivalence classes of conformal structures on $S$. By the uniformization theorem, these two definitions are consistent.
For the definition corresponding to hyperbolic metric, we consider the hyperbolic length function on $\mathcal{T}(S)$. For any $x\in\mathcal{T}(S)$ and $\alpha\in\mathcal{S}$, let $l(x,\alpha)$ be the hyperbolic length of the geodesic isotopic to $\alpha$ in the hyperbolic metric corresponding to $x$. The hyperbolic length of a simple closed curve extends continuously to the hyperbolic length of a measured foliation. The map \begin{equation*} l(\cdot,\cdot):\mathcal{T}(S)\times \mathcal{MF}\rightarrow R, \end{equation*} \begin{equation*} (x,F)\mapsto l(x,F) \end{equation*} is continuous.
Thurston constructed a compactification of Teichm\"uller space by the hyperbolic lengths of simple closed curves. Define a map $\widetilde{\varphi}_{Th}$ by \begin{equation*} \widetilde{\varphi}_{Th}: \mathcal{T}(S)\rightarrow R_{\geq0}^{\mathcal{S}}, \end{equation*} \begin{equation*} x\mapsto (l(x,\alpha))_{\alpha\in\mathcal{S}}. \end{equation*}
Let $pr:R_{\geq0}^{\mathcal{S}}-\{0\}\rightarrow PR_{\geq0}^{\mathcal{S}}$ be the projective map. Then the map $\varphi_{Th}=pr\circ\widetilde{\varphi}_{Th}$ is an embedding and the closure of the image is compact. Moreover, $Cl(\mathcal{T}(S))-\mathcal{T}(S)=\mathcal{PMF}$. Thus we have a compactification of $\mathcal{T}(S)$ denoted by $\mathcal{T}^{Th}(S)=\mathcal{T}(S)\bigcup\mathcal{PMF}$. $\mathcal{T}^{Th}(S)$ is the Thurston compactification and $\mathcal{PMF}$ is the Thurston boundary.
A sequence $\{x_n\}_{n=1}^{\infty}$ in $\mathcal{T}(S)$ converges to a boundary point $[F]$ in $\mathcal{PMF}$ if and only if there exists a positive sequence $\{t_n\}_{n=1}^{\infty}$ such that $\lim_{n\rightarrow\infty}t_n=0$ and $\lim_{n\rightarrow\infty}t_nl(x_n,\alpha)=i(F,\alpha)$ for any $\alpha\in\mathcal{S}$.
For the definition corresponding to conformal structure, we consider the extremal length function on $\mathcal{T}(S)$. For any $x\in\mathcal{T}(S)$ and $\alpha\in\mathcal{S}$, let $Ext(x,\alpha)$ be the extremal length of $\alpha$ in the conformal structure corresponding to $x$. The extremal length of a simple closed curve extends continuously to the extremal length of a measured foliation. For more details on extremal length, see \cite{Kerc}. The map \begin{equation*} Ext(\cdot,\cdot):\mathcal{T}(S)\times \mathcal{MF}\rightarrow R, \end{equation*} \begin{equation*} (x,F)\mapsto Ext(x,F) \end{equation*} is continuous.
Gardiner and Masur constructed a compactification of Teichm\"uller space by the extremal lengths of simple closed curves in \cite{GM}. Define a map $\widetilde{\varphi}_{GM}$ by \begin{equation*} \widetilde{\varphi}_{GM}: T(S)\rightarrow R_{\geq0}^{\mathcal{S}}, \end{equation*} \begin{equation*} x\mapsto (Ext^{\frac{1}{2}}(x,\alpha))_{\alpha\in\mathcal{S}}. \end{equation*}
The map $\varphi_{GM}=pr\circ\widetilde{\varphi}_{GM}$ is an embedding and the closure of the image is compact. Thus we have a compactification of $\mathcal{T}(S)$ denoted by $\mathcal{T}^{GM}(S)=\mathcal{T}(S)\bigcup GM$. $\mathcal{T}^{GM}(S)$ is the Gardiner-Masur compactification and $GM$ is the Gardiner-Masur boundary.
Different from the Thurston boundary $\mathcal{PMF}$, the structure of Gardiner-Masur boundary $GM$ is much more complex. For more details on its structure, see \cite{Miya-1}, \cite{Miya-2}, \cite{Walsh2}.
\subsection{The action of the mapping class group} Let $Mod(S)$ be the mapping class group of surface $S$, which is the set of isotopy classes of orientation-preserving homeomorphisms of $S$. $Mod(S)$ acts on $\mathcal{MF}$ and $\mathcal{T}(S)$ by push-forward. Precisely, for $f\in Mod(S)$ and $x\in\mathcal{T}(S)$, if $m$ and $\mu$ are a hyperbolic metric and a conformal structure in the equivalence class $x$, respectively, define $f(x)$ to be the equivalence class of $f_{*}m$ or $f_{*}\mu$. For a measured foliation $(F,\nu)$, define $f(F,\nu)$ to be $(f(F),f_{\ast}\nu)$. And its action on $\mathcal{T}(S)$ extends naturally to the Thurston compactification and the Gardiner-Masur compactification of $\mathcal{T}(S)$. For more details on $Mod(S)$, see \cite{FM}.
In this paper, we use the following convention: for any $x\in \mathcal{T}(S)$, $f\in Mod(S)$, $F\in \mathcal{MF}$, \begin{equation*} l(f(x),F)=l(x,f^{-1}(F)),\,Ext(f(x),F)=Ext(x,f^{-1}(F)). \end{equation*}
\section{Construction of the new boundary}\label{sec2} Based on the action of $Mod(S)$ on the measured foliation space $\mathcal{MF}=\mathcal{MF}(S)$, we construct a new boundary of $Mod(S)$ in this section.
For any $f\in Mod(S)$, $f$ acts on $\mathcal{MF}$ as a homogeneous continuous map $f:\mathcal{MF}\rightarrow \mathcal{MF}$, which is measurable in particular. Recall that $f$ is homogeneous if for any $F\in \mathcal{MF}$, $k\geq0$, $f(kF)=kf(F)$. Denote the set of all homogeneous measurable maps from $\mathcal{MF}$ to $\mathcal{MF}$ by $\Omega(\mathcal{MF})$. We endow $\Omega(\mathcal{MF})$ with the topology of pointwise convergence.
Since $R_{+}$ acts on $\mathcal{MF}$ by multiplication, multiplying any $f\in\Omega(\mathcal{MF})$ by a positive number $k$, we get a homogeneous measurable map $kf:\mathcal{MF}\rightarrow\mathcal{MF},\,F\mapsto kf(F)$. Thus $R_{+}$ also acts on $\Omega(\mathcal{MF})$ by multiplication. Then we have the projective space $P\Omega(\mathcal{MF})=\Omega(\mathcal{MF})-\{0\}/R_{+}$, where $0$ is the zero element in $\Omega(\mathcal{MF})$. Let $\pi:\Omega(\mathcal{MF})-\{0\}\rightarrow P\Omega(\mathcal{MF})$ be the projective map. Denote $[f]=\pi(f)$ to be the projective class of $f\in\Omega(\mathcal{MF})$.
We endow $P\Omega(\mathcal{MF})$ with the quotient topology induced by $\pi$. Precisely, for a sequence $\{[f_n]\}_{n=0}^{\infty}$ in $P\Omega(\mathcal{MF})$, $\lim_{n\rightarrow\infty}[f_n]=[f_0]$ if and only if there exists a positive sequence $\{t_n\}_{n=1}^{\infty}$ such that $t_nf_n$ converges to $f_0$ in the topology of pointwise convergence.
Sending $f\in Mod(S)$ to its action $f:\mathcal{MF}\rightarrow\mathcal{MF}$, we have a natural map \begin{equation*} \widetilde{I}:Mod(S)\rightarrow \Omega(\mathcal{MF}). \end{equation*} Composing it with $\pi$, we have another map \begin{equation*} I=\pi\circ\widetilde{I}:Mod(S)\rightarrow P\Omega(\mathcal{MF}). \end{equation*}
The kernel $ker(I)=\{f\in Mod(S):[f:\mathcal{MF}\rightarrow\mathcal{MF}]=[id_{\mathcal{MF}}]\}$ is finite. In fact, if $f\in ker(I)$, then there exists a positive number $k$ such that for any $F\in\mathcal{MF}$, $kf(F)=F$. Since $R_{+}\mathcal{S}$ is dense in $\mathcal{MF}$, this is equivalent to that for any $\alpha\in \mathcal{S}$, $f(\alpha)=k\alpha$. Since $f$ sends a simple closed curve to a simple closed curve, we have $k=1$. Thus $f\in ker(I)$ is equivalent to that $f$ fixes the isotopy class of each essential simple closed curve. By the result in page 344 of \cite{FM}, we know that when the topological type $(g,n)$ of $S$ is $(2,0),(1,1),(1,2)$ or $(0,4)$, $ker(I)$ is a subgroup of order $2$ or $4$; in the other cases, $ker(I)$ is trivial. So up to the finite normal subgroup $ker(I)$ (with order $1,2$ or $4$), we can identify $Mod(S)$ with its image $I(Mod(S))$. For simplicity, denote the image $I(Mod(S))$ by $E$.
Let $Cl(E)$ be the closure of $E$ in $P\Omega(\mathcal{MF})$. The main result of this section is \begin{theo}\label{main} $Cl(E)$ is metrizable and compact. \end{theo} Thus $Cl(E)$ is a completion of $Mod(S)$ and $\partial E=Cl(E)-E$ is a boundary of $Mod(S)$ in some sense.
We need some preparations to prove Theorem \ref{main}. In order to give a clear description of the topology of $\mathcal{MF}$, we choose $N$ simple closed curves $\{\alpha_1,\alpha_2,...,\alpha_N\}$ filling up the surface $S$ such that the map \begin{equation*} \Phi: \mathcal{MF}\rightarrow R^{N}, \end{equation*} \begin{equation*} F\mapsto \big(i(\alpha_1,F),i(\alpha_2,F),...,i(\alpha_N,F)\big) \end{equation*} is an embedding (see \cite{FLP}). As a result, we identify $\mathcal{MF}$ with the image $\Phi(\mathcal{MF})$ which is endowed with the Euclidean metric on $R^{N}$.
Let $l(\cdot)=\sum_{i=1}^{N}i(\alpha_i,\cdot):\mathcal{MF}\rightarrow R_{\geq0}$ be the length function on $\mathcal{MF}$ corresponding to $\{\alpha_1,\alpha_2,...,\alpha_N\}$. Since $\{\alpha_1,\alpha_2,...,\alpha_N\}$ fill up the surface, $l(F)=0$ if and only if $F=0$. Recall a result from \cite{Bonahon}: \begin{lemm}\label{lemm1} For any $M>0$, $\{F\in \mathcal{MF}:l(F)\leq M\}$ is compact in $\mathcal{MF}$. \end{lemm}
From Lemma \ref{lemm1}, we have \begin{lemm} \label{lemm2} $\Phi(\mathcal{MF})$ is closed in $R^{N}$. \end{lemm} \begin{proof} Suppose $\lim_{n\rightarrow\infty}\Phi(F_n)=f_0$ for some sequence $\{F_n\}_{n=1}^{\infty}\subseteq\mathcal{MF}$ and $f_0=(a_1,a_2,...,a_N)\in R^N$. Then $\lim_{n\rightarrow\infty}i(\alpha_i,F_n)=a_i$ for $i=1,2,...,N$. So $l(F_n)=\sum_{i=1}^{N}i(\alpha_i,F_n)\leq M$ for some $M>0$. From Lemma \ref{lemm1}, there exists a subsequence $\{F_{n_k}\}_{k=1}^{\infty}$ such that $\lim_{k\rightarrow\infty}F_{n_k}=F_0$ for some $F_0\in\mathcal{MF}$. Since $\Phi$ is continuous, we have $\Phi(F_0)=f_0$. Thus $f_0\in \Phi(\mathcal{MF})$, which completes the proof. \end{proof}
Let $\Omega^{'}(\mathcal{MF})\subseteq\Omega(\mathcal{MF})$ be the set of homogeneous continuous maps from $\mathcal{MF}$ to $\mathcal{MF}$. And let $P\Omega^{'}(\mathcal{MF})\subseteq P\Omega(\mathcal{MF})$ be the projective space of $\Omega^{'}(\mathcal{MF})$. Since $Mod(S)$ acts continuously on $\mathcal{MF}$, $E\subseteq P\Omega^{'}(\mathcal{MF})$.
Now we proceed to construct a metric on $P\Omega^{'}(\mathcal{MF})$. Set $\mathcal{MF}_1=\{F\in\mathcal{MF}:l(F)\leq 1\}$. For any $[f]$ in $P\Omega^{'}(\mathcal{MF})$, we define the normalized lift of $[f]$ to $\Omega^{'}(\mathcal{MF})$ by \begin{equation*} \widehat{f}(\cdot)=\frac{f(\cdot)}{L(f)}:\mathcal{MF}\rightarrow \mathcal{MF}, \end{equation*} where $L(f)=\sup_{\mathcal{MF}_1}l(f(\cdot))$. Note that $L(f)$ is finite because of the compactness of $\mathcal{MF}_1$.
Let $d$ be the Euclidean metric on $\mathcal{MF}$ induced by $\Phi$: for any $F,G\in\mathcal{MF}$, $d(F,G)=|\Phi(F)-\Phi(G)|$, where $|\cdot|$ is the Euclidean norm on $R^{N}$. We define a map $\widehat{d}:P\Omega^{'}(\mathcal{MF})\times P\Omega^{'}(\mathcal{MF})\rightarrow R$ as follows: for any $[f],[g]\in P\Omega^{'}(\mathcal{MF})$, \begin{equation*} \widehat{d}([f],[g])=\sup_{F\in\mathcal{MF}_1}d(\widehat{f}(F),\widehat{g}(F)). \end{equation*} Note that $\widehat{d}$ is a metric on $P\Omega^{'}(\mathcal{MF})$. In fact, the symmetry and the triangle inequality come from these two properties of metric $d$; the positive definiteness comes from the definition of the normalized lift $\widehat{f}$.
For the metric $\widehat{d}$, we have \begin{lemm}\label{lemm3} For any $\{[f_n]\}_{n=0}^{\infty}\subseteq P\Omega^{'}(\mathcal{MF})$, $\lim_{n\rightarrow \infty}\widehat{d}([f_n],[f_0])=0$ if and only if there exists a positive sequence $\{t_n\}_{n=1}^{\infty}$ such that $t_nf_n$ converges uniformly to $f_0$ on any compact subset of $\mathcal{MF}$. \end{lemm} \begin{proof}
Suppose that $\lim_{n\rightarrow\infty}\widehat{d}([f_n],[f_0])=0$. Then from the definition of metric $\widehat{d}$, we know that $f_n(\cdot)/L(f_n)$ converges uniformly to $f_0(\cdot)/L(f_0)$ on $\mathcal{MF}_1$. For any compact subset $A$ of $\mathcal{MF}$, set $l(A)=\sup_{F\in A}l(F)$. Note that for any $F\in A$, $F/l(A)\in \mathcal{MF}_{1}$. Thus we have $f_n(\cdot)/l(A)L(f_n)$ converges uniformly to $f_0(\cdot)/l(A)L(f_0)$ on $A$, which also implies that $L(f_0)f_n(\cdot)/L(f_n)$ converges uniformly to $f_0(\cdot)$ on $A$.
Conversely, suppose that there exists a sequence $\{t_n\}_{n=1}^{\infty}\subseteq R_{+}$ such that $t_nf_n$ converges uniformly to $f_0$ on any compact subset of $\mathcal{MF}$. In particular, $t_nf_n$ converges uniformly to $f_0$ on $\mathcal{MF}_{1}$, which implies that $\frac{t_nf_n(\cdot)}{L(t_nf_n)}$ converges uniformly to $\frac{f_0(\cdot)}{L(f_0)}$ on $\mathcal{MF}_{1}$. Thus
\begin{equation*}
\lim_{n\rightarrow\infty}\hat{d}([f_n],[f_0])=\lim_{n\rightarrow\infty}\sup_{\mathcal{MF}_1}d(\frac{f_n(\cdot)}{L(f_n)},\frac{f_0(\cdot)}{L(f_0)})
=\lim_{n\rightarrow\infty}\sup_{\mathcal{MF}_1}d(\frac{t_nf_n(\cdot)}{L(t_nf_n)},\frac{f_0(\cdot)}{L(f_0)})=0.
\end{equation*} \end{proof}
In the metric space $(P\Omega^{'}(\mathcal{MF}),\widehat{d})$, $E$ is precompact: \begin{lemm}\label{lemm4}
For any sequence $\{[f_n]\}_{n=1}^{\infty}\subseteq E$, there exists a subsequence $\{[f_{n_k}]\}_{k=1}^{\infty}$ such that
$$
\lim_{k\rightarrow\infty}\widehat{d}([f_{n_k}],[f_0])=0
$$
for some $[f_0]\in P\Omega^{'}(\mathcal{MF})$. \end{lemm} \begin{proof}
From the definition of $E$, we assume that $f_n\in Mod(S)$ ($n=1,2,...$). Take a point $x_0\in\mathcal{T}(S)$. Note that the action of $Mod(S)$ on $\mathcal{T}(S)$ is properly discontinuous. Thus by the definition of the Thurston compactification of $\mathcal{T}(S)$, one of the following holds:
\\(1) $f_{n_k}\equiv f_0$ for some subsequence $\{f_{n_k}\}_{k=1}^{\infty}$ and $f_0\in Mod(S)$;
\\(2) $\lim_{k\rightarrow\infty}f_{n_k}(x_0)=[F_0]$ for some subsequence $\{f_{n_k}\}_{k=1}^{\infty}$ and $[F_0]\in \mathcal{PMF}$.
For the case (1), $\widehat{d}([f_{n_k}],[f_0])\equiv 0.$
For the case (2), there exists a sequence of positive numbers $\{t_k\}_{k=1}^{\infty}$ such that for any $F\in\mathcal{MF}$,
$$
\lim_{k\rightarrow\infty}t_kl(x_0,f^{-1}_{n_k}(F))=\lim_{k\rightarrow\infty}t_kl(f_{n_k}(x_0),F)=i(F_0,F).
$$
In particular, we have $\lim_{k\rightarrow\infty}l(x_0,t_kf^{-1}_{n_k}(\alpha_i))=i(F_0,\alpha_i)$ for $i=1,2,...,N$. Since $\{\alpha_i\}_{i=1}^{N}$ fill up the surface, we have $l(F_0)=\sum_{i=1}^{N}i(\alpha_i,F_0)>0$, which implies that $m\leq \sum_{i=1}^{N}l(x_0,t_kf^{-1}_{n_k}(\alpha_i))\leq M$ for some $m,M>0$. Note that $\{F\in\mathcal{MF}:l(x_0,F)\leq M\}$ is compact. Thus passing to a subsequence again, we assume that $\lim_{k\rightarrow\infty}t_kf^{-1}_{n_k}(\alpha_i)=F_i$ for some $F_i\in\mathcal{MF}$ and $F_{i_0}\neq 0$ for some $i_0$.
By the definition of $\Phi$, we have
\begin{equation*}
t_k\Phi\circ f_{n_k}(\cdot)=\big(i(\alpha_i,t_kf_{n_k}\cdot)\big)_{i=1}^{N}=\big(i(t_kf^{-1}_{n_k}(\alpha_i),\cdot)\big)_{i=1}^{N}.
\end{equation*}
Since $i(\cdot,\cdot):\mathcal{MF}\times\mathcal{MF}\rightarrow R_{\geq0}$ is continuous and $\lim_{k\rightarrow\infty}t_kf^{-1}_{n_k}(\alpha_i)=F_i$, we know that $t_k\Phi\circ f_{n_k}(\cdot)=\big(i(t_kf^{-1}_{n_k}(\alpha_i),\cdot)\big)_{i=1}^{N}$ converges uniformly to $\big(i(F_i,\cdot)\big)_{i=1}^{N}\neq 0$ on any compact subset of $\mathcal{MF}$. By Lemma \ref{lemm2}, for any $F\in\mathcal{MF}$, $\big(i(F_i,F)\big)_{i=1}^{N}=\lim_{k\rightarrow\infty}\big(i(\alpha_i,t_kf_{n_k}(F))\big)_{i=1}^{N}\in \Phi(\mathcal{MF})$, which implies that $f_0=\Phi^{-1}\big(i(F_i,\cdot)\big)_{i=1}^{N}$ is a homogeneous continuous map from $\mathcal{MF}$ to $\mathcal{MF}$. Since $\Phi$ is a homeomorphism, $t_kf_{n_k}$ converges uniformly to $f_0$ on any compact subset of $\mathcal{MF}$. By Lemma \ref{lemm3}, $\lim_{k\rightarrow\infty}\widehat{d}([f_{n_k}],[f_0])=0$. \end{proof}
By Lemma \ref{lemm3} and Lemma \ref{lemm4}, we prove Theorem \ref{main} now. \\\textbf{Proof of Theorem \ref{main}.} Firstly, we prove that $Cl(E)\subseteq P\Omega^{'}(\mathcal{MF})$. Naturally, $E\subseteq P\Omega^{'}(\mathcal{MF})$. Suppose that a sequence $[f_n]\in E$ converges to $[f_0]\in Cl(E)$ in $P\Omega(\mathcal{MF})$. By Lemma \ref{lemm4}, there exists a subsequence $\{[f_{n_k}]\}_{k=1}^{\infty}$ such that $\lim_{k\rightarrow\infty}\widehat{d}([f_{n_k}],[f^{'}_0])=0$ for some $[f^{'}_{0}]\in P\Omega^{'}(\mathcal{MF})$. By Lemma \ref{lemm3}, there exists a sequence of positive numbers $\{t_k\}_{k=1}^{\infty}$ such that $t_kf_{n_k}$ converges uniformly to $f^{'}_0$ on any compact subset of $\mathcal{MF}$. Since the uniform convergence on compact sets is stronger than the pointwise convergence, $t_kf_{n_k}$ converges to $f^{'}_0$ in the topology of pointwise convergence. By the topology of $P\Omega(\mathcal{MF})$, $[f_{n_k}]$ converges to $[f^{'}_{0}]$ in $P\Omega(\mathcal{MF})$, which implies that $[f_0]=[f^{'}_{0}]\in P\Omega^{'}(\mathcal{MF})$. Thus $Cl(E)\subseteq P\Omega^{'}(\mathcal{MF})$.
Secondly, we prove that the topology on $Cl(E)$ is coincident with that induced by the metric $\widehat{d}$, that is, $Cl(E)$ is metrizable. From Lemma \ref{lemm3} and the fact that the uniform convergence on compact sets is stronger than the pointwise convergence, we know that for any sequence $\{[f_n]\}^{\infty}_{n=0}$ in $Cl(E)$, $\lim_{n\rightarrow\infty}\widehat{d}([f_n],[f_0])=0$ implies that $[f_n]$ converges to $[f_0]$ in the topology of $Cl(E)$. For the inverse direction, suppose that $[f_n]$ converges to $[f_0]$ in the topology of $Cl(E)$. We wish to prove that $\lim_{n\rightarrow\infty}\widehat{d}([f_n],[f_0])=0$. We prove this by contradiction. Suppose that there exists a subsequence $\{[f_{n_k}]\}_{k=1}^{\infty}$ such that $\widehat{d}([f_{n_k}],[f_0])>\varepsilon$ for some $\varepsilon>0$. Then by Lemma \ref{lemm4}, passing to a subsequence again, we can assume that $\lim_{k\rightarrow \infty}\widehat{d}([f_{n_k}],[f^{'}_0])=0$ for some $[f^{'}_{0}]\in P\Omega^{'}(\mathcal{MF})$, which implies that $[f^{'}_0]\in Cl(E)$ and $[f_{n_k}]$ converges to $[f^{'}_{0}]$ in the topology of $Cl(E)$. Thus $[f^{'}_0]=[f_0]$. By $$ 0=\lim_{k\rightarrow \infty}\widehat{d}([f_{n_k}],[f^{'}_0])=\lim_{k\rightarrow \infty}\widehat{d}([f_{n_k}],[f_0])\geq\varepsilon, $$ we get a contradiction.
Finally, by Lemma \ref{lemm4}, as a dense subset of metric space $(Cl(E),\widehat{d})$, $E$ is precompact. Thus $Cl(E)$ is compact.\qed
By the proof of Theorem \ref{main}, we have two useful corollaries. \begin{coro}\label{coro1} The boundary point set $\partial E=Cl(E)-E$ is included in $P\Omega^{'}(\mathcal{MF})$, that is, any boundary point $p$ can be represented by $p=[f_p]$, where $f_p$ is a homogeneous continuous map from $\mathcal{MF}$ to $\mathcal{MF}$. \end{coro} \begin{coro}\label{coro2} For any sequence $\{[f_n]\}_{n=0}^{\infty}$ in $Cl(E)$, the following are equivalent: \\(1) $\lim_{n\rightarrow\infty}[f_n]=[f_0]$; \\(2) there exists a sequence of positive numbers $\{t_n\}_{n=1}^{\infty}$ such that $t_nf_n$ converges to $f_0$ in the topology of pointwise convergence; \\(3) there exists a sequence of positive numbers $\{t_n\}_{n=1}^{\infty}$ such that $t_nf_n$ converges uniformly to $f_0$ on any compact subset of $\mathcal{MF}$. \end{coro} Corollary \ref{coro2} means that in $Cl(E)$, the pointwise convergence and the uniform convergence on compact sets are equivalent, which is not true in general.
\section{The structure of the boundary} \label{sec3} In this section, we study the structure of the boundary $\partial E=Cl(E)-E$ in detail. Recall that we endow $P\Omega(\mathcal{MF})$ with the quotient topology from the pointwise convergence on $\Omega(\mathcal{MF})$. $Cl(E)$ is the closure of $E$ in this topology.
\begin{prop} \label{prop1} In $Cl(E)$, $E$ is discrete and $\partial E$ is closed. \end{prop} \begin{proof} For any $[f]$ in $E$, if $[f]$ is not an isolated point in $Cl(E)$, then there exists a sequence $\{[f_n]\}_{n=1}^{\infty}$ in $E$ such that $f_n\neq f_m$ for $n\neq m$ and $\lim_{n\rightarrow\infty}[f_n]=[f]$. So there exists a sequence of positive numbers $\{t_n\}_{n=1}^{\infty}$ such that $\lim_{n\rightarrow\infty}t_nf_n=f$. Choosing a point $x_0$ in $\mathcal{T}(S)$, we have \begin{equation*} \lim_{n\rightarrow\infty}t_nl(f_{n}^{-1}(x_0),\cdot)=\lim_{n\rightarrow\infty}l(x_0,t_nf_n(\cdot))=l(x_0,f(\cdot)) =l(f^{-1}(x_0),\cdot). \end{equation*} Thus $f_n^{-1}(x_0)$ converges to $f^{-1}(x_0)$ in $\mathcal{T}(S)$, which contradicts the proper discontinuity of the action of $Mod(S)$ on $\mathcal{T}(S)$. Thus any point of $E$ is isolated in $Cl(E)$, which implies that $E$ is discrete in $Cl(E)$.
Since $E$ is discrete in $Cl(E)$, $E$ is open in $Cl(E)$. Thus $\partial E$ is closed in $Cl(E)$. \end{proof}
The operations of multiplication and inverse on $Mod(S)$ extend continuously to $Cl(E)$. For this, we need some notations. Let $\widetilde{E}=\pi^{-1}(E)$ be the inverse image of $E$ in $\Omega(\mathcal{MF})$ and $Cl(\widetilde{E})$ be the closure of $\widetilde{E}$ in $\Omega(\mathcal{MF})$. By Corollary \ref{coro1}, $Cl(\widetilde{E})\subseteq \Omega^{'}(\mathcal{MF})$. Similar to Corollary \ref{coro2}, we have \begin{coro}\label{coro3} For any sequence $\{f_n\}_{n=0}^{\infty}$ in $Cl(\widetilde{E})-\{0\}$, the following are equivalent: \\(1) $\lim_{n\rightarrow \infty}f_n=f_0$, that is, $f_n$ converges to $f_0$ in the topology of pointwise convergence; \\(2) $f_n$ converges uniformly to $f_0$ on any compact subset of $\mathcal{MF}$. \end{coro} \begin{proof} Since the uniform convergence on compact sets is stronger than the pointwise convergence, (2) implies (1).
For the inverse direction, suppose that $f_n$ converges to $f_0$ in the topology of pointwise convergence. Then by Corollary \ref{coro2}, there exists a sequence of positive numbers $\{t_n\}_{n=1}^{\infty}$ such that $t_nf_n$ converges uniformly to $f_0$ on any compact subset of $\mathcal{MF}$. In particular, $t_nf_n$ converges to $f_0$ in the topology of pointwise convergence, which implies that $\lim_{n\rightarrow\infty}t_n=1$. Therefore, $f_n=\frac{t_nf_n}{t_n}$ converges uniformly to $f_0$ on any compact subsets of $\mathcal{MF}$. \end{proof} By Corollary \ref{coro3}, we have \begin{prop} \label{prop2}
The map $M:(Cl(\widetilde{E})-\{0\})\times (Cl(\widetilde{E})-\{0\})\rightarrow \Omega^{'}(\mathcal{MF})$ defined by $(f,g)\mapsto f\circ g$ is continuous. And for any $f,g\in Cl(\widetilde{E})-\{0\}$, $f\circ g\in Cl(\widetilde{E})$. \end{prop} \begin{proof} Firstly, we prove the continuity of $M$. Suppose that $\lim_{n\rightarrow \infty}f_n=f$ and $\lim_{n\rightarrow \infty}g_n=g$ in $Cl(\widetilde{E})-\{0\}$. By Corollary \ref{coro3}, $f_n$ and $g_n$ converge uniformly to $f$ and $g$ on any compact subset of $\mathcal{MF}$, respectively. We need to prove that $f_n\circ g_n$ converges to $f\circ g$ in the topology of pointwise convergence.
Let $d$ be the Euclidean metric on $\mathcal{MF}$ induced by $\Phi$. For any $F\in\mathcal{MF}$, since $\lim_{n\rightarrow\infty}g_n(F)=g(F)$, we know that for any $\epsilon>0$, there is $N_1>0$ such that for any $n>N_1$, $$ d(f\circ g_n(F),f\circ g(F))<\frac{\epsilon}{2} $$ and $\{g_n(F)\}_{n=1}^{\infty}\subseteq B$ for some compact neighbourhood $B$ of $g(F)$.
Since $f_n$ converges uniformly to $f$ on $B$, there exists $N_2>0$ such that for any $n>N_2$ and $F\in B$, $d\big(f_n(F),f(F)\big)<\frac{\epsilon}{2}$.
Thus for any $n>\max\{N_1,N_2\}$, \begin{equation*} d(f_n\circ g_n(F),f\circ g(F))\leq d(f_n\circ g_n(F),f\circ g_n(F))+d(f\circ g_n(F),f\circ g(F))<\frac{\epsilon}{2}+\frac{\epsilon}{2}=\epsilon, \end{equation*} which implies that $f_n\circ g_n(F)$ converges to $f\circ g(F)$. Thus $M$ is continuous.
Secondly, we prove that $f,g\in Cl(\widetilde{E})-\{0\}$ implies $f\circ g\in Cl(\widetilde{E})$. Take $f_n,g_n\in Mod(S)$ and $t_n,k_n>0$ such that $\lim_{n\rightarrow\infty}t_nf_n=f$ and $\lim_{n\rightarrow\infty}k_ng_n=g$. Since $M$ is continuous, $\lim_{n\rightarrow\infty}t_nk_nf_n\circ g_n=f\circ g$, which implies that $f\circ g\in Cl(\widetilde{E})$. \end{proof}
By Proposition \ref{prop2}, for any two elements $[f],[g]$ in $Cl(E)$, if $f\circ g\neq 0$, we can define the product of $[f]$ and $[g]$ by $[f\circ g]\in Cl(E)$. In particular, restricting to $Mod(S)$, it is coincident with the multiplication operation on $Mod(S)$. Thus the multiplication operation on $Mod(S)$ extends continuously to $Cl(E)$ except in some degenerate cases ($f\circ g=0$).
For the inverse operation, we have \begin{prop} \label{prop3} For any $f$ in $Cl(\widetilde{E})$, there exists a unique element $\overline{f}$ in $Cl(\widetilde{E})$ such that $i\big(f(F),G\big)=i\big(F,\overline{f}(G)\big)$ for any $F,G$ in $\mathcal{MF}$. And the map $\varphi:Cl(\widetilde{E})\rightarrow Cl(\widetilde{E}),\,f\mapsto\overline{f}$ is a homeomorphism. \end{prop} \begin{proof} Firstly, we prove the existence of $\overline{f}$. For any $f$ in $Cl(\widetilde{E})$, if $f=Kf_{0}$ for some $K\geq0$ and $f_0\in Mod(S)$, we set $\overline{f}=Kf^{-1}_0$.
For other cases, we assume that $\lim_{n\rightarrow\infty}t_nf_n=f$ for some $t_n>0,\,f_n\in Mod(S)\,(n=1,2,...)$. Then we have \begin{equation*} \lim_{n\rightarrow\infty}\Phi(t_n f_n^{-1}(\cdot))=\lim_{n\rightarrow\infty}\big(i(\alpha_i,t_n f_n^{-1}(\cdot))\big)_{i=1}^{N}=\lim_{n\rightarrow\infty}\big(i(t_n f_n(\alpha_i),\cdot)\big)_{i=1}^{N}=\big(i(f(\alpha_i),\cdot)\big)_{i=1}^{N}. \end{equation*} From Lemma \ref{lemm2}, we set $f_{0}=\Phi^{-1}(\big(i(f(\alpha_i),\cdot)\big)_{i=1}^{N})$ and then \begin{equation*} \lim_{n\rightarrow\infty}t_nf_n^{-1}=f_0. \end{equation*} Thus for any $F,G$ in $\mathcal{MF}$, we have \begin{equation*} i(f(F),G)=\lim_{n\rightarrow\infty}i\big(t_nf_n(F),G\big) =\lim_{n\rightarrow\infty}i\big(F,t_nf_n^{-1}(G)\big)=i\big(F,f_0(G)\big). \end{equation*} So we set $\overline{f}=f_0$.
Secondly, we prove the uniqueness of $\overline{f}$. For any $f\in Cl(\widetilde{E})$, suppose there are two elements $f_1,f_2$ such that for any $F,G\in \mathcal{MF}$, \begin{equation*} i\big(f(F),G\big)=i\big(F,f_1(G)\big)=i\big(F,f_2(G)\big). \end{equation*} Then \begin{equation*} \Phi\big(f_1(\cdot)\big)=\Phi\big(f_2(\cdot)\big)=\big(i(f(\alpha_i),\cdot)\big)_{i=1}^{N}. \end{equation*} Since $\Phi$ is an embedding, we know that $f_1=f_2$.
Now we prove that $\varphi$ is a homeomorphism. Obviously, we have $\overline{\overline{f}}=f$ for any $f$ in $Cl(\widetilde{E})$, which implies that $\varphi^{2}=id:Cl(\widetilde{E})\rightarrow Cl(\widetilde{E})$. Thus we only need to prove that $\varphi$ is continuous. Suppose $\lim_{n\rightarrow\infty}f_n=f_0$ for $\{f_n\}_{n=0}^{\infty}$ in $Cl(\widetilde{E})$. Then we have \begin{equation*} \lim_{n\rightarrow\infty}\Phi(\overline{f_n}(\cdot)) =\lim_{n\rightarrow\infty}\big(i(\alpha_i,\overline{f_n}(\cdot))\big)_{i=1}^{N} =\lim_{n\rightarrow\infty}\big(i(f_n(\alpha_i),\cdot)\big)_{i=1}^{N} \end{equation*} \begin{equation*} =\big(i(f_0(\alpha_i),\cdot)\big)_{i=1}^{N} =\big(i(\alpha_i,\overline{f_0}(\cdot))\big)_{i=1}^{N} =\Phi(\overline{f_0}(\cdot)). \end{equation*} Since $\Phi$ is an embedding, $\lim_{n\rightarrow\infty}\overline{f_n}=\overline{f_0}$. Thus $\varphi$ is continuous. \end{proof} We call $\overline{f}$ defined in Proposition \ref{prop3} the conjugate of $f$. For any $[f]\in Cl(E)$, define the conjugate of $[f]$ by $[\overline{f}]$. In particular, for any $f\in Mod(S)$, the conjugate of $f$ is exactly the inverse of $f$ in $Mod(S)$. Thus the inverse operation on $Mod(S)$ extends continuously to $Cl(E)$.
There is a natural relation between the operations of multiplication and conjugate on $Cl(\widetilde{E})$. \begin{prop} \label{prop4} For any $f,g\in Cl(\widetilde{E})$, $\overline{f\circ g}=\overline{g}\circ\overline{f}$. \end{prop} \begin{proof} From the definition of the conjugate operation, we know that for any $F,G\in\mathcal{MF}$, \begin{equation*} i(f\circ g(F),G)=i(g(F),\overline{f}(G))=i(F,\overline{g}\circ\overline{f}(G)), \end{equation*} which implies that $\overline{f\circ g}=\overline{g}\circ\overline{f}$. \end{proof} \begin{rema} \label{rema2} By proposition \ref{prop2}, \ref{prop3}, \ref{prop4}, the natural group structure of $Mod(S)$ extends continuously to $Cl(E)$. But $Cl(E)$ is not a group, since there are some degenerated cases that the multiplication is not defined and the conjugate operation on $Cl(E)$ is not indeed an inverse operation for a group. \end{rema}
Now we prove a lemma: \begin{lemm}\label{lemm7} Suppose $\lim_{n\rightarrow\infty}[f_n]=[f_0]$ for some $\{f_n\}_{n=1}^{\infty}$ in $Mod(S)$ and $[f_0]\in \partial E$. Then there exists a sequence of positive numbers $\{t_n\}_{n=1}^{\infty}$ such that $\lim_{n\rightarrow\infty}t_n=0$ and $\lim_{n\rightarrow\infty}t_nf_n=f_0$. \end{lemm} \begin{proof} Since $\lim_{n\rightarrow\infty}[f_n]=[f_0]$, there exists a sequence of positive numbers $\{t_n\}_{n=1}^{\infty}$ such that $\lim_{n\rightarrow\infty}t_nf_n=f_0$. Now we need to prove that $\lim_{n\rightarrow\infty}t_n=0$.
Choosing a point $x_0$ in $\mathcal{T}(S)$, we have \begin{equation*} \lim_{n\rightarrow\infty}t_nl(f^{-1}_n(x_0),\cdot)=\lim_{n\rightarrow\infty}l(x_0,t_nf_n(\cdot))=l(x_0,f_0(\cdot)). \end{equation*} From the proper discontinuity of the action of $Mod(S)$ on $\mathcal{T}(S)$, we know that $f^{-1}_n(x_0)\rightarrow\infty$ in $\mathcal{T}(S)$. By the definition of the Thurston compactification, we know that $\lim_{n\rightarrow\infty}t_n l(f^{-1}_n(x_0),\cdot)=l(x_0,f_0(\cdot))=i(F,\cdot)$ for some $F$ in $\mathcal{MF}$. Then we have $\lim_{n\rightarrow\infty}t_n=0$. \end{proof}
Now we give a description of the points in $\partial E$. \begin{theo}\label{main2} For any $[f]$ in $\partial E$, we have \begin{equation*} f(\cdot)=\sum_{i=1}^{m}i(E_i,\cdot)F_i, \end{equation*} where $E_i,F_i$ are some measured foliations with $i(E_i,E_j)=0$ and $i(F_i,F_j)=0$ for $i,j=1,2,...,m$. \end{theo} \begin{proof} Since $[f]\in \partial E$, there exists a sequence $\{f_n\}_{n=1}^{\infty}$ in $Mod(S)$ such that $\lim_{n\rightarrow\infty}[f_n]=[f]$. By Lemma \ref{lemm7}, there exists a sequence of positive numbers $\{t_n\}_{n=1}^{\infty}$ such that $\lim_{n\rightarrow\infty}t_n=0$ and $\lim_{n\rightarrow\infty}t_nf_n=f$ in $\Omega(\mathcal{MF})$.
As $f$ is a map from $\mathcal{MF}$ to $\mathcal{MF}$, let $Imf=\{f(F):F\in\mathcal{MF}\}\subseteq\mathcal{MF}$ be the image of $f$. We claim that for any $F,G\in Imf$, $i(F,G)=0$. For any $F,G$ in $Imf$, there are $F_1,G_1\in\mathcal{MF}$ such that $f(F_1)=F$ and $f(G_1)=G$. Since $\lim_{n\rightarrow\infty}t_nf_n=f$ and $\lim_{n\rightarrow\infty}t_n=0$, we have \begin{equation*} i(F,G)=i(f(F_1),f(G_1))=\lim_{n\rightarrow\infty}i(t_nf_n(F_1),t_nf_n(G_1))=\lim_{n\rightarrow\infty}t_n^{2}i(F_1,G_1)=0. \end{equation*}
From this claim, there are $m$ pairwise disjoint indecomposable measured foliations $\{F_i\}_{i=1}^{m}$ and some nonnegative function $f_i(\cdot)$ on $\mathcal{MF}$ such that \begin{equation*} f(\cdot)=\sum_{i=1}^{m}f_i(\cdot)F_i. \end{equation*}
For $1\leq i\leq m$, from Lemma \ref{lemm0}, there exists a sequence of simple closed curves $\{\gamma_n\}_{n=1}^{\infty}$ such that \begin{equation*} i(\gamma_n,F_i)>0,\,\frac{i(\gamma_n,F_j)}{i(\gamma_n,F_i)}<\frac{1}{n}\,(j\neq i). \end{equation*} Then \begin{equation*} \frac{i(\gamma_n,f(\cdot))}{i(\gamma_n,F_i)}=f_i(\cdot)+\sum_{j\neq i}^{m}\frac{i(\gamma_n,F_j)}{i(\gamma_n,F_i)}f_j(\cdot) \leq f_i(\cdot)+\frac{1}{n}\sum_{j\neq i}^{m}f_j(\cdot). \end{equation*} Set $G_n=\frac{1}{i(\gamma_n,F_i)}\gamma_n$. Then \begin{equation*} \lim_{n\rightarrow\infty}i(\overline{f}(G_n),\cdot) =\lim_{n\rightarrow\infty}\frac{i(\gamma_n,f(\cdot))}{i(\gamma_n,F_i)}=f_i(\cdot). \end{equation*} In particular, for the filling curves $\{\alpha_j\}_{j=1}^{N}$ defined in Section \ref{sec2}, \begin{equation*} \lim_{n\rightarrow\infty}i(\overline{f}(G_n),\alpha_j)=f_i(\alpha_j)\,\,(j=1,2,...,N). \end{equation*} Thus there exists a constant $M>0$ such that \begin{equation*} l(\overline{f}(G_n))=\sum_{j=1}^{N}i(\overline{f}(G_n),\alpha_j)\leq M. \end{equation*} From Lemma \ref{lemm1}, there exists a subsequence $\{G_{n_k}\}_{k=1}^{\infty}$ such that \begin{equation*} \lim_{k\rightarrow\infty}\overline{f}(G_{n_k})=E_i \end{equation*} for some $E_i$ in $\mathcal{MF}$. Then we have \begin{equation*} f_i(\cdot)=\lim_{k\rightarrow\infty}i(\overline{f}(G_{n_k}),\cdot)=i(E_i,\cdot). \end{equation*}
For $1\leq i\leq m$, we construct a measured foliation $E_i$ as above. Since for any $F,G\in Im\overline{f}$, $i(F,G)=0$, we have $i(E_i,E_j)=0$ for $i\neq j$. Thus we have \begin{equation*} f=\sum_{i=1}^{m}i(E_i,\cdot)F_i, \end{equation*} and $E_i,F_i$ are measured foliations with $i(E_i,E_j)=0$ and $i(F_i,F_j)=0$ for $i,j=1,2,...,m$, which completes the proof. \end{proof} \begin{rema} \label{rema3} If $f(\cdot)=\sum_{i=1}^{m}i(E_i,\cdot)F_i$ as in Theorem \ref{main2}, then $\overline{f}(\cdot)=\sum_{i=1}^{m}i(F_i,\cdot)E_i$. \end{rema} \begin{prob} Does the converse of Theorem \ref{main2} hold: for any $\{E_i\}_{i=1}^{m},\{F_i\}_{i=1}^{m}$ in $\mathcal{MF}$ with $i(E_i,E_j)=0$ and $i(F_i,F_j)=0$ ($i,j=1,2,...,m$), $[\sum_{i=1}^{m}i(E_i,\cdot)F_i]\in \partial E$? \end{prob}
Now we construct some special points in $\partial E$. Firstly, we consider the limit of the sequence $\{f^n\}_{n=1}^{\infty}$ for some $f$ in $Mod(S)$. We need two results (see \cite{Ivanov}). \begin{prop} \label{theo1} Let $f=T_{\alpha_1}^{n_1}\circ T_{\alpha_2}^{n_2}\circ \cdot\cdot\cdot\circ T_{\alpha_k}^{n_k}$, where $\alpha_1,...,\alpha_k$ are pairwise disjoint simple closed curves, $T_{\alpha_i}$ is the Dehn Twist of $\alpha_i$ and $n_i\in Z$ $(i=1,2,...,k)$. Then for any $F\in\mathcal{MF}$, we have \begin{equation*}
\lim_{n\rightarrow \pm\infty} \frac{f^{n}(F)}{|n|}=\sum_{i=1}^{k}|n_i|i(\alpha_i,F)\alpha_i. \end{equation*} \end{prop}
\begin{prop}\label{theo2} Let $f\in Mod(S)$ be a pseudo-Anosov element such that $f(F^s)=\lambda^{-1}F^{s},\,f(F^u)=\lambda F^{u}$ with $\lambda>1,\,F^{s},F^{u}\in \mathcal{MF}$ and $i(F^s,F^u)=1$. Then for any $F\in\mathcal{MF}$, we have \begin{equation*} \lim_{n\rightarrow \infty} \frac{f^{n}(F)}{\lambda^{n}}=i(F^{s},F)F^{u},\,\, \lim_{n\rightarrow \infty} \frac{f^{-n}(F)}{\lambda^{n}}=i(F^{u},F)F^{s}. \end{equation*} \end{prop}
From Proposition \ref{theo1} and Proposition \ref{theo2}, we have \begin{prop} \label{prop5} (1) With the assumption of Proposition \ref{theo1}, we have \begin{equation*}
\lim_{n\rightarrow \pm\infty}[f^{n}(\cdot)]=[\sum_{i=1}^{k}|n_i|i(\alpha_i,\cdot)\alpha_i]\in \partial E. \end{equation*} (2) With the assumption of Proposition \ref{theo2}, we have \begin{equation*} \lim_{n\rightarrow \infty}[f^{n}(\cdot)]=[i(F^{s},\cdot)F^{u}]\in\partial E \,\,\, \lim_{n\rightarrow \infty}[f^{-n}(\cdot)]=[i(F^{u},\cdot)F^{s}]\in\partial E. \end{equation*} \end{prop}
It is well known that the action of $Mod(S)$ on $\mathcal{PMF}$ is minimal, that is, the orbit of any element of $\mathcal{PMF}$ under the action of $Mod(S)$ is dense in $\mathcal{PMF}$ (see \cite{FLP}). We extend this result a little: \begin{lemm} \label{lemm5} Let $Mod^{'}(S)\subseteq Mod(S)$ be the set of all mapping classes preserving the punctures of $S$ pointwise. Then the action of $Mod^{'}(S)$ on $\mathcal{PMF}$ is minimal, that is, the orbit of any element of $\mathcal{PMF}$ under the action of $Mod^{'}(S)$ is dense in $\mathcal{PMF}$. \end{lemm} \begin{proof} Since $\mathcal{S}$ is dense in $\mathcal{PMF}$, we only need to prove that for any $\alpha,\beta\in\mathcal{S}$, $\beta\in Cl(Mod^{'}(S)(\alpha))$, where $Cl(Mod^{'}(S)(\alpha))\subseteq\mathcal{PMF}$ is the closure of the orbit of $\alpha$ under the action of $Mod^{'}(S)$.
Take $\gamma\in\mathcal{S}$ such that $i(\alpha,\gamma)\neq 0$ and $i(\beta,\gamma)\neq 0$. Let $T_{\gamma}$ and $T_{\beta}$ be the Dehn Twist of $\gamma$ and $\beta$, respectively. Note that $T_{\gamma}$ and $T_{\beta}$ preserve each puncture of $S$. Thus $T_{\gamma},T_{\beta}\in Mod^{'}(S)$. By Proposition \ref{theo1}, $\lim_{n\rightarrow \infty} \frac{T_{\gamma}^{n}(\alpha)}{n}=i(\alpha,\gamma)\gamma$, which implies that $\gamma\in Cl(Mod^{'}(S)(\alpha))$. Using Proposition \ref{theo1} again, we have $\lim_{n\rightarrow \infty} \frac{T_{\beta}^{n}(\gamma)}{n}=i(\gamma,\beta)\beta$, which implies that $\beta\in Cl(Mod^{'}(S)(\gamma))$. Thus we have $\beta\in Cl(Mod^{'}(S)(\alpha))$. \end{proof}
From Lemma \ref{lemm5}, we have \begin{prop} \label{prop6} For any $F,G\in \mathcal{MF}$, $[i(F,\cdot)G]\in \partial E$. \end{prop} \begin{proof} From Lemma \ref{lemm5}, for a simple closed curve $\alpha$ in $S$, there are two sequences $\{f_n\}_{n=1}^{\infty}, \{g_n\}_{n=1}^{\infty}$ in $Mod^{'}(S)$ such that $\lim_{n\rightarrow\infty}[f_n(\alpha)]=[F]$ and $\lim_{n\rightarrow\infty}[g_n(\alpha)]=[G]$ in $\mathcal{PMF}$. Note that $[f_\alpha(\cdot)]=[i(\alpha,\cdot)\alpha]\in \partial E$ by Proposition \ref{prop5}(1). Then $\lim_{n\rightarrow\infty}[g_{n}\circ f_\alpha\circ f^{-1}_{n}(\cdot)]=\lim_{n\rightarrow\infty}[i(f_n(\alpha),\cdot)g_n(\alpha)]=[i(F,\cdot)G]\in \partial E$. \end{proof}
We extend the result of Proposition \ref{prop6} by operation on subsurfaces. Let $\gamma_1,...,\gamma_p$ be disjoint essential simple closed curves in $S$. After cutting along these curves, we have some connected subsurfaces $S_1,...,S_k$. Then we have \begin{prop} \label{prop7} For any $a_i\geq0$ $(i=1,2,...,p)$, $F_j, G_j$ in $\mathcal{MF}(S_j)$ $(j=1,2,...,k)$, \begin{equation*} [\sum_{i=1}^{p}a_ii(\gamma_i,\cdot)\gamma_i+\sum_{j=1}^{k}i(F_j,\cdot)G_j]\in \partial E. \end{equation*} \end{prop} \begin{proof} For $j=1,2,...,k$, take a simple closed curve $\beta_{j}$ in $S_j$. Using Lemma \ref{lemm5} in each subsurface $S_j$ (seen as a punctured surface), we find two sequences $\{f_n\}_{n=1}^{\infty}, \{g_n\}_{n=1}^{\infty}$ in $Mod(S)$ such that $\lim_{n\rightarrow\infty}[f_n(\beta_j)]=[F_j]$, $\lim_{n\rightarrow\infty}[g_n(\beta_j)]=[G_j]$ for $j=1,2,...,k$ and $f_n(\gamma_i)=\gamma_i,g_n(\gamma_i)=\gamma_i$ for $i=1,2,...,p$. Thus for $j=1,2,...,k$, there are two sequences of positive numbers $\{t^{j}_{n}\}_{n=1}^{\infty}$, $\{s^{j}_n\}_{n=1}^{\infty}$ such that \begin{equation*} \lim_{n\rightarrow\infty}t^{j}_nf_n(\beta_j)=F_j,\,\,\,\lim_{n\rightarrow\infty}s^{j}_ng_n(\beta_j)=G_j\,(j=1,2,...,k). \end{equation*} From Proposition \ref{prop5}(1) and the denseness of the set of rational numbers in $R$, we have \begin{equation*} [h_n(\cdot)]=[\sum_{i=1}^{p}a_ii(\gamma_i,\cdot)\gamma_i+ \sum_{j=1}^{k}t^{j}_n s^{j}_n i(\beta_{j},\cdot)\beta_{j}]\in \partial E\,(n=1,2,...). \end{equation*} Thus \begin{equation*} [g_n\circ h_n\circ f^{-1}_n(\cdot)]=[\sum_{i=1}^{p}a_ii(\gamma_i,\cdot)\gamma_i+ \sum_{j=1}^{k} i(t^{j}_nf_n(\beta_{j}),\cdot)s^{j}_ng_n(\beta_{j})]\in \partial E\,(n=1,2,...). 
\end{equation*} Note that \begin{equation*} \lim_{n\rightarrow\infty}[g_n\circ h_n\circ f^{-1}_n(\cdot)]=[\sum_{i=1}^{p}a_ii(\gamma_i,\cdot)\gamma_i+\sum_{j=1}^{k}i(F_j,\cdot)G_j], \end{equation*} which implies that $[\sum_{i=1}^{p}a_ii(\gamma_i,\cdot)\gamma_i+\sum_{j=1}^{k}i(F_j,\cdot)G_j]\in \partial E$. \end{proof}
\section{Some applications} \label{sec4} Since $Mod(S)$ acts continuously on the Thurston compactification $\mathcal{T}^{Th}(S)=\mathcal{T}(S)\bigcup \mathcal{PMF}$ and the Gardiner-Masur compactification $\mathcal{T}^{GM}(S)=\mathcal{T}(S)\bigcup GM$ of $\mathcal{T}(S)$, we have two maps $$\Pi_{Th}:Mod(S)\times \mathcal{T}^{Th}(S)\rightarrow\mathcal{T}^{Th}(S),\,(f,p)\mapsto f(p)$$ and $$\Pi_{GM}:Mod(S)\times \mathcal{T}^{GM}(S)\rightarrow\mathcal{T}^{GM}(S),\,(f,p)\mapsto f(p).$$ If we endow $Mod(S)$ with the discrete topology, then $\Pi_{Th}$ and $\Pi_{GM}$ are both continuous. Since $Cl(E)=E\bigcup \partial E$ is a completion of $Mod(S)$ in some sense, it may be natural to extend the domains of $\Pi_{Th}$ and $\Pi_{GM}$ to $Cl(E)\times \mathcal{T}^{Th}(S)$ and $Cl(E)\times \mathcal{T}^{GM}(S)$, respectively.
For this, we need equivalent models of $\mathcal{T}^{Th}(S)$ and $\mathcal{T}^{GM}(S)$. From the definitions of $\mathcal{T}^{Th}(S)$ and $\mathcal{T}^{GM}(S)$, a point in $\mathcal{T}^{Th}(S)$ and $\mathcal{T}^{GM}(S)$ are represented by $[p_1:\mathcal{S}\rightarrow R_{\geq 0}]$ and $[p_2:\mathcal{S}\rightarrow R_{\geq 0}]$, respectively, where $[p_i]\in PR_{\geq0}^{\mathcal{S}}$ is the projective class of $p_i\in R_{\geq0}^{\mathcal{S}}$. Since $R_{+}\times\mathcal{S}$ is dense in $\mathcal{MF}$, $p_1$ and $p_2$ extend to homogeneous continuous functions on $\mathcal{MF}$ (see \cite{Bonahon} and \cite{Miya-1}). Thus a point in $\mathcal{T}^{Th}(S)$ or $\mathcal{T}^{GM}(S)$ can be represented by the projective class of a homogeneous continuous function on $\mathcal{MF}$. Using these notations, the actions of $Mod(S)$ on $\mathcal{T}^{Th}(S)$ and $\mathcal{T}^{GM}(S)$ are defined as follows: for any $f\in Mod(S)$, $p_1=[p_1:\mathcal{MF}\rightarrow R_{\geq0}]\in\mathcal{T}^{Th}(S)$ and $p_2=[p_2:\mathcal{MF}\rightarrow R_{\geq0}]\in\mathcal{T}^{GM}(S)$, $f(p_1)=[p_1\circ f^{-1}]$ and $f(p_2)=[p_2\circ f^{-1}]$. Since the inverse operation $(\cdot)^{-1}$ on $Mod(S)$ extends to the conjugate operation $\overline{(\cdot)}$ on $Cl(E)$, we define the extensions of $\Pi_{Th}$ and $\Pi_{GM}$ as \begin{theo}\label{main5} Let $\Delta_1=\{([f],[p])\in Cl(E)\times \mathcal{T}^{Th}(S):p\circ\overline{f}(\cdot)\neq 0\}$ and $\Delta_2=\{([f],[p])\in Cl(E)\times \mathcal{T}^{GM}(S):p\circ\overline{f}(\cdot)\neq 0\}$. The two maps $\Psi_{Th}:\Delta_1\rightarrow \mathcal{T}^{Th}(S)$ and $\Psi_{GM}:\Delta_2\rightarrow \mathcal{T}^{GM}(S)$ defined by $\Psi_{Th}([f],[p])=[p\circ\overline{f}(\cdot)]$ and $\Psi_{GM}([f],[p])=[p\circ\overline{f}(\cdot)]$, respectively, are continuous. \end{theo} \begin{proof} We only prove the continuity of $\Psi_{GM}$. The continuity of $\Psi_{Th}$ can be proved by a similar argument.
Suppose $\{[p_n]\}_{n=0}^{\infty}\subseteq \mathcal{T}^{GM}(S),\{[f_n]\}_{n=0}^{\infty}\subseteq Cl(E)$ and $\lim_{n\rightarrow\infty}[p_n]=[p_0]$, $\lim_{n\rightarrow\infty}[f_n]=[f_0]$. Up to some constants, we assume that $\lim_{n\rightarrow\infty}f_n=f_0$ in $\Omega(\mathcal{MF})$ and $p_n:\mathcal{MF}\rightarrow R_{\geq0}$ converges uniformly to $p_0:\mathcal{MF}\rightarrow R_{\geq0}$ on any compact subsets of $\mathcal{MF}$. By Proposition \ref{prop3}, $\lim_{n\rightarrow\infty}\overline{f_n}=\overline{f_0}$.
Observe that for any $F$ in $\mathcal{MF}$, \begin{equation*}
|p_0\circ\overline{f_0}(F)-p_n\circ\overline{f_n}(F)|\leq
|p_0\circ\overline{f_0}(F)-p_0\circ\overline{f_n}(F)|+|p_0\circ\overline{f_n}(F)-p_n\circ\overline{f_n}(F)|. \end{equation*}
Since $\lim_{n\rightarrow\infty}\overline{f_n}(F)=\overline{f_0}(F)$, we know that for any $\epsilon>0$, there exists $N_1>0$ such that for any $n>N_1$, \begin{equation*}
|p_0\circ\overline{f_0}(F)-p_0\circ\overline{f_n}(F)| <\frac{\epsilon}{2} \end{equation*} and $\{\overline{f_n}(F)\}_{n=1}^{\infty}\subseteq M$ for some compact subset $M$ of $\mathcal{MF}$.
Since $p_n(\cdot)$ converges uniformly to $p_0(\cdot)$ on compact set $M$, there exists $N_2>0$ such that for any $n>N_2$, \begin{equation*}
|p_0\circ\overline{f_n}(F)-p_n\circ\overline{f_n}(F)| <\frac{\epsilon}{2}. \end{equation*}
Thus for any $n>\max\{N_1,N_2\}$,
\begin{equation*} |p_0\circ\overline{f_0}(F)-p_n\circ\overline{f_n}(F)|<\epsilon, \end{equation*} which implies that for any $F\in\mathcal{MF}$, \begin{equation*} \lim_{n\rightarrow\infty}p_n\circ\overline{f_n}(F)= p_0\circ\overline{f_0}(F). \end{equation*} By the definition of $\mathcal{T}^{GM}(S)$, \begin{equation*} \Psi_{GM}([f_0],[p_0])=[p_0\circ\overline{f_0}(\cdot)]= \lim_{n\rightarrow\infty}[p_n\circ\overline{f_n}(\cdot)]= \lim_{n\rightarrow\infty}\Psi_{GM}([f_n],[p_n]), \end{equation*} which completes the proof. \end{proof}
\begin{rema}\label{rema5} (1) For $[f]\in Cl(E)$, $[p_1]\in \mathcal{T}^{Th}(S)$ and $[p_2]\in \mathcal{T}^{GM}(S)$, it may occur that $p_1\circ \overline{f}(\cdot)=0$ and $p_2\circ \overline{f}(\cdot)=0$, that is, the values of $p_1$, $p_2$ on the image of $\overline{f}$ are $0$. In these cases, $\Psi_{Th}$ and $\Psi_{GM}$ are degenerated at $([f],[p_1])$ and $([f],[p_2])$, respectively. Thus we restrict the definitions of $\Psi_{Th}$ and $\Psi_{GM}$ on $\Delta_1$ and $\Delta_2$, respectively. \\(2) For $f_0\in Mod(S)$, $\Psi_{Th}([f_0],\cdot)$ and $\Psi_{GM}([f_0],\cdot)$ are defined on the whole $\mathcal{T}^{Th}(S)$ and $\mathcal{T}^{GM}(S)$, respectively. And $\Psi_{Th}([f_0],\cdot)$ and $\Psi_{GM}([f_0],\cdot)$ are consistent with the actions of $f_0$ on $\mathcal{T}^{Th}(S)$ and $\mathcal{T}^{GM}(S)$, respectively. \\(3) For $x_0\in\mathcal{T}(S)$, $\Psi_{Th}(\cdot,x_0)$ and $\Psi_{GM}(\cdot,x_0)$ are both defined on the whole $Cl(E)$. \end{rema}
By Theorem \ref{main5}, we have \begin{coro}\label{coro4} For any $x\in\mathcal{T}(S)$ and sequence $\{f_n\}_{n=1}^{\infty}\subseteq Mod(S)$, suppose that $\lim_{n\rightarrow \infty}[f_n]=[f_0]$ in $Cl(E)$ for some $[f_0]\in\partial E$, then $\lim_{n\rightarrow \infty}f_n(x)=[l(x,\overline{f_0}(\cdot))]\in\mathcal{PMF}$ in $\mathcal{T}^{Th}(S)$ and $\lim_{n\rightarrow \infty}f_{n}(x)=[Ext^{\frac{1}{2}}(x,\overline{f_0}(\cdot))]\in GM$ in $\mathcal{T}^{GM}(S)$. \end{coro} By Theorem \ref{main5} and Corollary \ref{coro4}, we answer Problem \ref{problem1}: for any $x_0$ in $\mathcal{T}(S)$, considering the orbit $\Gamma(x_0)$ of $x_0$ under the action of $Mod(S)$, how to describe the closure of $\Gamma (x_0)$ in $\mathcal{T}^{Th}(S)$ or $\mathcal{T}^{GM}(S)$? For this, we set \begin{equation*} \partial E^{Th}(x_0)=\{\Psi_{Th}([f],x_0):[f]\in \partial E\}\subseteq \mathcal{PMF};\,\,\partial E^{GM}(x_0)=\{\Psi_{GM}([f],x_0):[f]\in \partial E\}\subseteq GM. \end{equation*} Then we have \begin{theo} \label{main3} In $\mathcal{T}^{Th}(S)$, the closure of $\Gamma(x_0)$ is $\Gamma(x_0)\cup \partial E^{Th}(x_0)$. In $\mathcal{T}^{GM}(S)$, the closure of $\Gamma(x_0)$ is $\Gamma(x_0)\cup \partial E^{GM}(x_0)$. Moreover, $\partial E^{Th}(x_0)=\mathcal{PMF}$. \end{theo} \begin{proof} By Corollary \ref{coro4}, $\partial E^{Th}(x_0)$ is included in the closure of $\Gamma(x_0)$ in $\mathcal{T}^{Th}(S)$. Conversely, suppose $p\in\mathcal{T}^{Th}(S)$ is an element of the closure of $\Gamma(x_0)$ in $\mathcal{T}^{Th}(S)$ and $p\notin \Gamma(x_0)$. Then there exists a sequence $\{f_n\}_{n=1}^{\infty}\subseteq Mod(S)$ such that $\lim_{n\rightarrow\infty}f_n(x_0)=p$ in $\mathcal{T}^{Th}(S)$. Since $Mod(S)$ acts properly discontinuously on $\mathcal{T}(S)$, we know that $p\in \mathcal{PMF}$. 
By Theorem \ref{main} and Proposition \ref{prop1}, there exists a subsequence $\{f_{n_k}\}_{k=1}^{\infty}$ such that $\lim_{k\rightarrow\infty}[f_{n_k}]=[f_0]$ in $Cl(E)$ for some $[f_0]\in\partial E$. By Corollary \ref{coro4}, we have $$p=\lim_{k\rightarrow\infty}f_{n_k}(x_0)=\Psi_{Th}([f_0],x_0)\in \partial E^{Th}(x_0).$$ Thus the closure of $\Gamma(x_0)$ in $\mathcal{T}^{Th}(S)$ is $\Gamma(x_0)\cup \partial E^{Th}(x_0)$.
Using a similar argument, we know that the closure of $\Gamma(x_0)$ in $\mathcal{T}^{GM}(S)$ is $\Gamma(x_0)\cup \partial E^{GM}(x_0)$.
Now we prove $\partial E^{Th}(x_0)=\mathcal{PMF}$. From Proposition \ref{prop5}(1), we know that for any simple closed curve $\alpha$ in $S$, $[i(\alpha,\cdot)\alpha]\in \partial E$. Thus $[i(\alpha,\cdot)]=[l(x_0,i(\alpha,\cdot)\alpha)]\in \partial E^{Th}(x_0)$. Since the set of simple closed curves is dense in $\mathcal{PMF}$, we have $\partial E^{Th}(x_0)=\mathcal{PMF}$. \end{proof} \begin{rema}\label{rema8} It is well-known that the action of $Mod(S)$ on $\mathcal{PMF}$ is minimal (see \cite{FLP}). This fact also implies $\partial E^{Th}(x_0)=\mathcal{PMF}$: since $Mod(S)$ acts properly discontinuously on $\mathcal{T}(S)$, $\partial E^{Th}(x_0)\cap \mathcal{PMF}\neq\emptyset$. By the minimal action of $Mod(S)$ on $\mathcal{PMF}$, this implies $\partial E^{Th}(x_0)=\mathcal{PMF}$. \end{rema} \begin{rema}\label{rema6} The new boundary $\partial E$ is related to a special boundary of $Mod(S)$. Precisely, fixed a base point $x\in\mathcal{T}(S)$, sending $f\in Mod(S)$ to $f(x)\in \Gamma(x)\subseteq\mathcal{T}(S)$, we can identify $Mod(S)$ with the orbit $\Gamma(x)$ naturally. By Theorem \ref{main3}, the boundary of $\Gamma(x)$ in $\mathcal{T}^{Th}(S)$ is $\partial E^{Th}(x)=\mathcal{PMF}$. Thus $\partial E^{Th}(x)=\mathcal{PMF}$ can be seen as a boundary of $Mod(S)$. As a boundary of $Mod(S)$, $\partial E^{Th}(x)$ is homeomorphic to $\mathcal{PMF}$ but depends upon the base point $x$ heavily. Thus we get a family of boundaries $\{\partial E^{Th}(x):x\in\mathcal{T}(S)\}$ of $Mod(S)$ in which each boundary is isomorphic to $\mathcal{PMF}$. We may call each boundary $\partial E^{Th}(x)$ the Thurston boundary with base point $x$. By Theorem \ref{main5}, the new boundary $\partial E$ covers each boundary $\partial E^{Th}(x)$ in this family by a surjective continuous map $\Psi_{x}:\partial E\rightarrow \partial E^{Th}(x),\,\, [f]\mapsto \Psi_{Th}([f],x)$. 
\end{rema} \begin{rema}\label{rema7} Different from the case of Thurston compactification, $\partial E^{GM}(x_0)$ may be not the whole boundary $GM$. From the compactness of $\partial E$, we only know that $\partial E^{GM}(x_0)$ is a compact subset of $GM$. And $\partial E^{GM}(x_0)$ contains some new points different from those known points in $GM$. A special kind of boundary point was constructed in \cite{Bourque}: \begin{equation*} [Ext^{\frac{1}{2}}\big(x_0,\sum_{i=1}^{k}n_ii(\alpha_i,\cdot)\alpha_i\big)], \end{equation*} where $\alpha_i$ are pairwise disjoint simple closed curves and $n_i>0$. By Proposition \ref{prop5}(1), $\partial E^{GM}(x_0)$ contains these points. \end{rema} Let $\mathcal{T}_{\epsilon}(S)=\{x\in\mathcal{T}(S):\underline{l}(x)>\epsilon\}$ be the $\epsilon-$Thick part of $\mathcal{T}(S)$, where $\underline{l}(x)=\min_{\alpha\in\mathcal{S}}l(x,\alpha)$. The following result characterizes the points of $\partial E^{GM}(x_0)$. \begin{theo}\label{main4} For any $p\in GM$, $p\in \partial E^{GM}(x_0)$ for some $x_0\in\mathcal{T}(S)$ if and only if there exists a sequence $\{p_n\}_{n=1}^{\infty}\subseteq \mathcal{T}_{\epsilon}(S)$ for some $\epsilon>0$ such that $\lim_{n\rightarrow\infty}p_n=p$. \end{theo} \begin{proof} Suppose that $p\in \partial E^{GM}(x_0)$ for some $x_0\in\mathcal{T}(S)$. Then $p=\lim_{n\rightarrow\infty}f_n(x_0)$ for some sequence $\{f_n\}_{n=1}^{\infty}\subseteq Mod(S)$. Note that $\underline{l}\big(f_n(x_0)\big)\equiv\underline{l}(x_0)$. Set $\epsilon=\frac{1}{2}\underline{l}(x_0)$. Then $f_n(x_0)\in \mathcal{T}_{\epsilon}(S)$.
Suppose that $\{p_n\}_{n=1}^{\infty}\subseteq \mathcal{T}_{\epsilon}(S)$ for some $\epsilon>0$ and $\lim_{n\rightarrow\infty}p_n=p$. Then from the Mumford's compactness criterion, we know that after projecting to the moduli space $\mathcal{M}(S)=\mathcal{T}(S)/Mod(S)$, the sequence $\{p_n\}_{n=1}^{\infty}$ lies in a precompact set $\mathcal{M}_{\epsilon}(S)=\mathcal{T}_{\epsilon}(S)/Mod(S)$, which is the $\epsilon$-thick part of $\mathcal{M}(S)$. Thus passing to a subsequence, we assume that there exists a sequence $\{f_n\}_{n=1}^{\infty}$ in $Mod(S)$ and $x_0$ in $\mathcal{T}(S)$ such that \begin{equation*} \lim_{n\rightarrow\infty}f_n(p_n)=x_0. \end{equation*} Set $x_n=f_n(p_n)$. Then $p_n=f_n^{-1}(x_n)$ and $\lim_{n\rightarrow\infty}x_n=x_0$. By the compactness of $Cl(E)$, passing to a subsequence again, we assume that $[f_n^{-1}]$ converges to some $[f]$ in $Cl(E)$. Thus by Theorem \ref{main5}, \begin{equation*} p=\lim_{n\rightarrow\infty}p_n=\lim_{n\rightarrow\infty}f_n^{-1}(x_n)=\lim_{n\rightarrow\infty}\Psi_{GM}([f^{-1}_n],x_n)=\Psi_{GM}([f],x_0) =[Ext^{\frac{1}{2}}(x_0,\overline{f}(\cdot))]. \end{equation*} Since $[Ext^{\frac{1}{2}}(x_0,\overline{f}(\cdot))]=p\in GM$, we have $[f]\in \partial E$. So $p\in \partial E^{GM}(x_0)$. \end{proof}
\addcontentsline{toc}{section}{\refname}
\end{document} | arXiv | {
"id": "2302.06850.tex",
"language_detection_score": 0.6109777688980103,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{A Remark on Lebesgue Criterion} \author{Yu-Lin Chou\thanks{Yu-Lin Chou, Institute of Statistics, National Tsing Hua University, Hsinchu 30013, Taiwan, R.O.C.; Email: \protect\url{y.l.chou@gapp.nthu.edu.tw}.}} \date{} \maketitle
\begin{abstract} We remark a variant of the existence part of the fundamental theorem of calculus, which, together with the Lebesgue differentiation theorem, constitutes a new proof that every Riemann-integrable function on a compact interval having limit everywhere on the interior is almost everywhere continuous with respect to Lebesgue measure. The proof is intended as a new connection between the Lebesgue differentiation theorem and the Lebesgue criterion of Riemann integrability.\\
{\noindent \textbf{Keywords:}} almost everywhere continuity; fundamental theorem of calculus; Lebesgue criterion; Lebesgue differentiation theorem; Riemann integrability
{\noindent \textbf{MSC 2020:}} 26A42; 28A15 \end{abstract}
\section{Introduction} A characterization for Riemann integrability is Lebesgue criterion, i.e.\ the assertion that a bounded function on a compact interval is Riemann-integrable if and only if the function is continuous almost everywhere modulo Lebesgue measure. As well-known and well-documented, the more involved part of Lebesgue criterion (for and only for a direct proof) would be the only-if part, which, even with measure theory employed here and there, usually takes some nontrivial preliminary observations. The only-if part of Lebesgue criterion is thus more ``interesting''.
We obtain a new proof of a weakened form of the only-if part\footnote{We recall that boundedness follows from Riemann integrability.} of Lebesgue criterion: \textit{If $[a, b] \subset \mathbb{R}$, if $f: [a,b] \to \mathbb{R}$ is Riemann-integrable, and if $f$ has limit everywhere on $]a,b[$, then $f$ is continuous almost everywhere with respect to Lebesgue measure.} The main observation is a natural variant of the existence part of the fundamental theorem of calculus — the assertion that every continuous function on a compact interval has a primitive, the indefinite integral of the function being a suitable choice. The variant seems to be easily unnoticed, but, as will be shown, takes only a simple proof. Moreover, our proof of the proposition connects Lebesgue criterion with Lebesgue differentiation theorem in a new direction.
\section{Proof} Let $f$ satisfy the assumptions of our proposition. Then the Riemann integrability of $f$ implies that $\int_{a}^{x}f$ exists for every $x \in [a,b]$. Since $f$ has limit everywhere on $]a,b[$, let $l_{x} \coloneqq \lim_{y \to x}f(y)$ for every $x \in ]a,b[$. For convenience, write $\partial_{x}$ for the usual differential operator.
We claim that $\partial_{x}\int_{a}^{x}f = l_{x}$ for every $x \in ]a,b[$. If $x \in ]a,b[$, and if $h > 0$, then
\begin{align*}
\bigg| h^{-1}\bigg( \int_{a}^{x+h}f - \int_{a}^{x}f \bigg) - l_{x} \bigg|
&= \bigg| h^{-1}\int_{x}^{x+h}f - l_{x} \bigg|\\
&= \bigg| h^{-1}hl_{x} - l_{x} + h^{-1}\int_{x}^{x+h}(f(t) - l_{x}) \mathop{}\!\mathrm{d} t \bigg|\\
&= \bigg| h^{-1}\int_{x}^{x+h} ( f(t) - l_{x}) \mathop{}\!\mathrm{d} t \bigg|\\
&\leq h^{-1}\int_{x}^{x+h}|f(t) - l_{x}| \mathop{}\!\mathrm{d} t.
\end{align*}
Given any $\varepsilon > 0$, there is some $\delta > 0$ such that $0 < h < \delta$
implies that $|f(t) - l_{x}| < \varepsilon$ for all $t \in ]x, x+h]$; so the last term above is $< \varepsilon$ for all $0 < h < \delta$. The case where $h < 0$ follows for the same reason after repeating the analysis for the expression $|h|^{-1}\int_{x+h}^{x}f$. This proves the claim.
On the other hand, we have $\lim_{h \to 0+}(2h)^{-1}\int_{x - h}^{x+h}f = \partial_{x}\int_{a}^{x}f$ for every $x \in ]a,b[$. Since $f$ is Riemann-integrable, the extension of $f$ to $\mathbb{R}$ by assigning $0$ to every point of $\mathbb{R} \setminus [a,b]$ is Lebesgue-integrable; the Lebesgue differentiation theorem (e.g. Theorem 7.2, Wheeden and Zygmund \cite{wz}) asserts in particular that $(2h)^{-1}\int_{x-h}^{x+h}f \to f(x)$ as $h \to 0+$ for almost every $x \in ]a,b[$ (modulo Lebesgue measure). But then $l_{x} = \lim_{y \to x}f(y) = f(x)$ for Lebesgue-almost all $x \in ]a,b[$, and the desired almost everywhere continuity follows. \qed
\begin{rem} It is not \textit{a priori} obvious whether a Riemann-integrable function on a compact interval should even have one-sided limit everywhere. Call a function on $[a,b]$ a step function if and only if there is some partition $\{ x_{0}, \dots, x_{n} \}$ of $[a,b]$, including the endpoints $a,b$, such that the function is constant on each $]x_{i}, x_{i+1}[$. Since it can be shown (e.g. Theorem 7.6.1, Dieudonn{\'e} \cite{d}) that a function on a compact interval has one-sided limit everywhere if and only if the function is the uniform limit of some sequence of step functions, and since not every Riemann-integrable function on a compact interval is the uniform limit of some sequence of step functions, being Riemann-integrable alone does not even ensure having one-sided limit everywhere. \qed \end{rem}
\end{document} | arXiv | {
"id": "2006.03867.tex",
"language_detection_score": 0.770631730556488,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{abstract} We show how to use the combinatorial notion of jump sets to parametrize the possible structures of the group of principal units of local fields, viewed as filtered modules. We establish a natural bijection between the set of jump sets and the orbit space of a $p$-adic group of filtered automorphisms acting on a free filtered module. This, together with a Markov process on Eisenstein polynomials, culminates into a \emph{mass-formula} for unit filtrations. As a bonus the proof leads in many cases to explicit invariants of Eisenstein polynomials, yielding a link between the filtered structure of the unit group and ramification theory. Finally, with the basic theory of filtered modules developed here, we recover, with a more conceptual proof, a classification, due to Miki, of the possible sets of upper jumps of a wild character: these are all jump sets, with a set of exceptions explicitly prescribed by the jump set of the local field and the size of its residue field. \end{abstract}
\maketitle
\setcounter{tocdepth}{1} \setcounter{page}{1} \tableofcontents
\section{Introduction} In this paper we introduce \emph{jump sets}, elementary combinatorial objects, and use them to establish several fundamental results concerning two natural filtrations in the theory of local fields. These are the unit filtration and the ramification filtration. We subdivide our main results into three themes and introduce each of the themes with a basic question. We use the answer to each question as a starting point to explain our main results.
\subsection{Three questions} \label{Three questions}
\subsubsection{Principal units} \label{principal units}
Let $p$ be a prime number. A non-archimedean local field is a field $K$, equipped with a non-archimedean absolute value $|\cdot|$, such that $K$ is a non-discrete locally compact space with respect to the topology induced by $|\cdot|$. Write $O:=\{x \in K:|x| \leqslant 1 \}$ for the ring of integers and $m:=\{x \in K:|x|<1 \}$ for its unique maximal ideal. We assume that $p$ is the residue characteristic of $K$, i.e. the characteristic of the finite field $O/m$. Denote by $f_K$ the positive integer satisfying $p^{f_K}=\#O/m$. Recall that $O$ is a discrete valuation ring, and denote by $\text{v}_K:K^{*} \to \mathbb{Z}$ the valuation that maps any generator of the ideal $m$ to $1$.
The inclusions $K^{*} \supseteq O^{*} \supseteq U_1(K)=1+m=\{\text{principal units}\}$ \ split in the category of topological groups. So, as topological groups, we have $K^{*} \simeq_{\text{top.gr.}} \mathbb{Z} \times O^{*}$, $O^{*}=(O/m)^{*} \times U_1(K)$, where $\mathbb{Z}$ is taken with the discrete topology. This paper focuses on $U_1(K)$. The profinite group $U_1(K)$ is a pro-$p$ group, thus, being abelian, it has a natural structure of $\mathbb{Z}_p$-module. As a topological $\mathbb{Z}_p$-module $U_1(K)$ is very well understood. If $\text{char}(K)=0$ then $U_1(K) \simeq \mathbb{Z}_p^{[K:\mathbb{Q}_p]} \times \mu_{p^{\infty}}(K)$, while if $\text{char}(K)=p$ then $U_1(K) \simeq \mathbb{Z}_p^{\omega}$. Here $\omega$ denotes the first infinite ordinal number and $\mu_{p^{\infty}}(K)$ denotes the $p$-part of the group of roots of unity of $K$. In both cases the isomorphism is meant in the category of topological $\mathbb{Z}_p$-modules. For a reference see \cite[Chapter 1, Section 6]{Fesenko--Vostokov}
The $\mathbb{Z}_p$-module $U_1(K)$ comes naturally with some additional structure, namely the filtration $U_1(K) \supseteq U_2(K) \supseteq \ldots \supseteq U_i(K) \supseteq \ldots $, where $ U_i(K)=1+m^i$. In order to take into account this additional structure we make the following definition. A \emph{filtered $\mathbb{Z}_p$-module} is a sequence of $\mathbb{Z}_p$-modules, $M_1 \supseteq M_2 \supseteq \ldots \supseteq M_i\supseteq \ldots $, with $\bigcap_{i \in \mathbb{Z}_{\geqslant 1}}M_i=\{0\}$. We will use the symbol $M_{\bullet}$ to denote a filtered $\mathbb{Z}_p$-module. A morphism of filtered $\mathbb{Z}_p$-modules is a morphism of $\mathbb{Z}_p$-modules $\varphi:M_1 \to N_1$ such that $\varphi(M_i) \subseteq N_i$ for each positive integer $i$. A filtered module can be also described in terms of its \emph{weight} map $w:M_1 \to \mathbb{Z}_{\geqslant 1} \cup \{\infty\}$ attaching to each $x$ the sup of the set of integers $i$ such that $x \in M_i$. \\
$\mathbf{Question \ (1)}$ What does $U_1(K)$ look like as a filtered $\mathbb{Z}_p$-module?\\ \\ In other words, we ask what is, as a function of $K$, the isomorphism class of $U_1(K)$ in the category of filtered $\mathbb{Z}_p$-modules. We will sometimes use the symbol $U_{\bullet}(K)$ to stress the presence of the additional structure present in $U_1(K)$, coming from the filtration. Denote by $G_K$ the absolute Galois group of $K$. Thanks to local class field theory, the above question is essentially asking to describe $G_K^{\text{ab}}$ as a filtered group, where the filtration is given by the upper numbering on $G_K^{\text{ab}}$. Equipping any quotient of $G_K$ with the upper numbering filtration and studying it in the category of filtered groups is a natural thing to do. Indeed it is a fact that the local field $K$ can be uniquely determined from the filtered group $G_K$, see \cite{Mochizuki}. \subsubsection{Galois sets} \label{Question 2}
Fix $K^{\text{sep}}$ a separable closure of $K$. Denote by $G_K:=\text{Gal}(K^{\text{sep}}/K)$ the absolute Galois group. Denote by $|\cdot|$ the unique extension of $|\cdot|$ to $K^{\text{sep}}$. Take $L/K$ finite separable. Thus $L$ naturally comes with a \emph{Galois set}: $\Gamma_L=\{K\text{-embeddings} \ L \to K^{\text{sep}}\}$.
Recall by Galois theory that this is a transitive $G_K$-set with $|\Gamma_L|=[L:K]$. This holds for any field $K$. But, if $K$ is a local field, there is an additional piece of structure, namely a $G_K$-invariant metric on $\Gamma_L$, defined as follows:
$d(\sigma,\tau)=\text{max}_{x \in O_L}|\sigma(x)-\tau(x)|$ \ $(\sigma,\tau \in \Gamma_L)$. Here $O_L$ denotes the ring of integers of $L$. Observe that the maximum is attained since $O_L$ is compact and the function in consideration is continuous. If $L/K$ is \text{unramified} then the metric space $\Gamma_L$ is a simple one: $d(\sigma,\tau)=1$ whenever $\sigma \neq \tau$. Since every finite separable extension of local fields splits canonically as an unramified one and a totally ramified one, we go to the other extreme of the spectrum and consider $L/K$ totally ramified: in other words we put $L=K(\pi)$, with $g(\pi)=0$, where $g \in K[x]$ is \emph{Eisenstein}. We can now phrase the second question. \\
$\mathbf{Question \ (2)}$ Which invariants does the metric space impose on the coefficients of $g$?\\ \\ As we shall see, the answer to our second question comes often with a surprising link to the answer to our first question.
\subsubsection{Jumps of characters}
A \emph{character} of $U_1(K)$ is a continuous group homomorphism \\
$\chi:U_1(K) \to \mathbb{Q}_p/\mathbb{Z}_p \simeq \mu_{p^{\infty}}(\mathbb{C})$. Define $J_{\chi}=\{i \in \mathbb{Z}_{\geqslant 1}:\chi(U_i(K)) \neq \chi(U_{i+1}(K))\}=\{\text{jumps for $\chi$}\}$. Since $U_1(K)$ is a profinite group, a character $\chi$ has always finite image. Moreover it is easy to check that at each jump the size of the image gets divided exactly by $p$. So one has that $\text{order}(\chi)=p^{|J_{\chi}|}<\infty$. In particular $J_{\chi}$ is always a finite subset of $\mathbb{Z}_{\geqslant 1}$. We can now phrase our third question. \\
$\mathbf{Question \ (3)}$ Given a local field $K$, which subsets of $\mathbb{Z}_{\geqslant 1}$ occur as $J_{\chi}$ for a character of $U_1(K)$? \\ \\ Thanks to local class field theory this question is essentially asking to determine which sets $A \subseteq \mathbb{Z}_{\geqslant 1}$ occur as the set of jumps in the upper filtration of $\text{Gal}(L/K)$, for some $L$, a finite cyclic totally ramified extension of $K$, with $[L:K]$ a power of $p$. This connection is articulated in Section \ref{wild extension}. \subsection{Shifts and jump sets} \label{main results} The goal of this subsection is to explain the notion of a \emph{jump set}. Jump sets are defined using \emph{shifts}. A \emph{shift} is a strictly increasing function $\rho:\mathbb{Z}_{\geqslant 1} \to \mathbb{Z}_{\geqslant 1}$, with $\rho(1)>1$. If $T_{\rho}=\mathbb{Z}_{\geqslant 1}-\rho(\mathbb{Z}_{\geqslant 1})$ is finite, put $e^{*}=\text{max}(T_{\rho})+1$. The example of shift relevant for local fields is the following: \[ \rho_{e,p}(i)=\text{min}\{i+e,pi\} \ \text{for} \ p \ \text{prime}, e \in \mathbb{Z}_{>0} \cup \{\infty\}. \] In this example one has that if $e \neq \infty$, then $e^{*}=\lceil \frac{pe}{p-1} \rceil$. The case $e \neq \infty$ will be used for local fields of characteristic $0$, and the case $e=\infty$ will be used for local fields of characteristic $p$.
The following property explains how this shift can be used to express how $p$-powering in $U_1$ changes the weights in the filtration.
\emph{Crucial property}: If $K$ is a local field, $e=\text{v}_K(p)$, then $$ U_i^p \subset U_{\rho(i)} \ \text{for} \ \rho=\rho_{e,p}. $$ This follows at once by inspecting valuations in the binomial expansion $(1+x)^p=1+px+ \ldots +x^p$. For a local field $K$ we denote by $\rho_K$ the shift $\rho_{e,p}$.
We can now provide the notion of a jump set for a shift $\rho$ and respectively, in case $T_{\rho}$ is finite, of an extended jump set for $\rho$. A \emph{jump set} for $\rho$ (resp.\ an \emph{extended jump set} for $\rho$) is a finite subset $A \subseteq \mathbb{Z}_{\geqslant 1}$, satisfying the following two conditions: \\ \\ $(C.1)$ if $a,b \in A$, and $a<b$ then $\rho(a) \leqslant b$, \\ $(C.2)$ one has that $A - \rho(A) \subseteq T_{\rho}$ \ (resp.\ $A - \rho(A) \subseteq T_{\rho}^{*}=T_{\rho} \cup \{e^{*}\}$). \\
Write $\text{Jump}_{\rho}=\{\text{jump sets for $\rho$}\}$ \ (resp.\ $\text{Jump}_{\rho}^{*}=\{\text{extended jump sets for $\rho$}\}$). The jump set $A$ can be reconstructed from the following data. \\ \\ $(a)$ $I_{A}=A-\rho(A)$. \\
$(b)$ The function $\beta_A:A-\rho(A) \to \mathbb{Z}_{\geqslant 1}$, $i \to |[i,\infty) \cap A|$. \\ \\ The pair $(I_A,\beta_A)$ satisfies the following three conditions.\\ \\ $(C.1)'$ One has that $I_A \subseteq T_{\rho}$ (resp.\ $I_A \subseteq T_{\rho}^{*}$), \newline $(C.2)'$ the map $\beta_A$ is a strictly decreasing map $\beta:I_A \to \mathbb{Z}_{\geqslant 1}$, \newline $(C.3)'$ the map $i \mapsto \rho^{\beta(i)}(i)$ from $I_A$ to $\mathbb{Z}_{\geqslant 1}$ is strictly increasing. \\
Conversely, given any pair $(I,\beta)$ satisfying properties $(C.1)', (C.2)'$ and $(C.3)'$, we can attach to it a jump set for $\rho$ denoted by $A_{(I,\beta)}$ (resp.\ an extended jump set for $\rho$). The assignments $A \mapsto (I_A,\beta_A)$ and $(I,\beta) \mapsto A_{(I,\beta)}$ are inverses to each other. Namely we have $$A_{(I_A,\beta_A)}=A,$$ and $$(I_{A_{(I,\beta)}},\beta_{A_{(I,\beta)}})=(I,\beta). $$ We will refer also to the pair $(I,\beta)$ as a jump set. \subsubsection{Answer to question $(1)$} \label{Answer (1)} We will answer question (1) exploiting the following analogy with usual $\mathbb{Z}_p$-modules. We denote by $\mu_p(K):=\{\alpha \in K: \alpha^p=1\}$. It is not difficult to show that $\mu_p(K)=\{1\}$ if and only if $$U_1(K) \simeq_{\mathbb{Z}_p\text{-mod}}\prod_{i \in T_{\rho_K}} \mathbb{Z}_p^{f_K}. $$ Suppose that $\mu_p(K) \neq \{1\}$. Then $U_1(K)$ has a presentation: $$ 0\to \mathbb{Z}_p \to \mathbb{Z}_p^{[K:\mathbb{Q}_p]+1} \to U_1(K) \to 0. $$ Denote by $v_0$ the image of $1$ in the inclusion of $\mathbb{Z}_p$ into $\mathbb{Z}_p^{[K:\mathbb{Q}_p]+1}$. One can obtain a different presentation using the natural action of $\text{Aut}_{\mathbb{Z}_p}(\mathbb{Z}_p^{[K:\mathbb{Q}_p]+1})$ on $\text{Epi}_{\mathbb{Z}_p}(\mathbb{Z}_p^{[K:\mathbb{Q}_p]+1},U_1(K))$, which denotes the set of surjective morphisms of $\mathbb{Z}_p$-modules from $\mathbb{Z}_p^{[K:\mathbb{Q}_p]+1}$ to $U_1(K)$. In this way all presentations are obtained. That is, $\text{Aut}_{\mathbb{Z}_p}(\mathbb{Z}_p^{[K:\mathbb{Q}_p]+1})$ acts transitively on $\text{Epi}_{\mathbb{Z}_p}(\mathbb{Z}_p^{[K:\mathbb{Q}_p]+1},U_1(K))$. Thus knowing $U_1(K)$ as a $\mathbb{Z}_p$-module is tantamount to knowing the orbit of the vector $v_0$ under the action of $\text{Aut}_{\mathbb{Z}_p}(\mathbb{Z}_p^{[K:\mathbb{Q}_p]+1})$. But recall that for all $v_1, v_2 \in \mathbb{Z}_p^{[K:\mathbb{Q}_p]+1}$ one has that $$
v_1 \sim_{\text{Aut}_{\mathbb{Z}_p}} v_2 \leftrightarrow \text{ord}(v_1)=\text{ord}(v_2).
$$ Here $\text{ord}$ of a vector $v \in \mathbb{Z}_p^{[K:\mathbb{Q}_p]+1}$ denotes the minimum of $\text{v}_{\mathbb{Q}_p}(a)$ as $a$ varies among the coordinates of $v$ with respect to the standard basis of $\mathbb{Z}_p^{[K:\mathbb{Q}_p]+1}$. Therefore we have that $$
\{v: \mathbb{Z}_p^{[K:\mathbb{Q}_p]+1}/\mathbb{Z}_pv \simeq U_1(K) \}=\{v:|\mu_{p^{\infty}}(K)|=p^{\text{ord}(v)} \}. $$ We will see that in the finer category of filtered $\mathbb{Z}_p$-modules the story is very similar. To reach an analogous picture we need to introduce the analogues of the actors appearing above. Namely we need a notion of a ``\emph{free-filtered-module}".
As we shall explain in section \ref{def direct sums}, with filtered modules one can do the usual operations of direct sums, direct product, and when the modules are finitely generated of taking quotients. Having this in mind, one defines what may be thought of as the building blocks for ``free-filtered-modules", namely the analogue of rank $1$ modules over $\mathbb{Z}_p$ (but now there will be many different rank $1$ filtered modules), as follows. Let $\rho$ be a shift, and let $i$ be a positive integer. \begin{definition} The $i$-th standard filtered module, $S_i$, for $\rho$, is given by setting $S_i=\mathbb{Z}_p$, with weight map $$w(x)=\rho^{\text{ord}_p(x)}(i).$$ \end{definition} The analogues of a ``free-filtered-module" used to describe $U_1(K)$ will be $$M_{\rho}=\prod_{i \in T_{\rho}}S_i,$$ \ \ $$M_{\rho}^{*}=\prod_{i \in T_{\rho}^{*}}S_i.$$ We have the following theorem. \begin{theorem}\label{the free guys}
Let $K$ be a local field, with $|O/m|=p^{f_K}$. Then $U_1 \simeq M_{\rho_K}^{f_K}$ as filtered $\mathbb{Z}_p$-modules if and only if $\mu_p(K)=\{1\}$. \end{theorem} So we are left with the case $\mu_p(K) \neq \{1\}$. In particular we have that $\text{char}(K)=0$. We proceed in analogy with the case of $\mathbb{Z}_p$-modules described above.
To describe $U_{\bullet}$ as a \emph{filtered} $\mathbb{Z}_p$-module one \emph{constructs} a filtered presentation: $$M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*} \twoheadrightarrow U_{\bullet}(K). $$ Just as with $\mathbb{Z}_p$-modules, one can obtain a different presentation using the natural action of $\text{Aut}_{\text{filt}}(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*})$ on $\text{Epi}_{\text{filt}}(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*},U_{\bullet}(K))$. As established in Proposition \ref{transitive action} we obtain a statement in perfect analogy with the case of $\mathbb{Z}_p$-modules explained above. Namely we have the following crucial proposition. \begin{proposition} \label{orbit for intro} Let $K$ be a local field with $\mu_p(K) \neq \{1\}$. Then the action of $\emph{Aut}_{\emph{filt}}(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*})$ upon the set $\emph{Epi}_{\emph{filt}}(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*},U_{\bullet}(K))$ is transitive. \end{proposition}
For a local field $K$ as in Proposition \ref{orbit for intro} knowing the filtered module $U_{\bullet}(K)$ is tantamount to knowing the set of vectors $v \in M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*}$ such that $$(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*})/\mathbb{Z}_pv \simeq_{\text{filt}} U_{\bullet}(K). $$ Thanks to Proposition \ref{orbit for intro} the set of such vectors $v$ consists of a single orbit under the action of the group $\text{Aut}_{\text{filt}}(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*})$. Thus we are led to study the orbits of $\text{Aut}_{\text{filt}}(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*})$ acting on $M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*}$, just as we did above in the case of $\mathbb{Z}_p$-modules. In particular we are led to find the filtered analogue of the function $\text{ord}$. It is in this context that jump sets come into play. For two vectors $v_1,v_2 \in M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ we will use the notation $$v_1 \sim_{\text{Aut}_{\text{filt}}} v_2 $$ to say that $v_1$ and $v_2$ are in the same orbit under the action of $\text{Aut}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$. Observe that if $\varphi \in \text{Epi}_{\text{filt}}(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*},U_{\bullet}(K))$, then in particular $\text{ker}(\varphi) \subseteq p \cdot (M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*})$. Therefore we proceed to describe only orbits of $\text{Aut}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$ acting upon $p \cdot (M_{\rho}^{f-1} \oplus M_{\rho}^{*})$. However there is no loss of generality in doing so. Indeed it is clear that given $v_1,v_2$ in $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ one has that $v_1 \sim_{\text{Aut}_{\text{filt}}} v_2$ if and only if $p \cdot v_1 \sim_{\text{Aut}_{\text{filt}}}p \cdot v_2$. 
We attach to each extended jump set $(I,\beta)$ a vector in $p\cdot(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$ defined as follows: $$ v_{(I,\beta)}=(x_j)_{j \in T_{\rho}^{*}} \in p \cdot M_{\rho}^{*}=\prod_{j \in T_{\rho}^{*}}p \cdot S_j $$ $$\text{by} \ x_j=0 \ \text{if} \ j \not\in I, \ x_j=p^{\beta(j)} \ \text{if} \ j \in I. $$
\begin{theorem}\label{j.s.param.orbit}(\emph{Jump sets parametrize orbits}) Let $\rho$ be any shift with $\#T_{\rho}<\infty$ and $f$ be a positive integer. Then there exists a unique map \[\emph{\text{filt-ord}}:p\cdot(M_{\rho}^{f-1} \oplus M_{\rho}^*) \to \emph{Jump}_{\rho}^{*} \] having the following two properties. \\ $(1)$ For all $v_1,v_2 \in p\cdot(M_{\rho}^{f-1} \oplus M_{\rho}^*) $ one has $$
v_1 \sim_{{\emph{\text{Aut}}}_{\emph{\text{filt}}}} v_2 \leftrightarrow \emph{\text{filt-ord}}(v_1)=\emph{\text{filt-ord}}(v_2).
$$ $(2)$ For each $(I,\beta) \in \emph{Jump}_{\rho}^{*}$, we have that $$ \emph{\text{filt-ord}}(v_{(I,\beta)})=(I,\beta). $$ \end{theorem} In fact the proof of Theorem \ref{j.s.param.orbit}, as given in Section \ref{filtered modules}, provides us with an effective way to \emph{compute} the map $\text{filt-ord}$. This goes as follows. Let $v$ be in $p\cdot(M_{\rho}^{f-1} \oplus M_{\rho}^*)$. Firstly define the following subset of $\mathbb{Z}_{\geqslant 1}^2$ $$S_v:=\{(i,\text{ord}(v_i)) \}_{i \in T_{\rho}^{*}:v_i \neq 0}, $$ where $v_i$ is the projection of $v$ on the factor $S_{i}^f$ if $i<e_{\rho}^{*}$ and on $S_{e_{\rho}^{*}}$ in case $i=e_{\rho}^{*}$. Next, for any shift $\rho$ consider the following partial order $\leqslant_{\rho}$ defined on $\mathbb{Z}_{\geqslant 1}^2$. We let $(a_1,b_1) \leqslant_{\rho} (a_2,b_2)$ if and only if $$b_2 \geqslant b_1 \ \text{and} \ \rho^{b_2}(a_2) \geqslant \rho^{b_1}(a_1). $$ Finally define $S_v^{-}$ to be the set of minimal points of $S_v$ with respect to $\leqslant_{\rho}$. One can easily show that there is a unique extended jump set $(I_v,\beta_v) \in \text{Jump}_{\rho}^{*}$ such that $$ S_{v}^{-}=\text{Graph}(\beta_v). $$ It is shown in Section \ref{filtered modules} that $\text{filt-ord}(v)=(I_v,\beta_v)$. This phenomenon of a jump set arising as the set of minimal or maximal elements of some finite subset of $\mathbb{Z}_{\geqslant 1}^{2}$ is a leitmotif of this paper. Another instance of this phenomenon will emerge at the end of this sub-section in Theorem \ref{a surprising relation}, in the context of Eisenstein polynomials. We mention that this way of computing $\text{filt-ord}$ is used in \cite{de Boer--Pagano} where, among other things, algorithmic problems of this subject are explored.
From Theorem \ref{j.s.param.orbit} one concludes the following. \begin{theorem}\label{the quasi-free guys}
Let $K$ be a local field, with $\mu_p(K) \neq \{1\}$ and $|O/m|=p^{f_K}$. Then there is a \emph{unique} $(I_K,\beta_K) \in \emph{Jump}_{\rho_K}^{*}$ such that
$$U_1(K) \simeq M_{\rho_K}^{f_K-1} \oplus (M_{\rho_K}^{*}/\mathbb{Z}_pv_{(I_K,\beta_K)})$$
as filtered $\mathbb{Z}_p$-modules.
\end{theorem} So when $\mu_p(K) \neq \{1\}$, knowing $U_1(K)$ as a filtered module is tantamount to knowing the extended $\rho_K$-jump set $(I_K,\beta_K)$.
The next theorem tells us, for given $e,f$, which orbits of the action of $\text{Aut}_{\text{filt}}(M_{\rho_{e,p}}^{f-1} \oplus M_{\rho_{e,p}}^{*})$ on $M_{\rho_{e,p}}^{f-1} \oplus M_{\rho_{e,p}}^{*}$ are realized by a local field $K$ with $\mu_p(K) \neq \{1\}$, $e_K=e$ and $f_K=f$. In other words, together with Theorem \ref{the free guys} this provides a complete classification of the filtered $\mathbb{Z}_p$-modules $M_{\bullet}$ such that $$U_{\bullet}(K) \simeq_{\text{filt}} M_{\bullet}, $$ for some local field $K$, therefore answering Question $(1)$.
\begin{theorem} \label{realizable j.s. are realized} Let $p$ be a prime number, let $e,f \in \mathbb{Z}_{>0}$, and let $(I,\beta)$ be an extended $\rho_{e,p}$-jump set. Then the following are equivalent. \\ (1) There exists a local field $K$ with residue characteristic $p$ and $$\mu_p(K) \neq \{1\}, \ f_K=f, \ e=\emph{v}_K(p), \ (I_K,\beta_K)=(I,\beta).$$
(2) We have that $p-1|e, I \neq \emptyset$ and $$\rho_{e,p}^{\beta(\emph{min}(I))}(\emph{min}(I))=\frac{pe}{p-1} \ (=e^{*}).$$ \end{theorem} For a shift $\rho$ such that $T_{\rho}$ is finite, the extended jump sets $(I,\beta) \in \text{Jump}_{\rho}^{*}$ such that $I \neq \emptyset$ and $\rho^{\beta(\text{min}(I))}(\text{min}(I))=e^{*}$ are said to be \emph{admissible}. The implication $(2) \to (1)$, in the above theorem, is proved in Section \ref{U1 as filtered module} in Theorem \ref{admissible j.s. occur}. The implication $(1) \to (2)$ follows from Proposition \ref{rho for loc fields} and Theorem \ref{classification of quasi free} combined.
Our next main result provides a quantitative strengthening of Theorem \ref{realizable j.s. are realized}. Once we fix $e \in (p-1)\mathbb{Z}_{\geqslant 1}$ and a positive integer $f$, then, thanks to Theorem \ref{realizable j.s. are realized}, we know precisely which $(I,\beta) \in \text{Jump}_{\rho_{e,p}}^{*}$ occur as $(I_K,\beta_K)$ for some local field $K$ with $\mu_p(K) \neq \{1\}, e_K=e, f_K=f$. But Theorem \ref{realizable j.s. are realized} doesn't tell us ``\emph{how often}" each $(I,\beta)$ occurs. To make this point precise we should firstly agree in which manner we \emph{weight} local fields. A very natural way to do this is provided by Serre's Mass formula \cite{Serre m.f.}. We briefly recall how this works.
Let $E$ be a local field. Write $q=|O_E/m_E|$. Let $e$ be a positive integer. Let $S(e,E)$ be the set of isomorphism classes of separable totally ramified degree $e$ extensions $K/E$. To $K\in S(e,E)$ one gives mass $\mu_{e,E}(K):=\frac{1}{q^{c(K/E)}|\text{Aut}_{E}(K)|}$, where $c(K/E)=\text{v}_K(\delta_{K/E})-e+1$, and $\delta_{K/E}$ denotes the different of the extension $K/E$. Serre's Mass formula \cite{Serre m.f.} states that $\mu_{e,E}$ is a probability measure on $S(e,E)$, i.e. $$ \sum_{K \in S(e,E)}\mu_{e,E}(K)=1.$$
Now we can make the ``\emph{how often}" written above precise. Namely given $e \in (p-1)\mathbb{Z}_{\geqslant 1}, f \in \mathbb{Z}_{\geqslant 1}$ and $(I,\beta) \in \text{Jump}_{\rho}^{*}$, write $E_f:=\mathbb{Q}_{p^f}(\zeta_p)$. Here $\mathbb{Q}_{p^f}$ denotes the degree $f$ unramified extension of $\mathbb{Q}_p$. We can ask to evaluate $$\sum_{K\in S(\frac{e}{p-1},E_f): (I_K,\beta_K)=(I,\beta)}\mu_{\frac{e}{p-1},E_f}(K), $$ in words we are asking to evaluate the probability that a random $K$, totally ramified degree $\frac{e}{p-1}$ extension of $E_f$, has $(I_K,\beta_K)=(I,\beta)$.
Observe that, thanks to Proposition \ref{orbit for intro} and Theorems \ref{j.s.param.orbit} and \ref{the quasi-free guys} combined, we know that for $K \in S(\frac{e}{p-1},E_f)$ the set of vectors $O:=\{v \in M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*}: U_{\bullet}(K) \simeq_{\text{filt}} (M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*})/\mathbb{Z}_pv \}$ is precisely equal to the orbit of the vector $v_{(I_K,\beta_K)}$ under $\text{Aut}_\text{filt}(M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*})$. Moreover $M_{\rho_K}^{f_K-1} \oplus M_{\rho_K}^{*}$ viewed as a topological group is compact, and hence has a Haar measure. It is then natural to think that, for a given admissible extended $\rho_{e,p}$-jump set $(I,\beta)$, a randomly chosen totally ramified degree $\frac{e}{p-1}$ extension $K$ of $E_f$, satisfies $$(I_K,\beta_K)=(I,\beta) $$ with probability \emph{proportional} to the Haar measure of the orbit of $v_{(I,\beta)}$. Our next theorem shows that this turns out to be exactly right.
For $(I,\beta) \in \text{Jump}_{\rho_{e,p}}^{*}$, with $I \neq \emptyset$, it is easy to see that the set $\text{filt-ord}^{-1}((I,\beta))$ is an open subset of $M_{\rho_{e,p}}^{f-1} \oplus M_{\rho_{e,p}}^{*}$. Normalize $\mu_{\text{Haar}}$, imposing that $$\mu_{\text{Haar}}(\bigcup\limits_{(I,\beta) \ \text{admissible}}\text{filt-ord}^{-1}(I,\beta))=1.$$ In other words, choose the unique normalization of the Haar measure that induces a probability measure on the union of the orbits of the vectors $v_{(I,\beta)}$ as $(I,\beta)$ runs among admissible extended jump sets for $\rho_{e,p}$. We call admissible those orbits of $M_{\rho_{e,p}}^{f-1} \oplus M_{\rho_{e,p}}^{*}$, under the action of $\text{Aut}_{\text{filt}}(M_{\rho_{e,p}}^{f-1} \oplus M_{\rho_{e,p}}^{*})$, that contain a vector $v_{(I,\beta)}$ with $(I,\beta)$ admissible.
Let $E_f$ be $\mathbb{Q}_{p^f}(\zeta_p)$, the unramified extension of $\mathbb{Q}_p(\zeta_p)$ of degree $f$. \begin{theorem} \label{counting}
Let $e \in (p-1)\mathbb{Z}_{\geqslant 1}, f \in \mathbb{Z}_{\geqslant 1}$ and $(I,\beta) \in \emph{Jump}_{\rho_{e,p}}^{*}$ be an admissible jump set. Then the probability that a random totally ramified degree $\frac{e}{p-1}$ extension $K$ of $E_f$ satisfies $(I_K,\beta_K)=(I,\beta)$, is equal to the probability that a vector $v \in M_{\rho_{e,p}}^{f-1} \oplus M_{\rho_{e,p}}^{*} $, randomly chosen among admissible orbits, is in the orbit of $v_{(I,\beta)}$. In other words $$\sum_{K\in S(\frac{e}{p-1},E_f): (I_K,\beta_K)=(I,\beta)}\mu_{\frac{e}{p-1},E_f}(K)=\mu_{\emph{\text{Haar}}}({\emph{\text{filt-ord}}}^{-1}(I,\beta)).$$ \end{theorem} From the first proof given by Serre \cite{Serre m.f.}, Theorem \ref{counting} can be equivalently expressed as a volume computation in a space of Eisenstein polynomials. Namely for $e \in (p-1)\mathbb{Z}_{\geqslant 1}$ and $f \in \mathbb{Z}_{\geqslant 1}$, denote by $\text{Eis}(\frac{e}{p-1}, \mathbb{Q}_{p^f}(\zeta_p))$ the set of degree $\frac{e}{p-1}$-Eisenstein polynomials over $\mathbb{Q}_{p^f}(\zeta_p)$. This can be viewed as a topological space equipped with a natural probability measure, simply by using the Haar measure on the coefficients. For a $g(x) \in \text{Eis}(\frac{e}{p-1}, \mathbb{Q}_{p^f}(\zeta_p))$, denote by $F_{g(x)}:=\mathbb{Q}_{p^f}(\zeta_p)[x]/(g(x))$. We can reformulate Theorem \ref{counting} in the following manner.
\begin{theorem} \label{counting2}
Let $e \in (p-1)\mathbb{Z}_{\geqslant 1}, f \in \mathbb{Z}_{\geqslant 1}$ and $(I,\beta) \in \emph{Jump}_{\rho_{e,p}}^{*}$ be an admissible jump set. Then the volume of the set of $g(x) \in \emph{Eis}(\frac{e}{p-1}, \mathbb{Q}_{p^f}(\zeta_p))$ satisfying $(I_{F_{g(x)}},\beta_{F_{g(x)}})=(I,\beta),$ equals $$\mu_{\emph{\text{Haar}}}({\emph{\text{filt-ord}}}^{-1}(I,\beta)). $$ \end{theorem} The above two Theorems are implied by Theorem \ref{counting rephrased}. As a bonus, the method of the proof of Theorem \ref{counting rephrased} allows us to \emph{explicitly compute} the jump set $(I_{F_{g(x)}},\beta_{F_{g(x)}})$ out of the valuation of the coefficients of $g(x)$, for a large class of Eisenstein polynomials $g(x)$. This will be the class of \emph{strongly separable Eisenstein polynomials}, which are defined right after Proposition \ref{strongly sep. poly}. To state our next Theorem, we begin attaching to any $g(x) \in \text{Eis}(\frac{e}{p-1}, \mathbb{Q}_{p^f}(\zeta_p))$, an element $(I_{g(x)},\beta_{g(x)}) \in \text{Jump}_{\rho_{\infty,p}}$. Under certain conditions, given below, we have that actually $(I_{g(x)},\beta_{g(x)}) \in \text{Jump}_{\rho_{e,p}}^{*}$ and $(I_{F_{g(x)}},\beta_{F_{g(x)}})=(I_{g(x)},\beta_{g(x)})$. We shall begin by explaining the construction of $(I_{g(x)},\beta_{g(x)})$. Write $$g(x):=\sum_{i=0}^{\frac{e}{p-1}}a_ix^i. $$ Firstly consider the following subset of $\mathbb{Z}^2$ $$S_{g(x)}:=\{\big(\frac{\text{v}_{E_f}(a_i)\frac{e}{p-1}+i}{p^{\text{v}_{\mathbb{Q}_p}(i)}},\text{v}_{\mathbb{Q}_p}(i)+1 \big) \}_{1 \leqslant i \leqslant \frac{e}{p-1}: \text{v}_{\mathbb{Q}_p}(i) \leqslant \text{v}_{\mathbb{Q}_p}(e) \ \text{and} \ a_i \neq 0}. $$ Recall the definition of the partial order $\leqslant_{\rho}$ attached to a shift $\rho$ given right after Theorem \ref{j.s.param.orbit}. We denote by $S_{g(x)}^{-}$ the set of \emph{minimal} elements of $S_{g(x)}$ with respect to the order $\leqslant_{\rho_{\infty,p}}$. 
One can prove that there is a unique pair $$(I_{g(x)},\beta_{g(x)}) \in \text{Jump}_{\rho_{\infty,p}}, $$ such that $S_{g(x)}^{-}=\text{Graph}(\beta_{g(x)})$. It turns out that if $g(x)$ is strongly separable, a notion that we are going to provide right after Proposition \ref{strongly sep. poly}, then the pair $(I_{g(x)},\beta_{g(x)})$ is also in $\text{Jump}_{\rho_{e,p}}$.
We next make a definition that will have the effect of sub-dividing the characteristic $0$ local field extensions into two sub-categories. Loosely speaking, when the ramification of $F/E$ is not ``too big'' compared to $\text{v}_{F}(p)$, then the arithmetic of this extension will be, for our purposes, indistinguishable from the arithmetic of a characteristic $p$ extension. We make this notion precise in the following definition, while the relation with characteristic $p$ fields will only become visible in Theorem \ref{a surprising relation 2}. For an extension of local fields $F/E$ we denote by $\delta_{F/E}$ the different of the extension. \begin{definition} \label{strongly Eis} Let $F/E$ be any extension of local fields of residue characteristic $p$. We say that $F/E$ is \emph{strongly separable} if $$\text{v}_{F}(\delta_{F/E})<\text{v}_{F}(p). $$ \end{definition} Observe that in characteristic $p$ the notions of strongly separable and separable coincide. One can easily show the following general fact. \begin{proposition} \label{strongly sep. poly} Let $n$ be a positive integer. Consider $F/E$ a monogenic degree $n$ extension given by an Eisenstein polynomial $g(x):=\sum_{i=0}^{n}a_ix^i$. Then $F/E$ is strongly separable if and only if there exists $i \in \{1,\ldots ,n\}$ such that $(i,p)=1$ and $\emph{v}_{E}(a_i)<\emph{v}_E(p)$. \end{proposition} An Eisenstein polynomial $g(x) \in \text{Eis}(n,E)$ giving rise to a strongly separable extension is itself called strongly separable. So Proposition \ref{strongly sep. poly} says that $g(x)$ is strongly separable if and only if it has a coefficient $a_i$ with $(i,p)=1$ and $\text{v}_E(a_i)<\text{v}_E(p)$. We can now state our next result. For a positive integer $f$, recall that $E_f$ denotes $\mathbb{Q}_{p^f}(\zeta_p)$, the unramified extension of $\mathbb{Q}_p(\zeta_p)$ of degree $f$.
\begin{theorem} \label{valuation coefficients} Let $e \in (p-1)\mathbb{Z}_{\geqslant 1}, f \in \mathbb{Z}_{\geqslant 1}$ and $g(x) \in \emph{Eis}(\frac{e}{p-1}, E_f)$ be strongly separable. Then $$(I_{g(x)},\beta_{g(x)})=(I_{E_f[x]/g(x)},\beta_{E_f[x]/g(x)}). $$
\end{theorem} As explained at the end of Section \ref{finding jump sets inside}, the assumption of being strongly separable cannot be omitted. Theorem \ref{valuation coefficients} is deduced in Section \ref{finding jump sets inside} from a slightly finer result. Moreover in that Section we provide a \emph{procedure} that allows one to compute $(I_{g(x)},\beta_{g(x)})$ very quickly, even by hand. See \cite{de Boer--Pagano} for an actual implementation of this as well.
The moral of Theorem \ref{valuation coefficients} is that in a portion of the space of Eisenstein polynomials, the assignment $K \mapsto (I_K,\beta_K)$ can be read off very explicitly from the valuations of the coefficients of an Eisenstein polynomial giving the field $K$. In general this is not the case, but nevertheless one is able to establish the exact counting formula as in Theorem \ref{counting} by means of a genuinely probabilistic argument.
\subsubsection{Answer to question $(2)$} Let $n$ be a positive integer and let $L/K$ be a degree $n$ totally ramified separable extension of local fields with residue characteristic $p$. Suppose $L/K$ is given by $g(x) \in \text{Eis}(n,K)$, i.e. $L=K[x]/g(x)$. Denote by $\Gamma_L$ the metric space introduced in \ref{Question 2}. One can find invariants of $g(x)$ from the structure of the metric space $\Gamma_L$ as follows. Fix $\pi \in K^{\text{sep}}$ a root of $g(x)$. Denote by $\sigma_{\pi} \in \Gamma_L$ the corresponding embedding $$ \sigma_{\pi}(x)=\pi. $$ Consider the polynomial $$g_{\text{twist}}(t)=g(\pi \cdot t+\pi) \in K[\pi][t]. $$ The knowledge of the Newton polygon of $g_{\text{twist}}(t)$ tells us precisely how the distances are disposed around $\sigma_{\pi}$ in $\Gamma_L$. But recall that $\Gamma_L$ is a transitive $G_K$-set, and every element of $G_K$ acts as an isometry on $\Gamma_L$. Hence the Newton polygon of $g_{\text{twist}}(t)$ is an invariant of the metric space $\Gamma_{L}$ independent of the choice of $\pi$ and of $g$. Denote this polygon by $$\text{Newt}(L/K). $$ Observe that in case $L/K$ is Galois, then the knowledge of $\text{Newt}(L/K)$ amounts to the knowledge of the map $\mathbb{Z}_{>0} \to \mathbb{Z}_{>0}$
$$ u \mapsto |\text{Gal}(L/K)_{u}|, \ (u \in \mathbb{Z}_{>0}) $$ where $\text{Gal}(L/K)_u$ denotes the lower $u$-th ramification group as defined in \cite{Local fields}. But $\text{Newt}(L/K)$ makes sense also for non-Galois extensions.
This Newton polygon is called the \emph{ramification polygon} in the literature, and, among other things, a complete survey on this subject can be found in \cite{Pauli--Sinclair}. In that paper the polynomial in consideration is instead $\frac{g(\pi t+\pi)}{\pi^n}$. Of course this has simply the effect of shifting the polygon vertically by $-n$. As it will become clear to the reader in a moment, we have chosen our normalization since the form of our results is slightly more pleasant with our convention.
The following fact, certainly folklore, can be shown by direct inspection. We refer the reader to Section \ref{Procedure Eis} for how to calculate in practice $(I_{g(x)},\beta_{g(x)})$: this together with the basic properties of $\text{Newt}(L/K)$, which can be found in \cite{Pauli--Sinclair}, gives the following fact quite rapidly. \begin{theorem} \label{strongly separable easy newton} Let $n$ be a positive integer and let $K$ be a local field with residue characteristic $p$. Let $g(x) \in \emph{Eis}(n, K)$ be a strongly separable polynomial. Then $$\emph{\text{Lower-Convex-Hull}}(\{(p^{\beta_{g(x)}(i)-1},p^{\beta_{g(x)}(i)-1}i): i \in I_{g(x)} \} \cup \{(n,n)\})=\emph{\text{Newt}}(K[x]/g(x)/K). $$ \end{theorem} In other words Theorem \ref{strongly separable easy newton} gives us a way to read off $\text{Newt}(K[x]/g(x)/K)$ from $(I_{g(x)},\beta_{g(x)})$, in case $g(x)$ is strongly separable. Hence combined with Theorem \ref{valuation coefficients} we obtain the following surprising result. \begin{theorem} \label{a surprising relation} Let $L/\mathbb{Q}_{p^f}(\zeta_p)$ be a strongly separable totally ramified extension. Then $$ \emph{\text{Lower-Convex-Hull}}(\{(p^{\beta_{L}(i)-1},p^{\beta_{L}(i)-1}i): i \in I_L \} \cup \{(n,n)\})=\emph{\text{Newt}}(L/\mathbb{Q}_{p^f}(\zeta_p)).$$ \end{theorem} Hence for a strongly separable extension $L/\mathbb{Q}_{p^f}(\zeta_p)$ the knowledge of the filtered $\mathbb{Z}_p$-module $U_{\bullet}(L)$ implies the knowledge of the ramification polygon $\text{Newt}(L/\mathbb{Q}_{p^f}(\zeta_p))$. Moreover we see something else going on: for such an extension the full object $(I_{g(x)},\beta_{g(x)})$ is an invariant of the extension. This indeed follows from Theorem \ref{valuation coefficients}: that Theorem is telling us that the object $(I_{g(x)},\beta_{g(x)})$ encodes the structure of $U_{\bullet}(\mathbb{Q}_{p^f}(\zeta_p)[x]/g(x))$ as a filtered $\mathbb{Z}_p$-module. 
But in the more general case of Theorem \ref{strongly separable easy newton} we see a priori only a way to \emph{deduce} an invariant from $(I_{g(x)},\beta_{g(x)})$, without any structural information provided for $(I_{g(x)},\beta_{g(x)})$ itself. In particular it gives us no a priori guarantees that $(I_{g(x)},\beta_{g(x)})$ is the same as $g(x)$ varies among polynomials representing the same field. In Section \ref{generalization of I beta} we pinpoint this additional structural information. Namely to \emph{any} strongly separable extension $L/K$ of local fields, we will attach $(I_{L/K},\beta_{L/K})$, a $\rho_{\infty,p}$-jump set that encodes structural information about the filtered inclusion $$U_{\bullet}(K) \subseteq U_{\bullet}(L). $$ In particular, if $\mu_p(L)=\{1\}$ then $(I_{L/K},\beta_{L/K})$ has the following simple interpretation. In this case one can attach, essentially by means of Theorem \ref{j.s.param.orbit}, to any element $u$ of $U_1(K)-U_2(K)$ a $\rho_{e_L,p}$-jump set $(I_{L/K}(u),\beta_{L/K}(u))$. The jump set $(I_{L/K}(u),\beta_{L/K}(u))$ tells us the orbit of $u$ under the action of $\text{Aut}_{\text{filt}}(U_{\bullet}(L))$. Let $u$ be any element of $U_1(K)-U_2(K)$ and let $g(x)$ be any Eisenstein polynomial giving $L/K^{\text{nr}}$, where $K^{\text{nr}}$ is the maximal unramified extension of $K$ in $L$. It turns out that $(I_{L/K}(u),\beta_{L/K}(u))=(I_{g(x)},\beta_{g(x)})$. In particular all the elements of $U_1(K)-U_2(K)$ are in the same orbit for the action of $\text{Aut}_{\text{filt}}(U_{\bullet}(L))$. This orbit corresponds to a single jump set $(I_{L/K},\beta_{L/K})$.
For general strongly separable extensions of local fields we have the following joint generalization of Theorem \ref{valuation coefficients} and Theorem \ref{a surprising relation}.
\begin{theorem} \label{a surprising relation 2} Let $L/K$ be a strongly separable totally ramified extension of local fields of residue characteristic $p$. Then $$ \emph{\text{Lower-Convex-Hull}}(\{(p^{\beta_{L/K}(i)-1},p^{\beta_{L/K}(i)-1}i): i \in I_{L/K}\} \cup \{(n,n)\})=\emph{\text{Newt}}(L/K).$$ Moreover if $L/K$ is given by an Eisenstein polynomial $g(x)$, then $$(I_{L/K},\beta_{L/K})=(I_{g(x)},\beta_{g(x)}). $$ \end{theorem} Therefore Theorem \ref{a surprising relation 2} provides an intrinsic description of $(I_{g(x)},\beta_{g(x)})$ as a filtered invariant of the corresponding inclusion of groups of principal units. In particular this says that $(I_{g(x)},\beta_{g(x)})$ is an invariant of the Eisenstein polynomial $g(x)$ as long as $g(x)$ is strongly separable.
\subsubsection{Answer to question (3)} Denote by $\mathcal{J}_{K}$ the set of possible sets of jumps for a character of $U_1(K)$. Clearly $\mathcal{J}_K$ is determined by the structure of $U_1(K)$ as a filtered $\mathbb{Z}_p$-module. So one can use the answer to question (1) in order to answer question (3). The first step is answering the same problem for free filtered modules. The main idea for doing this is again to exploit the action of the group of filtered automorphisms. Denote by $\widehat{M_{\rho}^f}$ the group of characters of $M_{\rho}^{f}$. There is a natural action of $\text{Aut}_{\text{filt}}(M_{\rho}^{f})$ on $\widehat{M_{\rho}^f}$. The action clearly preserves the set of jumps of each character. It turns out that conversely one can reconstruct the orbit of the character from the set of jumps: two characters in $\widehat{M_{\rho}^{f}}$ are in the same orbit under the action of $\text{Aut}_{\text{filt}}(M_{\rho}^{f})$ if and only if they have the same set of jumps. Moreover the possible sets of jumps are exactly the $\rho$-jump sets. This fact is expressed in the following theorem. \begin{theorem}(\emph{Jump sets parametrize orbits of characters}) \label{Jump sets parametrize orbits of characters} Let $\rho$ be a shift, and $f$ be a positive integer. Then the set of possible sets of jumps of characters of the free-filtered $\mathbb{Z}_p$-module $M_{\rho}^{f}$ is exactly $\emph{Jump}_{\rho}$. Moreover two characters have the same set of jumps if and only if they are in the same orbit under the group $\emph{Aut}_{\emph{filt}}(M_{\rho}^{f})$. \end{theorem} So in particular we have the following result. \begin{theorem} Let $K$ be a local field with $\mu_p(K)=\{1\}$, then $\mathcal{J}_K=\emph{Jump}_{\rho_K}$. \end{theorem} We now consider the case $\mu_p(K) \neq \{1\}$. By Theorem \ref{the quasi-free guys}, we first look at the possible sets of jumps of characters of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$. 
These are precisely the extended jump sets, as we next explain. \begin{theorem}(\emph{Jump sets parametrize orbits of characters---part 2}) Let $\rho$ be a shift with $\#T_{\rho}<\infty$. Let $f$ be a positive integer. Then the set of possible sets of jumps of characters of the free-filtered $\mathbb{Z}_p$-module $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ is exactly $\emph{\text{Jump}}_{\rho}^{*}$. Moreover two characters have the same set of jumps if and only if they are in the same orbit under the group $\emph{\text{Aut}}_{\emph{\text{filt}}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$. \end{theorem} We then show that this, essentially thanks to Proposition \ref{a lot surjective}, implies that $\mathcal{J}_{K} \subseteq \text{Jump}_{\rho_K}^{*}$ always, i.e. a set of jumps for a character is always an extended $\rho_K$-jump set. The remaining task is to classify which orbits of characters of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ admit a representative killing a given element of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$. In this way we obtain the final classification, which is Theorem \ref{classification wild characters}. This Theorem says that $\mathcal{J}_K$ consists of the elements of $\text{Jump}_{\rho_K}^{*}$ that are $(I_K,\beta_K,f_K,p)$-\emph{compatible}. Compatibility is an explicit combinatorial criterion that consists in a comparison between a jump set $(I,\beta)$ and the jump set of the field $(I_K,\beta_K)$: in the comparison an important role is played by the case distinction of whether $f_K \geqslant 2$ or not and whether $p=2$ or not. For a precise definition see Definition \ref{incompatibility}. In the rest of Section \ref{wild extension} we establish several explicit applications of this criterion, stressing especially the first dichotomy. As an example we give here the following result. \begin{theorem} Let $K_1,K_2$ be two totally ramified extensions of $\mathbb{Q}_p(\zeta_p)$. 
Then $\mathcal{J}_{K_1}=\mathcal{J}_{K_2}$ if and only if $U_{\bullet}(K_1) \simeq _{\mathbb{Z}_p\emph{-filt}} U_{\bullet}(K_2)$. \end{theorem} In other words, for totally ramified extensions $K/\mathbb{Q}_p(\zeta_p)$, not only do we have an explicit criterion to compute $\mathcal{J}_{K}$ from the filtered $\mathbb{Z}_p$-module $U_{\bullet}(K)$, but we can conversely reconstruct the filtered $\mathbb{Z}_p$-module $U_{\bullet}(K)$ from $\mathcal{J}_{K}$.
Finally we remark that, by the reciprocity map, this criterion gives an explicit classification of the possible sets of jumps in the upper numbering of a cyclic wild extension of a local field. We explain this in further detail in Section \ref{wild extension}.
\subsection{Further results and questions} We hope to have shed some light on the role that the jump set $(I_K,\beta_K)$ plays in the arithmetic of the local field $K$. This makes some basic questions about this invariant worth investigating. A very basic one is the following. Let $K$ be a local field with $\mu_p(K) \neq \{1\}$. Let $e$ be in $e_K \mathbb{Z}_{\geqslant 1}$ and let $f$ be in $f_K\mathbb{Z}_{\geqslant 1}$. \\
\emph{Question}: For which $(I,\beta) \in \text{Jump}_{\rho_{e,p}}^{*}$ does there exist an extension $L$ of $K$ such that $e_L=e, f_L=f$ and $(I_L,\beta_L)=(I,\beta)$? \\
We have made some progress on this question, see Section \ref{Jump sets under field extensions}. In that Section we establish some peculiarly specific rules that constrain the possible changes of a jump set under a totally ramified extension. As the reader will learn in that section, the interesting case, among totally ramified extensions, is only that of wild extensions. In the present paper we leave open a complete characterization of which jump sets occur under such extensions, providing only necessary conditions. From further calculations, not included in the present paper, we believe that a full classification might be within reach, but the final result might look quite intricate.
In a different direction, we would like to mention that most of the results of the present paper can be viewed as an investigation of the filtered $\mathbb{Z}_p$-modules arising from taking points of one of the simplest formal groups, namely $\mathbb{G}_m$. The theory in Section \ref{filtered modules} should be general enough to cover the case of other Lubin-Tate formal groups giving rise to filtered $O_K$-modules with cyclic torsion sub-module, where $K$ is any other local field, and $O_K$ its ring of integers. It would be an interesting investigation to see which of the results of the present paper extend to this context. For instance, it should be possible to provide a theorem on the lines of Theorem \ref{admissible j.s. occur}.
Finally we would like to conclude with yet another potentially worthwhile direction of investigation. Our mass formula, contained in Theorem \ref{counting}, follows the first interpretation of Serre's weight for local fields, namely using volumes of Eisenstein polynomials. But Serre \cite{Serre m.f.} established also a different interpretation of these weights, by means of division algebras. This suggests the possibility of studying the filtered pro-$p$ group $U_{\bullet}(D)$ of principal units of a central division algebra over a local field, and to study the action of the group $\text{Aut}_{\text{filt}}(U_{\bullet}(D))$ on the set of maximal abelian filtered $\mathbb{Z}_p$-sub-modules. It would be very elegant to reach in this manner a different proof of Theorem \ref{counting}.
\subsection{Comparison with the literature} An explicit classification of the possible upper jumps of wild characters of a local field $K$, i.e.\ of the set $\mathcal{J}_K$, was given in a series of papers, by respectively Maus, Miki and Sueyoshi \cite{Maus}, \cite{Miki}, \cite{Sueyoshi}. The first author has given the criterion for characteristic $p$ local fields. The full classification was given by Miki, and some of Miki's arguments in \cite{Miki} were simplified by Sueyoshi in \cite{Sueyoshi}, where the reader can find also a neat statement for Miki's criterion. Two points are in order here. The first point is that in \cite{Miki} and \cite{Sueyoshi} the invariant $(I_K,\beta_K)$ was already introduced. This is buried in \cite[Lemma 17]{Miki}. In the language of this paper, we can say that $(I_K,\beta_K)$ was understood as the unique element of $\text{Jump}_{\rho_K}^{*}$ such that there is an equation of the form $\zeta_p=\prod_{i \in I_K} u_i^{p^{\beta_K(i)-1}}$, where $\text{v}_K(u_i-1)=i$, and in case $\frac{pe_K}{p-1} \in I_K$, then $u_{\frac{pe_K}{p-1}} \not \in K^{*p}$. The uniqueness was proved in an ad hoc manner in the above mentioned \cite[Lemma 17]{Miki}. The present work is the first place in the literature where the \emph{structural meaning} of the invariant $(I_K,\beta_K)$ is established: it gives, together with $f_K$ and $p:=\text{char}(O_K/m_K)$, the structure of $U_{\bullet}(K)$ as a filtered module. Apart from being conceptually more satisfying, this slightly more abstract approach has two practical advantages. Firstly it leads naturally to all the above mentioned additional results: the interpretation of jump sets in terms of \emph{filtered orbits} of vectors, see Theorem \ref{j.s.param.orbit}, leads to the mass formula for unit filtrations, Theorem \ref{counting}, which in turn leads naturally to Theorem \ref{a surprising relation}, which links the filtered structure of $U_{\bullet}(K)$ with ramification theory. 
To the best of our knowledge all these results are new. Secondly the interpretation of jump sets as parametrizing filtered orbits of characters, see Theorem~\ref{Jump sets parametrize orbits of characters}, makes it an easy job to deduce, from first principles, our classification of the possible sets of jumps for a character, contained in Theorem \ref{classification wild characters}. This brings us to the second point. Namely the combinatorial criterion of \cite{Miki} is not tautologically equal to the one contained in Theorem \ref{classification wild characters}. We check, by direct combinatorial inspection, that they coincide in Proposition \ref{inadequacy equivalent to incompatibility}, showing in this way that the tools of this paper give, among other things, a simple unified approach to deduce all the results in \cite{Maus}, \cite{Miki} and \cite{Sueyoshi}, by means of a general theory of filtered modules.
Coming to more recent literature, in 2014, I. del Corso and L. Capuano \cite{Capuano--Del Corso} have obtained a classification of all possible upper jumps in exponent $p$ extensions of a local field $K$. It would be interesting to push this further by obtaining a classification, for \emph{any} finite abelian $p$-group $A$, of the possible structures $A_{\bullet}$ as \emph{filtered group} on $A$ such that $\text{Epi}_{\text{filt}}(U_{\bullet}(K),A_{\bullet}) \neq \emptyset$. For instance, this might be useful in counting the average number of extensions with prescribed ramification data at $p$, in families of number fields containing $\zeta_p$. For a first work in the direction of such counting with ``prescribed ramification'', see \cite{Pagano--Sofos}.
Finally we would like to mention that the ramification polygon of an Eisenstein polynomial has been the object of study of several papers \cite{Pauli}, \cite{Romano}, \cite{Pauli--Sinclair}, especially in relation to the problem of calculating Galois groups of Eisenstein polynomials. In his Ph.D. thesis, D. Romano \cite{Romano} provided a characterization of \emph{strongly Eisenstein} polynomials in terms of their Galois group. In a sense these are the polynomials with the simplest possible ramification polygon. It is then interesting that strongly Eisenstein polynomials $g(x)$ over $\mathbb{Q}_{p^f}(\zeta_{p^j})$ with $(p,j) \neq (2,1)$ and $\text{v}_{\mathbb{Q}_p}(\text{deg}(g(x)))>j$, can be also characterized in terms of filtered modules, see Theorem \ref{strongly Eisenstein characterized}. Under the assumption $(p,j) \neq (2,1)$ and $\text{v}_{\mathbb{Q}_p}(\text{deg}(g(x)))>j$, these polynomials are the ones giving the simplest possible filtered module, which is also the most frequent one, in the sense of Theorem \ref{counting}: it occurs $\frac{p^f-1}{p^f}$ of the time, just as the probability for an Eisenstein polynomial over $\mathbb{Q}_{p^f}(\zeta_{p^j})$ to be strongly Eisenstein. The work of Romano has been substantially refined by S. Pauli and C. Greve \cite{Pauli}.
\section{Jump sets} \label{jump sets} The goal of this section is to define and explain the notion of a jump set, which is the key object of this paper. Jump sets are defined in terms of shifts. A \emph{shift} is a strictly increasing function $\rho:\mathbb{Z}_{\geqslant 1} \to \mathbb{Z}_{\geqslant 1}$, with $\rho(1)>1$. For a shift $\rho$, we denote by $T_{\rho}$ the set $\mathbb{Z}_{\geqslant 1}-\rho(\mathbb{Z}_{\geqslant 1})$. If $T_{\rho}$ is finite, we denote by $e^{*}$ the positive integer $\text{max}(T_{\rho})+1$. We denote by $e'_{\rho}$ the positive integer $\rho^{-1}(e^*)$. The shifts that will be relevant for local fields are the ones explained in the following. \begin{example} \label{main example shift} For $p$ a prime, and $e \in \mathbb{Z}_{>0} \cup \{\infty\}$ denote $\rho_{e,p}(i)=\text{min}\{i+e,pi\}$. It is a shift.
Clearly $T_{\rho_{e,p}}$ is finite iff $e$ is finite. Indeed one has always $e=|T_{\rho_{e,p}}|$. If $e \neq \infty$, then $e^{*}=\lceil \frac{pe}{p-1} \rceil$. The reason why these shifts will play a role is due to the following property.
\emph{Crucial property}: let $K$ be a local field of residue characteristic $p$, and let $e=\text{v}_K(p)$; then we have that
$$U_i^p \subset U_{\rho(i)},$$ for $\rho=\rho_{e,p} \ (=\rho_K)$. One can see this by inspection of the valuations in the expansion $(1+x)^p=1+px+ \ldots +x^p$. \end{example} We now define $\rho$-jump sets (resp.\ extended $\rho$-jump sets). \begin{definition}
A \emph{jump set} for $\rho$ (resp.\ an \emph{extended jump set} for $\rho$) is a finite subset $A \subseteq \mathbb{Z}_{\geqslant 1}$ such that:\\ \\ $\bullet$ \ if $a,b \in A$, and $a<b$ then $\rho(a) \leqslant b$, \\
$\bullet$ \ $A - \rho(A) \subseteq T_{\rho}$ (resp.\ $A - \rho(A) \subseteq T_{\rho}^{*}=T_{\rho} \cup \{e^{*}\}$). \\ \\
Write $\text{Jump}_{\rho}=\{\text{jump sets for $\rho$}\}$ \ (resp.\ $\text{Jump}_{\rho}^{*}=\{\text{extended jump sets for $\rho$}\}$). \end{definition} A jump set for $\rho$ will also be called $\rho$-jump set (resp.\ an extended jump set for $\rho$ will also be called an extended $\rho$-jump set).
If $A$ is a $\rho$-jump set (resp.\ an extended jump set) then we denote by $I_A$ the set $A - \rho(A)$, and by $\beta_A$ the map $\beta_A:I_{A} \to \mathbb{Z}_{\geqslant 1}$, $i \mapsto |[i,\infty) \cap A|$. This allows us to express the notion of jump sets in different, but equivalent, terms. Namely the pair $(I_A,\beta_A)$ evidently has the following three properties. \\ \\ $(1)$ \ $I_A \subseteq T_{\rho}=\mathbb{Z}_{>0}-\rho(\mathbb{Z}_{>0})$ (resp.\ $I_A \subseteq T_{\rho}^{*}=T_{\rho} \cup \{e^{*}\}$), \\ $(2)$ \ $\beta_A$ is a strictly decreasing map $\beta_A:I_A \to \mathbb{Z}_{\geqslant 1}$, \\ $(3)$ \ the map $i \mapsto \rho^{\beta_A(i)}(i)$ from $I_A$ to $\mathbb{Z}_{\geqslant 1}$ is strictly increasing. \\ \\ Suppose now we have a pair $(I,\beta)$ with the three above properties $(1),(2),(3)$. We can attach to such an $(I,\beta)$ an element $A_{(I,\beta)}$ of $\text{Jump}_{\rho}$ (resp.\ of $\text{Jump}_{\rho}^{*}$) defined as follows. If $I=\emptyset$ then $A_{(I,\beta)}=\emptyset$. Suppose now that $I$ is not empty. Then put $$A_{(I,\beta)}:=\{\rho^{n}(i)\}_{i \in I-\{\text{max}(I)\}, 0 \leqslant n <\beta(i)-\beta(s(i))} \cup \{\rho^{n}(\text{max}(I))\}_{0 \leqslant n <\beta(\text{max}(I))}, $$ where, for $i \in I-\{\text{max}(I)\}$, the element $s(i)$ denotes the successor of $i$ in $I$. The following proposition follows in a straightforward manner from the definitions. 
\begin{proposition} \label{equivalent data} The assignments $A \mapsto (I_A,\beta_A)$ and $(I,\beta) \mapsto A_{(I,\beta)}$ are inverse to each other yielding a bijection between $\emph{Jump}_{\rho}$ (resp.\ $\emph{Jump}_{\rho}^{*}$) and the set of pairs $(I,\beta)$ having the following properties: \\ \\ $\bullet$ \ $I \subseteq T_{\rho}=\mathbb{Z}_{>0}-\rho(\mathbb{Z}_{>0})$ (resp.\ $I \subseteq T_{\rho}^{*}=T_{\rho} \cup \{e^{*}\}$), \\ $\bullet$ \ $\beta$ is a strictly decreasing map $\beta:I \to \mathbb{Z}_{\geqslant 1}$, \\ $\bullet$ \ the map $i \mapsto \rho^{\beta(i)}(i)$ from $I$ to $\mathbb{Z}_{\geqslant 1}$ is strictly increasing. \end{proposition}
From now on, we shall often write $(I,\beta)$ to denote a jump set (resp.\ an extended jump set), meaning implicitly that we are identifying it with an actual jump set via the above mentioned bijection. \begin{example}
$\bullet$ There is a unique jump set having $|I|=0$, namely the empty set $A=\emptyset \in \text{Jump}_{\rho}$. \\
$\bullet$ \ A $\rho$-jump set (resp.\ extended $\rho$-jump set) $(I,\beta)$ with $|I|=1$ is given by the choice of an element, $a$, of $T_{\rho}$ (resp.\ $T_{\rho}^{*}$), and of a positive integer $m=\beta(a)$. The actual jump set will then be $\{a,\rho(a),\ldots,\rho^{m-1}(a)\}$. \\
$\bullet$ \ A $\rho$-jump set (resp.\ extended $\rho$-jump set) $(I,\beta)$ with $|I|=2$ is given by the choice of two elements, $a<b$, of $T_{\rho}$ (resp.\ $T_{\rho}^{*}$), and of two positive integers $m_1=\beta(a)>\beta(b)=m_2$, such that $\rho^{m_1-m_2}(a)<b$ (or equivalently $\rho^{m_1}(a)<\rho^{m_2}(b)$). The actual jump set will then be $\{a,\rho(a),\ldots ,\rho^{m_1-m_2-1}(a) \} \cup \{b,\rho(b),\ldots,\rho^{m_2-1}(b) \}$. \end{example} \begin{example} We now explain a general procedure to inductively construct any jump set $A$ for $\rho$ (resp.\ extended jump set). As a first step one decides whether $A=\emptyset$ or not. In case $A=\emptyset$ one has obtained a jump set and stops. Suppose instead that one wants to construct a jump set $A \neq \emptyset$. Then pick an $i_1 \in T_{\rho}$ (resp.\ in $T_{\rho}^{*}$) and a positive integer $n_1$. Consider the set $$A_1:=\{\rho^{j}(i_1)\}_{0 \leqslant j<n_1}. $$
Now you can stop and have obtained a jump set $A:=A_1$. In this case $I=\{i_1\}$ and $\beta(i_1)=n_1$. If you want instead a jump set with $|I|>1$, then you check whether there is a $y \in T_{\rho}$ (resp.\ in $T_{\rho}^{*}$) such that $\rho^{n_1}(i_1)<y$. If such a $y$ doesn't exist, then we set $A:=A_1$ and we stop having obtained a jump set (resp.\ an extended jump set). Otherwise you pick any such $y$ and put $y:=i_2$ and pick a positive integer $n_2$. Then write $$A_2:=A_1 \cup \{\rho^{j}(i_2)\}_{0 \leqslant j<n_2}. $$
Now you can stop and have obtained a jump set $A:=A_2$. In this case $I=\{i_1,i_2\}$ and $\beta(i_1)=n_1+n_2, \beta(i_2)=n_2$. If you want instead a jump set with $|I|>2$, then you check whether there is a $y \in T_{\rho}$ (resp.\ $T_{\rho}^{*}$) such that $\rho^{n_2}(i_2)<y$. If such a $y$ doesn't exist, then we set $A:=A_2$ and we stop having obtained a jump set (resp.\ an extended jump set). Otherwise you pick any such $y$ and put $y:=i_3$ and pick a positive integer $n_3$. Then write $$A_3:=A_2 \cup \{\rho^{j}(i_3)\}_{0 \leqslant j<n_3}. $$ In this case we have $I=\{i_1,i_2,i_3\}$ and $\beta(i_1)=n_1+n_2+n_3, \beta(i_2)=n_2+n_3, \beta(i_3)=n_3$.
One continues inductively as follows. Having arrived at $A_k$, together with $i_k, n_k$, for $k \in \mathbb{Z}_{\geqslant 3}$, either we set $A:=A_k$ and we have obtained a jump set, or we verify whether there exists a $y \in T_{\rho}$ (resp.\ in $T_{\rho}^{*}$) such that $\rho^{n_k}(i_k)<y$. If such a $y$ doesn't exist then we set $A:=A_k$ and we stop having obtained a jump set (resp.\ an extended jump set). Otherwise we pick any such $y$ and set $y:=i_{k+1}$, we choose a positive integer $n_{k+1}$ and write $$A_{k+1}=A_k \cup \{\rho^{j}(i_{k+1})\}_{0 \leqslant j<n_{k+1}}. $$ The set $A_{k+1}$ is a jump set for $\rho$ (resp.\ an extended jump set). In this case we have $I=\{i_1,\ldots ,i_{k+1} \}$ with $\beta(i_1)=n_1+\ldots+n_{k+1}, \beta(i_2)=n_2+\ldots+n_{k+1},\ldots, \beta(i_{k})=n_k+n_{k+1}, \beta(i_{k+1})=n_{k+1}$. \end{example} Jump sets will often arise as the set of \emph{maximal} or \emph{minimal} elements of certain sets, with respect to the following partial order. This partial order will also play an important role in the classification of the possible sets of jumps of a character. \begin{definition} Let $(a_1,b_1),(a_2,b_2)$ be in $(\mathbb{Z}_{\geqslant 1})^2$. We let $(a_1,b_1) \leqslant_{\rho} (a_2,b_2)$ if and only if $$b_2 \geqslant b_1 \ \text{and} \ \rho^{b_2}(a_2) \geqslant \rho^{b_1}(a_1). $$
\end{definition}
Let now $A$ be a subset of $T_{\rho}$ (resp.\ of $T_{\rho}^*$), and let $b:A \to \mathbb{Z}_{\geqslant 1}$. Let $\text{Max}(A,b)$ and $\text{Min}(A,b)$ be the subsets of $\text{Graph}(b)$ consisting of, respectively, the maximal and the minimal elements with respect to $\leqslant_{\rho}$. Then the following fact follows from the definition of a jump set. \begin{proposition} \label{jump set attached to a function} There are unique jump sets $(I_{(A,b)}^{+},\beta_{(A,b)}^{+})$ and $(I_{(A,b)}^{-},\beta_{(A,b)}^{-})$ \ (resp.\ extended jump sets) such that $\emph{Graph}(\beta_{(A,b)}^{+})=\emph{Max}(A,b)$ and $\emph{Graph}(\beta_{(A,b)}^{-})=\emph{Min}(A,b).$ \end{proposition}
Proposition \ref{jump set attached to a function} is repeatedly used throughout this paper. Moreover it occurs always in the same manner, namely to recover an intrinsic description of an object presented in a non-canonical fashion. This will firstly apply in the context of filtered modules in Proposition \ref{reduction process}, to reconstruct from a coordinate representation, with respect to a filtered basis (see \ref{definition of a basis}) the orbit of a vector of a free filtered module (see \ref{free modules def}) acted upon by the group of filtered automorphisms. Another example is given by Proposition \ref{reduction for characters}, where Proposition \ref{jump set attached to a function} is used to determine the set of jumps of a character. Finally it is used in the context of Eisenstein polynomials in Theorem \ref{a surprising relation} and Theorem \ref{a surprising relation 2}. \section{Filtered modules} \label{filtered modules} \subsection{Overview} The goal of this section is to use jump sets to parametrize quasi-free filtered modules (see definition \ref{def of quasi free}). As stated in Proposition \ref{rho for loc fields}, principal units give rise to a free or quasi-free filtered module. So the material of this section will provide exactly the amount of general (elementary) theory of filtered modules sufficient to classify, in terms of jump sets, the possible structures of $U_1$, as a filtered module.
The rest of the section is organized as follows:
In \ref{generalities filt mod} we will collect very general facts about filtered modules that will be applied in the other sections.
In \ref{DVR} we will specialize to the case where the base ring, $R$, is a complete DVR.
In \ref{rho} we explain how one can attach to a filtered module $M$ a non-decreasing function $\rho_M$, by looking at the action of $\pi_R$, a uniformizer in $R$, on the filtration.
In \ref{(quasi)-free modules} we introduce the notion of free filtered modules: in a precise sense they stand as universal modules among those having a fixed $\rho$-map (see \ref{Universal property of free filtered modules} for the precise universal property). Next we will introduce the notion of quasi-free filtered modules, which in a precise sense are just one step more complicated than the free ones. The goal of the rest of the section is classifying quasi-free modules.
In \ref{transitive} we will provide presentations of a quasi-free filtered module via a free filtered module and exploit the action of the filtered automorphism group of the free filtered module on the set of presentations of a given quasi-free filtered module.
In \ref{orbits} we will parametrize the set of orbits of lines in a free filtered module, under the filtered automorphism group, via jump sets.
In \ref{jump sets and quasi free} we will use \ref{transitive} and \ref{orbits} to explain how jump sets parametrize the set of quasi-free filtered modules.
In \ref{reading jump inside} we explain an internal procedure to reconstruct the jump set of a quasi-free filtered module. This will suggest a generalization which will be exploited in later sections. This will be used to detect a more general connection between phenomena in the filtration and ramification theory. See also Theorem \ref{a surprising relation 2}. \subsection{General facts about filtered modules} \label{generalities filt mod} Let $R$ be a commutative ring with unity. \begin{definition} \label{definition filtered module} \emph{A filtered $R$-module} is a sequence of $R$-modules, $M_1 \supseteq M_2 \supseteq \ldots \supseteq M_i\supseteq \ldots$ \ with $\bigcap_{i \in \mathbb{Z}_{\geqslant 1}}M_i=\{0\}$. \end{definition}
We will usually denote by $M_{\bullet}$ a filtered $R$-module $M_1 \supseteq M_2 \supseteq \ldots \supseteq M_i\supseteq \ldots$. A filtered module comes with a weight map $w:M_1 \to \mathbb{Z}_{\geqslant 1} \cup \{\infty\}$, defined as $w(x):=\sup \{i \in \mathbb{Z}_{\geqslant 1}: x \in M_i\}$. The weight map $w$ enjoys the following conditions: $w^{-1}(\{\infty\})=\{0\}$ and if $x,y \in M_1, a \in R$, then $w(x+y) \geqslant \text{min}\{w(x),w(y)\}$ and $w(ax) \geqslant w(x)$. Clearly one can recover the filtration from the knowledge of $w$, and conversely given an $R$-module $M$, together with a map $w:M \to \mathbb{Z}_{\geqslant 1} \cup \{\infty\}$ enjoying the above conditions, one can define the filtration $M_i:=\{x \in M: w(x) \geqslant i \}$. It follows that one can equivalently speak of a filtered $R$-module as a pair $(M,w)$, where $M$ is an $R$-module and $w$ is a map with the above properties. We will interchangeably denote a filtered module as $M_{\bullet}$ and as a pair $(M,w)$. \begin{definition}\label{morphism filtered module} Given $M_{\bullet},N_{\bullet}$ two filtered $R$-modules, a morphism of filtered $R$-modules $\varphi:M_{\bullet} \to N_{\bullet}$ is a morphism of $R$-modules $\varphi:M_1 \to N_1$, such that, for each positive integer $i$, $\varphi(M_i) \subseteq N_i$. \end{definition} With definitions \ref{definition filtered module} and \ref{morphism filtered module}, filtered $R$-modules form a category, which we will denote as $\text{Filt-}R\text{-mod}$. We next explain basic constructions in this category which we will use later in this section. \subsubsection{Direct products and direct sums} \label{def direct sums} Let $\{{M_h}_{\bullet}\}_{h \in \mathcal{H}}$ be a collection of filtered $R$-modules. 
The filtration $\prod_{h \in \mathcal{H}}M_{h,1} \supseteq \prod_{h \in \mathcal{H}}M_{h,2} \supseteq \ldots \supseteq \prod_{h \in \mathcal{H}}M_{h,n} \supseteq \ldots $ gives to $\prod_{h \in \mathcal{H}}M_{h,1}$ the structure of a filtered $R$-module. This filtered module behaves as a categorical direct product. The filtration $\bigoplus_{h \in \mathcal{H}}M_{h,1} \supseteq \bigoplus_{h \in \mathcal{H}}M_{h,2} \supseteq \ldots \supseteq \bigoplus_{h \in \mathcal{H}}M_{h,n} \supseteq \ldots$ gives to $\bigoplus_{h \in \mathcal{H}}M_{h,1}$ the structure of a filtered $R$-module. This filtered module behaves as a categorical direct sum. \subsubsection{Metric structure} \label{metric structure} Let $(M,w)$ be a filtered module. Fix a real number $c \in (0,1)$. Then we have a distance on $M$, defined as $d(x,y)=c^{w(x-y)}$, which gives to $M$ the structure of a metric space and of a Hausdorff topological group. In the notation $M_{\bullet}$, the topology can be alternatively described by saying that the $\{M_i\}_{i \in \mathbb{Z}_{\geqslant 1}}$ form a fundamental system of neighborhoods of $0_{M_1}$.\\
It is with respect to this metric that we will perform, in the rest of this paper, any metric or topological operation on a filtered $R$-module. For instance a filtered module $M_{\bullet}$ will be said to be complete, if $M_1$, with the above metric, is a complete metric space. There is a completion functor from $R\text{-filt-mod}$ to the full subcategory whose objects are complete filtered modules, $\text{Compl-$R$-filt-mod}$, which consists simply of completing the underlying metric space. We denote this functor by \ $\widehat{•}$\ . It is left adjoint to the inclusion functor $\text{Compl-$R$-filt-mod} \subseteq R\text{-filt-mod} $ which is the identity on both objects and morphisms. Thus one has a natural transformation of the identity, which we denote by $\text{compl}:\text{id}_{R\text{-filt-mod}} \to \widehat{•}$\ . This natural transformation consists of the natural inclusion of a filtered module $M_{\bullet}$ in its completion, which we denote by $\widehat{M_{\bullet}}$. \subsubsection{Sub-modules}
If $(M,w)$ is a filtered $R$-module, and $N \subseteq M$ an $R$-sub-module of $M$, then $(N,w_{|N})$ is a filtered $R$-module. If the filtration for $M$ is $M_1 \supseteq M_2 \supseteq \ldots \supseteq M_i \supseteq \ldots$, the one for $N$ is $N \cap M_1 \supseteq N \cap M_2 \supseteq \ldots \supseteq N \cap M_i \supseteq \ldots$. It is in this sense that we will speak of a filtered $R$-sub-module. \subsubsection{Quotients} \label{quotients} Let $M_{\bullet}$ be a filtered $R$-module and $N \subseteq M_1$ an $R$-sub-module of $M$. Then the filtration $M_1/N=(M_1+N)/N \supseteq (M_2+N)/N \supseteq \ldots \supseteq (M_i+N)/N \supseteq \ldots$, gives to $M_1/N$ the structure of a filtered $R$-module if and only if $N$ is closed. Indeed this filtration defines a fundamental system of neighbourhoods of $0_{M_1/N}$ corresponding to the quotient topology coming from $M_1$: the requirement of being a filtered module is equivalent to the requirement that this topology is Hausdorff, and the quotient of a topological group by a normal subgroup is Hausdorff iff the normal subgroup is closed, since a topological group is Hausdorff iff the origin is closed. \\
We now introduce the functors which will play an important role in the rest of the section. \begin{definition} \label{functors} (a) Let $M_{\bullet}, N_{\bullet}$ be two filtered $R$-modules, and $i,j$ two positive integers with $i \leqslant j$. Denote by $F_{i,j}(M_{\bullet}):=M_i/M_{j}$. Given a morphism of filtered $R$-modules $\varphi:M_{\bullet} \to N_{\bullet}$, denote by $F_{i,j}(\varphi)$, the induced morphism $F_{i,j}(\varphi): M_i/M_{j} \to N_i/N_{j}$. Denote by $F_{i,j}$ the functor, $F_{i,j}:\text{Filt-}R\text{-mod} \to R\text{-mod}$, obtained in this way. Denote by $F_i$ the functor $F_{i,i+1}$. \end{definition} The rest of this section describes the relations between a morphism $\varphi:M_{\bullet} \to N_{\bullet}$ of filtered $R$-modules and the sequence of morphisms $\{F_j(\varphi):F_j(M_{\bullet}) \to F_j(N_{\bullet})\}_{j \in \mathbb{Z}_{\geqslant 1}}$ of $R$-modules. We begin by describing the effect of $F_j$ on the completion morphism: \begin{remark}{\label{F_i bar}} For every positive integer $i$, the natural transformation $\text{compl}$ induces an isomorphism of functors $F_i \circ \widehat{•} \simeq_{\text{functors}}F_i$. \end{remark} Next we determine basic properties when applying $F_j$ to the inclusion of the direct sum in the direct product. \subsubsection{More on direct sum and direct product} \begin{remark}{\label{F_j preserve}} For each positive integer $j$ and $\{(M_i,w_i)\}_{i \in I}$ any collection of filtered $R$-modules, we have that \\ \\ $\bullet$ $F_j(\prod_{i \in I}M_i)=\prod_{i \in I}F_j(M_i)$ \\ $\bullet$ $F_j(\bigoplus_{i \in I}M_i)=\bigoplus_{i \in I}F_j(M_i)$ \\ $\bullet$ $F_j(\bigoplus_{i \in I}M_i\subseteq\prod_{i \in I}M_i)=(\bigoplus_{i \in I}F_j(M_i)\subseteq\prod_{i \in I}F_j(M_i))$, where in both cases we mean the natural inclusion of the direct sum in the direct product. \end{remark} \
\begin{proposition}{\label{products}} Given $\{M_{i,\bullet} \}_{i \in I}$ any collection of $R$-filtered modules, the following are equivalent: \\ \emph{(a)} The inclusion of filtered modules $\bigoplus_{i \in I}M_{i,\bullet} \subseteq \prod_{i \in I}M_{i,\bullet}$ induces a dense inclusion of metric spaces. \\ \emph{(b)} For each $ m \in \mathbb{Z}_{\geqslant 1}$ there are only finitely many $i \in I$ such that $\emph{min}(w_{M_{i,\bullet}}(M_{i,1})) \leqslant m$. \\ \emph{(c)} We have that $F_m(\bigoplus_{i \in I}M_{i,\bullet} \subseteq \prod_{i \in I}M_{i,\bullet})$ is an isomorphism for all $m \in \mathbb{Z}_{\geqslant 1}$. \begin{proof} $\text{(a)} \to \text{(b)}$ Fix $m \in \mathbb{Z}_{\geqslant 1}$. Pick a vector $v=(v_i)_{i \in I} \in \prod_{i \in I}M_{i,1}$ such that, for all $i \in I$, $v_i=0$ or $w_{M_{i,\bullet}}(v_i) \leqslant m$ holds. By assumption we can find a finite subset, $J$, of $I$, and a vector $(y_i)_{i \in I} \in \prod_{i \in I}M_{i,1}$, such that $y_i=0$ if $i \not \in J$ and $(w_{\prod_{i \in I}M_{i,\bullet}})(v_i-y_i)_{i \in I}>m$. It follows that for all $i \not \in J$, $w_{M_{i,\bullet}}(v_i)>m$. Thus for every $v=(v_i)_{i \in I} \in \prod_{i \in I}M_i$, $w_{M_{i,\bullet}}(v_i)\leqslant m$ holds for only finitely many $i \in I$, that is $\text{min}(w_{M_{i,\bullet}}(M_i))\leqslant m$ holds for only finitely many $i \in I$.
$\text{(b)} \to \text{(a)}$ Observe that assumption $\text{(b)}$ implies that $M_i=0$ holds for all but countably many $i\in I$: indeed, by assumption, the function $M_{i,\bullet} \to \text{min}(w(M_{i,1}))$ has finite fiber over every positive integer, so, except for a countable set of indices, $w_{M_{i,\bullet}}(M_{i,1})=\{\infty\}$ holds, which is equivalent (by definition of filtered module) to $M_{i,1}=0$ for all but countably many indices. So we can assume that $I=\mathbb{Z}_{\geqslant 1}$. Thus fix $v:=(v_n)_{n \in \mathbb{Z}_{\geqslant 1}} \in \prod_{i \in \mathbb{Z}_{\geqslant 1}}M_{i,1}$. Consider the sequence $\{h_l\}_{l \in \mathbb{Z}_{\geqslant 1}}:=\{(w_{l,i})_{i \in \mathbb{Z}_{\geqslant 1}}\}_{l \in \mathbb{Z}_{\geqslant 1}}$,
where $w_{l,i}=v_i$ if $i \leqslant l $, $0$ otherwise. One has that for all $m \in \mathbb{Z}_{\geqslant 1}$, $(w_{\prod_{i \in \mathbb{Z}_{\geqslant 1}}M_i})(v-w_l)>m$, holds for all but finitely many values of $l$. This means exactly that $h_l \to v$ as $l \to \infty$. Thus the inclusion of filtered modules $(\bigoplus_{i \in I}M_i,d_{\bigoplus_{i \in I}M_{i,\bullet}}) \subseteq (\prod_{i \in I}M_i,d_{\prod_{i \in I}M_{i,\bullet}})$ induces a dense inclusion of metric spaces. For the equivalence between $\text{(b)}$ and $\text{(c)}$ see Remark \ref{equivalence between a and c}. \end{proof} \end{proposition}
Finally we look at the relation between injectivity/surjectivity of $\varphi$ and the pointwise injectivity/surjectivity of the sequence $\{F_j(\varphi)\}_{j \in \mathbb{Z}_{\geqslant 1}}$: \subsubsection{Surjectivity and injectivity} \begin{proposition}{\label{phi surj}} Let $M_{\bullet},N_{\bullet}$ be two filtered modules, and $\varphi \in \emph{Hom}_{\emph{filt}}(M_{\bullet},N_{\bullet})$. Then the following holds: \\ \emph{(a)} Assume $M_{\bullet}$ complete. If for all $i \in \mathbb{Z}_{\geqslant 1}$ we have that $\emph{coker}(F_i(\varphi))=0$, then $\emph{coker}(\varphi)=0$. \\ \emph{(b)} We have that for all $i \in \mathbb{Z}_{\geqslant 1}$ the module $\emph{ker}(F_i(\varphi))$ is $0$ if and only if for all $x \in M_1$ the weights $w_{M_{\bullet}}(x)$ and $w_{N_{\bullet}}(\varphi(x))$ coincide. \\ \emph{(c)} If for all $i \in \mathbb{Z}_{\geqslant 1}$ we have that $\emph{ker}(F_i(\varphi))=0$, then $\emph{ker}(\varphi)=0$. \\ \emph{(d)} If $\varphi$ is an isomorphism then for all $i \in \mathbb{Z}_{\geqslant 1}$ the map $F_i(\varphi)$ is an isomorphism. If $M_{\bullet}$ is complete, the converse holds as well. \\ \begin{proof} (a) Let $x \in N_1$. We construct inductively sequences $\{x_n\}_{n \in \mathbb{Z}_{\geqslant 0}},\{y_n\}_{n \in \mathbb{Z}_{\geqslant 0}}$ respectively $N_1,M_1$-valued, which will do for us the following: $\{\sum_{i=0}^{n}y_i\}_{n \in \mathbb{Z}_{\geqslant 1}}$ will be a convergent sequence, with $\varphi(\sum_{i=0}^{n}y_i)-x=x_{n+1}$, with $\text{lim}_{n \to \infty} x_n=0$. Since $\varphi$ is a filtered morphism and in particular continuous, and $M_{\bullet}$ is complete, we can conclude then that $\varphi(\sum_{i=0}^{\infty}y_i)=x$. The construction of $\{x_n\}_{n \in \mathbb{Z}_{\geqslant 0}},\{y_n\}_{n \in \mathbb{Z}_{\geqslant 0}}$ goes as follows. Put $x_0=x,y_0=0$; construct $x_{n+1},y_{n+1}$ from $x_{n}$ in the following way. If $x_n=0$ put $x_{n+1}=y_{n+1}=0$. 
Otherwise $w_{N_{\bullet}}(x_n) \in \mathbb{Z}_{\geqslant 1}$ holds. Since the map $F_{w_{N_{\bullet}}(x_n)}(\varphi)$ is surjective, pick $y \in M_{w_{N_{\bullet}}(x_n)}$ such that $(\varphi)(y) \equiv x_n \ \text{mod} \ N_{w_{N_{\bullet}}(x_n)+1}$, and denote $y_{n+1}=y$ and $x_{n+1}=-\varphi(y)+x_{n}$. By construction, the sequences $\{x_n\}_{n \in \mathbb{Z}_{\geqslant 0}},\{y_n\}_{n \in \mathbb{Z}_{\geqslant 0}}$ both converge to $0$. So by the ultrametric inequality and completeness of $M_{\bullet}$ the series $\sum_{n \in \mathbb{Z}_{\geqslant 0}}y_n$ converges to an element of $M_1$, which we denote by $\overline{y}$. By construction $\varphi(\sum_{1\leqslant j \leqslant n}y_j)-x=x_{n+1} \to 0$, so, since $\varphi$ is continuous, $\varphi(\overline{y})=x$. So $\text{coker}(\varphi)=0$.
(b) By definition $M_i-M_{i+1}=\{x \in M_i, w_{M_{\bullet}}(x)=i \}$, on the other hand $\text{ker}(F_i(\varphi))=0$ iff $\varphi(M_i-M_{i+1})\subseteq N_i-N_{i+1}=\{y \in N_i, w_{N_{\bullet}}(y)=i\}$, thus $\text{ker}(F_i(\varphi))=0$ for all $i \in \mathbb{Z}_{\geqslant 1}$ iff $w_{M_{\bullet}}(x)=w_{N_{\bullet}}(\varphi(x))$ for all $x \in M_1$.
(c) Thanks to (b) the hypothesis in (c) is equivalent to $\varphi(M_i-M_{i+1})\subseteq N_i-N_{i+1}$, which implies that $\text{ker}(\varphi)\subseteq \bigcap_{i \in \mathbb{Z}_{\geqslant 1}}M_i=\{0\}$.
(d) The first implication follows from the general fact that a functor preserves isomorphisms, applied to the functors $F_i$. For the second implication: assume $M_{\bullet}$ complete, then (a) implies that $\varphi$ is surjective. On the other hand (c) implies that $\varphi$ is also injective. Thus $\varphi$ is a filtered isomorphism. \end{proof} \end{proposition} \begin{remark}{\label{phi_1}} Suppose $\varphi:M_{\bullet} \to N_{\bullet}$ is a filtered epimorphism. Then $F_1(\varphi)$ is surjective. Indeed by definition of filtered epimorphism, and the fact that $1$ is minimal in $\mathbb{Z}_{\geqslant 1}$ we have ${\varphi}^{-1}(N_1-N_2)\subseteq M_1-M_2$: since $\varphi$ is surjective, applying $\varphi$ to both sides of this relation one gets $N_1-N_2 \subseteq \varphi(M_1-M_2)$, which proves that $F_1(\varphi)$ is surjective. \end{remark} \begin{definition} \label{definition of shifted filtered modules} Let $i$ be a positive integer and let $M_{\bullet}$ be a filtered $R$-module. We define $M_{\bullet+i}$ to be the filtered $R$-module $$M_{i+1} \supseteq M_{i+2} \supseteq \ldots $$ \end{definition} \begin{proposition}{\label{first non iso is epi}}
Let $M_{\bullet},N_{\bullet}$ be two filtered modules. Let $\varphi:M_{\bullet} \to N_{\bullet}$ be a filtered epimorphism. Let $i$ be a positive integer such that $F_j(\varphi)$ is an isomorphism for every $j$ such that $1 \leqslant j \leqslant i$. Then $\varphi_{|M_{\bullet+i}}:M_{\bullet+i} \to N_{\bullet+i}$ is a filtered epimorphism and $F_{i+1}(\varphi)$ is surjective. \begin{proof}
Indeed, by Proposition \ref{phi surj}, the hypothesis is equivalent to $F_{1,i+1}(\varphi)$ being a filtered isomorphism. Thus $\varphi(M_1-M_{i+1}) \subseteq N_1-N_{i+1}$. Thus, since $\varphi$ is an epimorphism, it follows that $\varphi(M_{i+1})=N_{i+1}$, in particular by remark \ref{phi_1} we have that $F_{i+1}(\varphi)=F_1(\varphi_{|M_{\bullet+i}})$ is surjective, proving the statement. \end{proof} \end{proposition} \begin{proposition}{\label{a lot surjective}} Let $M_{\bullet},N_{\bullet}$ be two filtered modules with $M_{\bullet}$ complete. Let $\varphi$ be an element of $\emph{Hom}_{\emph{filt}}(M_{\bullet},N_{\bullet})$. The following are equivalent: \\ \emph{(a)} For every positive integer $i$, we have that $\emph{coker}(F_i(\varphi))=0$. \\
\emph{(b)} For every positive integer $i$, we have that $\emph{coker}(\varphi_{|M_i}:M_i \to N_i)=0$. \begin{proof}
$\text{(a)} \to \text{(b)}$ Let $i$ be a positive integer. For a positive integer $j>i$, the equality $F_j(\varphi_{|M_{\bullet+i}})=F_{i+j-1}(\varphi)$ trivially holds. Thus assumption $(a)$ is preserved by restriction of $\varphi$ to the filtered submodule $M_{\bullet+i}$. So Proposition \ref{phi surj} implies that $\text{coker}(\varphi_{|M_i}:M_i \to N_i)=0$.
$\text{(b)} \to \text{(a)}$ The statement trivially follows applying remark \ref{phi_1} to every filtered morphism $\varphi_{|M_i}:M_{\bullet +i} \to N_{\bullet+i}$ since they are all assumed to be epimorphisms. \end{proof} \end{proposition} \begin{proposition}{\label{last ker}} Let $M_{\bullet},N_{\bullet}$ be two filtered modules, $M_{\bullet}$ complete, and $\varphi \in \emph{Hom}_{\emph{filt}}(M_{\bullet},N_{\bullet})$. Assume $i \in \mathbb{Z}_{\geqslant 1}$ is such that $\emph{ker}(F_j(\varphi))=\emph{coker}(F_j(\varphi))=0$ for all $j>i$. Then $\emph{ker}(\varphi) \cap w_{M_{\bullet}}^{-1}\{i,\infty\}$ is an $R$-submodule, and the inclusion in $M_i$ induces an isomorphism $\emph{ker}(\varphi) \cap w_{M_{\cdot}}^{-1}\{i,\infty\} \simeq \emph{ker}(F_i(\varphi))$. \begin{proof}
Since $\text{ker}(F_j(\varphi))=0$ for all $j>i$, it follows that $\text{ker}(\varphi) \cap M_i=\text{ker}(\varphi) \cap w_{M_{\cdot}}^{-1}\{i,\infty\}$, thus proving that it is an $R$-submodule, and that the inclusion in $F_i(M_{\bullet})$ is injective. Suppose that $x \in M_i-M_{i+1}$ and that $\varphi(x) \in N_{i+1}$ holds. Thanks to the assumption $\text{ker}(F_j(\varphi))=\text{coker}(F_j(\varphi))=0$ for all $j>i$, and to Proposition \ref{phi surj}, we see that $\varphi_{|M_{\bullet+i+1}}$ is an isomorphism and thus it follows that there is exactly one $y \in M_{i+1}$ such that $\varphi(x)=\varphi(y)$. Thus, since $x \equiv x-y \ \text{mod} \ M_{i+1}$, and $x-y \in \text{ker}(\varphi)$ we obtain that the natural map from $\text{ker}(\varphi) \cap w_{M_{\bullet}}^{-1}\{i,\infty\}$ to $F_i(M_{\bullet})$ is also surjective. \end{proof} \end{proposition} \begin{corollary}{\label{last ker +}} Let $M_{\bullet},N_{\bullet}$ be two filtered modules, $M_{\bullet}$ complete, and $\varphi \in \emph{Hom}_{\emph{filt}}(M_{\bullet},N_{\bullet})$. Assume $i \in \mathbb{Z}_{\geqslant 1}$ is such that $\emph{coker}(F_j(\varphi))=0$ for all $j>i$ and $\emph{ker}(F_j(\varphi))=0$ for all $j \neq i$. Then $\emph{ker}(\varphi) \subseteq w_{M_{\cdot}}^{-1}\{i,\infty\}$, and this inclusion induces an isomorphism $\emph{ker}(\varphi) \simeq_{R\emph{-mod}}\emph{ker}(F_i(\varphi))$. \begin{proof} Clearly the assumption that $\text{ker}(F_j(\varphi))=0$ for all $j \neq i$ implies that $\text{ker}(\varphi) \subseteq w_{M_{\bullet}}^{-1}\{i,\infty\}$. Thus lemma \ref{last ker} implies that this inclusion induces an isomorphism $$\text{ker}(\varphi)=\text{ker}(\varphi) \cap w_{M_{\bullet}}^{-1}\{i,\infty\} \simeq_{R\text{-mod}}\text{ker}(F_i(\varphi)).$$ \end{proof} \end{corollary} \begin{remark} Parts (a) and (c) of Proposition \ref{phi surj} do not hold without the assumption of completeness. 
An example is given as follows: take a collection of filtered modules $\{(M_i,w_{M_i})\}_{i \in \mathbb{Z}_{\geqslant 1}}$ such that for all $ m \in \mathbb{Z}_{\geqslant 1}$ there are only finitely many $i \in I$ such that $\text{min}(w_i(M_i)) \leqslant m$. Now consider $\bigoplus_{i \in I}(M_i,w_i)\subseteq \prod_{i \in I}(M_i,w_i)$. Then $F_m(M_i)=0$ for all but finitely many $i$. Thus, by remark \ref{F_j preserve}, we have that the inclusion of the direct sum in the direct product is preserved by $F_m$, but since it is over a finite set of indices (the ones where $F_m$ does not vanish) it is also an isomorphism. But if $M_i \neq 0$ for infinitely many $i \in \mathbb{Z}_{\geqslant 1}$ the inclusion of the direct sum in the direct product is not an isomorphism. This suggests the following proposition. \end{remark} \begin{proposition}{\label{phi surj comple}} Let $M_{\bullet},N_{\bullet}$ be two filtered modules, and $\varphi \in \emph{Hom}_{\emph{filt}}(M_{\bullet},N_{\bullet})$, denote by $\hat{\varphi}:\hat{M} \to \hat{N}$ the map induced on the completions. Then the following hold: \\ \emph{(a)} If $\emph{coker}(F_i(\varphi))=0$ for all $i \in \mathbb{Z}_{\geqslant 1}$, then $\emph{coker}(\hat{\varphi})=0$. \\ \emph{(b)} If $\emph{ker}(F_i(\varphi))=0$ for all $i \in \mathbb{Z}_{\geqslant 1}$, then $\emph{ker}(\hat{\varphi})=0$. \\ \emph{(c)} $F_i(\varphi)$ is an isomorphism for every $i \in \mathbb{Z}_{\geqslant 1}$ iff $\hat{\varphi}$ is an isomorphism. \end{proposition} \begin{proof} From remark \ref{F_i bar}, we know that $F_i$ and $F_i(\text{compl})$ are isomorphic functors. Thus $\text{coker}F_i(\varphi)=0$ for all $i \in \mathbb{Z}_{\geqslant 1}$ is equivalent to $\text{coker}F_i(\hat{\varphi})=0$ for all $i \in \mathbb{Z}_{\geqslant 1}$, and $\text{ker}(F_i(\varphi))=0$ for all $i \in \mathbb{Z}_{\geqslant 1}$, is equivalent to $\text{ker}(F_i(\hat{\varphi}))=0$ for all $i \in \mathbb{Z}_{\geqslant 1}$. 
Thus the proposition follows from Proposition \ref{phi surj}. \end{proof} \begin{remark}{\label{equivalence between a and c}} Proposition \ref{phi surj comple} implies the equivalence between (b) and (c) in Proposition \ref{products}. Indeed if we have (c) of Proposition \ref{products} then we conclude that the completion of $\prod_{i \in I} M_i$ is also the completion of $\bigoplus_{i \in I} M_i$. Hence in particular $ \bigoplus_{i \in I} M_i$ is dense in $ \prod_{i \in I}M_i$. This gives that (c) implies (a). But we have shown in Proposition \ref{products} that (a) is equivalent to (b), hence (c) implies (b). Conversely it is an immediate verification that (b) implies (c). \end{remark} \subsection{Filtered modules over a complete DVR} \label{DVR} Now we specialize to the case where $R$ is a complete DVR: we ask completeness because in what follows, we want to apply Propositions \ref{phi surj}, \ref{a lot surjective}, \ref{last ker +}, and moreover it will be handy when taking filtered quotients of finitely generated modules (see \ref{quotients}). We fix a uniformizer of $R$, and we denote it by $\pi_R$. \subsubsection{The $\rho$-map} \label{rho} Let $M_{\bullet}$ be a filtered $R$-module, and denote by $w$ its weight map. Define $\rho_{M_{\bullet}}:\mathbb{Z}_{\geqslant 1} \to \mathbb{Z}_{\geqslant 1} \cup \{\infty\}$ as follows: $\rho_{M_{\bullet}}(i):=\text{sup}\{j \in \mathbb{Z}_{\geqslant 1}, \pi_RM_{i} \subseteq M_j\}$. In terms of the weight map we have that $\rho_{M_{\bullet}}(i)=\min_{x \in M_i} \{w(\pi_Rx)\}$. \begin{remark} \label{property of a linear module} The condition that $\rho_{M_{\bullet}}$ is a shift map is equivalent to the conjunction of the following two conditions: \\ (a) For all positive integers $i$, one has that $M_i/M_{i+1}$ is an $R/(\pi_R)$-vector space. Moreover $\pi_RM_i \neq 0$. \\
(b) For all positive integers $i$ the $R$-linear map ${\pi_R}_{|M_{i}}:M_i \to M_{\rho_{M_{\bullet}}(i)}$, given by multiplication by $\pi_R$, is a filtered morphism. \end{remark} \begin{definition} \label{definition of linear} We call a filtered $R$-module \emph{linear} if it satisfies (a) of remark \ref{property of a linear module}. We call a filtered $R$-module \emph{strictly linear} if it satisfies both part (a) and part (b) of remark \ref{property of a linear module}. \end{definition}
Let $M_{\bullet}$ be a linear filtered $R$-module. Multiplication by $\pi_R$ induces a map $F_i(M_{\bullet}) \to F_{\rho_{M_{\bullet}}(i)}(M_{\bullet})$, which we denote by $[\pi_R]_i$. One has by definition that $[\pi_R]_i=F_1({\pi_R}_{|{M_{i}}})$. Observe that the right hand side is well defined thanks to part (b) of remark \ref{property of a linear module}. \begin{definition} \label{definition: f, defect, codefect} Let $M_{\bullet}$ be a linear $R$-filtered module and let $i$ be a positive integer. \\ (a) We denote by $f_i(M_{\bullet}):=\text{dim}_{R/(\pi_R)}(F_i(M_{\bullet}))$. \\ (b) We denote by $\text{defect}_{M_{\bullet}}(i):=\text{dim}_{R/(\pi_R)}(\text{ker}([\pi_R]_i))$. \\ (c) We denote by $\text{codefect}_{M_{\bullet}}(i):=\text{dim}_{R/(\pi_R)}(\text{coker}([\pi_R]_i))$. \end{definition}
\subsubsection{Free filtered modules} \label{(quasi)-free modules} Fix $\rho$ a shift map. Here we introduce the class of free filtered $R$-modules with respect to $\rho$. Free filtered modules play a role in the category of filtered modules similar to the one played by free $R$-modules in the category of $R$-modules. We thus recall the role of the latter to clarify the introduction of the former.
\emph{Free $R$-modules.} \label{free R module} Recall that if $X$ is a set, then we have a covariant functor $H_{X}:R\text{-mod} \to \text{Set}$, defined on an object $M \in R\text{-mod}$ as $H_{X}(M):=\text{Hom}_{\text{Set}}(X,M)$, and defined on a morphism $\varphi:M \to N$ as $H_X(\varphi)(f):=\varphi \circ f$ for each $f \in \text{Hom}_{\text{Set}}(X,M)$. In other words $H_{X}$ is the restriction of the functor $\text{Hom}_{\text{Set}}(X,-)$ to the image of $R\text{-mod}$ in $\text{Set}$ via the forgetful functor. This functor is representable in $R\text{-mod}$: up to isomorphism there is a unique $R$ module, $N_{X}$, such that $H_X \simeq_{\text{functor}} \text{Hom}_{R\text{-mod}}(N_{X},-)$. This module is called the free module over $X$, and concretely it is the module of finite formal $R$-linear combinations of elements of $X$. By Yoneda's Lemma the different choices of an isomorphism $\Phi: \text{Hom}_{R\text{-mod}}(N_X,-) \to H_X$, correspond to the different choices of ${\Phi}_{N_X}(\text{id}_{N_X}):X \to N_{X}$, which are the different choices of a basis $\mathcal{B}$ for $N_X$ together with a bijection between $\mathcal{B}$ and $X$. Again, by Yoneda's Lemma, the set $\text{Isom}_{\text{functors}}(H_X,N_X)$ is a torsor under $\text{Aut}_{R\text{-mod}}(N_X)$.
Free $R$-modules are the easiest $R$-modules, and once we trivialize $\text{Isom}_{\text{functors}}(H_X,N_X)$, by the choice of a basis $\Phi$, then, by construction, for any $R$-module $M$, the set $\text{Hom}_{R\text{-mod}}(N_X,M)$ is in natural bijection with $\text{Hom}_{\text{Set}}(X,M)$, via $\Phi$. Thus we can easily use suitable free $R$-modules to present other modules. The ease in defining presentations $N_X \twoheadrightarrow M$, once a trivialization $\Phi$ is chosen, has the price of obscuring structural information about $M$. Thus one is led to look for properties of the presentation which are invariant under $\text{Aut}_{R\text{-mod}}(N_X)$. This is exactly the path we will follow in attaching jump sets to special filtered modules. So, first, we need to define the analogue of a free filtered module, which we do next.
\emph{Free filtered $R$-modules.} \label{shit} First we introduce the analogue of the functors $H_X$ of the previous paragraph. Consider pairs $(X,g)$, where $X$ is a set and $g$ is a map $g:X \to \mathbb{Z}_{\geqslant 1}$. Denote by $\rho\text{-Filt-}R\text{-mod}$ the full sub-category of $\text{Compl-Filt-}R\text{-mod}$, having as objects complete linear $R$-filtered modules $M_{\bullet}$ such that $\rho_{M_{\bullet}} \geqslant \rho$. Consider the functor $H_{(X,g)}:\rho\text{-Filt-}R\text{-mod} \to \text{Set}$, defined on an object $M_{\bullet} \in \rho\text{-Filt-}R\text{-mod}$ as $H_{(X,g)}(M_{\bullet}):=\{f \in \text{Hom}_{\text{Set}}(X,M_1): \text{for all $x$ in $X$,} \ w(f(x)) \geqslant g(x) \}$, and defined on morphisms by left composition. The goal of this paragraph is show that this functor is representable. We start with the simplest possible case of a pair $(X,g)$ with $X=\{x\}$ being a point. Put $n:=g(x)$. Clearly the functor depends only on $n$, so, for simplicity, we will denote it by $H_n$. \begin{definition} The $n$-th standard filtered module, $S_n$, for $\rho$, is given by: $S_n=R$, with weight map defined as $w(x)=\rho^{\text{ord}_R(x)}(n)$, for all $x$ in $R$. \end{definition} Observe that $S_n$ is an object of $\rho\text{-Filt-}R\text{-mod}$ (recall that $R$ is assumed complete). It turns out that it represents $H_n$. \begin{proposition} \label{small universal property} The functor $H_n$ is represented by $S_n$. \begin{proof}
Observe that by definition $H_n$ is simply the functor sending $M_{\bullet}$ to the set $M_n$, and sending a morphism $ \varphi:M_{\bullet} \to N_{\bullet}$ to the restriction $\varphi_{|_{M_n}}:M_n \to N_n$. So it suffices to prove that given $M_{\bullet} \in \rho\text{-Filt-}R\text{-mod}$, and given $v \in M_n$, the unique $R$-linear morphism from $R$ to $M_1$ sending $1 \mapsto v$, is a filtered morphism from $S_n$ to $M_{\bullet}$, and that these are all the possible filtered morphism from $S_n$ to $M_{\bullet}$. But this follows directly from the definition of $S_n$ and the fact that $M_{\bullet}$ is an object of $\rho\text{-Filt-}R\text{-mod}$. \end{proof} \end{proposition}
Now we can prove that $H_{(X,g)}$ is representable for any set $X$ and any map $g:X \to \mathbb{Z}_{\geqslant 1}$. For a positive integer $i$ denote by $c_{(X,g)}(i):=|g^{-1}(i)|$. Given $c$ a cardinal number and $N_{\bullet}$ a filtered module, denote by $N_{\bullet}^{(c)}$ the direct sum of $c$ copies of $N_{\bullet}$. \begin{proposition} \label{Universal property of free filtered modules} The functor $H_{(X,g)}$ is represented by the filtered $R$-module $\prod_{i \in \mathbb{Z}_{\geqslant 1}}\widehat{S_i^{(c_{(X,g)}(i))}}$. \begin{proof} The functor $H_{(X,g)}$ is isomorphic to the direct product of the functors $H_{g(x)}$ as $x$ varies in $X$. So it follows from Proposition \ref{products}, Claim \ref{small universal property} and the universal property of the completion, that $H_{(X,g)}$ is isomorphic to the functor $\text{Hom}_{\text{filt}}(\prod_{i \in \mathbb{Z}_{\geqslant 1}}\widehat{S_i^{(c_{(X,g)}(i))}},-)$. \end{proof} \end{proposition} \begin{remark} Let $i$ be a positive integer. If $c_{(X,g)}(i)$ \emph{finite}, then we can omit the completion of the factor $S_i^{(c_{(X,g)}(i))}$, since it is already a complete filtered module. In our application $c_{(X,g)}(i)$ will always be finite. \end{remark} An object $M_{\bullet}$ in $\rho\text{-Filt-}R\text{-mod}$, representing $H_{(X,g)}$ (so by Yoneda's Lemma and by Proposition \ref{Universal property of free filtered modules}, isomorphic to $\prod_{i \in \mathbb{Z}_{\geqslant 1}}\widehat{S_i^{(c_{(X,g)}(i))}}$), is said to be free on $(X,g)$. Motivated by the discussion in the above paragraph on free modules, we introduce the following notion. \begin{definition}\label{definition of a basis} Let $M_{\bullet}$ be in $\rho\text{-Filt-}R\text{-mod}$ a free module on $(X,g)$. A \emph{filtered} basis for $M_{\bullet}$ is an element of $\text{Isom}_{\text{functor}}(\text{Hom}_{\text{filt}}(M_{\bullet},-),H_{(X,g)})$. 
\end{definition} Given a filtered basis $\Phi$ for $M_{\bullet}$, one recovers a more concrete version of the notion of a filtered basis, by means of Yoneda's Lemma, taking $\Phi_{M_{\bullet}}(\text{id}_{M_{\bullet}}):X \to M_1$. The image of this map generates a free $R$-module that is dense in $M_1$ (coinciding with $M_1$ if and only if $X$ is finite; observe that for $X$ infinite the resulting module is never free as an $R$-module).
Clearly, the functor $H_{(X,g)}$ depends only on the map $c_{(X,g)}$. So from now on we will directly speak of the functors $H_{f^{*}}$, where $f^{*}$ is a map from $\mathbb{Z}_{\geqslant 1}$ to the cardinal numbers.
We next give an internal criterion for a filtered module to be representing the functor $H_{f^{*}}$, under the assumption that $f^{*}$ is supported in $T_{\rho}$, that is, we assume that $f^{*}(\text{Im}(\rho))=\{0\}$. \begin{proposition} \label{charact. free-filt-mod} Let $M_{\bullet}$ be an object of $\rho\emph{-Filt-}R\emph{-mod}$, and $f^{*}$ as above. Then the following are equivalent: \\ \emph{(a)} For every positive integer $i$ one has $\emph{defect}_{M_{\bullet}}(i)=\emph{codefect}_{M_{\bullet}}(i)=0$. Moreover if $i$ is in $T_{\rho}$, one has $f_i(M_{\bullet})=f^{*}(i)$. \\ \emph{(b)} One has an isomorphism of functors $H_{f^{*}} \simeq_{\emph{functor}}\emph{Hom}_{\emph{filt}}(M_{\bullet},-)$. \\ \emph{(c)} One has an isomorphism of filtered modules $M_{\bullet} \simeq_{\emph{filt}} \prod_{i \in \mathbb{Z}_{\geqslant 1}}\widehat{S_i^{f^{*}(i)}}$. \begin{proof} The equivalence between (b) and (c) is an immediate consequence of Proposition \ref{Universal property of free filtered modules} and Yoneda's Lemma. It is a straightforward verification that (c) implies (a). We prove that (a) implies (c).
For every positive integer $i$ in $T_{\rho}$, lift a basis of $M_i/M_{i+1}$ to $M_i$ and denote it by $\mathcal{B}_i$. The inclusion $\bigcup_{i \in T_{\rho}} \mathcal{B}_i \subseteq M_1$ defines an element of $H_{f^{*}}(M_{\bullet})$, which thus gives, thanks to Proposition \ref{Universal property of free filtered modules}, a filtered morphism $\varphi: \prod_{i \in \mathbb{Z}_{\geqslant 1}} \widehat{S_i^{f^{*}(i)}} \to M_{\bullet}$. We claim that $\varphi$ is an isomorphism.
Indeed by construction $F_i(\varphi)$ is an isomorphism for every $i$ in $T_{\rho}$. But together with the fact that for every positive integer $i$ one has $\text{defect}_{M_{\bullet}}(i)=\text{codefect}_{M_{\bullet}}(i)=0$, this easily implies that for every positive integer $i$, the map $F_i(\varphi)$ is an isomorphism. So, since $M_{\bullet}$ is complete, we conclude by part (d) of Proposition \ref{phi surj}. \end{proof} \end{proposition} \begin{definition} \label{free modules def} Let $M_{\bullet}$ be an object of $\rho\text{-Filt-}R\text{-mod}$, and $f$ a positive integer. Then we call $M_{\bullet}$ a $(f,\rho)$-\emph{free filtered module} if it satisfies any of the equivalent conditions of Proposition \ref{charact. free-filt-mod}, with respect to the constant map $T_{\rho} \to \mathbb{Z}_{\geqslant 1}$, $i \mapsto f$. \end{definition} We denote by $M_{\rho}:=\prod_{i \in T_{\rho}}S_i$, i.e. the $(1,\rho)$-free filtered module. So $M_{\rho}^f$ is the $(f,\rho)$-free filtered module.
We next introduce the class of filtered modules that, together with those described in this paragraph, will suffice to classify the possible filtered structures of $U_1$. \subsubsection{Quasi-free filtered $R$-modules} Recall that in case $\rho$ is a shift with $\#T_{\rho}< \infty$, then we denote by $e_{\rho}^{*}=\text{max}(T_{\rho})+1$. Moreover we define $e_{\rho}^{'}$ to be the unique positive integer such that $\rho(e_{\rho}')=e_{\rho}^{*}$. \begin{definition} \label{def of quasi free} Let $M_{\bullet}$ be an object of $\rho\text{-Filt-}R\text{-mod}$. Then we call it $(f,\rho)$-quasi-free if it satisfies the following three conditions: \\ (a) For every positive integer $i$, we have that $f_i(M_{\bullet})=f$. \newline (b) If $T_{\rho}$ is finite (resp.\ if $T_{\rho}$ is not finite), for every positive integer $i$ different from $e_{\rho}^{'}$ (resp.\ for every positive integer $i$), one has $\text{defect}_{M_{\bullet}}(i)=\text{codefect}_{M_{\bullet}}(i)=0$. \newline (c) If $T_{\rho}$ is finite one has that $\text{defect}_{M_{\bullet}}(e'_{\rho}) \leqslant 1$. \end{definition} So we see that if $T_{\rho}$ is not finite the notion of a $(f,\rho)$-quasi-free module coincides with the notion of a $(f,\rho)$-free module. We characterize this distinction with a module-theoretic property: \begin{proposition}{\label{finite module cofinite map}} Let $M_{\bullet}$ be a $(f,\rho)\text{-quasi-free}$ filtered module. Then the following are equivalent: \\ \emph{(a)} $T_{\rho}$ is finite, \\ \emph{(b)} $M_1$ is finitely generated. \begin{proof}
$\text{(a)} \to \text{(b)}$ Since all the $F_i(M_{\bullet})$ are finite dimensional, (b) is equivalent to the statement that for some positive integer $n$, the $R$-module $M_n$ is finitely generated. But for $n>e'_{\rho}$, the filtered $R$-module $M_{\bullet+n}$ is a $(f,\tau_{|T_{\rho}|})$-free-module, where for a positive integer $m$, the symbol $\tau_m$ denotes the shift sending any positive integer $x$ to $x+m$. So one concludes by Proposition \ref{charact. free-filt-mod}.
$\text{(b)} \to \text{(a)}$ Since $M_1$ is finitely generated, so is $M_n$. But for $n>e'_{\rho}$, one has that $M_{\bullet+n}$ is a $(f,\rho \circ \tau_{n-1})$-free module. So by Proposition \ref{charact. free-filt-mod} one has that $T_{\rho \circ \tau_{n-1}}$ is finite, which is equivalent to saying that $T_{\rho}$ is finite. \end{proof} \end{proposition}
Until the end of the next paragraph, we will restrict to the case that $T_{\rho}$ is finite or equivalently that $M_1$ is finitely generated. We will work again in greater generality only from Section \ref{transitive} onward.
We now recover the distinction between $(f,\rho)$-quasi-free and $(f,\rho)$-free with a module-theoretic property. \begin{proposition} \label{quasi free via torsion} Let $M_{\bullet}$ be a $\rho \text{-}\emph{Filt-}R \text{-}\emph{Mod}$ such that $f_i(M_{\bullet})=f$ for every positive integer $i$, and $\emph{defect}_{M_{\bullet}}(j)=\emph{codefect}_{M_{\bullet}}(j)=0$ for every positive integer $j \neq e'_{\rho}$. Then the following are equivalent: \\ \emph{(a)} $M_{\bullet}$ is $(f,\rho)$-quasi-free. \\ \emph{(b)} $M_{1}[\pi_R] $ is a cyclic $R$-module. \begin{proof} Given the hypothesis we have to prove that $\text{defect}_{M_{\bullet}}(e'_{\rho}) \leqslant 1$ is equivalent to $M[\pi_R]$ cyclic. One has that multiplication by $\pi_R$ is a filtered morphism $\pi_R:M_{\bullet+e'_{\rho}-1} \to M_{\bullet+e_{\rho}^{*}-1}$. Thus the conclusion follows immediately from Corollary \ref{last ker +}. \end{proof} \end{proposition} In particular we have the following. \begin{corollary} \label{where is the torsion} Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free module which is not $(f,\rho)$-free. Then we have an isomorphism $M[\pi_R] \simeq_{R\emph{-mod}} R/\pi_R$ and $w_{M_{\bullet}}(M[\pi_R])=\{e'_{\rho},\infty \}$. \end{corollary} \subsubsection{Presentations of a quasi-free modules are conjugate} \label{transitive} We keep assuming that $T_{\rho}$ is finite. Let $f$ be a positive integer. We will proceed classifying $(f,\rho)$-quasi-free modules with the help of the additional free module $M_{\rho}^{*}:=M_{\rho} \oplus S_{e_{\rho}^{*}}$: we will use the module $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$. This module is the free module over the map $f^{*}:\mathbb{Z}_{\geqslant 1} \to \mathbb{Z}_{\geqslant 1}$ defined as $f^{*}(i)=f$ for $i \in T_{\rho}$, $f^{*}(e_{\rho}^{*})=1$ and $f^{*}(i)=0$ for all the other $i$. So we fix an isomorphism between $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ and $H_{f^{*}}$, that is we fix a filtered basis for $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$. 
Let now $M_{\bullet}$ be a $(f,\rho)$-quasi-free module that is not $(f,\rho)$-free. We call a subset $\mathcal{B} \subseteq M_1$ a quasi-basis if it consists of the union of the lifting of a basis of $M_{i}/M_{i+1}$ for each $i \in T_{\rho}$ together with the lifting of a generator of $\text{coker}[\pi_R]_{{e'_{\rho}}}$ (this cokernel is 1-dimensional because the kernel is 1-dimensional and we assume that $f_i(M_{\bullet})$ is constantly $f$). By the universal property proved in Proposition \ref{Universal property of free filtered modules}, each inclusion of a quasi-basis $\mathcal{B} \subseteq M_1$ gives uniquely (via the above choice of a filtered basis for $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$) a morphism $\varphi_{\mathcal{B}}:M_{\rho}^{f-1} \oplus M_{\rho}^{*} \to M_{\bullet}$. \begin{proposition} \label{presentations} For each quasi-basis $\mathcal{B}$, one has that $\varphi_{\mathcal{B}}$ is a filtered epimorphism. \begin{proof} By construction for each $i \in T_{\rho}^{*}$, one has that $F_i(\varphi_{\mathcal{B}})$ is surjective. But since for both modules one has that $[\pi_R]_i$ is surjective for $i$ different from $e'_{\rho}$, and at $e_{\rho}^{*}$ a generator of the co-kernel has been added, one clearly concludes that $F_i(\varphi_{\mathcal{B}})$ is surjective for all $i$, by repeatedly using the above conditions and the multiplication by $\pi_R$. Since $M_{\bullet}$ is complete, we conclude with Proposition \ref{phi surj}. \end{proof} \end{proposition} We have found for $M_{\bullet}$ presentations with the easiest possible type of filtered module with the given constraints (namely those on the $\rho$-map) and in a minimal way: $M_{\bullet}$ and $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ have the same minimal number of generators as $R$-modules. This presentation is obtained via the choice of a quasi-basis. 
To read off the intrinsic structure of $M_{\bullet}$ via these presentations we proceed looking at the action of $\text{Aut}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$ on $\text{Epi}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*},M_{\bullet})$, in search of invariants. The next proposition is then quite relevant for us. \begin{proposition} \label{transitive action} The action of $\emph{Aut}_{\emph{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$ on $\emph{Epi}_{\emph{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*},M_{\bullet})$ is transitive. \begin{proof} Recall from definition \ref{functors} that for a positive integer $i$ we denote by $F_{1,i}$ the functor $F_{1,i}:\text{Filt-}R\text{-mod} \to \text{Filt-}R\text{-mod}$, defined as $F_{1,i}(N_{\bullet}):=N_1/N_i$, with the quotient filtration, on the objects, and on a morphism $\varphi:M_{\bullet} \to N_{\bullet}$, one has that $F_{1,i}(\varphi)$ is defined as the morphism induced by $\varphi$, from $F_{1,i}(M_{\bullet})$ to $F_{1,i}(N_{\bullet})$. Fix $\mathcal{B}$ a quasi-basis of $M_{\bullet}$. Take $\varphi \in \text{Epi}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*},M_{\bullet})$. We claim that we can find a filtered basis $\mathcal{B}'$ of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ such that $\varphi(\mathcal{B}')=\mathcal{B}$. We prove this in 2 steps.
1) Firstly we observe that $F_{1,{e_{\rho}}^{*}}(\varphi)$ is an isomorphism. Indeed by construction $F_i(\varphi)=F_i(F_{1,e_{\rho}^{*}}(\varphi))$ is an isomorphism for each $i \in T_{\rho}$, and on both sides the $[\pi_R]_{i}$-maps are isomorphisms for each $i<e'_{\rho}$ since both sides are $(f,\rho)$-quasi-free. So it follows that the maps $F_i(\varphi)$ are isomorphisms for all $i<e_{\rho}^{*}$. So the observation is proved by Proposition \ref{phi surj}. This provides us with the piece of the filtered basis corresponding to the elements $x \in \mathcal{B}$ with $w(x) \in T_{\rho}$.
2) Take the unique $x \in \mathcal{B}$ with $w(x)=e_{\rho}^{*}$. By Proposition \ref{first non iso is epi}, together with Step 1), we can find $y \in \varphi^{-1}(x)$ with $w(y)=e_{\rho}^{*}$. Now we claim that $y$ must generate $\text{coker}[\pi_R]_{e'_{\rho}}$. Since this is a 1-dimensional $R/(\pi_R)$-vector space, this is equivalent to claiming that $y$ is not the $0$-class in that cokernel. But if it were the $0$-class, then there would exist $z$ with $w(z)=e'_{\rho}$, such that $\pi_Rz=y \ \text{mod} \ {(M_{\rho}^{f-1} \oplus M_{\rho}^{*})}_{e_{\rho}^{*}+1}$. But, from Step 1), it follows that $w(\varphi(z))=e'_{\rho}$, but then, since $x=\pi_R\varphi(z) \ \text{mod} \ {(M_{\rho}^{f-1} \oplus M_{\rho}^{*})}_{e_{\rho}^{*}+1}$, we see that $x$ is in the $0$-class in $\text{coker}[\pi_R]_{e'_{\rho}}$, which is a contradiction.
So given $\varphi_1,\varphi_2 \in \text{Epi}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*},M_{\bullet})$, there are two filtered basis $\mathcal{B}_1, \mathcal{B}_2$ of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ mapping to $\mathcal{B}$ via respectively $\varphi_1,\varphi_2$ as explained above. It follows that there exists a suitable bijection, $\theta$, between $\mathcal{B}_1$ and $\mathcal{B}_2$ that respects the weights and such that $\varphi_2 \circ \theta=\varphi_1$ on $\mathcal{B}_1$. But then, by Proposition \ref{Universal property of free filtered modules}, we have that $\theta$ extends to a filtered automorphism of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ and $\varphi_2 \circ \theta=\varphi_1$ holds on all $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ and we are done. \end{proof} \end{proposition} Proposition \ref{transitive action} and Proposition \ref{presentations} tell us that to classify-$(f,\rho)$-quasi-free filtered modules we have to accomplish two tasks: \\ (a) Classify the orbits of vectors in $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ under $\text{Aut}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$. \\ (b) Recover which orbits of task (a) arise from $(f,\rho)$-quasi-free filtered modules. \\ This is what we do next.
\subsubsection{Jump sets parametrize orbits} \label{orbits} We keep denoting by $\rho$ a shift map, and by $f$ a positive integer. Whenever a star is added, and we refer to an extended jump set in the following statements, we will be implicitly assuming that, in that case, $T_{\rho}$ is finite. On the other hand, in the parts of the statements where there is no star and we refer to regular jump sets, we only require $\rho$ to be a shift. We begin by attaching to each jump set a vector. \begin{definition} Let $(I,\beta)$ be a $\rho$-jump set (resp.\ an extended $\rho$-jump set). We denote by $v_{(I,\beta)}$ the following vector of $\pi_RM_{\rho}^f$ (resp.\ $\pi_R(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$): for each $i \in T_{\rho}$ (resp.\ in $T_{\rho}^{*}$) with $i \not \in I$, the projection of $v_{(I,\beta)}$ on $S_{i}^{f}$ (resp.\ the same and on $S_{e_{\rho}^{*}}$) is $0$. For each $i \in I$, the projection of $v_{(I,\beta)}$ on $S_{i}^{f}$ (resp.\ the same and on $S_{e_{\rho}^{*}}$) is the vector $(\pi_R^{\beta(i)},0,\ldots ,0)$, having $\pi_R^{\beta(i)}$ on the first coordinate and $0$ on all the others (resp.\ $(\pi_{R}^{\beta(e_{\rho}^{*})})$). \end{definition} We now prove that with the map $(I,\beta) \mapsto v_{(I,\beta)}$ we catch each orbit at least once. For a vector $v \in \pi_RM_{\rho}^f$ (resp.\ $\pi_RM_{\rho}^{f-1} \oplus \pi_RM_{\rho}^{*}$), denote by $A_v$ the set of elements of $T_{\rho}$ (resp.\ $T_{\rho}^{*}$), such that $\text{proj}_{i}(v) \neq 0$, where $\text{proj}_{i}$ denotes the projection on the factor $S_{i}^{f}$ (resp.\ the same if $i \in T_{\rho}$ and we look at $\text{proj}_{S_{e_{\rho}^{*}}}$ for $i=e_{\rho}^{*}$). For $a \in A_v$ define $b_v(a)=\text{ord}_{R}(\text{proj}_{i}(v))$, the valuation of the $a$-th projection. Recall the definition of $(I_{(A_v,b_v)}^{-},\beta_{(A_v,b_v)}^{-})$ from Proposition \ref{jump set attached to a function}. 
\begin{proposition} \label{reduction process} For each $v \in \pi_RM_{\rho}^f$ (resp.\ $\pi_RM_{\rho}^{f-1} \oplus \pi_RM_{\rho}^{*}$) there exists an automorphism $\theta \in \emph{Aut}_{\emph{filt}}(M_{\rho}^f)$ (resp.\ $\theta \in \emph{Aut}_{\emph{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$) such that $\theta(v)=v_{(I,\beta)}$, where $(I,\beta):=(I_{(A_v,b_v)}^{-},\beta_{(A_v,b_v)}^{-})$. \begin{proof} Clearly we can find a filtered automorphism $\theta_0$ such that $\theta_0(v)=v_{(A_v,b_v)}$, where $v_{(A_v,b_v)}$ denotes the following vector of $\pi_RM_{\rho}^f$ (resp.\ $\pi_R(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$): for each $i \in T_{\rho}$ (resp.\ $T_{\rho}^{*}$) with $i \not \in A_v$, the projection of $v_{(A_v,b_v)}$ on $S_i^f$ (resp.\ the same and to $S_{e_{\rho}^{*}}$) is $0$, while for each $i \in A_v$, the projection of $v_{(A_v,b_v)}$ on $S_i^f$ is $(\pi_R^{b_v(i)},0, \ldots ,0)$ (resp.\ the same and for $i=e_{\rho}^{*}$ is $(\pi_R^{b_v(e_{\rho}^{*})})$). So without loss of generality, we can assume that $v$ has this special form.
Next, let $(i,b_v(i)) <_{\rho} (j,b_v(j))$. That means that either $i<j$ and $b_v(i)<b_v(j)$, or that $i>j$ and $\rho^{b_v(i)}(i)<\rho^{b_v(j)}(j)$. Observe that in either case we have $b_v(i)<b_v(j)$ and the $R$-linear automorphism $\theta_{i,j}$ on $M_{\rho}$ (resp.\ $M_{\rho}^{*}$), defined as $\theta_{i,j}((x_h)_{h \in T_{\rho}})=(x_h-\pi_R^{b_v(j)-b_v(i)}\delta_{i,h}x_j)_{i \in T_{\rho}}$ (resp.\ as $\theta_{i,j}((x_h)_{h \in T_{\rho}^{*}})=(x_h-\pi_R^{b_v(j)-b_v(i)}\delta_{i,h}x_j)_{i \in T_{\rho}^{*}}$) is \emph{filtered}, precisely due to the above inequalities. Clearly we can extend $\theta_{i,j}$ to a filtered automorphism of $M_{\rho}^f$ (resp.\ $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$) by simply letting it act as the identity on the complementary factor $M_{\rho}^{f-1}$. The obtained filtered automorphism $\theta_{i,j}$ satisfies the identity
$$A_{\theta_{i,j}(v)}=A_v-\{j\}, b_{\theta_{i,j}(v)}={b_{v}}_{|A_v-\{j\}}. $$ If $T_{\rho}$ is finite, by repeatedly applying transformations $\theta_{i,j}$ we end up precisely having constructed a $\theta$ as claimed in this Proposition. If $T_{\rho}$ is infinite, one can repeatedly apply such elementary transformations $\theta_{i,j}$ in a sequence that \emph{converges} to a filtered automorphism $\theta$ with the desired property. \end{proof} \end{proposition} For a vector $v \in \pi_RM_{\rho}^f$ (resp.\ in $\pi_RM_{\rho}^{f-1} \oplus \pi_RM_{\rho}^{*}$), denote by $g_v$ the map $g_v: \mathbb{Z}_{\geqslant 0} \to \mathbb{Z}_{\geqslant 0} \cup \{\infty\}$ defined as $g_v(i):=w_{M_{\rho}^f/\pi_R^{i}M_{\rho}^f}(v)$ (resp.\ $g_v(i):=w_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}/\pi_R^{i}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})}(v)$). Here $w_{M_{\rho}^f/\pi_R^{i}M_{\rho}^f}(v)$ (resp.\ $w_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}/\pi_R^{i}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})}(v)$) denotes the weight of $v$ in the $R$-module $M_{\rho}^f/\pi_R^{i}M_{\rho}^f$ (resp. $M_{\rho}^{f-1} \oplus M_{\rho}^{*}/\pi_R^{i}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$) viewed as a filtered $R$-module with the quotient filtration (see Section \ref{quotients}). Say that $g_v$ \emph{breaks} at $i$ if $g_v(i) \neq g_{v}(i+1)$. \begin{proposition} \label{reconstruction process} Let $(I,\beta)$ be a $\rho$-jump set (resp.\ an extended $\rho$-jump set). Let $v_{(I,\beta)} \in \pi_RM_{\rho}^f$ (resp.\ $v_{(I,\beta)} \in \pi_RM_{\rho}^{f-1} \oplus \pi_RM_{\rho}^{*}$). Then $g_{v_{(I,\beta)}}$ breaks at $i$ if and only if $i \in \beta(I)$. Moreover if $i \in I$, then we have that $g_{v_{(I,\beta)}}(\beta(i)+1)=\rho^{\beta(i)}(i)$. \begin{proof} Let $n$ be a positive integer such that there exists an $i \in I$ with $\beta(i)<n$. Denote by $i_0$ the smallest such $i$. 
Fix the standard basis for $M_{\rho}^f$ (resp.\ for $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$) and denote it by $\{b_{ij}: i \in T_{\rho}, \ j \in \{1, \ldots ,f\}\}$ (resp.\ denote it by $\{b_{ij}: i \in T_{\rho}, \ j \in \{1, \ldots ,f\}\} \cup \{b_{e_{\rho}^{*},1}\}$). In this notation we have that $v_{(I,\beta)}=\sum_{i \in I} \pi_R^{\beta(i)}e_{i,1}$. It is clear that $$v_{(I,\beta)} \equiv \sum_{i \in I: \beta(i)<n}\pi_R^{\beta(i)}e_{i,1} \ \text{mod} \ \pi_R^{n} M_{\rho}^f. $$ (resp.\ $v_{(I,\beta)} \equiv \sum_{i \in I: \beta(i)<n}\pi_R^{\beta(i)}e_{i,1} \ \text{mod} \ \pi_R^{n}\cdot(M_{\rho}^{f-1} \oplus M_{\rho}^{*}) $). Observe that, thanks to the definition of a jump set, we have that $$w_{M_{\rho}^f}(\sum_{i \in I: \beta(i)<n}\pi_R^{\beta(i)}e_{i,1})=\rho^{\beta(i_0)}(i_0). $$ (resp.\ $w_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}}(\sum_{i \in I: \beta(i)<n}\pi_R^{\beta(i)}e_{i,1})=\rho^{\beta(i_0)}(i_0) $). Therefore we conclude that $$w_{M_{\rho}^f/\pi_R^nM_{\rho}^f}(\sum_{i \in I: \beta(i)<n}\pi_R^{\beta(i)}e_{i,1}) \geqslant \rho^{\beta(i_0)}(i_0). $$ (resp.\ $w_{(M_{\rho}^{f-1} \oplus M_{\rho}^{*})/\pi_R^n\cdot(M_{\rho}^{f-1} \oplus M_{\rho}^{*})}(\sum_{i \in I: \beta(i)<n}\pi_R^{\beta(i)}e_{i,1}) \geqslant \rho^{\beta(i_0)}(i_0) $). We next prove that this inequality is actually an equality which clearly gives the desired result.
Let $x \in M_{\rho}^f$ (resp.\ in $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$). We claim that $$w_{M_{\rho}^f}(\pi_R^nx+\sum_{i \in I: \beta(i)<n}\pi_R^{\beta(i)}e_{i,1}) \leqslant \rho^{\beta(i_0)}(i_0). $$ (resp.\ $w_{M_{\rho}^{f-1}\oplus M_{\rho}^{*}}(\pi_R^nx+\sum_{i \in I: \beta(i)<n}\pi_R^{\beta(i)}e_{i,1}) \leqslant \rho^{\beta(i_0)}(i_0) $). Indeed if this claim would not hold we must conclude that $$w_{M_{\rho}^f}(\pi_R^nx)=\rho^{\beta(i_0)}(i_0). $$ (resp.\ $w_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}}(\pi_R^nx)=\rho^{\beta(i_0)}(i_0) $). But, by construction of the free filtered modules, we have that $w_{M_{\rho}^f}(\pi_R^nx)=\rho^n(w_{M_{\rho}^f}(x))$ (resp.\ $w_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}}(\pi_R^nx)=\rho^n(w_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}}(x))$). This implies that $\rho^{n-\beta(i_0)}(w_{M_{\rho}^f}(x))=i_0$, contradicting that $i_0 \in T_{\rho}$, since $n>\beta(i_0)$ by construction (resp.\ it implies that $\rho^{n-\beta(i_0)}(w_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}}(x))=i_0$. In case $i_0<e_{\rho}^{*}$, it again contradicts that $i_0 \in T_{\rho}$. If $i_0=e_{\rho}^{*}$ we would conclude that $b_{e_{\rho}^{*},1} \in \pi_R \cdot (M_{\rho}^{f-1} \oplus M_{\rho}^{*}$), which is not possible). This ends the proof. \end{proof} \end{proposition} This allows us to conclude the following important corollary. \begin{corollary} \label{at most one} In each orbit $\mathcal{O}$ of $\pi_RM_{\rho}^f$ under $\emph{Aut}_{filt}(M_{\rho}^f)$ (resp.\ $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ under $\emph{Aut}_{filt}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$), there exist at most one $\rho$-jump set (resp.\ extended $\rho$-jump set) such that $v_{(I,\beta)}$ belongs to $\mathcal{O}$. \begin{proof} Clearly the function $g_v$ is preserved by applying a filtered automorphism. But by Proposition \ref{reconstruction process} it follows that from the function $g_{v_{(I,\beta)}}$ one can reconstruct $(I,\beta)$. The conclusion follows. 
\end{proof} \end{corollary} So, putting together Proposition \ref{reduction process} and \ref{at most one}, we see that with the map $(I,\beta) \mapsto v_{(I,\beta)}$ we catch each orbit exactly once: \begin{theorem} \label{bijection orbits jumps} The map $(I,\beta) \to v_{(I,\beta)}$ induces a bijection between the set of $\rho$-jump sets (resp.\ extended $\rho$-jump sets) and the set of orbits of $\pi_RM_{\rho}^f$ under the action of $\emph{Aut}_{\emph{filt}}(M_{\rho}^f)$ (resp.\ orbits of $\pi_RM_{\rho}^{f-1} \oplus \pi_RM_{\rho}^{*}$ under the action of $\emph{Aut}_{\emph{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$). \end{theorem} Given a vector $v \in \pi_RM_{\rho}^f$ (resp.\ in $\pi_RM_{\rho}^{f-1} \oplus \pi_RM_{\rho}^{*}$) we define $\text{filt-ord}(v)$ to be the jump set corresponding to the orbit of $v$ under the above bijection. As the terminology suggests, the map $\text{filt-ord}$ can be considered as the filtered analogue of the map $\text{ord}$, which gives the valuation of the vector $v$. Indeed in the latter case knowing $\text{ord}(v)$ gives exactly the orbit, under $R$-linear automorphisms, of $v$, likewise in the former case knowing $\text{filt-ord}(v)$ gives exactly the orbit, under filtered $R$-linear automorphisms, of $v$. Moreover as $\text{ord}(v)$ is computed by taking the minimum valuation of the coordinates of $v$, with respect to an $R$-linear basis, so $\text{filt-ord}(v)$ is computed by taking the set of minimal points with respect to $\leqslant_{\rho}$ for the graph of valuations of the coordinates of $v$, with respect to a filtered basis (see definition \ref{definition of a basis}). \subsubsection{Jump sets parametrize quasi-free filtered module} \label{jump sets and quasi free} Now we fix $\rho$ a shift with $T_{\rho}$ finite and $f$ a positive integer. Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free filtered module that is not free. 
By Proposition \ref{presentations} and \ref{transitive action} we see that $M_{\bullet}$ correspond to a unique orbit of vectors in $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ under $\text{Aut}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$. So, together with Theorem \ref{bijection orbits jumps}, we obtain a unique extended $\rho$-jump set $(I_{M_{\bullet}},\beta_{M_{\bullet}})$ that determines $M_{\bullet}$ as a filtered module. Thus the map $M_{\bullet} \mapsto (I_{M_{\bullet}},\beta_{M_{\bullet}})$ gives an injection of the set of isomorphism classes of $(f,\rho)$-quasi-free module that are not $(f,\rho)$-free to the set of extended $\rho$-jump sets. We now want to describe the image. By Proposition \ref{reconstruction process}, together with Corollary \ref{where is the torsion}, we find that $\rho^{\beta(\text{min}(I_{M_{\bullet}}))}(\text{min}(I_{M_{\bullet}}))=e_{\rho}^{*}$. Conversely one checks immediately that for an extended $\rho$-jump set $(I,\beta)$ such that $\rho^{\beta(\text{min}(I))}(\text{min}(I))=e_{\rho}^{*}$, the filtered $R$-module $(M_{\rho}^{f-1} \oplus M_{\rho}^{*})/Rv_{(I,\beta)}$ is a $(f,\rho)$-quasi-free module. We call these jump sets \emph{admissible}. We have thus proved the following theorem. \begin{theorem} \label{classification of quasi free} The map sending an admissible extended $\rho$-jump set $(I,\beta)$ to $(M_{\rho}^{f-1} \oplus M_{\rho}^{*})/Rv_{(I,\beta)}$ induces a bijection from the set of admissible extended $\rho$-jump sets to the set of $(f,\rho)$-quasi-free filtered modules that are not $(f,\rho)$-free. \end{theorem}
\subsubsection{Reading the jump set inside the module} \label{reading jump inside} We have classified $(f,\rho)$-quasi-free modules (which are not free) via admissible extended $\rho$-jump sets. We have proceeded by introducing an external module, $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$, presenting each of them, and proving that the invariant of each presentation is an admissible extended $(f,\rho)$-jump set.
We now provide a description of the jump set $(I_{M_{\bullet}},\beta_{M_{\bullet}})$, internally from $M_{\bullet}$, without any further reference to an external module $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$. In other words we face the task of providing the inverse of the bijection in Theorem \ref{classification of quasi free}, without reference to $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$. We will proceed by imitating the way we reconstructed the jump set belonging to each orbit in Proposition \ref{reconstruction process}. For $v \in M_{\bullet}$ denote by $g_{v,M_{\bullet}}$ the map $g_{v,M_{\bullet}}: \mathbb{Z}_{\geqslant 0} \to \mathbb{Z}_{\geqslant 0} \cup \{\infty\}$ defined as $g_{v,M_{\bullet}}(i):=w_{M_{\bullet}/{\pi_R}^{i}M_{\bullet}}(v)$. Say that $g_v$ breaks at $i$ if $g_{v,M_{\bullet}}(i) \neq g_{v,M_{\bullet}}(i+1)$. Fix $\tilde{m}$ a generator of $(M_1)_{\text{tors}}$, denote by $N$ the exponent of the torsion, that is $N:=\text{min}\{i \in \mathbb{Z}_{\geqslant 1}:\pi_{R}^{i} \tilde{m}=0 \}$. The following proposition can be proved by a straightforward imitation of the proof of Proposition \ref{reconstruction process}. \begin{proposition} \label{break points of g} The function $g_{\tilde{m},M_{\bullet}}$ breaks exactly at the elements of $\beta_{M_{\bullet}}(I_{M_{\bullet}})-N$, moreover if $i \in I_{M_{\bullet}}$ then $g_{\tilde{m},M_{\bullet}}(i+1)=\rho^{\beta_{M_{\bullet}}(i)-N}(i)$. \end{proposition} So we deduce the following corollary. \begin{corollary} \label{classifying quasi free modules internally} Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free filtered module that is not free. Let $\tilde{m} \in M_1$ be a generator of $(M_1)_{\emph{tors}}$, then the map $g_{\tilde{m},M_{\bullet}}$ determines $M_{\bullet}$ as a filtered module. \end{corollary} The following simple corollary of Theorem \ref{classification of quasi free} will be often useful. 
Recall the notation $(I_{(A,b)}^{-},\beta_{(A,b)}^{-})$ introduced in Proposition \ref{jump set attached to a function}. \begin{corollary} \label{The relation between the units} Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free filtered module that is not free. Let $I$ be a subset of $T_{\rho}^{*}$ and $b$ a map from $I$ to $\mathbb{Z}_{\geqslant 1}$. Suppose that for each $i \in I$ we have $m_i \in M_i$ satisfying the following three conditions. \\ \emph{(1)} For each $i \in I$ we have that $w_{M_{\bullet}}(m_i)=i$. \\ \emph{(2)} We have that $$\sum_{i \in I}\pi_R^{b(i)}m_i=0. $$ \emph{(3)} If $e_{\rho}^{*} \in I$ then $m_{e_{\rho}^{*}} \not \in \pi_R M_1$.
Then it must be that $$(I_{(A,b)}^{-},\beta_{(A,b)}^{-})=(I_{M_{\bullet}},\beta_{M_{\bullet}}). $$ \end{corollary} The following proposition shall be often used to recover the structure of the $R$-module $M_1[\pi_R^{\infty}]:=(M_1)_{\text{tors}}$ from $(I_{M_{\bullet}},\beta_{M_{\bullet}})$. This goes as follows. \begin{proposition} \label{how much torsion} Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free filtered $R$-module. Then we have that $$M_1[\pi_R^{\infty}] \simeq R/\pi_R^{\beta(\emph{max}(I_{M_{\bullet}}))}R, $$ as $R$-modules. \begin{proof} Using Theorem \ref{classification of quasi free} we deduce that $$M_1[\pi_R^{\infty}] \simeq R/\pi_R^{\text{min}(\beta(I_{M_{\bullet}}))}R. $$ Since $(I_{M_{\bullet}},\beta_{M_{\bullet}})$ is a jump set, the map $\beta_{M_{\bullet}}$ is in particular decreasing. Hence $\text{min}(\beta(I_{M_{\bullet}}))=\beta(\text{max}(I_{M_{\bullet}}))$, which gives precisely the desired conclusion. \end{proof} \end{proposition}
\section{Jumps of characters of a quasi-free module} \label{characters} \subsection{Motivation and main results} In section \ref{U1 as filtered module} we will see that $U_1$ as a filtered module is quasi-free. So, as we will see in detail in \ref{wild extension}, via the local reciprocity map the question of determining the possible upper jumps of a cyclic $p$-power totally ramified extension of a given local field is a special case of the question of determining the jumps of a cyclic character of a given $(f,\rho)$-quasi-free filtered module, which is the goal of the present section.
Let $R$ be a complete DVR. We denote by $Q(R)$ the fraction field of $R$. We equip $Q(R)/R$ with the discrete topology. \begin{definition} (a) Let $M_{\bullet}$ be a filtered $R$-module. A \emph{character} of $M_{\bullet}$ is a continuous $R$-linear homomorphism $\chi:M_1 \to Q(R)/R$, where the implicit topology on $M_1$ is the one coming from the filtration, see \ref{metric structure}.
(b) Let $\chi$ be a character of $M_{\bullet}$. A positive integer $i$ is said to be a \emph{jump} of $\chi$, if $\chi(M_i) \neq \chi(M_{i+1})$. We denote the collection of jumps of $\chi$ by $J_{\chi}$. Finally we denote by $\mathcal{J}_{M_{\bullet}}$ the collection of all $J_{\chi}$ as $\chi$ varies among characters of $M_{\bullet}$. \end{definition} One can easily show that if $M_{\bullet}$ is linear (see definition \ref{definition of linear}), then for each character $\chi$ of $M_{\bullet}$ the set $J_{\chi}$ is finite. We fix a shift map $\rho$, and a positive integer $f$. Recall that $(f,\rho)$-quasi-free modules are in particular linear. The goal of this section is to understand exactly which are the possible sets of jumps: \begin{goal} Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free module. Characterize the sets $A \subseteq \mathbb{Z}_{\geqslant 1}$ such that $A=J_{\chi}$ for some character $\chi$ of $M_{\bullet}$. \end{goal} We will proceed as follows: in \ref{jump set are set jump} we prove that $\mathcal{J}_{M_{\rho}^f}=\text{Jump}_{\rho}$ and $\mathcal{J}_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}}=\text{Jump}_{\rho}^{*}$. Next in \ref{jump set are set jump +} we examine the case of $(f,\rho)$-quasi-free modules that are not free. Given such a module $M_{\bullet}$, we know from Theorem \ref{classification of quasi free} that all we need to know to understand $M_{\bullet}$ as a filtered module is the extended jump set $(I_{M_{\bullet}},\beta_{M_{\bullet}})$. So it must be possible to predict $\mathcal{J}_{M_{\bullet}}$ from $(I_{M_{\bullet}},\beta_{M_{\bullet}})$. We achieve this in Theorem \ref{classification of characters}, where it is shown that $\mathcal{J}_{M_{\bullet}} \subseteq \text{Jump}_{\rho}^{*}$, and the missing jump sets are characterized by a combinatorial criterion involving $(I_{M_{\bullet}},\beta_{M_{\bullet}})$. 
\subsection{Set of jumps are jump set for a free module} \label{jump set are set jump} We proceed in the same way as we did for orbits of vectors in \ref{orbits}. Clearly the set of jumps of a character does not change if we apply to it a filtered automorphism of $M_{\bullet}$. Therefore we shall take advantage of this symmetry. It turns out that, for free filtered modules, knowing the set of jumps of a character $\chi$ is \emph{equivalent} to knowing to which orbit $\chi$ belongs (under the action of the group of filtered automorphisms). \begin{definition} (a) Let $\chi$ be a character of $M_{\rho}^f$ (resp.\ of $M_{\rho}^{f-1}\oplus M_{\rho}^{*}$). Denote by $A_{\chi}$ the set of $i$ in $T_{\rho}$ (resp.\ $T_{\rho}^{*}$), such that $\chi(\text{proj}_{i}) \neq \{0\}$, where $\text{proj}_{i}$ denotes the projection on $S_i^{f}$ (resp.\ the same if $i \in T_{\rho}^{*}$, where the projection is on $S_{e_{\rho}^{*}}$ for the last coordinate).
(b) For $a$ in $A_{\chi}$, define $b_{\chi}(a):=\text{min} \{r \in \mathbb{Z}_{\geqslant 1}: \pi_R^{r}\chi(\text{proj}_{a})= \{0\}\}$. \end{definition} We next show that, after applying a suitable filtered automorphism, one can make the pair $(A_{\chi},b_{\chi})$ a jump set (resp.\ an extended jump set). Recall the notation $(A_{\chi}^{+},b_{\chi}^{+})$ introduced in Proposition \ref{jump set attached to a function}. \begin{proposition} \label{reduction for characters} Let $\chi$ be a character of $M_{\rho}^f$ (resp.\ of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$). Then there exists $\theta \in \emph{Aut}_{\emph{filt}}(M_{\rho}^f)$ (resp.\ $\emph{Aut}_{\emph{filt}}(M_{\rho}^{f-1}\oplus M_{\rho}^{*})$) such that $(A_{\chi \circ \theta},b_{\chi \circ \theta})=(A_{\chi}^{+},b_{\chi}^{+})$. In particular $(A_{\chi \circ\theta},b_{\chi \circ \theta})$ is a $\rho$-jump set (resp.\ an extended $\rho$-jump set). \begin{proof}
The structure of the proof is the same as the one given for Proposition \ref{reduction process}; we just mention some differences. Just as in that proof, as a first step we can assume $\chi$ is a character vanishing on the factor $M_{\rho}^{f-1} \oplus 0$ and, as a character of the factor $M_{\rho}$ (resp.\ $M_{\rho}^{*}$), it is defined as follows. If $i \not \in A_{\chi}$, then we have $\chi_{|S_i}=0$. If $i \in A_{\chi}$, we have $\chi_{|S_i}(1)=\pi_{R}^{-b_{\chi}(i)}$. Next, if for two points $(i,b_{\chi}(i)),(j,b_{\chi}(j))$ in $\text{Graph}(b_{\chi})$ we have $(i,b_{\chi}(i))<_{\rho}(j,b_{\chi}(j))$, it follows that the transformation $\theta_{i,j}$, introduced in the proof of Proposition \ref{reduction process}, is filtered. Now, the only difference with that proof is that the effect of applying $\theta_{i,j}$ is to erase the smaller point, namely $(i,b_{\chi}(i))$. Indeed the character $\chi \circ \theta_{i,j}$ will send to $0$ all the factors $S_a$ with $a \not \in A_{\chi}$, and it will additionally be $0$ also on $S_i$. On the other hand, on all the other factors $S_a$, with $a \in A_{\chi}-\{i\}$, it coincides with $\chi$. Thus by repeatedly applying this type of transformation the sequence of filtered automorphisms so produced converges to a filtered automorphism $\theta$ with $(A_{\chi \circ \theta},b_{\chi \circ \theta})=(A_{\chi}^{+},b_{\chi}^{+})$, concluding the proof. \end{proof} \end{proposition} We now show that if $(A_{\chi},b_{\chi})$ is a $\rho$-jump set (resp.\ an extended $\rho$-jump set), then, if viewed as a subset of $\mathbb{Z}_{\geqslant 1}$, it is the set of jumps of $\chi$. \begin{proposition} \label{when characters are reduced} Let $\chi$ be a character of $M_{\rho}^f$ (resp.\ of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$), such that $(A_{\chi},b_{\chi})$ is a jump set (resp.\ an extended jump set). Then $J_{\chi}=J_{(A_{\chi},b_{\chi})}$. 
\begin{proof} For a general $\chi$ we have the following formula $$ \text{ord}(\chi(M_{\rho}^f)_{i})=\text{max}(\{b_{\chi}(j)-v_{\rho}(j,i)\}_{j \in T_{\rho}}), $$ where for $i \in \mathbb{Z}_{\geqslant 1}$ and $j \in T_{\rho}$ (resp.\ $T_{\rho}^{*}$) we have that $v_{\rho}(j,i)=\text{min}(\{s \in \mathbb{Z}_{\geqslant 0}: \rho^{s}(j) \geqslant i\})$ (respectively we have the formula $$\text{ord}(\chi(M_{\rho}^f)_{i})=\text{max}(\{b_{\chi}(j)-v_{\rho}(j,i)\}_{j \in T_{\rho}^{*}})). $$
Since $(A_{\chi},b_{\chi})$ is a jump set (resp.\ an extended jump set), it is visible from the definition that the right-hand side, as a function of $i$, changes value precisely in the set $J_{(A_{\chi},b_{\chi})}$, which gives precisely the desired identity $J_{(A_{\chi},b_{\chi})}=J_{\chi}$. \end{proof} \end{proposition} So for two characters of $M_{\rho}^f$ or $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ the equivalence relations ``having the same set of jumps'' and ``being in the same filtered orbit'' are precisely the same relation, and one obtains the following fact. \begin{theorem} \label{character for free modules} Let $\rho$ be a shift map, and let $f$ be a positive integer. We have that $\mathcal{J}_{M_{\rho}^f}=\emph{Jump}_{\rho}$, and if $T_{\rho}$ is finite, then $\mathcal{J}_{M_{\rho}^{f-1} \oplus M_{\rho}^{*}}=\emph{Jump}_{\rho}^{*}$. \end{theorem} The similarity with Theorem \ref{j.s.param.orbit} is noteworthy: in both cases jump sets parametrize orbits.
\subsection{Sets of jumps for a quasi-free module} \label{jump set are set jump +} Let $\rho$ be a shift map with $T_{\rho}$ finite. Let $f$ be a positive integer. Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free module that is not free. Then from Theorem \ref{classification of quasi free} we know that the knowledge of $M_{\bullet}$ as a filtered module is equivalent to the knowledge of the extended $\rho$-jump set $(I_{M_{\bullet}},\beta_{M_{\bullet}})$. So the invariant $\mathcal{J}_{M_{\bullet}}$ is completely determined once we know $(I_{M_{\bullet}},\beta_{M_{\bullet}})$. Here we explain how.
We know from Proposition \ref{presentations} that $M_{\bullet}$ admits a presentation $\varphi:M_{\rho}^{f-1} \oplus M_{\rho}^{*} \to M_{\bullet}$, with $\text{coker}(F_i(\varphi))=0$ for every positive integer $i$, so from Proposition \ref{a lot surjective} we know that $\varphi_{|(M_{\rho}^{f-1} \oplus M_{\rho}^{*})_i}$ is a map onto $M_i$ for each positive integer $i$. It follows that given a character $\chi$ of $M_{\bullet}$, the induced character on $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ obtained by post-composition to $\varphi$, has the same set of jumps of $\chi$. So together with Theorem \ref{character for free modules} we obtain: \begin{proposition} \label{set of jump for quasi free are jump set} Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free module. Then $\mathcal{J}_{M_{\bullet}} \subseteq \emph{Jump}_{\rho}^{*}$. \end{proposition} Thus we see that to characterize which elements of $\text{Jump}_{\rho}^{*}$ belongs to $\mathcal{J}_{M_{\bullet}}$ we need to see which jump sets are ruled out when on a character $\chi$ of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ we impose the condition $\chi(v_{(I_{M_{\bullet}},\beta_{M_{\bullet}})})=0$. The following simple lemma will be relevant to this end. For $x \in Q(R)/R$ we denote by $\text{ord}(x)$ the smallest non-negative integer $n$ such that $\pi_R^nx=0$. Equivalently we can say that $\text{ord}(x)$ is the unique non-negative integer such that $Rx$ is isomorphic to $R/\pi_R^nR$ as $R$-modules. \begin{lemma}{\label{units in a killer vector}} Let $n$ be a positive integer and $(v_1,\ldots ,v_n) \in (Q(R)/R)^n$. Write $Y:=\{i \in \{1,\ldots ,n\}:0<\emph{ord}(v_i), \emph{ord}(v_i)=\emph{max}\{\emph{ord}(v_j), \ j \in \{1,\ldots ,n\}\}\}$. Then the following hold:
\emph{(a)} Assume $|R/m_R| \neq 2$. Then there exists a vector $(a_1,\ldots ,a_n) \in (R^{*})^n$ such that $\sum_{i=1}^{n}a_iv_i=0$ if and only if $|Y|\neq 1$.
\emph{(b)} Assume $|R/m_R|=2$. Then there exists a vector $(a_1,\ldots ,a_n) \in (R^{*})^n$ such that $\sum_{i=1}^{n}a_iv_i=0$ if and only if $|Y| \equiv 0 \bmod 2$. \begin{proof}
(a) Assume $|Y| \neq 1$. We can assume $|Y| \neq 0$ because otherwise $(v_1,\ldots ,v_n)$ is the zero vector and any $(a_1,\ldots ,a_n) \in (R^{*})^n$ would prove the conclusion. Since $|R/m_R| \neq 2$ we can find $\lambda \in R^{*}$ such that $\lambda \not \equiv 1 \ \text{mod} \ m_R$. Now pick $i,j$ distinct elements of $Y$, and observe that at least one of the following two hold: \\ 1) $\text{ord}(v_i)=\text{ord}(v_j+ \sum_{h \not \in \{i,j\}}v_h)$. \\ 2) $\text{ord}(v_i)=\text{ord}(\lambda v_j+ \sum_{h \not \in \{i,j\}}v_h)$. \\
In each case, 1) and 2), we can find $\mu \in R^{*}$ such that, respectively $\mu v_i=v_j+ \sum_{h \not \in \{i,j\}}v_h$, or $\mu v_i=\lambda v_j+ \sum_{h \not \in \{i,j\}}v_h$. In each of the two cases we obtain the desired conclusion. Conversely assume that there exists a vector $(a_1,\ldots ,a_n) \in (R^{*})^n$ such that $\sum_{i=1}^{n}a_iv_i=0$. Suppose that $|Y|=1$, call $k$ its unique element: then we have $\text{ord}(v_k)=\text{ord}(\sum_{i=1}^{n}a_iv_i)=0$, contradicting the definition of $Y$.
(b) Assume $|Y| \equiv 0 \ \text{mod} \ 2$. We can assume $|Y| \neq 0$ because otherwise $(v_1,\ldots ,v_n)$ is the zero vector and any $(a_1,\ldots ,a_n) \in (R^{*})^n$ would prove the conclusion. So pick $i \in Y$. Then observe that, since $|Y - \{i\}| \equiv 1 \ \text{mod} \ 2$ and $|R/m_R|=2$, we have that $\text{ord}(v_i)=\text{ord}(\sum_{h \neq i}v_h)$. Thus, it follows that there exists $\mu \in R^{*}$ such that $\mu v_i=\sum_{h \neq i}v_h$, which is the desired conclusion. Conversely assume there exists a vector $(a_1,\ldots ,a_n) \in (R^{*})^n$ such that $\sum_{i=1}^{n}a_iv_i=0$. Suppose that $|Y| \equiv 1 \ \text{mod} \ 2$. Then pick $k \in Y$ and observe that, since $|R/m_R|=2$, we have $\text{ord}(v_k)=\text{ord}(\sum_{i=1}^{n}a_iv_i)=0$, contradicting the definition of $Y$. \end{proof} \end{lemma} We can now give a criterion to decide if an extended jump set $(I,\beta)$ is realizable as a set of jumps of a character of $M_{\bullet}$. Such a criterion consists in a combinatorial comparison between $(I,\beta)$ and $(I_{M_{\bullet}},\beta_{M_{\bullet}})$. The precise conditions for $(I,\beta)$ to be ruled out are conditions (a.1)--(a.2) and (b.1)--(b.2) of the following theorem (in cases (a) and (b) respectively). \begin{theorem}\label{classification of characters} Let $f$ be a positive integer and let $\rho$ be a shift. Let $M_{\bullet}$ be a $(f,\rho)$-quasi-free filtered $R$-module that is not free. Let $(I,\beta) \in \emph{Jump}_{\rho}^{*}$. Define $\emph{Max}((I,\beta),(I_{M_{\bullet}},\beta_{M_{\bullet}})):=\{i \in I \cap I_{M_{\bullet}}:\beta(i)-\beta_{M_{\bullet}}(i)>0 \wedge \forall j \in I \cap I_{M_{\bullet}}, \ \beta(i)-\beta_{M_{\bullet}}(i) \geqslant \beta(j)-\beta_{M_{\bullet}}(j) \}$. In what follows we write $\emph{Max}:=\emph{Max}((I,\beta),(I_{M_{\bullet}},\beta_{M_{\bullet}}))$.
\emph{(a)} Suppose $|R/m_R| \neq 2$. Then one has that $(I,\beta) \not \in \mathcal{J}_{M_{\bullet}}$ if and only if the following two conditions are satisfied:\\
\emph{(a.1)} $|\emph{Max}|=1$ and if $f>1$ then $\emph{Max}=\{e_{\rho}^{*}\}$. \\ \emph{(a.2)} Let $j$ be the unique element of $\emph{Max}$. For every $i \in I_{M_{\bullet}}-I$, the point $(i,\beta(j)-\beta_{M_{\bullet}}(j)+\beta_{M_{\bullet}}(i))$ is maximal in $\emph{Graph}(\beta) \cup \{(i,\beta(j)-\beta_{M_{\bullet}}(j)+\beta_{M_{\bullet}}(i))\}$, with respect to the ordering $\leqslant_{\rho}$.
\emph{(b)} Suppose $|R/m_R|=2$. Then $(I,\beta) \not \in \mathcal{J}_{M_{\bullet}}$ if and only if the following two conditions are satisfied: \\
\emph{(b.1)} $|\emph{Max}| \equiv 1 \bmod 2$ and if $f>1$ then $\emph{Max}=\{e_{\rho}^{*}\}$. \\ \emph{(b.2)} Let $j$ be any element of $\emph{Max}$. For every $i \in I_{M_{\bullet}}-I$, the point $(i,\beta(j)-\beta_{M_{\bullet}}(j)+\beta_{M_{\bullet}}(i))$ is maximal in $\emph{Graph}(\beta) \cup \{(i,\beta(j)-\beta_{M_{\bullet}}(j)+\beta_{M_{\bullet}}(i))\}$, with respect to the ordering $\leqslant_{\rho}$. \begin{proof} (a) Denote by $\{b_{i,j}: i \in T_{\rho}, \ j \in \{1, \ldots , f\} \} \cup \{b_{e_{\rho}^{*},1} \}$ the standard filtered basis for $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$. With this notation we have that $$v_{(I_{M_{\bullet}},\beta_{M_{\bullet}})}=\sum_{i \in I_{M_{\bullet}}} \pi_R^{\beta_{M_{\bullet}}(i)}b_{i,1}. $$ We divide the proof into 9 elementary steps.
1) We fix a presentation $\varphi:M_{\rho}^{f-1} \oplus {M}_{\rho}^{*} \to M_{\bullet}$ as in Proposition \ref{presentations}, with $\text{ker}(\varphi)=Rv_{(I_{M_{\bullet}},\beta_{M_{\bullet}})}$ as in Theorem \ref{classification of quasi free}.
2) The task of realizing $(I,\beta)$ from a character is equivalent to the task of finding a $\chi:M_{\rho}^{f-1} \oplus {M}_{\rho}^{*} \to Q(R)/R$ such that $(I_{\chi},\beta_{\chi})=(I,\beta)$, and $\sum_{i \in I_{M_{\bullet}}}\pi_R^{\beta_{M_{\bullet}}(i)}\chi(b_{i,1})=0$.
3) Suppose that $I \cap I_{M_{\bullet}}$ is either empty or that $\beta-\beta_{M_{\bullet}}$ does not assume a strictly positive maximum on $I \cap I_{M_{\bullet}}$. We claim that task 2) is then realizable. Indeed thanks to Lemma \ref{units in a killer vector} part (a), we can find for each $i \in I \cap I_{M_{\bullet}}$ a unit $\varepsilon_i \in R^{*}$ with the property that $$\sum_{i \in I \cap I_{M_{\bullet}}}\frac{\varepsilon_i}{\pi_R^{\beta(i)-\beta_{M_{\bullet}}(i)}}=0. $$ Therefore we can realize task 2) with the following character $\chi$. For $i \in I \cap I_{M_{\bullet}}$ we put $\chi(b_{i,1}):=\frac{\varepsilon_i}{\pi_R^{\beta(i)}}$. For $i \in I-I_{M_{\bullet}}$ we put $\chi(b_{i,1}):=\frac{1}{\pi_R^{\beta(i)}}$. For any $(i,h) \in T_{\rho} \times \{1, \ldots ,f \} \cup \{e_{\rho}^{*}\} \times \{1\}$ with $i \not \in I$ or $h>1$ we put $\chi(b_{i,h})=0$. With Proposition \ref{when characters are reduced} we conclude immediately that $J_{\chi}=(I,\beta)$ and we are done. So we can assume that $I \cap I_{M_{\bullet}}$ is non-empty and that $\beta-\beta_{M_{\bullet}}$ assumes a positive maximum at a unique point of $I \cap I_{M_{\bullet}}$, which we shall call $j$.
4) Assume that $j \neq e_{\rho}^{*}$ and $f>1$. Then we proceed by distinguishing two cases. \\ 4.1) There is no other $k \in I \cap I_{M_{\bullet}}$ different from $j$ such that $\beta(k)-\beta_{M_{\bullet}}(k)>0 $. Then we consider the following character $\chi$. For each $i$ in $I-\{e_{\rho}^{*}\}$ we put $\chi(b_{i,2})=\frac{1}{\pi_R^{\beta(i)}}$. If $e_{\rho}^{*} \in I$ we put $\chi(b_{e_{\rho}^{*},1})=\frac{1}{\pi_R^{\beta(e_{\rho}^{*})}}$. For all the other $(i',h)$ in $T_{\rho}^{*} \times \{1, \ldots , f\}$ we put $\chi(b_{i',h})=0$. We see that since $f>1$ and $j \neq e_{\rho}^{*}$ we trivially obtain $$\sum_{i \in I_{M_{\bullet}}}\pi_R^{\beta_{M_{\bullet}}(i)}\chi(b_{i,1})=0. $$ Hence task 2) is accomplished thanks to Proposition \ref{when characters are reduced}. So we can assume that such a $k$ exists. \\ 4.2) Suppose that there exists $k' \in I \cap I_{M_{\bullet}}$ different from $j$ such that $\beta(k')-\beta_{M_{\bullet}}(k')>0$. Choose a $k$ such that $\beta(k)-\beta_{M_{\bullet}}(k) \geqslant \beta(k')-\beta_{M_{\bullet}}(k')$ for each $k' \in I \cap I_{M_{\bullet}}$ with $k'$ different from $j$. Next observe that thanks to Proposition \ref{units in a killer vector} part (a), we can find for each $i \in I \cap I_{M_{\bullet}}$ a unit $\varepsilon_i \in R^{*}$ in such a way that $$\sum_{i \in (I \cap I_{M_{\bullet}})-\{j\}}\frac{\varepsilon_i}{\pi_R^{\beta(i)-\beta_{M_{\bullet}}(i)}}+\frac{\varepsilon_j}{\pi_{R}^{\beta(k)-\beta_{M_{\bullet}}(k)}}=0. $$ Now we proceed constructing a character $\chi$. We put $\chi(b_{j,1})=\frac{\varepsilon_j}{\pi_R^{\beta(k)-\beta_{M_{\bullet}}(k)+\beta_{M_{\bullet}}(j)}}$ and $ \chi(b_{j,2})=\frac{1}{\pi_R^{\beta(j)}}$. For all $i \in (I \cap I_{M_{\bullet}})-\{j\}$ we put $\chi(b_{i,1})=\frac{\varepsilon_i}{\pi_R^{\beta(i)}}$. For all $i \in I-I_{M_{\bullet}}$ we put $\chi(b_{i,1})=\frac{1}{\pi_R^{\beta(i)}}$. For all remaining vectors $b$ of the basis we put $\chi(b)=0$. 
Since $\beta(k)-\beta_{M_{\bullet}}(k)+\beta_{M_{\bullet}}(j) < \beta(j) $ we conclude by Proposition \ref{reduction for characters} and Proposition \ref{when characters are reduced} that $J_{\chi}=(I,\beta)$. Moreover, by construction, $$\sum_{i \in I_{M_{\bullet}}} \pi_R^{\beta_{M_{\bullet}}(i)}\chi(b_{i,1})=0. $$ Hence we have realized task 2) in this case as well.
5) Thanks to Step 1)--4) we can assume that $|\text{Max}|=1$, and that either $f=1$ or $\text{Max}=\{e_{\rho}^{*}\}$. Otherwise we have shown, in the previous steps, that we can accomplish task 2). Keep denoting by $j$ the unique point of $\text{Max}$.
6) Assume there is $i' \in I_{M_{\bullet}}-I$ such that the point $(i',\beta(j)-\beta_{M_{\bullet}}(j)+\beta_{M_{\bullet}}(i'))$ is not maximal in $\text{Graph}(\beta) \cup \{(i',\beta(j)-\beta_{M_{\bullet}}(j)+\beta_{M_{\bullet}}(i'))\}$, with respect to the ordering $\leqslant_{\rho}$. Then we can accomplish task 2) by constructing a character $\chi$ in the following manner. Observe that, thanks to Lemma \ref{units in a killer vector} part (a), we can attach to each $i \in (I \cap I_{M_{\bullet}}) \cup \{i'\}$ a unit $\varepsilon_i \in R^{*}$ in such a way that $$\sum_{i \in I \cap I_{M_{\bullet}}} \frac{\varepsilon_i}{\pi_R^{\beta(i)-\beta_{M_{\bullet}}(i)}} +\frac{\varepsilon_{i'}}{\pi_R^{\beta(j)-\beta_{M_{\bullet}}(j)}}=0. $$ For each $i \in I \cap I_{M_{\bullet}}$ put $\chi(b_{i,1})=\frac{\varepsilon_i}{\pi_R^{\beta(i)}}$. Moreover put $\chi(b_{i',1})=\frac{1}{\pi_R^{\beta(j)-\beta_{M_{\bullet}}(j)+\beta_{M_{\bullet}}(i')}}$ and $\chi(b_{i,1})=\frac{1}{\pi_R^{\beta(i)}}$ for each $i \in I-I_{M_{\bullet}}$. For all remaining vectors $b$ of the basis we put $\chi(b)=0$. By construction we obtain $$\sum_{i \in I_{M_{\bullet}}}\pi_R^{\beta_{M_{\bullet}}(i)}\chi(b_{i,1})=0. $$ Finally the hypothesis that the point $(i',\beta(j)-\beta_{M_{\bullet}}(j)+\beta_{M_{\bullet}}(i'))$ is not larger, with respect to $\leqslant_{\rho}$, than some point in $\text{Graph}(\beta)$, tells us, through Proposition \ref{reduction for characters} and Proposition \ref{when characters are reduced}, that $J_{\chi}=(I,\beta)$.
7) Steps 1)--6) prove that if (a.1) and (a.2) are not both satisfied then $(I,\beta) \in \mathcal{J}_{M_{\bullet}}$. We next proceed proving the converse implication.
8) Observe that if a set $A\subseteq {\mathbb{Z}}^{2}$ is given, together with a point $(x,y) \in A$ that is maximal in $A$ with respect to $\leqslant_{\rho}$, then any point of the form $(x,\tilde{y})$ with $\tilde{y} \geqslant y$ is maximal in $A$, with respect to $\leqslant_{\rho}$.
9) Suppose (a.1) and (a.2) both hold. Denote by $j$ the unique element of $\text{Max}$. Let $\chi$ be a character of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ with $J_{\chi}=(I,\beta)$. We shall prove that $$ \sum_{i \in I_{M_{\bullet}}}\pi_R^{\beta_{M_{\bullet}}(i)}\chi(b_{i,1}) \neq 0. $$ We proceed by contradiction. Suppose that $ \sum_{i \in I_{M_{\bullet}}}\pi_R^{\beta_{M_{\bullet}}(i)}\chi(b_{i,1})=0 $. By Proposition \ref{reduction for characters} and Proposition \ref{when characters are reduced} we have that $\text{ord}(\chi(b_{j,1}))=\beta(j)$: this is clear for $f=1$ and if $f \geqslant 2$ we are using that in this case $j$ must be $e_{\rho}^{*}$. Next using Lemma \ref{units in a killer vector} part (a), we see that at least one $i \in I_{M_{\bullet}}-\{j\}$ must satisfy $\text{ord}(\chi(b_{i,1})) \geqslant \beta_{M_{\bullet}}(i)+\beta(j)- \beta_{M_{\bullet}}(j)$. Such an $i$ cannot be in $I$. Indeed in that case we would conclude by Proposition \ref{reduction for characters} and Proposition \ref{when characters are reduced} that $J_{\chi} \neq (I,\beta)$ since we would have $\beta(i) \geqslant \beta_{M_{\bullet}}(i)+\beta(j)- \beta_{M_{\bullet}}(j)$, which would contradict the defining property of $\text{Max}$. Hence it must be that $i \in I_{M_{\bullet}}-I$. But then Step 8) together with assumption (a.2) and Proposition \ref{reduction for characters} and Proposition \ref{when characters are reduced} imply again that $\chi$ does not belong to the orbit of characters $\chi'$ having $J_{\chi'}=(I,\beta)$. This ends the proof.
Statement (b) can be proved by the same 9 steps of part (a) of this proof, replacing each time, part (a) of Lemma \ref{units in a killer vector} with part (b) of Lemma \ref{units in a killer vector}. \end{proof} \end{theorem}
\section{$U_1$ as a filtered module}\label{U1 as filtered module} In this section we apply the results of Section \ref{filtered modules} to classify the possible structures of $U_{\bullet}$ as a filtered $\mathbb{Z}_p$-module. Let $p$ be a prime number and let $e$ be in $(p-1)\mathbb{Z}_{\geqslant 1} \cup \{\infty\}$. Recall the definition of $\rho_{e,p}$ from Example \ref{main example shift}.
Let $K$ be a local field with residue characteristic $p$. Denote by $f_K$ the residue degree, $f_K=[O_K/m_K:\mathbb{F}_p]$. Denote by $\rho_K:=\rho_{e_K,p}$. Recall that $e_{\rho_K}^{*}=\frac{pe_K}{p-1}$ and $e_{\rho_K}^{'}=\frac{e_K}{p-1}$. \begin{proposition}\label{rho for loc fields} One has that $U_{\bullet}(K)$ is a $(f_K,\rho_K)$-quasi-free filtered $\mathbb{Z}_p$-module. \begin{proof} Firstly one has that $U_i/U_{i+1} \simeq_{\text{ab.gr.}} O/m$, which gives for every positive integer $i$ that $f_i(U_{\bullet}(K))=f_K$ (for a definition of $f_i(U_{\bullet}(K))$ see \ref{definition: f, defect, codefect}). Observe that the formula $(1+x)^p=1+px+\ldots +x^p$ implies that given $u \in U_i(K)$ then $u^p \in U_{\rho_K(i)}$. Moreover if $u \in U_i(K)-U_{i+1}(K)$ and $u^p \in U_{\rho_K(i)+1}$, then $pi=i+e_K$, which implies that $i=e'_{\rho_K}$. So we have firstly that $\rho_{U_{\bullet}(K)} \geqslant \rho_K$ (for a definition of $\rho_{U_{\bullet}(K)}$ see subsection \ref{rho}), which means that $U_{\bullet}(K)$ is a $\rho_K$-filtered-$\mathbb{Z}_p$-module (see subsection \ref{shit}), and secondly that $\text{defect}_{U_{\bullet}(K)}(i)=\text{codefect}_{U_{\bullet}(K)}(i)=0$ for every positive integer $i \neq e'_{\rho_K}$ (for a definition of $\text{defect}_{U_{\bullet}(K)}(i)$ and $\text{codefect}_{U_{\bullet}(K)}(i)$, see \ref{definition: f, defect, codefect}). On the other hand we know that $\mu_p(U_1(K))$ is a cyclic group. Thus we conclude by Proposition \ref{quasi free via torsion}. \end{proof} \end{proposition} Therefore we deduce the following. \begin{theorem} \label{no torsion} One has that $U_{\bullet}(K)$ is a free $(f_K,\rho_K)$-filtered module if and only if $\mu_{p}(K)=\{1\}$. In other words, $U_{\bullet}(K) \simeq_{\emph{filt}} M_{\rho_K}^{f_K}$ if and only if $\mu_{p}(K)=\{1\}$. \begin{proof} This follows immediately from Proposition \ref{charact. 
free-filt-mod}, Proposition \ref{quasi free via torsion} and Corollary \ref{where is the torsion} combined. \end{proof} \end{theorem} If instead $\mu_p(K) \neq \{1\}$ the following holds. \begin{theorem}\label{mup neq 1} Let $K$ be a local field with $\mu_p(K) \neq \{1\}$. Then there is a \emph{unique} $(I_K,\beta_K) \in \emph{Jump}_{\rho_K}^{*}$ such that
$$U_1 \simeq M_{\rho_K}^{f_K-1} \oplus (M_{\rho_K}^{*}/\mathbb{Z}_pv_{(I_K,\beta_K)})$$
as filtered $\mathbb{Z}_p$-modules. \begin{proof} This follows immediately from Proposition \ref{rho for loc fields} and Theorem \ref{classification of quasi free} combined. \end{proof} \end{theorem} We now fix $e_K=e$, and therefore we have $\rho_{K}=\rho_{e,p}$. Fix as well $f_K=f$. Our next goal is to show that every $(f,\rho_{e,p})$-quasi-free filtered module can be realized as $U_{\bullet}(K)$ for some $K$, a totally ramified degree $\frac{e}{p-1}$ extension of $\mathbb{Q}_{p^f}(\zeta_p)$. In view of Theorem \ref{classification of quasi free}, this is tantamount to proving that every jump set realizable from a filtered module can be realized by a local field. Recall from Theorem \ref{classification of quasi free} that the latter are precisely the admissible extended $\rho_{e,p}$-jump sets. For a definition of these jump sets see the discussion immediately before Theorem \ref{classification of quasi free}. \begin{theorem}\label{admissible j.s. occur} Let $(I,\beta)$ be an admissible extended $\rho_{e,p}$-jump set. Then there is a totally ramified extension $K/\mathbb{Q}_{p^f}(\zeta_p)$ with $e_K=e$ and with $$ (I_K,\beta_K)=(I,\beta).$$
\end{theorem} During the proof we will make use of the two propositions that follow below. Recall that if $\zeta_p \in K$, then the extension $L/K:=K(\sqrt[p]{U_{\frac{pe_K}{p-1}}/U_{\frac{pe_K}{p-1}+1}})/K$ is the unique unramified extension of degree $p$ of $K$. Indeed $[L:K]=p$, so if $e_{L/K}>1$ then $e_{L/K}=p$. Observe that the inclusion $U_{pe_K}(K) \subseteq U_{pe_L}(L)$ would, in case that $e_{L/K}=p$, induce an isomorphism $U_{\frac{pe_K}{p-1}}(K)/U_{\frac{pe_K}{p-1}+1}(K) \to U_{\frac{pe_L}{p-1}}(L)/U_{\frac{pe_L}{p-1}+1}(L)$, which, by construction would imply that $\text{codefect}_{U_{\bullet}(L)}(e_{L}^{'})=0$, which is impossible since $\zeta_p \in L$. So it must be that $e_{L/K}=1$ and $f_{L/K}=[L:K]$. \begin{proposition}\label{when the star is in I} Let $K$ be a finite extension of $\mathbb{Q}_p(\zeta_p)$. Then $e_{K}^{*} \in I_K$ if and only if $K(\sqrt[p]{\mu_{p^{\infty}}(K)})/K$ is unramified. \begin{proof} Let $\zeta_{p^j}$ be a generator of $U_1(K)_{\text{tors}}$. Thanks to Proposition \ref{break points of g}, we have that $e_{K}^{*} \in I_K$ if and only if $w_{U_1(K)/U_1(K)^p}(\zeta_{p^j})=\frac{pe_K}{p-1}$. On the other hand this is equivalent to $K(\zeta_{p^{j+1}})=K(\sqrt[p]{U_{\frac{pe_K}{p-1}}/U_{\frac{pe_K}{p-1}+1}})$, which, as explained just above this proposition, is the unique unramified degree $p$ extension of $K$. \end{proof} \end{proposition} Let $j$ be a positive integer. The following notation will be helpful. Consider the compositum extension $\mathbb{Q}_{p^{pf}}(\zeta_{p^j})\cdot \mathbb{Q}_{p^f}(\zeta_{p^{j+1}})/\mathbb{Q}_{p^f}(\zeta_{p^j})$, which is a Galois extension with Galois group $C_p \times C_p$. So one is provided with $p+1$ degree $p$ sub-extensions. We denote the unique unramified one as $\mathbb{Q}_{p^f}(\zeta_{p^j})(0)$ (which of course is just $\mathbb{Q}_{p^{pf}}(\zeta_{p^j})$). 
Further we list the $p-1$ totally ramified ones without an element of order $p^{j+1}$ as $\mathbb{Q}_{p^f}(\zeta_{p^j})(i)$ with $i$ running through $\{1,\ldots ,p-1\}$. And we will sometimes make use of an extended notation for $i=p$, by letting $\mathbb{Q}_{p^f}(\zeta_{p^j})(p):=\mathbb{Q}_{p^f}(\zeta_{p^{j+1}})$. \begin{proposition} \label{when star is in for tot ram} Let $j$ be a positive integer. Let $K$ be a totally ramified extension of $\mathbb{Q}_{p^f}(\zeta_p)$ with $e_K=:e$. Then the following are equivalent: \\ \emph{(1)} $e^{*} \in I_K$ and $\beta_K(e^{*})=j$. \\ \emph{(2)} There is exactly one $i \in \{1,\ldots ,p-1\}$ such that $K$ contains $\mathbb{Q}_{p^f}(\zeta_{p^j})(i)$. \begin{proof} $(1) \to (2)$ Thanks to Proposition \ref{when the star is in I}, we have that $(1)$ implies that $K(\zeta_{p^{j+1}})/K$ is unramified, thus we have that $K(\zeta_{p^{j+1}})/\mathbb{Q}_{p^f}(\zeta_{p^j})$ contains $\mathbb{Q}_{p^f}(\zeta_{p^{j+1}}) \cdot \mathbb{Q}_{p^{pf}}(\zeta_{p^j})$. But this last one must then intersect $K$ non-trivially, otherwise one would have $[K(\zeta_{p^{j+1}}):K]=p^2$, which is impossible. At the same time the intersection cannot be $\mathbb{Q}_{p^{pf}}(\zeta_{p^j})$ because $f_K=f$, and it cannot be $\mathbb{Q}_{p^f}(\zeta_{p^{j+1}})$. Indeed we have $\beta_K(e^{*})=j$ and Proposition \ref{how much torsion} implies that $p^j=\#\mu_{p^{\infty}}(K)$. So there must be an $i \in \{1,\ldots ,p-1\}$ such that $K$ contains $\mathbb{Q}_{p^f}(\zeta_{p^j})(i)$. But there must be exactly one since otherwise the whole extension $\mathbb{Q}_{p^f}(\zeta_{p^{j+1}}) \cdot \mathbb{Q}_{p^{pf}}(\zeta_{p^j})$ would be in $K$, which has been already explained to be not possible.
$(2) \to (1)$ We have that $K(\sqrt[p]{U_1(K)_{\text{tors}}})/K$ contains $\mathbb{Q}_{p^f}(\zeta_{p^j})(i)(\zeta_{p^{j+1}}) \supset \mathbb{Q}_{p^{pf}}$, thus one concludes that $K(\sqrt[p]{U_1(K)_{\text{tors}}})/K$ is unramified and by Proposition \ref{when the star is in I} one concludes that $e^{*} \in I_K$. Moreover we must have that $\beta_K(e^{*})=j$. Indeed $K$ contains in particular $\zeta_{p^j}$, which, by Proposition \ref{how much torsion}, implies that $\beta_K(e^{*}) \geqslant j$. If we would have $\beta_K(e^{*})>j$ then, still by Proposition \ref{how much torsion}, the field $K$ would contain also $\mathbb{Q}_{p^f}(\zeta_{p^{j+1}})$. Hence $K$ would contain the compositum of $\mathbb{Q}_{p^f}(\zeta_{p^{j+1}})$ and $\mathbb{Q}_{p^f}(\zeta_{p^j})(i)$. Therefore $K$ would contain the field $\mathbb{Q}_{p^{pf}}$ providing a contradiction with $f_K=f$. Hence, since $\beta_K(e^{*}) \geqslant j$ and $\beta_K(e^{*})<j+1$, it must be that $\beta_K(e^{*})=j$. \end{proof} \end{proposition} In particular we derive the following: \begin{corollary} \label{I,beta for twisted cyclotomic} Let $j,f$ be positive integers, and $i \in \{1,\ldots ,p-1\}$. Then $$I_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}=\{1,p^{j+1}\}, \ \beta_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}(1)=j+1 \ \text{and} \ \beta_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}(p^{j+1})=j.$$ \begin{proof} Since the jump set must be admissible, we know that $1 \in I_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}$ with $\beta_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}(1)=j+1$. By Proposition \ref{when the star is in I}, we know that $p^{j+1} \in I_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}$ with $\beta_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}(p^{j+1})=j$. Moreover we certainly have that $1=\text{min}(I_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)})$ and $p^{j+1}= \text{max}(I_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)})$ since $1$ and $p^{j+1}$ are respectively the smallest and the largest elements of $T_{\rho}^{*}$, for $\rho:=\rho_{e,p}$ with $e:=p^j(p-1)$. 
Moreover the very beginning of this proof gives us in particular that $\beta_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}(1)-\beta_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}(p^{j+1})=1$. Therefore, recalling that the map $\beta$ is strictly decreasing (by definition of a jump set), we must conclude that between $1$ and $p^{j+1}$ no other element of $I_{\mathbb{Q}_{p^f}(\zeta_{p^j})(i)}$ can be found. This gives us the desired conclusion. \end{proof} \end{corollary} Now we can proceed to prove Theorem \ref{admissible j.s. occur}. Take $(I,\beta)$ an extended admissible $\rho_{e,p}$-jump set. We distinguish two cases, depending on whether $e^{*} \in I$. First assume that $e^{*} \not \in I$. Next define the polynomial $$G(x):=\prod_{i \in I}(1+x^i)^{p^{\beta(i)-1}}-\zeta_p \in \mathbb{Q}_{p^f}(\zeta_p)[x]. $$ Using the fact that $(I,\beta)$ is admissible (for a definition see immediately before the statement of Theorem \ref{classification of quasi free}) one finds that the Newton polygon of $G(x)$ consists of the segment connecting $(0,1)$ and $(\frac{e}{p-1},0)$ continued with a horizontal segment starting from $(\frac{e}{p-1},0)$. Therefore there exists a degree $\frac{e}{p-1}$ Eisenstein polynomial $g(x) \in \mathbb{Q}_{p^f}(\zeta_p)[x]$ such that $g(x)$ divides $G(x)$. Define $$K:=\mathbb{Q}_{p^f}(\zeta_p)[x]/g(x). $$ Clearly $\pi:=x$ is a uniformizer in $K$. Moreover we have that $$\prod_{i \in I} (1+\pi^i)^{p^{\beta(i)}}=1 $$ with $\text{v}_{K}((1+\pi^i)-1)=i$, thus giving $$(I_{K},\beta_K)=(I,\beta), $$
thanks to Corollary \ref{The relation between the units}. Now suppose that $e^{*} \in I$ and write $j:=\beta(e^{*})$. We prove that $p^j(p-1)|e$. Indeed we have that $\text{min}(I)<e^{*}$ giving that $\beta(\text{min}(I))\geqslant j+1$. Thus, since we have that $\rho^{\beta(\text{min}(I))}(\text{min}(I))=p^{\beta(\text{min}(I))}(\text{min}(I))=\frac{pe}{p-1}$, we obtain that $p^j(p-1)|e$. Next, pick $u_1,u_2 \in \mathbb{Q}_{p^f}(\zeta_{p^j})(1)$ such that $$\text{v}_{\mathbb{Q}_{p^f}(\zeta_{p^j})(1)}(u_1-1)=1, \ \text{v}_{\mathbb{Q}_{p^f}(\zeta_{p^j})(1)}(u_2-1)=p^{j+1}, \ u_2 \not \in (\mathbb{Q}_{p^f}(\zeta_{p^j})(1))^{*p} $$ and $$u_1^{p^{j+1}}u_2^{p^j}=1 $$ as guaranteed by Corollary \ref{I,beta for twisted cyclotomic}. Now define $$G^{*}(x):=\prod_{i \in I, i<e^{*}}(1+x^i)^{p^{\beta(i)-j-1}}-u_1 \in \mathbb{Q}_{p^f}(\zeta_{p^j})(1)[x]. $$ Using the fact that $(I,\beta)$ is admissible one finds that the Newton polygon of $G^{*}(x)$ consists of the segment connecting $(0,1)$ and $(\frac{e}{p^j(p-1)},0)$ continued with a horizontal segment starting from $(\frac{e}{p^j(p-1)},0)$. Therefore there exists a degree $\frac{e}{p^j(p-1)}$ Eisenstein polynomial $g^{*}(x) \in \mathbb{Q}_{p^f}(\zeta_{p^j})(1)[x]$ such that $g^{*}(x)$ divides $G^{*}(x)$. Define $$\tilde{K}:=\mathbb{Q}_{p^f}(\zeta_{p^j})(1)[x]/g^{*}(x). $$ Clearly $\pi:=x$ is a uniformizer in $\tilde{K}$. Moreover we have that $$(\prod_{i \in I, i<e^{*}}(1+\pi^i)^{p^{\beta(i)}})u_2^{p^j}=1 $$ with $\text{v}_{\tilde{K}}((1+\pi^i)-1)=i$ for each $i \in I$, with $i<e^{*}$ and with $\text{v}_{\tilde{K}}(u_2-1)=e^{*}$. Thus, in order to apply Corollary \ref{The relation between the units}, we are only left with checking that $u_2 \not \in \tilde{K}^{*p}$. But this follows at once from the fact that $\mathbb{Q}_{p^{pf}} \subseteq \mathbb{Q}_{p^f}(\zeta_{p^j})(1)(\sqrt[p]{u_2})$ and the fact that $g^{*}(x)$ is an Eisenstein polynomial and thus $\tilde{K}/\mathbb{Q}_{p^f}(\zeta_{p^j})(1)$ is totally ramified. 
This ends the proof of Theorem \ref{admissible j.s. occur} and therefore of Theorem \ref{realizable j.s. are realized} in the Introduction. \section{Upper jumps of cyclic extensions}\label{wild extension} In this section we use Theorem \ref{classification of characters}, together with Proposition \ref{rho for loc fields}, to establish Theorem \ref{classification wild characters}, a classification in terms of jump sets for the possible sets of upper jumps of a cyclic wild extension of a local field $K$. We next prove combinatorially that the classification obtained is equivalent to that obtained by Miki \cite{Miki}, Maus \cite{Maus} and Sueyoshi \cite{Sueyoshi}: in this way those results are \emph{deduced} from Theorem \ref{classification wild characters}. Finally we give a sense of how in practice the classification of Theorem \ref{classification wild characters} may look, by examining it for several possible values of the triple $((I,\beta),f,p)$, and in particular we do so for the most typical occurrences of $(I,\beta)$ in the sense of Theorem \ref{counting}. We also show that for $K/\mathbb{Q}_p(\zeta_p)$ totally ramified, the knowledge of the filtered $\mathbb{Z}_p$-module $U_{\bullet}(K)$ is equivalent to the knowledge of all possible sets of upper jumps of cyclic wild totally ramified extensions of $K$ (see Corollary \ref{reconstructing}).
\subsection{Classification of possible sets of jumps} In the rest of the section $K$ will denote as usual a local field of residue characteristic a prime number denoted by $p$. We fix $K^{\text{sep}}$ a separable closure of $K$ and we denote by $G_K:=\text{Gal}(K^{\text{sep}}/K)$ the absolute Galois group of $K$. Let $H$ be a normal closed subgroup of $G_K$. Recall that for every $\alpha \in \mathbb{R}_{\geqslant 0}$ the Galois group $G_K/H$ is provided with a subgroup $(G_K/H)^{\alpha}$ via the so-called upper ramification filtration (see \cite{Local fields}). Let $L/K$ be a finite cyclic totally ramified extension of $K$, with degree a power of $p$. Denote by $G$ the Galois group $\text{Gal}(L/K)$. A number $\alpha \in \mathbb{R}_{\geqslant 0}$ is said to be an \emph{upper jump} for $L/K$ if $G^{\alpha} \supsetneq G^{\alpha+\varepsilon}$ for each $\varepsilon>0$. We denote by $J(L/K)$ the set of upper jumps for $L/K$. From the Hasse--Arf Theorem (see \cite{Local fields}) we have that $J(L/K) \subset \mathbb{Z}_{\geqslant 1}$. We denote by $\mathcal{J}_K$ the collection of all such subsets of $\mathbb{Z}_{\geqslant 1}$ as $L$ varies among all cyclic, $p$-power, totally ramified extensions of $K$. The set $\mathcal{J}_{K}$ can be also described as follows. We consider all totally ramified continuous homomorphisms $$\chi:G_{K} \to \mathbb{Q}_p/\mathbb{Z}_p, $$ where $\chi$ is said to be totally ramified if the corresponding field extension is totally ramified. The set of upper jumps for $\chi$ are the $\alpha \in \mathbb{R}_{\geqslant 0}$ such that $\chi(G_K^{\alpha}) \neq \chi(G_K^{\alpha+\varepsilon})$ for all $\varepsilon>0$. This set is denoted by $J_{\chi}$. One has that $J_{\chi}=J((K^{\text{sep}})^{\text{ker}(\chi)}/K)$, so that $\mathcal{J}_K$ consists of the collection of all $J_{\chi}$ as $\chi$ varies among continuous totally ramified characters $\chi:G_K \to \mathbb{Q}_p/\mathbb{Z}_p$. 
Of course the set of such continuous totally ramified characters $\chi:G_K \to \mathbb{Q}_p/\mathbb{Z}_p$ can be equivalently described as the set of all $\chi:G_K^{\text{ab}} \to \mathbb{Q}_p/\mathbb{Z}_p$ continuous totally ramified. Finally it is not difficult to see that $\mathcal{J}_K$ is also the collection of all $J_{\chi}$ for all continuous homomorphisms $\chi:(G_{K}^{\text{ab}})^{1} \to \mathbb{Q}_p/\mathbb{Z}_p$. On the other hand $(G_{K}^{\text{ab}})^{1} \simeq_{\mathbb{Z}_p\text{-filt}}U_{\bullet}(K)$ via the Artin local reciprocity law. Therefore we see that the definition of $\mathcal{J}_K$ given in this section is equivalent to the one given in the introduction: we have $\mathcal{J}_K=\mathcal{J}_{U_{\bullet}(K)}$, where the right hand side is defined at the beginning of Section \ref{characters}. Therefore we are in a position to apply the results of Section \ref{characters}, notably Theorem \ref{classification of characters}. To make the statements simpler we first make a definition. Let $\rho$ denote a general shift with $T_{\rho}$ finite, $f$ a positive integer, and $p$ a prime number. Let moreover $(I,\beta)$, $(I',\beta')$ be in $\text{Jump}_{\rho}^{*}$. \begin{definition} \label{incompatibility} We say that $(I,\beta)$ is $((I',\beta'),f,p)$-incompatible if the following conditions hold.
$(1)$ The set $I \cap I'$ is non-empty. Moreover the subset $\text{Max}((I,\beta),(I',\beta'))$ of $I \cap I'$ consisting of those $i$ in $I \cap I'$ where $\beta(i)-\beta'(i)$ is strictly positive and assumes the maximal possible value, which we denote by $c$, has precisely one element if $p>2$ and an odd number of elements if $p=2$.
$(2)$ If $f>1$ then $\text{Max}((I,\beta),(I',\beta'))=\{e_{\rho}^{*}\}$.
$(3)$ Given any $i \in I'-I$, there is no $j \in I$ such that $(j,\beta(j)) \geqslant_{\rho} (i,c+\beta'(i))$.
We say that $(I,\beta)$ is $((I',\beta'),f,p)$-compatible if it is not $((I',\beta'),f,p)$-incompatible. \end{definition} Combining Theorem \ref{character for free modules} and Theorem \ref{classification of characters} together with Proposition \ref{rho for loc fields} we obtain the following. \begin{theorem} \label{classification wild characters} Suppose that $\mu_p(K)=\{1\}$. Then $\mathcal{J}_K=\emph{Jump}_{\rho_K}$. Suppose that $\mu_p(K) \neq \{1\}$. Then $\mathcal{J}_K$ consists precisely of the elements of $\emph{Jump}_{\rho_K}^{*}$ that are $((I_K,\beta_K),f_K,p)$-compatible. \end{theorem} We conclude this subsection by proving that the notion of $((I',\beta'),f,p)$-incompatibility is equivalent to a slightly simpler criterion. This is given by the next proposition, which will be repeatedly applied in the next subsection. We make first the following definition. \begin{definition} \label{floors in I} Let $a$ be a positive integer and let $(I,\beta)$ be an extended $\rho$-jump set with $I \neq \emptyset$. Suppose that $a \geqslant \text{min}(I)$, then we denote by $ \lfloor a \rfloor_{I}$ the largest element $i$ of $I$ such that $i \leqslant a$. Suppose that $a \leqslant \text{max}(I)$, then we denote by $\lceil a \rceil_{I}$ the smallest element $i$ of $I$ such that $a \leqslant i$.
\end{definition} Let now $(I,\beta)$ and $(I',\beta')$ be two extended $\rho$-jump sets. \begin{proposition}\label{a cheaper criterion for incompatibility} The jump set $(I,\beta)$ is $((I',\beta'),f,p)$-incompatible if and only if the following holds.
\emph{(a)} Conditions $(1)$ and $(2)$ from definition \ref{incompatibility} hold. If that is the case, let $c(I,I'):=\beta(i_0)-\beta'(i_0)$ for any $i_0 \in \emph{Max}((I,\beta),(I',\beta'))$.
\emph{(b)} For every point $i \in I'-\emph{Max}((I,\beta),(I',\beta'))$, we have that $$\beta'(i)+c(I,I')>\beta(\lceil i \rceil_{I}), $$ whenever $i \geqslant \emph{min}(I)$ and $$\rho^{\beta'(i)+c(I,I')}(i)>\rho^{\beta(\lfloor i \rfloor_{I})}(\lfloor i \rfloor_{I}), $$ whenever $i \leqslant \emph{max}(I)$. \begin{proof} This follows immediately by noticing that condition $(3)$ of definition \ref{incompatibility}, requires only the comparisons with $\lfloor i \rfloor_{I}$ and $\lceil i \rceil_{I}$, as soon as they are defined, since the two inequalities in part (b) of the present statement must certainly hold, but they trivially imply all the others since $j \mapsto \beta(j)$ is strictly decreasing and $j \mapsto \rho^{\beta(j)}(j)$ is strictly increasing, by definition of a jump set. \end{proof}
\end{proposition} \subsection{Comparison with Miki-Maus-Sueyoshi} In this subsection we give a direct combinatorial verification that Theorem \ref{classification wild characters} and the main Theorem of \cite{Maus} are indeed classifying precisely the same sets. Of course this follows also from applying both theorems, but both criteria being of a purely combinatorial form, it is natural to provide a combinatorial proof of their equivalence, not relying on local fields. As an upshot we can \emph{deduce} Miki's classification from Theorem \ref{classification wild characters} and the extra bit of combinatorial work of this subsection. Moreover the combinatorial nature of the equivalence between the two classifications is highlighted by the fact that it follows from a statement about a general shift, see Proposition \ref{inadequacy equivalent to incompatibility}. Recall indeed that the case discussed in the present section is only a very special case of the classification we provide in Theorem \ref{classification of characters}, which is about a general $(f,\rho)$-quasi-free $R$-module (see Definition \ref{def of quasi free}), where $R$ is any complete DVR, $f$ is any positive integer and $\rho$ is a general shift. In the case $\mu_p(K)=\{1\}$ the two descriptions are literally equal. So we pass to examine the case $\mu_p(K) \neq \{1\}$, where both Theorems say that $\mathcal{J}_K \subseteq \text{Jump}_{\rho_K}^{*}$ and they both provide a criterion for an element of $\text{Jump}_{\rho_K}^{*}$ to be realizable as the set of jumps of a character. In the case of Theorem \ref{classification wild characters}, this is precisely the notion of being $((I_K,\beta_K),f_K,p)$-compatible. For the convenience of the reader we recap the formulation of Miki's criterion as stated in \cite{Sueyoshi} in terms of a general definition, valid for any shift $\rho$ with $T_{\rho}$ finite. As usual, let $p$ denote a prime number and $f$ a positive integer.
Let moreover $(I,\beta)$ and $(I',\beta')$ be in $\text{Jump}_{\rho}^{*}$, with both $I,I'$ being non-empty. \begin{definition} \label{inadequate} We say that $(I,\beta)$ is $((I',\beta'),f,p)$-inadequate if the following holds. Write $J_{(I,\beta)}=\{t_1, \ldots ,t_m\}$ and $J_{(I',\beta')}=\{\lambda_1, \ldots ,\lambda_l\}$ (see immediately above Proposition \ref{equivalent data} for the notation $J_{(I',\beta')}$) with $\{t_i\}_{1 \leqslant i \leqslant m}$ and $\{\lambda_i\}_{1 \leqslant i \leqslant l}$ written in increasing order. Write $s=\beta'(\text{max}(I'))$. Then there is a positive integer $L$ with $L<m-(s-1)$ such that the sequences $\{x_i\}_{0 \leqslant i < l-(s-1)}, \{y_i\}_{0 \leqslant i < l-(s-1)}$ defined as $x_i:=t_{L-i}, y_i:=\lambda_{l-i-(s-1)}$, with $x_i=0$ when $L \leqslant i$, satisfy the following condition. Whenever $y_i \in I'$, then $x_i \leqslant y_i$, with equality occurring, among these inequalities, precisely once if $p>2$, and an odd number of times if $p=2$. Moreover, in case $f>1$, equality occurs precisely once, for all $p$, and it occurs for $i=0$ with $x_0=y_0=e_{\rho}^{*} \in I \cap I'$ (i.e. $x_1,y_1<e_{\rho}^{'}$). Recall that $e_{\rho}^{*}=\text{max}(T_{\rho})+1$ and that $e_{\rho}^{'}$ is the unique positive integer such that $\rho(e_{\rho}^{'})=e_{\rho}^{*}$.
We say that $(I,\beta)$ is $((I',\beta'),f,p)$-adequate if it is not $((I',\beta'),f,p)$-inadequate. \end{definition} \begin{remark} In \cite{Sueyoshi}, the final condition requires only that $x_1<e_{\rho}^{'}$ (i.e. that $e_{\rho}^{*} \in I$) because in that case $(I',\beta')$ is admissible (since it is the jump set of a local field, see definition right after Theorem \ref{realizable j.s. are realized}), so the condition $y_0=e_{\rho}^{*}$, which is equivalent to $e_{\rho}^{*} \in J_{(I',\beta')}$, is actually equivalent to $e_{\rho}^{*} \in I'$. \end{remark} We next furnish a direct combinatorial proof that incompatibility and inadequacy are the same notion. \begin{proposition} \label{inadequacy equivalent to incompatibility} Let $\rho$ be a shift with finite $T_{\rho}$, let $p$ be a prime number and $f$ a positive integer. Let $(I,\beta)$ and $(I',\beta')$ be in $\emph{Jump}_{\rho}^{*}$. Then $(I,\beta)$ is $((I',\beta'),f,p)$-inadequate if and only if it is $((I',\beta'),f,p)$-incompatible. \begin{proof}
Suppose that $(I,\beta)$ is $((I',\beta'),f,p)$-inadequate. Let $L<m-(s-1)$ and the two sequences $\{x_i\}_{0 \leqslant i \leqslant l-(s-1)}, \{y_i \}_{0 \leqslant i \leqslant l-(s-1)}$ be as in definition \ref{inadequate}. Let $M$ be the set of non-negative integers $i_0$, with $i_0 \leqslant l-(s-1)$, $y_{i_0} \in I'$ and $x_{i_0}=y_{i_0}$: the size of $M$ must be, by definition, equal to $1$ if $p>2$, and odd if $p=2$. We claim that $\{y_i \}_{i \in M}=\text{Max}((I,\beta),(I',\beta'))$ (for a definition of $\text{Max}((I,\beta),(I',\beta'))$ see Theorem \ref{classification of characters}). We know that, in either case, $M$ is non-empty. Let $i_0$ be one of its elements. Firstly, from the fact that $L<m-(s-1)$ we deduce precisely that the set $J_{(I,\beta)} \cap [y_{i_0},\infty)$ has strictly more elements than $J_{(I',\beta')} \cap [y_{i_0},\infty)$. In other words $y_{i_0} \in I \cap I'$ with $\beta(y_{i_0})-\beta'(y_{i_0})>0$. Next let $0 \leqslant i \leqslant l-(s-1)$ be any other index such that $y_i \in I \cap I'$. Assume $y_i>y_{i_0}$, i.e. that $i<i_0$. From the fact that $x_i \leqslant y_i$, we conclude that in the interval $[y_{i_0},y_i]$ there are at least as many points of $J_{(I,\beta)}$ as there are points of $J_{(I',\beta')}$, which amounts to $\beta(y_{i_0})-\beta(y_i) \geqslant \beta'(y_{i_0})-\beta'(y_i)$, which can be rewritten as $\beta(y_{i_0})-\beta'(y_{i_0}) \geqslant \beta(y_{i})-\beta'(y_i)$, with equality iff $i \in M$. A completely analogous reasoning in the case $i>i_0$ brings us to the same conclusion. In other words we have just shown that $y_{i_0} \in \text{Max}((I,\beta),(I',\beta'))$ and all other $i \in M$ are precisely the $i$ such that $y_i \in \text{Max}((I,\beta),(I',\beta'))$. Therefore we conclude by the very definition of inadequacy that conditions $(1)-(2)$ of definition \ref{incompatibility} hold. We are left with proving condition $(3)$. Let $i_1$ be an index such that $y_{i_1} \in I'-I$. 
Take $i_0 \in M$, and suppose $i_1<i_0$. Since $|J_{(I,\beta)} \cap [y_{i_0},x_{i_1}]|=|J_{(I',\beta')} \cap [y_{i_0},y_{i_1}]|$, we have that $\beta(y_{i_0})-\beta(\lceil y_{i_1} \rceil_{I})>\beta'(y_{i_0})-\beta'(y_{i_1})$, which can be rewritten as $c+\beta'(y_{i_1})>\beta(\lceil y_{i_1} \rceil_{I})$. This last inequality is precisely the first of the two inequalities in Proposition \ref{a cheaper criterion for incompatibility}. Next, always assuming $i_1<i_0$, consider the two possible cases: $x_{i_1}<\lfloor y_{i_1} \rfloor_{I}$ or $\lfloor y_{i_1} \rfloor_{I} \leqslant x_{i_1}<\lceil y_{i_1} \rceil_{I}$. In the first case observe that $\beta(y_{i_0})-\beta(\lfloor y_{i_1} \rfloor_{I})>\beta'(y_{i_0})-\beta'(y_{i_1})$, which can be recast as $c+\beta'(y_{i_1})>\beta(\lfloor y_{i_1} \rfloor_{I})$. This last inequality trivially implies that $\rho^{c+\beta'(y_{i_1})}(y_{i_1})>\rho^{\beta(\lfloor y_{i_1} \rfloor_{I})}(\lfloor y_{i_1} \rfloor_{I})$, since $\rho$ is strictly increasing. So in the first case one, trivially, obtains the second inequality of Proposition \ref{a cheaper criterion for incompatibility}. In the second case observe that $(\beta'(y_{i_0})-\beta'(y_{i_1}))-( \beta(x_{i_0})-\beta(\lfloor y_{i_1} \rfloor_{I}))=v_{\rho}(x_{i_0})$, i.e. $\rho^{(\beta'(y_{i_0})-\beta'(y_{i_1}))-(\beta(y_{i_0})-\beta(\lfloor y_{i_1} \rfloor_{I}))}(\lfloor y_{i_1} \rfloor_{I})=x_{i_1}<y_{i_1}$, which can be rewritten as $\rho^{\beta(\lfloor y_{i_1} \rfloor_{I})}(\lfloor y_{i_1} \rfloor_{I})<\rho^{c+\beta'(y_{i_1})}(y_{i_1})$. This last inequality is precisely the second inequality in Proposition \ref{a cheaper criterion for incompatibility}. The case $i_1>i_0$ can be treated in the same way. Altogether this proves that $(I,\beta)$ is $((I',\beta'),f,p)$-incompatible.
The proof of the converse implication proceeds analogously, and basically it can be obtained by inverting the above arguments. \end{proof} \end{proposition} From Proposition \ref{inadequacy equivalent to incompatibility} we can infer the main Theorem in \cite{Sueyoshi}. \begin{theorem} (Miki's Theorem) Suppose that $\mu_p(K) \neq \{1\}$. Then $\mathcal{J}_K$ consists precisely of the elements of $\emph{Jump}_{\rho_K}^{*}$ that are $((I_K,\beta_K),f_K,p)$-adequate. \begin{proof} This follows immediately from Theorem \ref{classification wild characters} and Proposition \ref{inadequacy equivalent to incompatibility} together. \end{proof}
\end{theorem} \subsection{Examples and special cases} We begin by providing several cases where Theorem \ref{classification wild characters} specializes to something much simpler, the interesting case being clearly that $\mu_p(K) \neq \{1\}$, which we will assume in the rest of this subsection. \begin{corollary} \label{cor1} Let $(I,\beta) \in \emph{Jump}_{\rho_K}^{*}$, with $I \cap I_K= \emptyset$. Then $(I,\beta) \in \mathcal{J}_K$. \begin{proof} Indeed, in this case condition $(1)$ of definition \ref{incompatibility} cannot possibly hold if $(I',\beta'):=(I_K,\beta_K), f:=f_K, p:=\text{char}(O_K/m_K)$. Therefore $(I,\beta)$ is $((I_K,\beta_K),f,p)$-compatible and the conclusion follows from Theorem \ref{classification wild characters}. \end{proof} \end{corollary} As soon as $f_K \geqslant 2$ we can say the following. \begin{corollary} \label{corollary2} Suppose that $f_K \geqslant 2$. Then the following facts hold.
\emph{(a)} Suppose that $e_{\rho_K}^{*} \not \in I_K$. Then $\mathcal{J}_K=\emph{Jump}_{\rho_K}^{*}$.
\emph{(b)} Suppose that $e_{\rho_K}^{*} \in I_K$. Then $\emph{Jump}_{\rho_K} \subseteq \mathcal{J}_K \subsetneq \emph{Jump}_{\rho_K}^{*}$.
\emph{(c)} Suppose that $e_{\rho_K}^{*} \in I_K$. Then each $(I,\beta) \in \emph{Jump}_{\rho_K}^{*}$ with $e_{\rho_K}^{*} \in I$ and $\beta(e_{\rho_K}^{*}) \leqslant \beta_K(e_{\rho_K}^{*})$ is in $\mathcal{J}_{K}$. \begin{proof} Let $(I,\beta)$ be in $\text{Jump}_{\rho_K}^{*}$. Then condition $(2)$ of definition \ref{incompatibility} cannot possibly hold if $(I',\beta'):=(I_K,\beta_K), f:=f_K, p:=\text{char}(O_K/m_K)$, therefore by Theorem \ref{classification wild characters}, we obtain that $(I,\beta) \in \mathcal{J}_K$, thus giving (a). Similarly if $e_{\rho_K}^{*} \not \in I$, which amounts to saying that $(I,\beta) \in \text{Jump}_{\rho_K}$, then condition $(2)$ from definition \ref{incompatibility} cannot possibly hold, giving $\text{Jump}_{\rho_K} \subseteq \mathcal{J}_K$ from (b). The inclusion $\mathcal{J}_K \subseteq \text{Jump}_{\rho_K}^{*}$ always holds, thanks to Theorem \ref{classification wild characters}, so, to conclude the proof of (b), we only need to prove the strict inclusion, i.e. to provide, under the conditions of (b), an element of $\text{Jump}_{\rho_K}^{*}$ that is not in $\mathcal{J}_K$. Consider $(\{e_{\rho_K}^{*}\},(e_{\rho_K}^{*} \mapsto n))$ with $n>\beta_K(e_{\rho_K}^{*})$: it trivially satisfies condition (a) from Proposition \ref{a cheaper criterion for incompatibility}. Condition (b) amounts to saying that for any $i \in I_K -\{e_{\rho_K}^{*}\}$ we need to have $n-\beta_K(e_{\rho_K}^{*})+\beta_K(i)>n$. This last inequality is equivalent to the inequality $\beta_K(i)-\beta_K(e_{\rho_K}^{*})>0$ and this inequality holds by definition of jump set. Hence we conclude by Theorem \ref{classification wild characters} and Proposition \ref{a cheaper criterion for incompatibility} that $(\{e_{\rho_K}^{*}\},(e_{\rho_K}^{*} \mapsto n)) \not \in \mathcal{J}_K$. This concludes the proof of (b).
For (c), notice that the assumption $\beta(e_{\rho_K}^{*}) \leqslant \beta_K(e_{\rho_K}^{*})$ together with $f_K \geqslant 2$ makes condition $(2)$ of definition \ref{incompatibility} impossible to hold for $(I,\beta)$, giving by Theorem \ref{classification wild characters} that $(I,\beta) \in \mathcal{J}_K$. \end{proof} \end{corollary} If instead $f_K=1$, then there are always exceptions. \begin{corollary} \label{acazz} If $f_K=1$, then $\mathcal{J}_K \subsetneq \emph{Jump}_{\rho_K}^{*}$. \begin{proof} We proceed as in (b) of the previous corollary. For any $i \in I_{K}$ we consider the jump set $(\{i\},(i \mapsto n))$ with $n>\beta_K(i)$. We proceed to show that this jump set is $((I_K,\beta_K),1,p)$-incompatible. Condition (a) of Proposition \ref{a cheaper criterion for incompatibility} is clearly satisfied, so we proceed to verify condition (b) of that Proposition. Taking $j \in I_K$ with $j<i$, we need to check that $\beta_K(j)+n-\beta_K(i)>n$, or equivalently that $\beta_K(j)>\beta_K(i)$. This last inequality follows from the definition of a jump set. Taking now $j \in I_K$ with $j>i$, we need to check that $\rho_{K}^{\beta_K(j)+n-\beta_K(i)}(j)>\rho_K^{n}(i)$, which, $\rho_K$ being strictly increasing, reduces to $\rho_{K}^{\beta_K(i)-\beta_K(j)}(i)<j$, which follows from the definition of a jump set. Therefore we conclude from Theorem \ref{classification wild characters} and Proposition \ref{a cheaper criterion for incompatibility} that $(\{i\},(i \mapsto n)) \not \in \mathcal{J}_K$. \end{proof} \end{corollary} We remark that if we had put $n \leqslant \beta_K(i)$ during the proof of Corollary \ref{acazz} we would have found, thanks to Theorem \ref{classification wild characters}, that $(\{i\},(i \mapsto n)) \in \mathcal{J}_K$, since condition $(1)$ of definition \ref{incompatibility} is not satisfied. This will be helpful in the next corollary.
It turns out that in the case $f_K=1$ there are even enough exceptions to reconstruct the full structure of the filtered $\mathbb{Z}_p$-module $U_{\bullet}(K)$ out of $\mathcal{J}_K$. Namely we have the following. \begin{corollary} \label{reconstructing} Let $K_1,K_2$ be two totally ramified extensions of $\mathbb{Q}_p(\zeta_p)$. Then $\mathcal{J}_{K_1}=\mathcal{J}_{K_2}$ if and only if $U_{\bullet}(K_1) \simeq_{\mathbb{Z}_p\emph{-filt}} U_{\bullet}(K_2)$. \begin{proof}
Firstly observe that $T_{\rho_K}^{*}$ consists precisely of the positive integers $i \in \mathbb{Z}_{\geqslant 1}$ such that $(\{i\}, (i \mapsto n)) \in \mathcal{J}_K$ for some positive integer $n$. Indeed if $i \not \in I_K$, then by Corollary \ref{cor1} any $n \in \mathbb{Z}_{\geqslant 1}$ is allowed. If instead $i \in I_K$ then any $n \leqslant \beta_K(i)$ will be allowed, since in that way $\text{Max}((\{i\},(i \mapsto n)),(I_K,\beta_K))=\emptyset$ (for the definition of $\text{Max}((\{i\},(i \mapsto n)),(I_K,\beta_K))$ see Theorem \ref{classification of characters}). Conversely, by definition of a jump set, it is clear that for any $i \in \mathbb{Z}_{\geqslant 1}$ such that $(\{i\}, (i \mapsto n)) \in \mathcal{J}_K$ for some positive integer $n$, one has $i \in T_{\rho_K}^{*}$. Hence $T_{\rho_K}^{*}$ can be reconstructed from $\mathcal{J}_K$, and, since $e_K=|T_{\rho_K}^{*}|-1$, we can reconstruct $e_K$ from $\mathcal{J}_K$.
Next, from the proof of the previous corollary, it is clear that under the assumption $f_K=1$, the set $I_K$ can be reconstructed from $\mathcal{J}_K$ as the set of $i \in T_{\rho_K}^{*}$ for which there exists a positive integer $n$ such that the extended $\rho_K$-jump set $(\{i\},(i \mapsto n))$ is not in $\mathcal{J}_K$. Moreover in that proof we saw that, for $i \in I_K$, the set of such integers consists precisely of the left interval $[\beta_K(i)+1,\infty) \cap \mathbb{Z}_{\geqslant 1}$, hence also $\beta_K$ can be reconstructed from $\mathcal{J}_K$. Hence we can reconstruct $(I_K,\beta_K)$.
So given $K_1$ and $K_2$ as in the statement we have that $1=f_{K_1}=f_{K_2}$, and we have shown above that we have $e_{K_1}=e_{K_2}$ and so $\rho_{K_1}=\rho_{K_2}$. Moreover by the reasoning just made, from $\mathcal{J}_{K_1}=\mathcal{J}_{K_2}$ we conclude that $(I_{K_1},\beta_{K_1})=(I_{K_2},\beta_{K_2})$. Hence we conclude by Theorem \ref{mup neq 1} that $U_{\bullet}(K_1) \simeq_{\mathbb{Z}_p\text{-filt}} U_{\bullet}(K_2)$. The converse is a triviality. \end{proof} \end{corollary} In other words, for $K_1,K_2$, totally ramified extensions of $\mathbb{Q}_p(\zeta_p)$, one has $\mathcal{J}_{K_1}=\mathcal{J}_{K_2}$ if and only if $e_{K_1}=e_{K_2}$ and $(I_{K_1},\beta_{K_1})=(I_{K_2},\beta_{K_2})$.
We conclude this subsection providing a more explicit description of $\mathcal{J}_K$ in a family of simple cases, namely when $|I_K| \leqslant 2$. Observe that thanks to Theorem \ref{counting}, the equality $|I_K|=2$ is the most typical phenomenon. If $\text{v}_{\mathbb{Q}_p}(e) \geqslant 2$, the probability that $|I_K|>2$ is at most $(\frac{1}{q})^{p-1} \cdot \frac{q-1}{q}$ and at least $(\frac{1}{q})^{p-1} \cdot (\frac{q-1}{q})^{2}$, while if $\text{v}_{\mathbb{Q}_p}(e) \leqslant 1$, then $|I_K| \leqslant 2$ always. Observe also that $|I_K|=1$ if and only if $K$ is a tame extension of $\mathbb{Q}_{p^{f_K}}(\zeta_{p^n})$ where $n$ is the unique element of $\beta_K(I_K)$. The classification for $|I_K|=1$ takes a very simple form. Denoting by $e_0(K)$ the part of $\frac{e_K}{p-1}$ coprime to $p$, recall that, from the definition of admissible jump sets, one has that $|I_K|=1$ if and only if $I_K=\{e_0(K)\}$. Recall by admissibility that $\beta_K(e_0(K))=\text{v}_{\mathbb{Q}_p}(e_K)+1$. \begin{corollary}
\emph{(a)} Suppose $|I_K|=1, f_K=1$. An extended $\rho_K$-jump set $(I,\beta)$ belongs to $\mathcal{J}_K$ if and only if either $e_0(K) \not \in I$ or both $e_0(K) \in I$ and $\beta(e_0(K)) \leqslant \emph{v}_{\mathbb{Q}_p}(e_K)+1$. \\
\emph{(b)} Suppose $|I_K|=1, f_K \geqslant 2$. Then $\mathcal{J}_K=\emph{Jump}_{\rho_K}^{*}$. \begin{proof} (a) If $e_0(K) \not \in I$ we conclude by Corollary \ref{cor1}. If $e_0(K) \in I$ and $\beta(e_0(K)) \leqslant \beta_K(e_0(K))$, then condition $(1)$ of definition \ref{incompatibility} cannot possibly hold, hence we conclude by Theorem \ref{classification wild characters}. Suppose instead that $\beta(e_0(K)) > \beta_K(e_0(K))$. Then all three conditions of definition \ref{incompatibility} are trivially satisfied and we conclude by Theorem \ref{classification wild characters}, finishing the proof.
(b) This follows immediately from Corollary \ref{corollary2}, given the fact that $e_{\rho_K}^{*} \neq e_0(K) \in I_K$. \end{proof} \end{corollary}
We next proceed providing an explicit classification in the case $|I_K|=2, f_K=1$. \begin{corollary}
Suppose $|I_K|=2, f_K=1$. Write $I_K=\{e_0(K),i\}$. Let $(I,\beta) \in \emph{Jump}_{\rho_K}^{*}$. Then $(I,\beta) \in \mathcal{J}_K$ if and only if one of the following two conditions holds. \\ $(1)$ One has that $I_K \cap I=\emptyset$. \\ $(2)$ One has that $I_K \subseteq I$, with $\emph{Max}((I,\beta),(I_K,\beta_K))=I_K$ or $\emph{Max}((I,\beta),(I_K,\beta_K))= \emptyset$. \begin{proof}
From Corollary \ref{cor1} we see that condition $(1)$ indeed implies that $(I,\beta) \in \mathcal{J}_K$. On the other hand condition $(2)$ implies that $|\text{Max}((I,\beta),(I_K,\beta_K))|$ is even, which makes the condition $(1)$ of definition \ref{incompatibility} impossible to hold. Hence we see that condition $(2)$ also implies that $(I,\beta) \in \mathcal{J}_K$. Conversely, assume that $|\text{Max}((I,\beta),(I_K,\beta_K))|=1$ but $I_K \subseteq I$. Then conditions $(1)-(2)-(3)$ of definition \ref{incompatibility} are clearly satisfied, since $I_K-I=\emptyset$. So we are left with the case $\text{Max}((I,\beta),(I_K,\beta_K))=I \cap I_K$. Suppose $I \cap I_K=\{e_0(K)\}$. Then we have to check that $\rho_K^{\beta_K(i)+\beta(e_0(K))-\beta_K(e_0(K))}(i)>\rho_K^{\beta(e_0(K))}(e_0(K))$ which is equivalent to $\rho_K^{\beta_K(e_0(K))-\beta_K(i)}(e_0(K))<i$: this follows from the definition of a jump set. Suppose that $I \cap I_K=\{i\}$. Then we have to check that $\beta_K(e_0(K))+\beta(i)-\beta_K(i)>\beta(i)$ which is saying that $\beta_K(e_0(K))>\beta_K(i)$: this follows from the definition of a jump set. \end{proof} \end{corollary} \begin{remark}
The reason why for $|I_K|=2$ one gets such a simple criterion can be learned from the proof of the previous corollary. Namely the inequality in condition (3) in definition \ref{incompatibility} will always hold when tested against elements of $\text{Max}((I,\beta),(I',\beta'))$, but if $I_K$ has two elements and $I \cap I_K$ has only one, then that is the only possible test to do. So one is left with either $I_K \subseteq I$ or $I \cap I_K= \emptyset$, where in both cases it is very easy to say what Theorem \ref{classification wild characters} prescribes. Indeed the ease of the latter case was formalized in Corollary \ref{cor1}. For convenience we formalize also the ease of the case $I_K \subseteq I$ in the following last corollary. \end{remark} \begin{corollary}
Suppose that $f_K=1$. Suppose that $(I,\beta) \in \emph{Jump}_{\rho_K}^{*}$, with $I_K \subseteq I$. Then $(I,\beta) \in \mathcal{J}_K$ if and only if $|\emph{Max}((I,\beta),(I_K,\beta_K))| \neq 1$ when $\emph{char}(O_K/m_K)>2$ and $|\emph{Max}((I,\beta),(I_K,\beta_K))| \not\equiv 1 \ \emph{mod} \ 2$ when $\emph{char}(O_K/m_K)=2$. \begin{proof} The third condition of definition \ref{incompatibility} becomes trivially satisfied, and the first two conditions are precisely translated in the statement. \end{proof} \end{corollary} \section{The shooting game} \label{shooting game} The goal of this section is to explain the rules of a certain Markov process, which we called the \emph{shooting game}, and some of its variants. This process is the bridge between the two sides of the equality in Theorem \ref{counting}. This will be explained in detail in the next two sections. We shall begin with an informal description.
Let $\rho$ be a shift, and let $r$ be a positive integer. Let $p$ be a prime, $f$ a positive integer and let $q:=p^f$. We will use the following notation: given $m \in \mathbb{Z}_{\geqslant 1}$, denote by $v_{\rho}(m):= \text{max}(i \in \mathbb{Z}_{\geqslant 0}: m \in \text{im}(\rho^i))$. Denote by $n:=v_{\rho}(r)$. Imagine there are $n+1$ shooters $S_0,S_1,\ldots,S_n$, and a rabbit $R$ placed in initial position $r$. The activity of the shooters is to shoot at the rabbit in turns. If the rabbit sits in position $x$ the shooter will always shoot from the $y \in T_{\rho}$ such that $\rho^{v_{\rho}(x)}(y)=x$. We shall call such a $y$ the \emph{shooting position} of the shot. The value $v_{\rho}(x)$ is called the \emph{length} of the shot. The rules describing how the shooters take turns and what the outcome of each turn is are the following.
$(1)$ The shooter $S_i$ cannot perform any shot of length strictly smaller than $i$.
$(2)$ Whenever it turns out (with the above rules) that a shot of length strictly smaller than $i$ must be performed, then $S_i$ leaves the game forever.
$(3)$ A shooter $S_i$ can start shooting only when all the other shooters $S_j$ with $j>i$ had to leave the game by rule $(2)$. In this case he will actually shoot.
$(4)$ The rabbit $R$ moves only when someone shoots. At each shot the rabbit moves somewhere forward on $\mathbb{Z}_{\geqslant 1}$. If $h$ is a positive integer, then $R$ moves exactly $h$ steps forward with probability $\frac{q-1}{q^h}$.
$(5)$ The rabbit $R$ starts in position $r$.
We next explain a natural way to attach to a shooting game $G$ a $\rho$-jump set $(I_G,\beta_G)$. Suppose that during the game $G$ we keep track of the shooting positions where a new shooter came in. Let's call this set $I_G$. To each element of $I_G$ we attach the length of the corresponding shot plus one, and call $\beta_G$ the resulting map from $I_G$ to $\mathbb{Z}_{\geqslant 1}$. Observe that, thanks to the rules, it is clear that $\beta_G$ gives also exactly one plus the number of shooters still participating in that round. Indeed this is true for $n$ by the assumption that the first round must be played with length $n$ (and so must be played necessarily by $S_n$ otherwise rule $(3)$ would be contradicted). For $b<n$, the shooter $S_b$ cannot enter the game playing a shot of length smaller than $b$ by virtue of rule $(1)$, moreover, by virtue of rule $(3)$, it must be that $S_{b+1}$ has left the game if $S_b$ is playing so the length of the shot cannot be more than $b$, otherwise $S_{b+1}$ is still allowed to play and, still by rule $(3)$, he will do so. Thus the length must be $b$. So the map $\beta_G$ is strictly decreasing. Moreover, by rule $(4)$, the rabbit moves forward, which means that the map $i \to \rho^{\beta_G(i)}(i)$ is strictly increasing on $I_G$. In other words we have shown the following fact. \begin{proposition} For each game $G$, the pair $(I_G,\beta_G)$ is a $\rho$-jump set. \end{proposition} Shooting games can be conveniently formalized in the language of discrete-time Markov processes. We recall the basic definition in the generality that will be relevant for us.
A discrete-time Markov process consists of a set $S$, called the \emph{state space}, equipped with a \emph{transition function} $$P:S \times S \to [0,1], $$ and with a point $x_0 \in S$, called the \emph{initial state} of the process. Moreover we require that for each $x$ in $S$ the function $y \mapsto P(x,y)$ is a probability measure on $S$, with respect to the discrete sigma-algebra on $S$. In other words we require that $\sum_{y \in S}P(x,y)=1$. We shall refer to $P(x,y)$ as the probability to \emph{transition} from $x$ to $y$. The data $(S,P,x_0)$ with the above properties, suffice to construct a probability space that models the behavior of a discrete random walk in $S$ starting at $x_0$ and proceeding at each stage from $x$ to $y$ with probability $P(x,y)$. To do so we consider the \emph{space of paths} $$\Omega:=S^{\mathbb{Z}_{\geqslant 1}}, $$ as a topological space with the product topology, where $S$ is viewed as a topological space with the discrete topology. On $\mathcal{B}(\Omega)$, the sigma-algebra of Borel sets of $\Omega$, a unique probability measure $\mu_{P,x_0}$ is defined with the following property. Take $m$ a positive integer. Let $y_1, \ldots ,y_m$ be elements of $S$. For convenience put $y_0:=x_0$. Let $Y$ be the \emph{cylinder} set $Y:=\{y_0\} \times \ldots \times \{y_m\} \times S^{\mathbb{Z}_{\geqslant m+1}}$. We have that $$\mu_{P,x_0}(Y)=\prod_{i=0}^{m-1}P(y_i,y_{i+1}). $$ Moreover we ask that $\mu_{P,x_0}(\{x_0\} \times S^{\mathbb{Z}_{\geqslant 2}})=1$. The existence of such a measure is a simple consequence of the Kolmogorov extension theorem \cite[Theorem 2.4.3]{Tao}.
For the shooting game the triple $(S,P,x_0)$ is as follows. We take as state space $$S:=\{(x_1,x_2) \in \mathbb{Z}_{\geqslant 1} \times \mathbb{Z}_{\geqslant 0}: v_{\rho}(x_1)=x_2 \}. $$ In the informal description being in state $(x_1,x_2) \in S$, means that the rabbit $R$ is in position $x_1$ and that the next shot will be of length $x_2=v_{\rho}(x_1)$. The initial point is $$x_0:=(r,n). $$ The transition function is defined as follows. Let $x:=(x_1,x_2)$ and $y:=(y_1,y_2)$ be in $S$ with $y_1>x_1$. Then we put $$P(x,y):=\frac{q-1}{q^{y_1-x_1}}. $$ For all other choices of $x,y \in S$ we put $P(x,y)=0$. We shall denote by $(\mathcal{S}(\rho,r,q),\mu_{q,r})$ the pair $(\Omega,\mu_{P,x_0})$ defined in the above paragraph. This is the space of shooting games. Sometimes we shall also use the notation $\mathcal{S}(\rho,r)$ to denote merely the topological space $\Omega=S^{\mathbb{Z}_{\geqslant 1}}$. Observe that $$\mu_{q,r}(\{(\omega_1,\omega_2) \in \mathcal{S}(\rho,r): \text{$\omega_1$ is strictly increasing} \})=1. $$ The informal description, at the beginning of this section, gives us a map $$\mathcal{S}(\rho,r) \to \text{Jump}_{\rho}, $$ which can be described as follows. Let $(\omega_1, \omega_2)$ be in $\mathcal{S}(\rho,r)$ with $\omega_1$ strictly increasing. Define $$ I_{(\omega_1,\omega_2)}:=\{i \in \mathbb{Z}_{\geqslant 1}:\ \text{for all positive integers $j$ smaller than $i$ we have} \ \omega_2(i)<\omega_2(j) \}. $$ We put $\beta_{(\omega_1,\omega_2)}$ to be the restriction of $\omega_2+1$ to $I_{(\omega_1,\omega_2)}$. One readily sees that if $G$ is the shooting game corresponding to $(\omega_1,\omega_2)$, then the jump set $(I_{(\omega_1,\omega_2)},\beta_{(\omega_1,\omega_2)})$ coincides with $(I_G,\beta_G)$. In the subspace, having measure $0$, of $(\omega_1,\omega_2)$ such that $\omega_1$ is not an increasing map, we let $I_{(\omega_1,\omega_2)}=\emptyset$. 
Extended jump sets arise from a natural modification of the shooting game, called the extended shooting game. From now on we assume that $T_{\rho}$ is finite. Moreover from now on we shall restrict the variable $r$ to be smaller than $e_{\rho}^{*}=\text{max}(T_{\rho})+1$. The key difference with a shooting game is that in an extended shooting game the shooters can shoot from $T_{\rho}^{*}$ and not only from $T_{\rho}$. We shall directly introduce the extended shooting game in terms of Markov processes.
For the extended shooting game we consider the following triple $(S^{*},P^{*},x_0)$. For any two positive integers $k_1,k_2$ define $v_{\rho}(e_{\rho}^{*},k_1,k_2):=\#\{m \in \mathbb{Z}_{\geqslant 0}: k_1 < \rho^m(e_{\rho}^{*}) \leqslant k_2\}$. We take as state space the set $S^{*}$ of points $(x_1,x_2) \in \mathbb{Z}_{\geqslant 1} \times \mathbb{Z}_{\geqslant 0}$ such that one of the following two holds. Either we have $v_{\rho}(x_1)=x_2$: in this case $(x_1,x_2)$ is said to be of the \emph{first kind}. Or we have that $\rho^{x_2}(e_{\rho}^{*})=x_1$: in this case $(x_1,x_2)$ is said to be of the \emph{second kind}. The initial point is $$x_0:=(r,v_{\rho}(r)). $$ The definition of the transition function is slightly more involved. However, right after the definition, we will give an intuitive perspective on such functions. Let $x:=(x_1,x_2)$ and $y:=(y_1,y_2)$ be in $S$ with $y_1>x_1$. If $y$ is of the first kind we put $$P^{*}(x,y):=\frac{q-1}{q^{y_1-x_1}p^{v_{\rho}(e_{\rho}^{*},x_1,y_1)}}. $$ If $y$ is of the second kind we put $$P^{*}(x,y):=\frac{p-1}{q^{y_1-x_1-1}p^{v_{\rho}(e_{\rho}^{*},x_1,y_1)}}. $$ In all the other cases we put $P^{*}(x,y)=0$. A straightforward calculation shows that this function $P^{*}$ satisfies the equations of a transition functions. We shall instead explain this in a different way, which offers a more intuitive perspective on the formula for $P^{*}$. This can be done by considering an auxiliary family of Markov processes: informally these can be imagined as the Markov processes modeling the behavior of a repeated coin toss that is stopped at the first win. Fix $x_1 \in \mathbb{Z}_{\geqslant 0}$. Let the state space be $S_{x_1}:=\{0,1\} \times \mathbb{Z}_{\geqslant x_1}$. Let the initial point be $x_0:=(1,x_1)$. Let $y$ be an integer larger than $x_1$. We put $P_{x_1}((1,y),(1,y)):=1$, and $P_{x_1}((1,y),z)=0$ for all the other values of $z$ in $S_{x_1}$. 
Moreover we put $P_{x_1}((0,y),(0,y+1))=\frac{1}{q}$ and $P_{x_1}((0,y),(1,y+1))=\frac{q-1}{q}$. Finally we put $P_{x_1}((1,x_1),(0,x_1+1))=\frac{1}{q}$ and $P_{x_1}((1,x_1),(1,x_1+1))=\frac{q-1}{q}$. For all the other values of $z_1,z_2$ in $S_{x_1}$ we put $P_{x_1}(z_1,z_2)=0$. In this manner one obtains a Markov process where with probability $1$ a path is eventually constant, with second coordinate strictly greater than $x_1$. In this manner a probability measure on $\mathbb{Z}_{>x_1}$ is induced, with respect to the discrete sigma-algebra. This measure is precisely the one used in the shooting games: it gives to each $x \in \mathbb{Z}_{>x_1}$ weight equal to $\frac{q-1}{q^{x-x_1}}$. We can imagine a path in $S_{x_1}$ as given by the following scenario. A walker is equipped with a coin $C$ giving $1$ with probability $\frac{q-1}{q}$ and $0$ with probability $\frac{1}{q}$. He starts his walk at $x_1$ and moves at $x_1+1$ to see if he will stop there forever. He tosses $C$ and if the result is $1$ he will stop there forever, otherwise he has to move at $x_1+2$ and repeat the operation. On the other hand, to obtain the formula for $P^{*}$ we are in the following scenario. Our walker has also a second special coin $C^{*}$, this coin takes $1$ with probability $\frac{p-1}{p}$ and $0$ with probability $\frac{1}{p}$. The rule is that he can use $C^{*}$ only when he arrives at a position $x$ such that there is a nonnegative integer $m$ with $\rho^m(e_{\rho}^{*})=x$. In this case before using $C$ he uses $C^{*}$. In case $C^{*}$ gives $1$ he will remain in $x$ forever and he is also provided with a cash prize. If $C^{*}$ gives $0$ he will use $C$ that will still follow the rules as before, telling him if he will stay forever at $x$ (though without cash prize) or if he has to move to $x+1$ to try again his luck. 
In this manner we obtain a natural probability measure on $\{0,1\} \times \mathbb{Z}_{>x_1}$, which we denote by $\mathbb{P}_{x_1}^{*}$, where the first coordinate is $1$ precisely when the walker has obtained also a cash prize. One has that if $(y_1,y_2)$ is of the first kind, then $$\mathbb{P}_{x_1}^{*}((0,y_1))=P^{*}((x_1,x_2),(y_1,y_2)), $$ and if $(y_1,y_2)$ is of the second kind, then $$\mathbb{P}_{x_1}^{*}((1,y_1))=P^{*}((x_1,x_2),(y_1,y_2)). $$
The triple $(S^{*},P^{*},r)$ gives rise to the probability space of extended shooting games. This is the pair $(\mathcal{S}^{*}(\rho,r,q),\mu_{q,r}^{*}):=(\Omega^{*},\mu_{P^{*},r})$, where $\Omega^{*}=(S^{*})^{\mathbb{Z}_{\geqslant 1}}$ is the space of paths and $\mu_{q,r}^{*}$ is the natural probability measure on it, as explained above. Sometimes we shall use the notation $\mathcal{S}^{*}(\rho,r)$ to denote merely the topological space $\Omega^{*}=(S^{*})^{\mathbb{Z}_{\geqslant 1}}$.
Imitating what we have done in the case of shooting games, we obtain a map $$\mathcal{S}^{*}(\rho,r) \to \text{Jump}_{\rho}^{*}. $$
Equip $\text{Jump}_{\rho}^{*}$ with the discrete sigma algebra. Pushing forward $\mu_{q,r}^{*}$ via the map $\mathcal{S}^{*}(\rho,r,q) \to \text{Jump}_{\rho}^{*}$, we obtain a probability measure on $\text{Jump}_{\rho}^{*}$, which we will denote also by $\mu_{q,r}^{*}$. Let $(I,\beta)$ be in\ $\text{Jump}_{\rho}^{*}$. Observe that for $r=e_{\rho}^{'}$ the measure $\mu_{q,r}^{*}$ gives positive probability to $(I,\beta)$ if and only if $(I,\beta)$ is admissible (for a definition see immediately before Theorem \ref{classification of quasi free}).
We devote the next subsection to describe a number of subspaces and quotients of $\mathcal{S}^{*}(\rho,r)$ that will play an important role in the proof of Theorem \ref{counting}. \subsection{Subspaces and quotients of extended shooting games} \label{subspaces and quotient shooting}
For any positive integer $j$ we define $\mathcal{S}_{\geqslant j}^{*}(\rho,r)$ to be the set of $G \in \mathcal{S}^{*}(\rho,r)$ such that $I_G \neq \emptyset$ and $\text{min}(\beta_G) \geqslant j$. We denote by $\mathcal{S}_{\geqslant j}^{*}(\rho,r,q)$ the above set viewed as a measure space with the restriction of $\mu_{q,r}^{*}$. If we normalize the measure in the unique way to get a probability space, we will denote the resulting probability space as $\mathcal{S}_{\geqslant j}^{*}(\rho,r|q)$: this is the probability space of games $G$ \emph{conditioned} to never invoke a shooter of lower index than $S_j$.
Next we define $\mathcal{S}_{=j}^{*}(\rho,r)$ to be the set of $G \in \mathcal{S}^{*}(\rho,r)$ such that $I_G \neq \emptyset$ and $\text{min}(\beta_G) = j$ and $e_{\rho}^{*} \not \in I_G$. We denote by $\mathcal{S}_{= j}^{*}(\rho,r,q)$ the above set viewed as a measure space with the restriction of $\mu_{q,r}^{*}$. If we normalize the measure in the unique way to get a probability space, we will denote the resulting probability space as $\mathcal{S}_{=j}^{*}(\rho,r|q)$: this is the probability space of games $G$ \emph{conditioned} to invoke as a last shooter $S_j$, with his first shot being not from $e_{\rho}^{*}$.
We define $\mathcal{S}_{=j,*}^{*}(\rho,r)$ to be the set of $G \in \mathcal{S}^{*}(\rho,r)$ such that $\text{min}(\beta_G) = j$ and $e_{\rho}^{*} \in I_G$. We denote by $\mathcal{S}_{=j,*}^{*}(\rho,r,q)$ the above set viewed as a measure space with the restriction of $\mu_{q,r}^{*}$. If we normalize the measure in the unique way to get a probability space, we will denote the resulting probability space as $\mathcal{S}_{=j,*}^{*}(\rho,r|q)$: this is the probability space of games $G$ \emph{conditioned} to invoke as a last shooter $S_j$, and by letting him shoot for the first time from $e_{\rho}^{*}$.
Finally for any positive integer $x$ we define $\mathcal{S}_{x,\text{stop}}(\rho,r,q)$ as the quotient probability space of $\mathcal{S}^{*}(\rho,r,q)$, where two games are identified precisely when the trajectories of the rabbit are identical as long as the rabbit stays below $x$.
For all the following remarks $m$ will denote $v_{\rho}(e_{\rho}^{*})$. We will assume that $\rho$ is such that $m \geqslant 2$. Equivalently we are assuming that in $\mathcal{S}^{*}(\rho,e_{\rho}^{'},q)$ the subspace $\mathcal{S}^{*}_{=1}(\rho,e_{\rho}^{'},q)$ has probability strictly smaller than $1$: in case the subspace $\mathcal{S}^{*}_{=1}(\rho,e_{\rho}^{'},q)$ has probability equal to $1$, then the shooting game gives constantly the jump set $(\{e_{\rho}^{'}\},e_{\rho}^{'} \mapsto 1)$.
For a positive integer $x$ and a nonnegative integer $m'$, such that $x \in \text{Im}(\rho^{m'})$ we denote by $\rho^{-m'}(x)$ the unique positive integer $y$ such that $\rho^{m'}(y)=x$. \begin{remark} \label{dec. remark 1}
Let $j \in \{1,\ldots ,m\}$. Given $G$ an element of $\mathcal{S}^{*}(\rho,\rho^{-(j-1)}(e_{\rho}^{'}))$, by applying $\rho^{j-1}$ to it, we obtain an element of $\mathcal{S}^{*}_{\geqslant j}(\rho,e_{\rho}^{'})$, this map is a bijection. Moreover the map $\rho^{j-1}$ induces an isomorphism of measure spaces $\mathcal{S}^{*}_{\geqslant j}(\rho,e_{\rho}^{'}|q) \simeq _{\text{meas. space}} \mathcal{S}^{*}(\rho,\rho^{-(j-1)}(e_{\rho}^{'}),q)$. The map induced on jump sets consists simply of shifting $\beta$ by $j-1$. We call $\psi_j$ the inverse of the isomorphism given by $\rho^{j-1}$. \end{remark} \begin{remark} \label{dec. remark 2} The map described in Remark \ref{dec. remark 1} induces an isomorphism of measure spaces $\mathcal{S}^{*}_{=j}(\rho,e_{\rho}^{'},q) \simeq _{\text{meas. space}} \mathcal{S}^{*}_{=1}(\rho,\rho^{-(j-1)}(e_{\rho}^{'}),q)$. \end{remark} \begin{remark} \label{dec. remark 3} For all elements $G$ of a given equivalence class $C$ in $\mathcal{S}_{x,\text{stop}}(\rho,r,q)$ the set $\{i \in I_{G}:\rho^{\beta_G(i)}(i) \leqslant x \}$ will be the same, and similarly the restriction of $\beta_{G}$ to this set will be the same. The resulting pair $(I_C,\beta_C)$ is also an extended $\rho$-jump set. In particular the set $\mathcal{S}^{*}_{=1}(\rho,e_{\rho}^{'})$ consists of a union of equivalence classes for the projection to $\mathcal{S}_{e_{\rho}^{*},\text{stop}}(\rho,e_{\rho}^{'})$. Moreover in each such equivalence class $C$, the jump set $(I_C,\beta_C)$ coincides with the jump set $(I_G,\beta_G)$ for any $G$ belonging to $C$. \end{remark} \begin{remark} \label{dec. remark 4}
Let $j$ be in $\{1,\ldots ,m-1\}$. Observe that the projection of $\mathcal{S}^{*}_{=j,*}(\rho,e_{\rho}^{'}|q)$ to $\mathcal{S}_{\rho^{j}(e_{\rho}^{*}),\text{stop}}(\rho,e_{\rho}^{'},q)$ lands in the image of $\mathcal{S}^{*}_{\geqslant j+1}(\rho,e_{\rho}^{'},q)$. Thus we can apply to it $\psi_{j+1}$, landing in $\mathcal{S}^{*}_{e_{\rho}^{'},\text{stop}}(\rho,\rho^{-j}(e_{\rho}^{'}),q)$. We denote by $\psi_{j}^{*}:\mathcal{S}^{*}_{=j,*}(\rho,e_{\rho}^{'}|q) \to \mathcal{S}_{\rho^{-j+1}(e_{\rho}^{'}),\text{stop}}(\rho,\rho^{-j}(e_{\rho}^{'}),q)$ the resulting map. If $C=\psi_{j}^{*}(G)$ then $I_G= I_C \cup \{e^{*}\}$ with ${\beta_G}_{|I_C}=\beta_C+j-1$ and $\beta_G(e^{*})=j$. This provides a reconstruction of $(I_G,\beta_G)$ from $(I_{\psi(G)},\beta_{\psi(G)})$. \end{remark} \begin{remark} \label{dec. remark 5} Given $j \in \{1,\ldots ,n\}$, it can be easily shown that one has always that $$(p-1)\mu_{q,e_{\rho}^{'}}^{*}(\mathcal{S}^{*}_{\geqslant j+1}(\rho,e_{\rho}^{'},q))=\mu_{q,e_{\rho}^{'}}^{*}(\mathcal{S}^{*}_{=j,*}(\rho,e_{\rho}^{'},q)). $$ In the setting of local fields this fact is mirrored by Proposition \ref{when star is in for tot ram}.
\end{remark} \section{Shooting game and filtered orbits}\label{shot-game and orbit} Fix $p$ a prime number. Using the notation of Section \ref{filtered modules}, we take $R=\mathbb{Z}_p$, and we fix $f$ a positive integer and $\rho$ a shift. Let $q:=p^f$, and we recall that the module $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ was defined on top of subsection \ref{transitive}. On the other hand recall from Theorem \ref{bijection orbits jumps} that the set of extended jump sets is in bijection with the set of $\text{Aut}_{\text{filt}}(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$-orbits of vectors in $\pi_R(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$. Thus, by using the Haar measure, this induces naturally a probability measure on the set of extended admissible $\rho$-jump sets. We call this measure $\mu_{q,\text{Haar}}$: given $(I,\beta)$ an admissible extended $\rho$-jump set, we have that $\mu_{q,\text{Haar}}(I,\beta):=\mu_{\text{Haar}}(\text{filt-ord}^{-1}(I,\beta))$, where the Haar measure is normalized giving total mass $1$ to the set of orbits corresponding to admissible jump sets (for a definition of admissible see right before Theorem \ref{classification of quasi free}).
On the other hand Section \ref{shooting game} provides us with another measure on admissible extended jump sets, namely the probability that a shooting game in $\mathcal{S}(\rho,e_{\rho}^{'},q)$ gives the jump set $(I,\beta)$. We denoted by $\mu_{q,e_{\rho}^{'}}^{*}$ this probability measure on $\text{Jump}_{\rho}^{*}$.
\begin{proposition} \label{filtered orbits and shootings} For any extended admissible $\rho$-jump set $(I,\beta)$ one has that $$\mu_{q,\emph{Haar}}(I,\beta)=\mu_{q,e_{\rho}^{'}}^{*}(I,\beta). $$ \begin{proof} We prove slightly more. Namely we construct a map $G(-)$ sending an admissible vector $v$ into a shooting game $G(v) \in \mathcal{S}^{*}(\rho,e_{\rho}^{'},q)$, in such a way that $G^{*}(\mu_{\text{Haar}})=\mu_{q,e_{\rho}^{'}}^{*}$ and that $(I_v,\beta_v)=(I_{G(v)},\beta_{G(v)})$.
To construct such a map fix a filtered basis $\mathcal{B}$ for $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$, in the sense of Definition \ref{definition of a basis}. This provides us for each $i \in T_{\rho}$ with elements $b_{i,1},\ldots ,b_{i,f}$ of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ with weights obeying $w(b_{i,j})=i$, and for $e_{\rho}^{*}$ we are provided with an element $b_{e_{\rho}^{*}}$ with $w(b_{e_{\rho}^{*}})=e_{\rho}^{*}$ and $b_{e_{\rho}^{*}} \not \in \pi_R \cdot (M_{\rho}^{f-1} \oplus M_{\rho}^{*})$. Next fix $\mathcal{A}:=\{\alpha_1,\ldots ,\alpha_{|R/m_R|-1}\}$ a set of representatives in $R$ of $(R/m_R)^{*}$. For every $i \in T_{\rho}$, denote by $\mathcal{F}_i:=\mathcal{A}\cdot \mathcal{B}_i$ the set of $ab$ with $a \in \mathcal{A}$ and $b \in \mathcal{B}$. Denote by $i^{*}$ the unique element of $T_{\rho}$ such that there exists a positive integer $m$ with $e_{\rho}^{*}=\rho^m(i^{*})$. Furthermore denote by $\mathcal{F}_{e_{\rho}^{*}}:=\pi_R^{v_{\rho}(e_{\rho}^{*})}\mathcal{F}_{i^{*}}+\mathcal{A}\{b_{e_{\rho}^{*}}\}$. The sets $\mathcal{F}_i$ as $i$ runs in $T_{\rho}^{*}$ are pairwise disjoint. For any vector $z \in M_{\rho}^{f-1} \oplus M_{\rho}^{*}$, there exists a unique $i \in T_{\rho}^{*}$, which we denote by $i_z$, such that there exist $b_z \in \mathcal{F}_{i_z}$ and $v(z) \in \mathbb{Z}_{\geqslant 0}$ with $w(-\pi_R^{v(z)}b_{z}+z)>w(z)$, where $\rho^{v(z)}(i_z)=w(z)$ and $b_{z} \in \mathcal{F}_{i_z}$. The elements $b_z,v(z)$ are unique.
Now let $v \in \pi_R(M_{\rho}^{f-1} \oplus M_{\rho}^{*})$ be a vector in an admissible orbit. Let $x$ be the vector in $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ such that $\pi_Rx=v$. We inductively construct a sequence of vectors by letting $x_1=x$ and setting $x_{j+1}=x_j-\pi_R^{v(x_j)}b_{x_j}$ the unique expression explained above. We use this sequence of vectors to attach to $v$ a shooting game $G(v)$ as follows: we consider the map $(f_1,f_2): \mathbb{Z}_{\geqslant 1} \to \mathbb{Z}_{\geqslant 1} \times \mathbb{Z}_{\geqslant 1}$, given by the relation $f_1(i)=w(x_i)$ and $f_2(i)=v(x_i)$. One can easily verify that the pushforward with $G(-)$ of the Haar measure is $\mu_{q,e_{\rho}^{'}}^{*}$ and that the map $G(-)$ preserves jump sets. \end{proof} \end{proposition} \section{A mass-formula for $U_1$}\label{mass formula} Let $p$ be a prime number, $f$ be a positive integer, denote by $q=p^f$, let $e \in (p-1)\mathbb{Z}_{\geqslant 1}$ and let $(I,\beta)$ be an extended admissible $\rho_{e,p}$-jump set. The goal of this section is to provide a proof of Theorem \ref{counting}. In virtue of Proposition \ref{filtered orbits and shootings}, this task is equivalent to proving the following Theorem. \begin{theorem} \label{counting rephrased} $$ \mu_{\frac{e}{p-1},\mathbb{Q}_{p^f}(\zeta_p)}(\{K \in S(\frac{e}{p-1},\mathbb{Q}_{p^f}(\zeta_p)):(I_K,\beta_K)=(I,\beta)\})=\mu_{q,e_{\rho}^{'}}^{*}(I,\beta). $$ \end{theorem} If $F$ is a local field and $h$ is a positive integer, then $\text{Eis}(h,F)$ denotes the set of degree $h$ Eisenstein polynomials in $F[x]$. These are monic polynomials $f(x)$ with coefficients in $O_F$, that reduced modulo $m_F$, the maximal ideal of $O_F$, become $x^h$ and such that $f(0) \not \in m_F^2$.
\subsection{Proof outline} Since our proof of Theorem \ref{counting rephrased} is quite long, we shall first explain its basic idea. In subsection \ref{idea} we give an overview of the main ideas of the proof. In subsection \ref{strategy} we explain how the proof reduces to the construction of certain maps from certain spaces of Eisenstein polynomials to shooting games. Finally we spend the rest of the section to construct such maps and to show that they meet all the requirements explained in subsection \ref{strategy}. \subsubsection{The idea of the proof} \label{idea} In this subsection the discussion is \emph{informal}. Our priority here is to provide some intuition about how the proof of Theorem \ref{counting rephrased} goes. For a formal proof see from subsection \ref{strategy} on.
The starting idea is to proceed as in the proof of Proposition \ref{filtered orbits and shootings}. One has immediately a difference between the set-up of Proposition \ref{filtered orbits and shootings} and the one of Theorem \ref{counting rephrased}. In Proposition \ref{filtered orbits and shootings} one has a \emph{fixed} free-filtered module where it is possible to successively ``shoot at elements of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$" as done in that proof, using a fixed filtered basis. In this manner a measure-preserving map is obtained sending each vector of $M_{\rho}^{f-1} \oplus M_{\rho}^{*}$ to an extended shooting game. By measure-preserving here we mean that the push-forward of the measure on the source is equal to the measure on the target. In Theorem \ref{counting rephrased} we have a \emph{varying} quasi-free filtered module, namely $U_{\bullet}(K)$, so we need firstly to find a common manner to successively ``shoot at the units" in order to proceed as in the proof of Proposition \ref{filtered orbits and shootings}. This step can be done by fixing the set of polynomials $\mathcal{B}:=\{(1+\gamma x^i): i \in T_{\rho_{e,p}}, \gamma \in \text{Teich}(\mathbb{Q}_{p^f})-\{0\} \} \cup \{1+\varepsilon' x^{\frac{pe}{p-1}} \}$, where $\text{Teich}(\mathbb{Q}_{p^f})$ denotes the set of Teichm\"uller representatives of $\mathbb{F}_{p^f}$ in $\mathbb{Q}_{p^f}$. Here $\varepsilon'$ is the Teichm\"uller representative of a fixed element $\varepsilon \in \mathbb{F}_{p^f}$ with $\text{Tr}_{\mathbb{F}_{p^f}/\mathbb{F}_p}(\varepsilon) \neq 0$. To keep a stricter analogy with the proof of Proposition \ref{filtered orbits and shootings}, we should only allow a set of Teichm\"uller representatives that, once reduced modulo $p\mathbb{Z}_{p^f}$, becomes a basis of $\mathbb{F}_{p^f}$. Since this restriction would make the description of the next steps heavier and is irrelevant for the present discussion, we shall disregard it. 
One can attempt to proceed precisely as in the proof of Theorem \ref{filtered orbits and shootings} in order to construct a measure-preserving function from the set of Eisenstein polynomials to the set of shooting games. As we will see, we will use only a part of this idea, one that is still good enough to obtain a proof of Theorem \ref{counting rephrased} and that combined with a different set of observations (explained at the end of this subsection) leads to more informative results. More concretely one starts with an Eisenstein polynomial $g(x)=x^{\frac{e}{p-1}}+\sum_{i=0}^{\frac{e}{p-1}-1}a_ix^i$. Next one finds a unit $u$ in $\mathbb{Z}_{p^f}[\zeta_p]^{*}$ in such a way that $ug(x)=ux^{\frac{e}{p-1}}+\sum_{i=1}^{\frac{e}{p-1}-1}ua_ix^i+1-\zeta_p$. Hence in the field $\mathbb{Q}_{p^f}(\zeta_p)[x]/g(x)$ one can write $\zeta_p=1+\sum_{i=1}^{\frac{e}{p-1}-1}ua_ix^i+ux^{\frac{e}{p-1}}=:g_1(x)$. At this point one multiplies $g_1(x)$ by $(1+\gamma x^{e_0})^{p^{\text{v}_{\mathbb{Q}_p}(e)}}$ for a suitable $\gamma \in \text{Teich}(\mathbb{Q}_{p^f})$, where $e_0$ denotes the largest divisor of $\frac{e}{p-1}$ coprime to $p$. \emph{After} expanding the product, we replace all the powers of $x$ having degree larger than $\frac{e}{p-1}$ with their remainder upon division by $g(x)$. In this way a second expression $g_2(x)=1+\sum_{i=1}^{\frac{pe}{p-1}-1}a_i(2)x^i$ is obtained. Now we would like to iterate this. We do so as long as this unit has weight less than $\frac{pe}{p-1}$. In this case we have precisely one way to choose an element of $\mathcal{B}$ that does the same job $1+\gamma x^{e_0}$ did for $g_1(x)$: in particular we do not use the element $1+\varepsilon' x^{\frac{pe}{p-1}}$. If we iterate this procedure as long as the weight stays below $\frac{pe}{p-1}$, we obtain a sequence of polynomials $g_1(x), \ldots ,g_k(x)$ where $g_{s+1}(x)$ is obtained by ``shooting" $g_s(x)$ with an element of $\mathcal{B}$ in the way hinted above. 
Moreover it is relatively easy to determine that the change of weight from $g_s(x)$ to $g_{s+1}(x)$ obeys the same rule as the change of positions of the rabbit during the shooting game. Indeed, as we shall see in the proof, although the expressions for $g_{s+1}(x)$ can become increasingly complicated, there is a simple way to get the \emph{probability} that the weight of $g_{s+1}(x)$ will be larger than a given $y$, with $y<\frac{pe}{p-1}$. The reason for this is that we can divide in two pieces the expressions that decide whether the weight of $g_{s+1}(x)$ will be larger than $y$. One piece comes from ``lower order terms" and it behaves in the proof, from the probabilistic point of view, as a \emph{constant}. The other piece comes in a very simple manner from the Eisenstein polynomial $g(x)$ and one sees, directly from the definition of Haar measure on Eisenstein polynomials, that it is a uniform random variable in $\text{Teich}(\mathbb{Q}_{p^f})$. In this way we can prove Theorem \ref{counting rephrased} for all $(I,\beta)$ with $\text{min}(\beta)=1$ and $\frac{pe}{p-1} \not \in I$. To proceed further we need to deal with the case that, in the above ``shooting process", the unit has reached a weight at least $\frac{pe}{p-1}$ and we have not yet used a shot of length $0$. That means that either $\frac{pe}{p-1} \in I_K$ with $\beta(\frac{pe}{p-1})=1$ or $\zeta_{p^2} \in K$. The last remark in Section \ref{subspaces and quotient shooting} tells us that the former possibility should occur precisely $p-1$ times as often as the latter. On the other hand Proposition \ref{when star is in for tot ram} tells us that the same happens for local fields. 
Indeed the fields $\{\mathbb{Q}_{p^f}(\zeta_p)(s): s \in \{1, \ldots , p\}\}$ have all the same mass, therefore by Proposition \ref{when star is in for tot ram} we conclude that they partition the set of local fields $K$, having either $\frac{pe}{p-1} \in I_K$ or $\zeta_{p^2} \in K$, into $p$ disjoint sets $X_1, \ldots ,X_p$ having all the same mass, with $K \in X_s$ if and only if $\mathbb{Q}_{p^f}(\zeta_p)(s) \subseteq K$. For $s \in \{1, \ldots , p-1 \}$ we have that the total mass of $X_s$ equals $\frac{1}{p-1}$ of the total mass of the fields $K$ with $\frac{pe}{p-1} \in I_K$ and $\beta_K(\frac{pe}{p-1})=1$. On the other hand $X_p$ consists of those fields $K$ with $\zeta_{p^2} \in K$. So we deal with the sets $X_1, \ldots ,X_{p-1}$ working with Eisenstein polynomials over $\mathbb{Q}_{p^f}(\zeta_p)(1), \ldots ,\mathbb{Q}_{p^f}(\zeta_p)(p-1)$ and we deal with $X_p$ using $\mathbb{Q}_{p^f}(\zeta_{p^2})$. Thanks to Proposition \ref{when star is in for tot ram}, by repeating the above ``shooting argument'' for the sets $X_1, \ldots , X_{p-1}$, Theorem \ref{counting rephrased} is proved also in the case that $\frac{pe}{p-1} \in I$ with $\beta(\frac{pe}{p-1})=1$. The idea is to repeat this whole proof structure over $\mathbb{Q}_{p^f}(\zeta_{p^2})$.
\subsubsection{Proof strategy} \label{strategy} The plan of the proof is the following. Let $n:=\text{v}_{\mathbb{Q}_p}(e)$. We will use the notation from Section \ref{shooting game} and in particular from Section \ref{subspaces and quotient shooting}. For each $j \in \{0,1,\ldots ,n\}$ we construct maps $$\sigma_j:\text{Eis}(\frac{e}{p^j(p-1)}, \mathbb{Q}_q(\zeta_{p^{j+1}})) \to \mathcal{S}_{\frac{pe}{p-1}, \text{stop}}(\rho_{e,p},\frac{e}{p^j(p-1)},q) $$ and for each $j_1 \in \{0,\ldots ,n-1\}$ and $j_2 \in \{1,\ldots ,p-1\}$ we construct maps $$\sigma_{j_1,j_2}:\text{Eis}(\frac{e}{p^{j_1+1}(p-1)}, \mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)) \to \mathcal{S}_{\frac{e}{p-1}, \text{stop}}(\rho_{e,p},\frac{e}{p^{j_1+1}(p-1)},q), $$ having the following two properties.
$(P.1)$ For any $j \in \{0,1,\ldots ,n\}$ and $f(x) \in \text{Eis}(\frac{e}{p^j(p-1)}, \mathbb{Q}_q(\zeta_{p^{j+1}}))$, denoting by $K_{f(x)}:=\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/f(x)$, we have that $$\{i \in I_{K_{f(x)}}: \rho_{e,p}^{\beta_{K_{f(x)}}-(j+1)}(i)<\frac{pe}{p-1} \}=I_{\sigma_j(f(x))} $$ and for each $i \in I_{\sigma_j(f(x))}$ we have that $$\beta_{K_{f(x)}}(i)=\beta_{\sigma_j(f(x))}(i)+j. $$
For any $j_1 \in \{0,\ldots ,n-1\}$, $j_2 \in \{1,\ldots ,p-1\}$ and $f(x) \in \text{Eis}(\frac{e}{p^{j_1+1}(p-1)}, \mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2))$, we have that $$I_{K_{f(x)}}=I_{\sigma_{j_1,j_2}(f(x))} \cup \{\frac{pe}{p-1}\}. $$ For $i \in I_{\sigma_{j_1,j_2}(f(x))}$ we have $$\beta_{K_{f(x)}}(i)=\beta_{\sigma_{j_1,j_2}(f(x))}(i)+j_1+1. $$ Finally we have $$\beta_{K_{f(x)}}(\frac{pe}{p-1})=j_1+1. $$
$(P.2)$ For any $j \in \{0,1,\ldots ,n\}$ pushing forward $\mu_{\text{Haar}}$, the natural probability measure on $\text{Eis}(\frac{e}{p^j(p-1)},\mathbb{Q}_q(\zeta_{p^{j+1}}))$ coming from the Haar measure on the coefficients, with $\sigma_j$ one obtains $\mu_{q,\frac{e}{p^j(p-1)},\rho_{e,p}}^{*}$, the probability measure on $\mathcal{S}_{\frac{pe}{p-1}, \text{stop}}(\rho_{e,p},\frac{e}{p^j(p-1)},q)$ introduced in Section \ref{shooting game}.
For any $j_1 \in \{0,1,\ldots ,n-1\}$ and $j_2 \in \{1,\ldots ,p-1\}$, pushing forward $\mu_{\text{Haar}}$ with $\sigma_{j_1,j_2}$ from $\text{Eis}(\frac{e}{p^{j_1+1}(p-1)}, \mathbb{Q}_{q}(\zeta_{p^{j_1+1}})(j_2))$ to $\mathcal{S}_{\frac{e}{p-1}, \text{stop}}(\rho_{e,p},\frac{e}{p^{j_1+1}(p-1)},q)$, one obtains $\mu_{q,\frac{e}{p^{j_1+1}(p-1)},\rho_{e,p}}^{*}$.
The construction of such maps $\sigma_{j}$ and $\sigma_{j_1,j_2}$ satisfying $(P.1)$ and $(P.2)$ as above, is sufficient to prove Theorem \ref{counting rephrased} and thus Theorem \ref{counting}. Indeed, thanks to Remark \ref{dec. remark 3}, we can conclude with $\sigma_0$ that Theorem \ref{counting rephrased} holds for all $(I,\beta)$ with $\text{min}(\beta)=1$ and $\frac{pe}{p-1} \not \in I$. At that point we know that the probability of the event $\{\text{min}(\beta)>1 \ \text{or} \ \frac{pe}{p-1} \in I \}$ has equal probability on both sides (Eisenstein polynomials and shooting games). We remark that this conclusion can be reached alternatively also by a direct computation. By Remark \ref{dec. remark 5} we know that, at the level of shooting games, the probability of the event $\{\frac{pe}{p-1} \in I, \beta(\frac{pe}{p-1})=1 \}$ is $p-1$ times as large as the event $\{\text{min}(\beta)>1 \}$. On the other hand this is clearly true also at the level of Eisenstein polynomials: the fields $\mathbb{Q}_{q}(\zeta_p)(j)$ have the same mass as $j$ runs through $\{1,\ldots ,p\}$, and by Proposition \ref{when star is in for tot ram}, precisely the first $p-1$ of them give the event $\{\frac{pe}{p-1} \in I, \beta(\frac{pe}{p-1})=1 \}$, while the last (which is $\mathbb{Q}_{q}(\zeta_{p^2})$) gives the event $\{\text{min}(\beta)>1 \}$. Thus we can go on in the proof of Theorem \ref{counting rephrased} by conditioning on both sides with either the event $\{\frac{pe}{p-1} \in I, \beta(\frac{pe}{p-1})=1\}$ or the event $\{ \text{min}(\beta)>1 \}$. Thus by Remark \ref{dec. remark 4}, and with the $\sigma_{0,j_2}$ we conclude the validity of Theorem \ref{counting rephrased} for $(I,\beta)$ with $\frac{pe}{p-1} \in I$ and $\beta(\frac{pe}{p-1})=1$. 
Here we are using that if $F/K$ is a totally ramified Galois extension of local fields, then, for an extension $F/E$ and a positive integer $d \in [F:E]\mathbb{Z}_{\geqslant 1}$, the conditional probability measure $\mu_{d,E}(-|\{F \text{\ is a subfield}\})$ equals the probability measure $\mu_{\frac{d}{[F:E]},F}$\footnote{Here we are using the following standard notation. If $(X,\mu)$ is a probability space and $A \subseteq X$ is a measurable subset with $\mu(A)>0$, then $\mu(-|A)$ denotes the probability measure on $A$, defined by the formula $\mu(-|A)(B):=\frac{\mu(B)}{\mu(A)}$ for each $B \subseteq A$ measurable.}. That justifies the passage to Eisenstein polynomials over the extensions $\mathbb{Q}_{q}(\zeta_{p})(j)$.
Now we continue working over $\mathbb{Q}_{q}(\zeta_{p^2})$ and we proceed precisely as above. Namely we first use the map $\sigma_{1}$ to show that Theorem \ref{counting rephrased} holds for $(I,\beta)$ with $\text{min}(\beta)=2$ and $\frac{pe}{p-1} \not \in I$. If $n=1$ we are done. Otherwise we again obtain that the measure of the event $\{\text{min}(\beta)>2 \ \text{or} \ \frac{pe}{p-1} \in I\}$ coincides on both sides of Theorem \ref{counting rephrased}. Finally Remark \ref{dec. remark 5} gives that, at the level of shooting games, the event $\{\frac{pe}{p-1} \in I, \beta(\frac{pe}{p-1})=2 \}$ is $p-1$ times as frequent as the event $\{\text{min}(\beta)>2\}$. This holds also for Eisenstein polynomials thanks to the fact that the extensions $\mathbb{Q}_{q}(\zeta_{p^2})(j)$ of $\mathbb{Q}_{q}(\zeta_{p^2})$ for $j \in \{1,\ldots ,p\}$ have all the same mass, and by Proposition \ref{when star is in for tot ram} we have that the first $p-1$ give the event $\{\frac{pe}{p-1} \in I, \beta(\frac{pe}{p-1})=2 \}$ while the last (which is $\mathbb{Q}_{q}(\zeta_{p^3})$) gives the event $\{\text{min}(\beta)>2\}$. Thus we use the maps $\sigma_{1,j}$ to prove Theorem \ref{counting rephrased}, with the same considerations made above, and we go on working over $\mathbb{Q}_{q}(\zeta_{p^3})$. Iterating this argument we prove Theorem \ref{counting rephrased} for every $(I,\beta)$, an extended admissible jump set. Therefore to finish the proof, we are left with constructing the maps $\sigma_{j}, \sigma_{j_1,j_2}$ and showing that they have properties $(P.1)$ and $(P.2)$. This done in the next two subsections. \subsection{Construction of the maps $\sigma_j, \sigma_{j_1,j_2}$} \label{Construction of the maps} Let $j \in \{0,\ldots ,n\}$, we begin with the construction of $\sigma_j$. To lighten the notation, denote $e_j:=\frac{e}{p^j(p-1)}$. 
An element $$f(x):=x^{e_j}+\sum_{i=0}^{e_j-1}a_ix^i $$ in $\text{Eis}(e_j,\mathbb{Q}_q(\zeta_{p^{j+1}}))$ can be equivalently represented as $$ \tilde{f}(x):=1+\sum_{i=1}^{e_j}{\tilde{a}}_ix^i $$ where $\tilde{f}(x):=\frac{1-\zeta_{p^{j+1}}}{a_0}f(x)+\zeta_{p^{j+1}}$. This gives us an embedding of $\text{Eis}(e_j, \mathbb{Q}_q(\zeta_{p^{j+1}}))$ into $H_{e_j}(\mathbb{Q}_q(\zeta_{p^{j+1}})):=\{g \in \mathbb{Z}_q[\zeta_{p^{j+1}}]:\text{deg}(g) \leqslant e_j, g(0)=1, g(x) \equiv 1 \ \text{or} \ g(x) \equiv 1+ax^{e_j} \ \text{mod} \ (1-\zeta_{p^{j+1}}) \ \text{for some $a \in \mathbb{Z}_q[\zeta_{p^{j+1}}]^{*}$} \}$. Starting with $f_0(x):=\tilde{f}(x)$, we define inductively a sequence $\{f_n(x)\}_{n \in \mathbb{Z}_{\geqslant 0}}$ with $f_n(x) \in H_{e_j}(\mathbb{Q}_q(\zeta_{p^{j+1}}))$ for every $n \in \mathbb{Z}_{\geqslant 0}$. To do so we first define a weight map on $H_{e_j}(\mathbb{Q}_q(\zeta_{p^{j+1}}))$ by $$w(1+\sum_{i=1}^{e_j}b_ix^i)=\text{min}_{1 \leqslant i \leqslant e_j: b_i \neq 0}(e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(b_i)+i). $$ Now, suppose that $w(f_n(x)) \geqslant \frac{pe}{p-1}$, then declare $$f_{n+1}(x)=f_n(x). $$ So, suppose that $w(f_n(x))<\frac{pe}{p-1}$. Observe that $$f_n(x) \in U_{w(f_n(x))}(\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/f(x))-U_{w(f_n(x))+1}(\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/f(x)),$$ thus there exist unique $i_n \in T_{\rho_{e,p}}, \beta_n \in \mathbb{Z}_{\geqslant 0}$ and unique $\varepsilon_n$, a Teichm\"uller representative in $\mathbb{Q}_q$, such that $$(1+\varepsilon_nx^{i_n})^{p^{\beta_n}}f_n(x) \in U_{w(f_n(x))+1}(\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/f(x)). $$ It is not difficult to show that there exists a unique element of $H_{e_j}(\mathbb{Q}_q(\zeta_{p^{j+1}}))$ congruent to $(1+\varepsilon_nx^{i_n})^{p^{\beta_n}}f_n(x)$ modulo $f(x)$ and of degree at most $e_j$. We put $f_{n+1}(x)$ to be this element. 
It follows by construction that $w(f_{n+1}(x)) \geqslant w(f_n(x))$ with equality occurring iff $w(f_{n}(x)) \geqslant \frac{pe}{p-1}$. Moreover in the case of equality we have $f_{n+1}(x)=f_n(x)$. Thus we define $$\sigma_j(f(x)):=\{n \mapsto (w(f_{n-1}(x)),\beta_{n-1})\}_{n \in \mathbb{Z}_{\geqslant 1}} \in \mathcal{S}_{\frac{pe}{p-1},\text{stop}}(\rho_{e,p},\frac{e}{p^j(p-1)},q). $$
Let now $j_1 \in \{0,\ldots ,n-1\}$ and $j_2 \in \{1,\ldots ,p-1\}$. The map $\sigma_{j_1,j_2}$ is defined similarly to how the maps $\sigma_j$ were defined. We briefly explain the modifications. Fix units $u_1,u_2 \in \mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)$ with $$ u_1^{p}u_2=\zeta_{p^{j_1+1}}, \ \text{v}_{\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)}(u_1-1)=1, \ \text{v}_{\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)}(u_2-1)=\frac{pe}{p-1} $$ and $u_2 \not \in (\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2))^{*p} $, as guaranteed by Proposition \ref{I,beta for twisted cyclotomic} and Corollary \ref{The relation between the units} together. Next, given $f(x):=x^{e_{j_1+1}}+\sum_{i=0}^{e_{j_1+1}-1}a_ix^i \in \text{Eis}(e_{j_1+1}, \mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2))$, define this time $$\tilde{f}(x):=\frac{1-u_1}{a_0}f(x)+u_1. $$ Also change $H_{e_{j_1+1}}(\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2))$ to be the set $$\{g \in \mathbb{Z}_q[\zeta_{p^{j_1+1}},u_1]:\text{deg}(g) \leqslant e_{j_1+1}, g(0)=1, g(x) \equiv 1 \ \text{or} \ g(x) \equiv 1+x^{e_{j_1+1}} \ \text{mod} \ (1-u_1) \},$$ and set the cut-off for concluding $f_{n+1}(x)=f_n(x)$ to be $w(f_n(x)) \geqslant e_{j_1}$. Following the above procedure, with these modifications, we get the construction of $$\sigma_{j_1,j_2}(f(x)) \in \mathcal{S}_{\frac{e}{p-1},\text{stop}}(\rho_{e,p},\frac{e}{p^{j_1+1}(p-1)},q). $$ \subsection{The maps $\sigma_j,\sigma_{j_1,j_2}$
satisfy properties $(P.1), (P.2)$} Let us begin showing that, for $j \in \{0,\ldots ,n\}$, the map $\sigma_j$ obeys the property $(P.1)$. By construction, we know that for $f(x) \in \text{Eis}(e_j, \mathbb{Q}_q(\zeta_{p^{j+1}}))$, we have that $$\zeta_{p^{j+1}} \cdot \prod_{n:w(f_n(x))<\frac{pe}{p-1}}(1+\alpha_nx^{i_n})^{p^{\beta_n}} \in U_{\frac{pe}{p-1}}(\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/f(x)), $$ with $p^{\beta_n}i_n=w(f_n(x))$, so the sequence $n \mapsto p^{\beta_n}i_n$ is strictly increasing as $n$ runs with the constraint $w(f_n(x))<\frac{pe}{p-1}$. Of course, the weight of $1+\alpha_nx^{i_n}$ in $U_{\bullet}(\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/f(x))$ is precisely $i_n$. Therefore one sees that the values of $n$ such that $i_n \in I_{\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/f(x)}$ are precisely those where $\beta_n$ reaches a new minimum. This is precisely the same rule that implies $i_n \in I_{\sigma_j(f(x))}$. For such an $i_n$ it easily follows from Corollary \ref{The relation between the units} that $$\beta_n+j+1=\beta_{\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/f(x)}(i_n).$$ This shows that $\sigma_j$ enjoys the property $(P.1)$ for each $j \in \{0,\ldots ,n\}$.
We next show that for $j_1 \in \{0,\ldots ,n-1\}$ and $j_2 \in \{1,\ldots ,p-1\}$, the map $\sigma_{j_1,j_2}$ satisfies $(P.1)$. Recall the definition of the units $u_1,u_2$ introduced during the construction of the map $\sigma_{j_1,j_2}$. By construction, we know that for $f(x) \in \text{Eis}(e_{j_1+1}, \mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2))$, we have that $$u_1 \cdot \prod_{n:w(f_n(x))<e_{j_1}}(1+\alpha_nx^{i_n})^{p^{\beta_n}} \in U_{\frac{e}{p-1}}(\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)[x]/f(x)). $$ This implies that $$u_2^{-p^{j_1}} \cdot \prod_{n:w(f_n(x))<e_{j_1}}(1+\alpha_nx^{i_n})^{p^{\beta_n+j_1+1}} \in U_{\frac{p^{j_1+1}e}{p-1}}(\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)[x]/f(x)). $$ Therefore all the other units that will be employed in order to write the full relation, cannot give a contribution to $(I_{\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)[x]/f(x)},\beta_{\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)[x]/f(x)})$, due to the presence of $\frac{pe}{p-1}$ in $I_{\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)[x]/f(x)}$, with $\beta_{\mathbb{Q}_q(\zeta_{p^{j_1+1}})(j_2)[x]/f(x)}(\frac{pe}{p-1})=j_1+1$ as guaranteed by Proposition \ref{when star is in for tot ram}. Thus one concludes using the same argument employed for $\sigma_j$.
We next prove that the maps $\sigma_j,\sigma_{j_1,j_2}$ satisfy $(P.2)$. We will do so for $\sigma_j$, the argument for $\sigma_{j_1,j_2}$ being basically the same with different notation.
Given $f \in \text{Eis}(e_j, \mathbb{Q}_q(\zeta_{p^{j+1}}))$, let us begin expanding each of the coefficients of $$\tilde{f}(x):=1+\sum_{i=1}^{e_j}\tilde{a}_ix^i $$ as $$\tilde{a}_i=\sum_{k=1}^{\infty}\varepsilon_{k,i}(1-\zeta_{p^{j+1}})^k, $$ for $1 \leqslant i<e_j$ and for $i=e_j$ we write $$\tilde{a}_{e_j}=\sum_{k=0}^{\infty}\varepsilon_{k,e_j}(1-\zeta_{p^{j+1}})^k, $$ where all $\varepsilon_{k,i}$ are Teichm\"uller representatives of $\mathbb{Z}_q$. We can consider any finite subset of the $\varepsilon_{k,i}$ as independent random variables taking values in all possible Teichm\"uller representatives with the uniform distribution if $(k,i) \neq (0,e_j)$ and uniformly in the non-zero Teichm\"uller representatives of $\mathbb{Z}_q$ for $\varepsilon_{0,e_j}$.
Next, for each $n \in \mathbb{Z}_{\geqslant 1}$, we set $$f_n(x)=1+\sum_{i=1}^{e_j}a_i(n)x^i, $$ and we let $$a_i(n):=\sum_{k=1}^{\infty}\varepsilon_{k,i}(n)(1-\zeta_{p^{j+1}})^k $$ be the corresponding Teichm\"uller expansion with respect to $(1-\zeta_{p^{j+1}})$. The fact that the second sum starts with $k=1$ is a consequence of the fact that the weights of $f_n(x)$ are strictly increasing as $n$ increases, together with the definition of $H_{e_j}(\mathbb{Q}_q(\zeta_{p^{j+1}}))$.
For any fixed $n \in \mathbb{Z}_{\geqslant 0}$ the monomials $\varepsilon_{k,i}(n)(1-\zeta_{p^{j+1}})^kx^i$ can be given the weight $ke_j+i$ if $\varepsilon_{k,i}(n) \neq 0$ and $\infty$ otherwise. This induces a total order on the various non-zero monomials, and the weight $w(f_n(x))$ of $f_n(x)$ as defined before, equals the minimum weight of the various monomials as long as there is a monomial with weight less than $\frac{pe}{p-1}$, otherwise we have already arrived at the point where the sequence $f_n(x)$ is eventually constant.
From the rule to obtain $f_{m+1}(x)$ out of $f_m(x)$ we see that for any $n \in \mathbb{Z}_{\geqslant 1}$, and any positive integer $\frac{pe}{p-1}>w_0>w(f_{n-1}(x))$, there exists a function $F_{w_0,n}$ taking as input the sequence of $(\varepsilon_{k,i})_{e_jk+i<w_0}$ and giving as output a Teichm\"uller representative of $\mathbb{Z}_q$, in such a way that if we write $w_0=e_jq'+h$, the division with remainder of $w_0$ by $e_j$, we have that $$\varepsilon_{q',h}(n)=[F_{w_0,n}((\varepsilon_{k,i})_{e_jk+i<w_0})+\varepsilon_{q',h}]_{\text{Teich}}, $$ where for $a \in \mathbb{Z}_q$, the symbol $[a]_{\text{Teich}}$ denotes the unique Teichm\"uller representative $\varepsilon$ in $\mathbb{Z}_q$ such that $\varepsilon \equiv a \ \text{mod} \ p$. It thus follows at once that for the collection of $(q',h)$ with $\frac{pe}{p-1}>e_jq'+h>w(f_{n-1}(x))$, the variables $\varepsilon_{q',h}(n)$ are independent random variables taking values in the Teichm\"uller representatives of $\mathbb{Z}_q$ with the uniform distribution. Therefore the change of weights from $w(f_{n-1}(x))$ to $w(f_n(x))$ is governed precisely by the rules of the shooting game. This ends the proof. \section{Finding jump sets inside an Eisenstein polynomial} \label{finding jump sets inside} The primary goal of this Section is to establish Theorem \ref{valuation coefficients more general}, which is a generalization of Theorem \ref{valuation coefficients} from the Introduction. We will next specialize Theorem \ref{valuation coefficients more general} to obtain several consequences that aim to give a sense to the reader on how efficiently one can establish the value of $(I,\beta)$ in the range of the Theorem. Most notably we will see that for $q$ odd or for $j \geqslant 1$, the set of \emph{strongly} Eisenstein polynomials (see Definition \ref{strongly Eisenstein}) over $\mathbb{Q}_q(\zeta_{p^{j+1}})$ is precisely the set of polynomials giving the jump set that has the highest probability. 
Also we will see the relation between Theorem \ref{valuation coefficients more general} and Theorem \ref{counting rephrased}. Indeed we shall prove Theorem \ref{valuation coefficients more general}, by establishing the equality between the jump set of a shooting game coming from the valuation of the coefficients of an Eisenstein polynomial (denoted as $\tilde{\sigma}_j$ below) and (part of the) jump set of the shooting game constructed using the maps introduced during the proof of Theorem \ref{counting rephrased} (denoted as $\sigma_j$). Also we observe that Theorem \ref{counting rephrased} partially \emph{follows} as a direct counting from Theorem \ref{valuation coefficients more general}, namely it does so for the jump sets coming from the region of Eisenstein polynomials where Theorem \ref{valuation coefficients} applies (which for instance for $p=2$ and $j=0$ (i.e. over $\mathbb{Q}_2$) is empty, and for general $p$ it misses an open set of Eisenstein polynomials). Finally we shall give examples, showing that without the main assumption on the different, the conclusion of Theorem \ref{valuation coefficients more general} is not anymore valid in general.
Let $j \in \mathbb{Z}_{\geqslant 0}$, $p$ a prime number, $f \in \mathbb{Z}_{\geqslant 1}$ and $q:=p^f$. Let $e \in p^{j}(p-1)\mathbb{Z}_{\geqslant 1}$. Recall the notation $e_j:=\frac{e}{p^j(p-1)}$, used during the proof of Theorem \ref{counting rephrased}. Let $g(x) \in \text{Eis}(e_{j}, \mathbb{Q}_q(\zeta_{p^{j+1}}))$ (see notation from the proof of Theorem \ref{counting rephrased}). We proceed to define a stopping shooting game attached to $g$, which will be denoted as $$ {\tilde{\sigma}}_{j}(g(x)) \in \mathcal{S}_{e,\text{stop}}(\rho_{e,p},e_j,q). $$ It is defined with the following simple rule. Write $g(x)=x^{e_j}+\sum_{i=0}^{e_j-1}a_ix^i$ and give to each monomial $a_ix^i$ weight $w(a_ix^i):=e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_i)+i$. Arrange $w(a_ix^i)$ as an increasing sequence $n \mapsto w(a_{i_n}x^{i_n})$. Then the sequence $$\tilde{\sigma_j}(g(x)):=\{(w(a_{i_n}x^{i_n}),v_p(i_n))\}_{n:w(a_{i_n}x^{i_n}) \leqslant e}$$ is an element of $$\mathcal{S}_{e,\text{stop}}(\rho_{e,p},e_j,q). $$ One can now see that the pair $(I_{g(x)},\beta_{g(x)})$ defined in the Introduction right before Theorem \ref{valuation coefficients} is simply the jump set of the shooting game $\tilde{\sigma}_j(g(x))$. We now explain more closely how one calculates this pair. It is clear that the smallest weight is precisely $e_j=w(x^{e_j})$, consistently with the fact that in $\tilde{\sigma}_j(g(x))$ the rabbit is supposed to start from $e_j$. So we start with $\alpha_0=e_j$. Next, given $\alpha_h$ (thus the rabbit being at $\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_h})e_j+\alpha_h$), to obtain a larger weight, either we find other weights that are contained in the interval $[\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_h})e_j,(\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_h})+1)e_j ]$ or there are no such other weights. In the first case the contribution comes only from the weights $\alpha$ that are larger than $\alpha_h$ (otherwise the weight is smaller). 
Among these, in order to have a change of shooters and thus a contribution to the jump set, we are only interested in those requiring a smaller shot-length, i.e. with smaller $\text{v}_{\mathbb{Q}_p}(\alpha_h)$, in good harmony with rule $(3)$ of the shooting game. Thus the first such weight with smaller $\text{v}_{\mathbb{Q}_p}$ is precisely where the shooter is changed. In the second case, the weight will be larger anyway, thus (as long as larger weight matters) we are now interested in examining all $\alpha$ with $\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha})>\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_h})$. Again, among these we are only interested in those where $\text{v}_{\mathbb{Q}_p}(\alpha)$ becomes smaller. The smallest such weight, again, is the first place where the shot length became smaller and a new shooter came in giving the next contribution to the jump set of the shooting game. We formalize this explanation in the following procedure.
\emph{Procedure.} \label{Procedure Eis}Let $g(x):=x^{e_j}+\sum_{i=0}^{e_j-1}a_ix^i \in \text{Eis}(e_j, \mathbb{Q}_q(\zeta_{p^{j+1}}))$. Set $\alpha_0=e_j$. Given $\alpha_h$, construct $\alpha_{h+1}$ as follows. Search if there is $\alpha$ such that $\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha})=\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_h})$ and $\alpha \geqslant \alpha_h$. If such an $\alpha$ exists, search if there is among them one with $\text{v}_{\mathbb{Q}_p}(\alpha)<\text{v}_{\mathbb{Q}_p}(\alpha_h)$. If there is such $\alpha$, pick the smallest such $\alpha$ and declare $\alpha_{h+1}=\alpha$. If no such $\alpha$ exists, then look if there is an $\alpha$ such that $\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha})>\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_{h}})$ and $e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha})<e$. If no such $\alpha$ exists then set $\alpha_{h+1}=\alpha_h$. Otherwise let $d$ be the next valuation that attains the above constraints. Look if there is $\alpha$ with $\text{v}_{\mathbb{Q}_p}(\alpha)<\text{v}_{\mathbb{Q}_p}(\alpha_h)$ and $\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha})=d$, in that case take the smallest such $\alpha$ as $\alpha_{h+1}$. If there is none, go to the next valuation with the above constraints and do the same search, iterating until you either have to set $\alpha_{h+1}=\alpha_h$, or you have found an $\alpha_{h+1} \neq \alpha_h$. In this way the sequence $\{\alpha_i\}$ is produced. With this notation, writing $I_{\tilde{\sigma}_j(g(x))}=\{i_1<\ldots <i_s\}$, we have that $\beta_{\tilde{\sigma}_j}(i_k)=v_p(\alpha_k)$ and $p^{\beta_{\tilde{\sigma}_j}(i_k)}i_k=e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_k})+\alpha_k$.
Let $j$ be a nonnegative integer. Recall that for an integer $e$ in $p^j(p-1)\mathbb{Z}_{\geqslant 1}$ we define $e_j:=\frac{e}{p^j(p-1)}$. \begin{theorem}\label{valuation coefficients more general} Let $j$ be a nonnegative integer and let $e \in p^j(p-1)\mathbb{Z}_{\geqslant 1}$. For any $g(x) \in \emph{Eis}(e_j, \mathbb{Q}_q(\zeta_{p^{j+1}}))$, we have that the set $$i_1<\ldots <i_s, $$ described in the above procedure, is equal to the set $$ \{i \in I_{\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/g(x)}:p^{\beta_{\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/g(x)}(i)-j-1}i<e \} $$ and for each $k \in \{1,\ldots ,s\}$ we have that $$\beta_{\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/g(x)}(i_k)=\emph{v}_{\mathbb{Q}_p}(\alpha_k)+j+1. $$ \begin{proof} We proceed by looking more closely at the construction of the maps $\sigma_j$ in the proof of Theorem \ref{counting rephrased}. One first crucial ingredient is that, thanks to the shape of the conclusion, we can disregard, in the setting of the proof of Theorem \ref{counting rephrased}, monomials with weights larger than $e$, so that we can perform $p$-th powering as if we were in a characteristic $p$ field. Keeping this in mind one sees from the construction of the sequence $g_n(x)$ in the proof of Theorem \ref{counting rephrased}, that given an $\alpha_k$ as above, then as long as $n$ satisfies $w(g_n(x))<e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_k})+\alpha_k$, then for each positive integer $w_0$ with $$w(g_n(x))<w_0 \leqslant e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_k})+\alpha_k, $$ and $$\text{v}_{\mathbb{Q}_p}(w_0) \leqslant \text{v}_{\mathbb{Q}_p}(\alpha_k), $$ one sees that
$$F_{w_0,n}((\varepsilon_{k,i})_{e_jk+i<w_0})=0.$$ This is seen by induction on $n$ and direct inspection. The key observation is that, once we can disregard the multiples of $p$ in $p$-powering, when we perform a shot, as in the proof of Theorem \ref{counting rephrased}, it sends all the monomials having weight \emph{smaller} than $e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_k})+\alpha_k$ only to monomials having an index with \emph{larger} $p$-adic valuation. With this, the formula appearing at the end of the proof of Theorem \ref{counting rephrased} gives $$\varepsilon_{q',h}(n)=[F_{w_0,n}((\varepsilon_{k,i})_{e_jk+i<w_0})+\varepsilon_{q',h}]_{\text{Teich}}=[\varepsilon_{q',h}]_{\text{Teich}}, $$ where $w_0=e_jq'+h$. In terms of the shooting game $\sigma_j(g(x))$, this means precisely that the rabbit will visit the position $e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_k})+\alpha_k$, and that all the shots used before that event are of length strictly larger than $\text{v}_{\mathbb{Q}_p}(\alpha_k)$. Indeed the rabbit does not visit any of the positions $w_0<e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_k})+\alpha_k$ with $\text{v}_{\mathbb{Q}_p}(w_0) \leqslant \text{v}_{\mathbb{Q}_p}(\alpha_k)$. But these are precisely the positions where a stop of the rabbit would have given a shot of length at most $\text{v}_{\mathbb{Q}_p}(\alpha_k)$ before the position $e_j\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_{\alpha_k})+\alpha_k$ was reached.
\end{proof} \end{theorem} Observe that from the \emph{Procedure} it is clear that the set of Eisenstein polynomials $g(x)$ such that the full jump sets of the field $\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/g(x)$ can be reconstructed from Theorem \ref{valuation coefficients more general}, consists precisely of those polynomials $g(x)$ having a coefficient $a_i$, with $(i,p)=1$, such that $\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(a_i)<\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(p)=p^{j}(p-1)$. This condition is precisely equivalent to the condition on the different $$\text{v}_{\mathbb{Q}_q(\zeta_{p^{j+1}})}(\delta(\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/g)/\mathbb{Q}_q(\zeta_{p^{j+1}}))<p^j(p-1). $$
For $j=0$, this shows Theorem \ref{valuation coefficients} from the Introduction. The only case where this is an empty set of Eisenstein polynomials is if $p=2$, $j=0$ and $2|e$: one would get a non extremal coefficient of an Eisenstein polynomial being a unit, which is impossible by definition. For all other values of $p$ and $j$ one obtains, with Theorem \ref{valuation coefficients more general}, a positive proportion of the Eisenstein polynomials where the jump set can be read off completely from the valuation of the coefficients of the polynomial, also in a fairly easy way. For $p$ or $j$ getting large the volume of this region gets quickly pretty large. In particular, if $(p,j) \neq (2,0)$, we next see that one can identify the set of Eisenstein polynomials giving the \emph{most likely} jump set. \begin{definition}\label{strongly Eisenstein} If $K$ is a local field, $d \geqslant 2$ an integer, and $g(x):=x^d+\sum_{i=0}^{d-1}a_ix^i \in \text{Eis}(d,K)$, we say that $g(x)$ is strongly Eisenstein if $\text{v}_K(a_1)=1$. \end{definition} The following is a very special case of Theorem \ref{valuation coefficients more general}. Recall that if $e \in p^j(p-1)\mathbb{Z}_{\geqslant 1}$ we have the notation $e_j:=\frac{e}{p^j(p-1)}$. \begin{theorem}\label{strongly Eisenstein characterized} Let $p,j$, such that $(p,j) \neq (2,0)$. Let $e \in p^{j+1}(p-1)\mathbb{Z}_{\geqslant 1}$, $f$ a positive integer and set $q:=p^f$. Then $g(x) \in \emph{Eis}(e_j, \mathbb{Q}_q(\zeta_{p^{j+1}}))$ is strongly Eisenstein if and only if $$I_{\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/g(x)}=\{\frac{e}{p^{\emph{v}_{\mathbb{Q}_p}(e)}(p-1)},e_j+1\} $$ with $$\beta_{\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/g(x)}(\frac{e}{p^{\emph{v}_{\mathbb{Q}_p}(e)}(p-1)})=\emph{v}_{\mathbb{Q}_p}(e)+1, \ \beta_{\mathbb{Q}_q(\zeta_{p^{j+1}})[x]/g(x)}(e_j+1)=j+1. 
$$ \end{theorem} Observe that this shows explicitly that the above jump set, $\{\frac{e}{p^{\text{v}_{\mathbb{Q}_p}(e)}(p-1)},e_j+1\}$ with $\frac{e}{p^{\text{v}_{\mathbb{Q}_p}(e)}(p-1)} \mapsto \text{v}_{\mathbb{Q}_p}(e)+1$ and $e_j+1 \mapsto j+1$, occurs with probability $\frac{q-1}{q}$ among all totally ramified degree $e_j$-extensions of $\mathbb{Q}_q(\zeta_{p^{j+1}})$: this is the jump set occurring with highest probability. We know that this jump set occurs with probability $\frac{q-1}{q}$ also from Theorem \ref{counting rephrased}. So in particular this fact is true also for $(2,0)$. To see that explicitly for $e=2$, observe that among the $6$ totally ramified quadratic extensions of $\mathbb{Q}_2$, the only ones not giving the above jump set are $\mathbb{Q}_2(\zeta_4)$ and $\mathbb{Q}_2(\zeta_4)(1)=\mathbb{Q}_2(\sqrt{3})$; they have the same mass (as we saw in general) and it equals $\frac{1}{4}$, hence the remaining mass equals $\frac{1}{2}$. But we can immediately see that in this case the same conclusion of Theorem \ref{strongly Eisenstein characterized} does not hold. Consider for instance $g(x):=x^2+2x+2 \in \text{Eis}(2, \mathbb{Q}_2)$: it is a strongly Eisenstein polynomial. But $\mathbb{Q}_2[x]/g(x)$ is isomorphic to the extension $\mathbb{Q}_2(\zeta_{4})$, whose jump set is merely $\{1\}$, with $1 \mapsto 2$, in contrast to the conclusion of Theorem \ref{strongly Eisenstein characterized}. Thus in Theorem \ref{strongly Eisenstein characterized} the requirement $(p,j) \neq (2,0)$ cannot be dropped, and so in particular the assumption in Theorem \ref{valuation coefficients} of being strongly separable cannot be avoided.
\section{Filtered inclusions of principal units} \label{generalization of I beta} In this section we explain how to attach to any strongly separable extension of local fields, $L/K$, a $\rho_{\infty,p}$-jump set $(I_{L/K},\beta_{L/K})$, which is an invariant of the filtered inclusion $$U_{\bullet}(K) \subseteq U_{\bullet}(L). $$ Moreover for $K=\mathbb{Q}_{q}(\zeta_p)$, we will have that $$(I_{L/K},\beta_{L/K})=(I_L,\beta_L). $$ As we shall see, the fact that the extension is strongly separable will force $(I_{L/K},\beta_{L/K})$ to be a $\rho_{e_L,p}$-jump set as well for $e_L=v_L(p)$.
We will begin by attaching to any $u \in U_1(K)-U_2(K)$ a $\rho_{e_L,p}$-jump set $(I_{L/K}(u),\beta_{L/K}(u))$. We will immediately see that it is also a $\rho_{\infty,p}$-jump set, thanks to strong separability. Finally we will see the main effect of assuming strong separability: the jump set $(I_{L/K}(u),\beta_{L/K}(u))$ is independent of the choice of $u \in U_1(K)-U_2(K)$ and can be computed, by means of an immediate generalization of Theorem \ref{valuation coefficients}, from an Eisenstein polynomial giving the extension $L/\tilde{K}$, where $\tilde{K}$ is the largest unramified extension of $K$ in $L$.
Let $u \in U_1(K)-U_2(K)$. Recall from Section \ref{reading jump inside} that we can attach to $u$ the function $g_{u,U_{\bullet}(L)}$. We have the following. The proof is along the same lines seen in Proposition \ref{reconstruction process} and is therefore omitted. \begin{proposition} \label{boh} There exists a unique jump set $(I_{L/K}(u),\beta_{L/K}(u))$ such that $g_{u,U_{\bullet}(L)}$ breaks at the elements of $\emph{Im}(\beta_{L/K}(u))-1$. Moreover if $i \in I_{L/K}(u)$, then $$g_{u,U_{\bullet}(L)}(i+1)=\rho_{e_L,p}^{\beta_{L/K}(u)(i)-1}(i). $$ \end{proposition} In the torsion-free case, the jump set $(I_{L/K}(u),\beta_{L/K}(u))$ has a more familiar interpretation. In what follows the function $\text{filt-ord}$ (as introduced in Proposition \ref{bijection orbits jumps}) will always be with respect to the filtered module (denoted as) $U_{\bullet}(L)$. \begin{proposition} Let $u,L,K$ as above and suppose moreover that $\mu_p(L)=\{1\}$. Then $$\emph{filt-ord}(u^p)=(I_{L/K}(u),\beta_{L/K}(u)). $$ Moreover for $u_1,u_2 \in U_1(K)-U_2(K)$ we have that $$(I_{L/K}(u_1),\beta_{L/K}(u_1))=(I_{L/K}(u_2),\beta_{L/K}(u_2)), $$ if and only if $u_1,u_2$ are in the same orbit under $\emph{Aut}_{\emph{filt}}(U_{\bullet}(L))$. \begin{proof} This is a simple consequence of Theorem \ref{no torsion} and Proposition \ref{bijection orbits jumps} combined. \end{proof} \end{proposition} We now show that the jump set of Proposition \ref{boh} is independent of the choice of $u \in U_1(K)-U_2(K)$ for all strongly separable extensions $L/K$. Recall the way we attached to any strongly separable Eisenstein polynomial $g(x)$ a jump set $(I_{g(x)},\beta_{g(x)})$ right after Theorem \ref{counting} in the Introduction. \begin{theorem} \label{independence} Let $L/K$ be any strongly separable extension of local fields. Let $u_1,u_2 \in U_1(K)-U_2(K)$. Then $$(I_{L/K}(u_1),\beta_{L/K}(u_1))=(I_{L/K}(u_2),\beta_{L/K}(u_2)). 
$$ Denote by $(I_{L/K},\beta_{L/K}):=(I_{L/K}(u),\beta_{L/K}(u))$ for any $u \in U_1(K)-U_2(K)$. Denote by $\tilde{K}$ the maximal unramified extension of $K$ in $L$, and let $g(x)$ be any Eisenstein polynomial in $\tilde{K}[x]$ giving the extension $L/\tilde{K}$. We have that $$(I_{L/K},\beta_{L/K})=(I_{g(x)},\beta_{g(x)}). $$ \begin{proof} This can be shown by precisely the same argument used in the proof of Theorem \ref{valuation coefficients more general}. \end{proof} \end{theorem} In particular we find the following corollary. \begin{corollary} \label{all in same orbit} Let $L/K$ be a strongly separable extension of local fields, with $\mu_p(L)=\{1\}$. Then $U_1(K)-U_2(K)$ is contained in one orbit under $\emph{Aut}_{\emph{filt}}(U_{\bullet}(L))$. Call this orbit $\mathcal{O}_{L/K}$. The set $\mathcal{O}_{L/K}$ can be also characterized as follows $$\mathcal{O}_{L/K}=\{u \in U_{\bullet}(L): u^p \in \emph{filt-ord}^{-1}((I_{L/K},\beta_{L/K}))\}. $$ \end{corollary} In positive characteristic the statement further simplifies. \begin{corollary} Let $L/K$ be a separable extension of local fields with $\text{char}(K)=p$. Then $U_1(K)-U_2(K)$ is contained in one orbit under $\emph{Aut}_{\emph{filt}}(U_{\bullet}(L))$. Call this orbit $\mathcal{O}_{L/K}$. The set $\mathcal{O}_{L/K}$ can be also characterized as follows $$\mathcal{O}_{L/K}=\{u \in U_{\bullet}(L): u^p \in \emph{filt-ord}^{-1}((I_{L/K},\beta_{L/K})) \}. $$ \end{corollary} \section{Jump sets under field extensions} \label{Jump sets under field extensions} Let $K_1/\mathbb{Q}_p(\zeta_p)$ be a finite extension. Fix a positive integer $d$. Consider the following natural question. \\ \\ \textbf{Question}: Which extended admissible $\rho_{de_{K_1},p}$-jump sets are realizable as $(I_{K_2},\beta_{K_2})$ for some totally ramified extension $K_2/K_1$ of degree $d$?\\
In case $(d,p)=1$ the answer is very easy. \begin{proposition} \label{The tame case} Let $K_2/K_1$ be totally ramified degree $d$ extension, with $(d,p)=1$. Then $$I_{K_2}=dI_{K_1} $$ with $$\beta_{K_2}(di)=\beta_{K_1}(i), $$ for each $i \in I_{K_1}$. \begin{proof} First notice that, since $(d,p)=1$, we have $dT_{\rho_{e,p}}^{*} \subseteq T_{\rho_{de,p}}^{*}$. Moreover we notice that the assignment $(I_{K_2},\beta_{K_2})$ given in the statement is clearly an extended $\rho_{de_{K_1},p}$-jump set. Next we write $$\prod_{i \in I_{K_1}}u_i^{p^{\beta_{K_1}(i)-1}}=\zeta_p, $$ with $u_i \in U_i(K_1)-U_{i+1}(K_1)$ for each $i \in I_{K_1}$, and $\frac{pe_{K_1}}{p-1} \in I_{K_1}$ implies $u_{\frac{pe_{K_1}}{p-1}} \not \in K_1^{*p}$. We thus conclude with Corollary \ref{The relation between the units} by noticing that $u_i \in U_{di}(K_2)-U_{di+1}(K_2)$ for each $i \in I_{K_1}$, and that if $\frac{pe_{K_1}}{p-1} \in I_{K_1}$ then we must have that $u_{\frac{pe_{K_1}}{p-1}} \not \in K_2^{*p}$. Indeed taking a $p$-th root of $u_{\frac{pe_{K_1}}{p-1}}$ gives an unramified degree $p$ extension of $K_1$ which would contradict both that $(d,p)=1$ and that $K_2/K_1$ is totally ramified. \end{proof} \end{proposition} The previous proof teaches us also what is the difficulty when $(d,p) \neq 1$ in answering Question. In this case the relation $$\prod_{i \in I_{K_1}}u_i^{p^{\beta_{K_1}(i)-1}}=\zeta_p, $$ cannot be directly used to calculate $(I_{K_2},\beta_{K_2})$, because $\text{v}_{K_2}(u_i-1) \not \in T_{\rho_{de_{K_1},p}}$ for each $i< \frac{pe_{K_1}}{p-1}$. Nevertheless, a more careful inspection shows that this relation can sometimes be used to extrapolate properties of $(I_{K_2},\beta_{K_2})$. This is the content of the next theorem, which, together with Theorem \ref{last guy}, contains as a very special case Proposition \ref{The tame case}. \begin{theorem} \label{constraining j.s. under ext.} Let $d$ be a positive integer and $K_2/K_1$ a degree $d$ totally ramified extension. 
Let $i \in I_{K_1}$ with $i \neq \frac{pe_K}{p-1}$. Suppose that if the set $J:=\{j \in I_{K_1}: j<i\}$ is not empty, then $$ \beta_{K_1}(\emph{max}(J))-\beta_{K_1}(i)>\emph{v}_{\mathbb{Q}_p}(d). $$ Then $$\frac{d}{p^{\emph{v}_{\mathbb{Q}_p}(d)}}i \in I_{K_2}$$ with $$\beta_{K_2}(\frac{d}{p^{\emph{v}_{\mathbb{Q}_p}(d)}}i)=\beta_{K_1}(i)+\emph{v}_{\mathbb{Q}_p}(d). $$ \begin{proof} Take $i \neq \frac{pe_{K_1}}{p-1}$ as in the assumptions of this theorem. Write $$\prod_{i' \in I_{K_1}}u_{i'}^{p^{\beta_{K_1}(i')-1}}=\zeta_p, $$ with $u_{i'} \in U_{i'}(K_1)-U_{i'+1}(K_1)$ for each $i' \in I_{K_1}$, and $\frac{pe_{K_1}}{p-1} \in I_{K_1}$ implies $u_{\frac{pe_{K_1}}{p-1}} \not \in K_1^{*p}$. Next, for each $i' \in I_{K_1}$, write $$\prod_{j \in A(i')}u_{i',j}^{p^{\beta(i',j)}}, $$ with $A(i') \subseteq T_{\rho_{K_2}}^{*}$, $\text{v}_{K_2}(u_{i',j}-1)=j$ for each $j \in A(i')$ and $\frac{d}{p^{\text{v}_{\mathbb{Q}_p}(d)}}i' \in A(i')$ with $\beta(i',j)=\text{v}_{\mathbb{Q}_p}(d)$ and $\text{v}_{K_2}(u_{i',i'}^{p^{\beta(i',i')}}-1)<\text{v}_{K_2}(u_{i',j}^{p^{\beta(i',j)}}-1)$ for each $j \in A(i')-\{i'\}$. We now proceed to expand the above expression for $\zeta_p$. Attach to each term $u_{i',j}$ the pair $(\text{v}_{K_2}(u_{i',j}-1),\beta_{K_1}(i')+\beta(i,j))$. We see that the point attached to $u_{i,i}$, which is $(\frac{d}{p^{\text{v}_{\mathbb{Q}_p}(d)}}i,\beta_{K_1}(i)+\text{v}_{\mathbb{Q}_p}(d))$, is strictly smaller, with respect to $\leqslant_{\rho_{K_2}}$, than all the other points (and hence occurs precisely once). Indeed, using that $(I_{K_1},\beta_{K_1})$ is a jump set, we see that it must be smaller than any term coming from some $u_{i'}$ with $i'>i$. On the other hand for each $i'<i$, we use the fact that $\beta_{K_2}(i')>\beta_{K_2}(i)+\text{v}_{\mathbb{Q}_p}(d)$ to conclude that the point attached to $u_{i,i}$ must be smaller than any term attached to $u_{i',j}$ with $i'<i$. This is enough to conclude with Corollary \ref{The relation between the units}.
\end{proof} \end{theorem} The case of $e_{K_1}^{*}$ requires no special assumptions and can be treated more easily in a different way. \begin{theorem} \label{last guy} Let $d$ be a positive integer and $K_2/K_1$ a degree $d$ totally ramified extension. Suppose $\frac{pe_{K_1}}{p-1} \in I_{K_1}$. Then, for $i=\frac{pe_{K_1}}{p-1}$, we have $di \in I_{K_2}$ and $\beta_{K_2}(di)=\beta_{K_1}(i)$. \begin{proof} This follows immediately from Proposition \ref{when star is in for tot ram} and Proposition \ref{when the star is in I}. \end{proof} \end{theorem} \begin{remark} In the very special case $K_1=\mathbb{Q}_q(\zeta_p)$ one recovers the restriction that $(I_{K_2},\beta_{K_2})$ must be an \emph{admissible} extended $\rho_{d,p}$-jump set as a very special case of Theorem \ref{constraining j.s. under ext.}, see Theorem \ref{realizable j.s. are realized}. \end{remark} In particular Theorem \ref{constraining j.s. under ext.} implies the following fact.\footnote{We take the opportunity here to signal a typo in the way this result was mentioned in \cite{de Boer--Pagano}, where the assumption of Theorem \ref{constraining j.s. under ext.} and the conclusion of Corollary \ref{all of them} were accidentally merged in transcribing the statement. It was stated only with the assumption of Theorem \ref{constraining j.s. under ext.}, but the conclusion mentioned there is about \emph{both} consecutive indexes, which we can guarantee, instead, only under the assumption of Corollary \ref{all of them}.} \begin{corollary} \label{all of them} Let $d$ be a positive integer and $K_2/K_1$ a degree $d$ totally ramified extension. Suppose that for any two consecutive elements $i,j$ in $I_{K_1}$ (that is $(i,j) \cap I_{K_1}=\emptyset$) we have that $$\beta_{K_1}(i)-\beta_{K_1}(j)>\emph{v}_{\mathbb{Q}_p}(d). 
$$ Then $$\frac{d}{p^{\emph{v}_{\mathbb{Q}_p}(d)}}(I_{K_1}-\{e_{K_1}^{*}\}) \subseteq I_{K_2}, $$ with $$\beta_{K_2}(\frac{d}{p^{\emph{v}_{\mathbb{Q}_p}(d)}}i)=\beta_{K_1}(i)+\emph{v}_{\mathbb{Q}_p}(d) $$ for each $i \in I_{K_1}-\{e_{K_1}^{*} \}$. \end{corollary}
\end{document} | arXiv | {
"id": "1810.09975.tex",
"language_detection_score": 0.7122653126716614,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Distributed Coupled Multi-Agent Stochastic Optimization}
\author{ Sulaiman A. Alghunaim and Ali H.~Sayed, ~\IEEEmembership{Fellow,~IEEE} \thanks{A short preliminary conference version appears in \cite{alghunaim2018icassp}.} \thanks{ S. A. Alghunaim is with the Department of Electrical Engineering, University of California, Los Angeles, CA 90095, USA (e-mail: salghunaim@ucla.edu). A. H. Sayed is with the Ecole Polytechnique Federale de Lausanne (EPFL), School of Engineering, CH-1015 Lausanne, Switzerland (e-mail: ali.sayed@epfl.ch). This work was supported in part by NSF grants CCF- 1524250 and ECCS-1407712. }}
\markboth{} {Shell \MakeLowercase{\textit{et al.}}: Bare Demo of IEEEtran.cls for Journals}
\maketitle
\begin{abstract} This work develops effective distributed strategies for the solution of constrained multi-agent stochastic optimization problems with coupled parameters across the agents. In this formulation, each agent is influenced by only a subset of the entries of a global parameter vector or model, and is subject to convex constraints that are only known locally. Problems of this type arise in several applications, most notably in disease propagation models, minimum-cost flow problems, distributed control formulations, and distributed power system monitoring. This work focuses on stochastic settings, where a stochastic risk function is associated with each agent and the objective is to seek the minimizer of the aggregate sum of all risks subject to a set of constraints. Agents are not aware of the statistical distribution of the data and, therefore, can only rely on stochastic approximations in their learning strategies. We derive an effective distributed learning strategy that is able to track drifts in the underlying parameter model. A detailed performance and stability analysis is carried out showing that the resulting coupled diffusion strategy converges at a linear rate to an $O(\mu)$-neighborhood of the true penalized optimizer. \end{abstract}
\begin{IEEEkeywords} Distributed optimization, diffusion strategy, stochastic optimization, coupled optimization, penalty method, multi-agent networks. \end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction} \subsection{Motivation and Problem Setup} In most multi-agent formulations of distributed optimization problems, each agent generally has an individual cost function, $J_k(.)$, and the goal is to minimize the aggregate sum of the costs subject to some constraints, namely, \begin{align}
\underset{w \in \real^M}{\text{min }}& \quad
\sum_{k=1}^N J_k(w), \ \text{s.t.} \quad
w \in \mathbb{W}_1 \cap \cdots \cap \mathbb{W}_N \label{glob1} \end{align} where $\mathbb{W}_k$ denotes a convex constraint set at agent $k$ and $N$ is the number of agents. The aggregate cost in \eqref{glob1} has one independent variable, $w\in \real^{M}$, which all agents need to agree upon \cite{6,34,35,36,56,23,24,25,29,33,5,7,28,8,27,26,37,38,39}. However, there exist many scenarios where the global cost function may involve multiple independent variables, and, moreover, each local cost may be a function of only a subset of these variables. This situation motivates us to study in this work a broader problem, where each local cost contains multiple variables that get to be chosen by the network cooperatively. Examples of applications where this general scenario arises include web page categorization \cite{14}, web-search ranking \cite{15}, disease progression modeling \cite{16}, distributed unmixing of hyperspectral data \cite{17,18}, minimum-cost flow problems \cite{19}, distributed model predictive control in smart energy systems \cite{11}, remote monitoring of physical phenomena involving discretization of spatial differential equations \cite{20}, distributed wireless acoustic sensor networks \cite{22}, distributed wireless localization \cite{31}, and distributed power systems monitoring \cite{21}.
Thus, assume we have $L$ vector variables denoted by $\{w^1,\cdots,w^L\}$, where $w^\ell \in \mathbb{R}^{M_\ell}$. Let $w \triangleq {\rm col}\{ w^1, w^2, \cdots , w^L\} \in \mathbb{R}^{M}$ denote the $L \times 1$ block column vector formed by collecting all those variables. The partitioning is used to represent the possibility of multiple independent arguments for the cost functions. Without loss of generality, we assume that the variables $\{w^\ell\}$ are distinct in that they do not share common entries.
\begin{figure}
\caption{A connected network of agents where different agents generally depend on different subsets of parameter vectors. For this example, we have $w=[w^1,w^2,w^3,w^4,w^5,w^6]$.}
\label{fig:network}
\end{figure} \noindent Let $\mathcal{I}_k$ denote the set of variable indices that affect the cost of agent $k$ -- Figure \ref{fig:network} illustrates this situation for a simple network. If we let $w_{k}$ denote the components of $w$ that affect this same agent: \eq{
w_k \triangleq {\rm col}\{w^{\ell}\}_{\ell \in \mathcal{I}_k} \in \mathbb{R}^{Q_k}, \quad Q_k \triangleq \sum_{\ell \in \cI_k} M_\ell. \label{w_I_k}} Then we are interested in determining the solution of the following optimization problem: \begin{align}
\underset{w}{\text{min }}& \ \
J^{\rm glob}(w) \triangleq \sum_{k=1}^N J_k(w_k) \label{glob2}, \\
\text{subject to }& \ \
w \in \mathbb{W}_1 \cap \cdots \cap \mathbb{W}_N \nonumber \end{align} The constraints set $\mathbb{W}_k$ is generally described by equality and inequality conditions of the form: \eq{ \mathbb{W}_{k}= \left\{ w: \begin{array}{c}
h_{k,u}(w_k)=0 ,\hspace{2 mm} u=1,.....,U_k\\
g_{k,v}(w_k)\leq 0 , \hspace{2 mm} v=1,.....,V_k \end{array} \right. } where the functions $\{h_{k,u}(\cdot)\}$ are affine and $\{g_{k,v}(\cdot)\}$ are convex.
We note that algorithms that solve \eqref{glob1} can be used to solve \eqref{glob2}. For example, this can be achieved by extending each local variable $w_k$ into the longer global variable $w$. However, this solution method would require unnecessary communications and memory allocation. This is because in \eqref{glob2} each local function contains only a subset of the global variable $w$. Therefore, solving \eqref{glob2} directly and more effectively is important for large scale networks. Conversely, we also note that algorithms that solve \eqref{glob2} are more general and can be used to solve \eqref{glob1}. To see this, let $L = 1$ and ${\cal I}_k= \{L\}$, then problem \eqref{glob2} will depend only on one variable $w=w^{L}$. In this case, the cost function becomes $J^{\rm glob}(w)=\sum_{k=1}^N J_k(w)$, which is of the same exact form as problem \eqref{glob1}. \begin{assumption}(\textrm{\bf{Strongly convex aggregate cost}})\label{feasible assump}: Problem \eqref{glob2} is feasible and has a strongly-convex cost: \eq{
\left(\grad_w J^{\rm glob}(x)-\grad_w J^{\rm glob}(y)\right)\tran (x-y) \geq \nu \|x-y\|^2 \label{stronglyconvex_bound} } for some constant $\nu>0$.
\qd \end{assumption} \noindent Strong convexity is often assumed in the literature and is not a limitation since regularization is normally used to avoid ill-conditioning and it helps ensure strong convexity. Under this condition, a unique solution $w^o \in \mathbb{W}$ exists. We denote its block entries by \eq{ w^o= {\rm col}\{w^{1,o}, \cdots ,w^{L,o} \} \define \argmin_{w \in \mathbb{W}_1 \cap \cdots \cap \mathbb{W}_N} J^{\rm glob}(w) \label{optimal-original} } \begin{assumption} \label{cost-assump} (\textrm{\bf{Individual costs}}): It is assumed that each cost function, $J_k(w_k)$, is differentiable with a Lipschitz continuous gradient: \eq{
\big\|\grad_{w_k}J_{k}(x_k)-\grad_{w_k}J_{k}(y_k)\big\| \leq \delta_k \|x_k-y_k\| \label{indv-cost-1} }
for some positive constant $\delta_k$.
\qd \end{assumption} \noindent \begin{remark}{\rm \textbf{(Stochastic costs):} \label{remark-special-case} The individual risk functions in Problem \eqref{glob2} can be stochastic in nature, such as $J_k(w_k)=\Ex Q(w_k;\x_{k,i})$, where $\Ex$ denotes the expectation operator over the distribution of the data, $Q(\cdot)$ is some convex loss function in $w_k$, and $\x_{k,i}$ is generic notation for the data at agent $k$ at time $i$. Possible choices for $Q(\cdot)$ are quadratic losses, logistic losses, exponential losses, etc. Generally, the distribution of the data will not be available so that the local risk functions, $\{J_k(w_k)\}$ are not known beforehand by the agents. Our technique for solving Problem \eqref{glob2} will rely on the use of stochastic approximations for the gradient vectors of these risk functions, such as using the gradient of loss functions directly at every iteration (see \eqref{gradient-model} further ahead): \eq{ \widehat{\nabla_{w_k}} J_k(\bzeta_{k,i}) \define \grad_{w_k} Q(\bzeta_{k,i};\x_{k,i}) \label{gradient-Q} } where $\bzeta_{k,i}$ will denote an intermediate estimate for $w_k$ at time $i$. These approximations introduce gradient noise, which refers to the difference between the true gradient vector and the above approximation. The challenge later will be to establish convergence despite the presence of this persistent noise. A special case of Problem \eqref{glob2} was treated in \cite{54}, which did not include constraint sets $\{\mathbb{W}_k\}$ and assumed perfect knowledge of $J_k(\cdot)$ (i.e., there was no gradient noise).} \qd \end{remark} \begin{remark}{\rm \textbf{(Useful case):} \label{remark-useful-case} We illustrate a special case of \eqref{glob2}, which is common in many applications. 
Consider the scenario where every agent wants to estimate its own variable $w^k$ and is coupled with every neighboring agent, i.e., $L=N$ and $\cI_k=\cN_k$ so that $w_k={\rm col}\{w^\ell\}_{\ell \in \cN_k}$, where $\cN_k$ denotes the neighborhood of agent $k$ (including agent $k$). To explicitly indicate that $w_k$ and the corresponding constraint depend exclusively on the neighborhood variables, we let \eq{ w_{\cN_k}\define w_k , \ \mathbb{W}_{\cN_k} \define \mathbb{W}_{k} }
Then, problem \eqref{glob2} becomes \begin{align}
\underset{w}{\text{min }}& \quad
\sum_{k=1}^N J_k(w_{\cN_k})\label{glob-application}
, \ \text{s.t. } \quad
w \in \mathbb{W}_{\cN_1} \cap \cdots \cap \mathbb{W}_{\cN_N} \end{align} Many important applications fit into problem \eqref{glob-application}. For instance, such formulations arise in wireless localization where each agent aims to estimate its position based on distance measurements from its neighbors. Two other examples are distributed model predictive control \cite{52} and minimum cost flow problems \cite{19}.} \qd \end{remark} \noindent \begin{example}{\rm \textbf{(Power system state estimation):} \label{example-application}
We describe one example in power system state estimation \cite{21}, which is a special case of formulation \eqref{glob2}. Thus, consider a system consisting of $N$ interconnected sub-systems with each sub-system consisting of some subset of buses (or edges). Let $w=[w^1,\cdots,w^L]$ denote the state of the system (e.g., voltages and currents across all buses). Suppose each subsystem collects measurements related to the voltages and currents across its local buses and voltages and currents across the interconnection between neighboring sub-systems (see Figure \ref{fig:power-system}). \begin{figure}
\caption{Two neighboring sub-systems sharing states across their interconnection, i.e., buses $k_1$-$s_1$ and $k_3$-$s_3$.}
\label{fig:power-system}
\end{figure}
We let $w_k$ denote a vector that collects the states $\{w^\ell\}$ of system $k$ (i.e., voltages and currents across the buses of system $k$ and across the buses to its neighboring subsystems). Then, the goal of each subsystem is to estimate the states $w_k$ from $S_k$ observations: \eq{ y_k=H_k w_k + v_k } where $H_k \in \real^{S_k \times Q_k}$ is the measurement matrix and $v_k \in \real^{S_k}$ is a zero-mean measurement noise with known covariance matrix. One way to estimate the $\{w_k\}$ is by solving the following problem: \begin{align}
\underset{w}{\text{min }}& \quad
\sum_{k=1}^N \|y_k-H_k w_k\|^2\label{power-system-estim} ,
\ \text{s.t. } \ \
w \in \mathbb{W}_1 \cap \cdots \cap \mathbb{W}_N \end{align} where $\{\mathbb{W}_k\}$ are convex sets that capture some prior information about the $\{w_k\}$. Now, since neighboring agents measure some similar quantities across their interconnections, the vectors $w_k$ and $w_s$ are expected to partially overlap if $s \in \cN_k$, and hence this problem is a special case of \eqref{glob2}. }
\qd \end{example} \noindent \begin{example}{\rm \textbf{(Network flow optimization)\cite{19}:} We provide a second example that fits into problem \eqref{glob2}; a third example involving model-fitting when different subsets of data are dispersed over different agents is discussed in the simulations section. Consider a directed network with $N$ agents and $L$ links. Let $w^\ell$ denote the net flow in link $\ell$. Let $b_k$ denote an external supply (or demand) of flow for node $k$ such that $\sum_{k=1}^N b_k=0$ (i.e., flows entering the network match the flows leaving the network). Let $\cI_k$ denote the links connected to node $k$ so that $w_k={\rm col}\{w^\ell\}_{\ell \in \cI_k}$ denotes the vector of flows across the links connected to node $k$. Then, we can formulate the problem where the network of agents is interested in solving:
\begin{align}
\underset{w}{\text{min}} \
\sum_{k=1}^N J_k(w_k) , \quad
\text{subject to }& \quad a_k\tran w_k =b_k, \quad \forall \ k \label{appl-flow} \\ & \quad w \in \mathbb{W}_1 \cap \cdots \cap \mathbb{W}_N \nonumber \end{align} where $a_k\tran w_k =b_k$ is a flow conservation constraint such that $a_k$ is a vector with entries $+1$ and $-1$ with $+1$ at the position of entering flows and $-1$ at the position of leaving flows -- see Figure \ref{fig:flow-system}. Moreover, $J_k(\cdot)$ is some convex function and the $\{\mathbb{W}_k\}$ denote some convex constraints such as link capacity constraints -- see \cite{19}. \begin{figure}
\caption{An illustrative example to show how the constraint $a_k\tran w_k =b_k$ is formed.}
\label{fig:flow-system}
\end{figure}
Now we note that it is usually assumed that $b_k$ is measured exactly. However, in practice noise is usually present in $b_k$ and thus it can be modeled as $\b_k(i)=a_k\tran w_k+\v_k(i)$ where $\v_k(i)$ is some unknown measurement noise. If this is the case, then the constraint $b_k=a_k\tran w_k$ cannot be satisfied and one approach to address this situation is to employ a penalty method to solve instead \cite{1}:
\begin{align}
\underset{w}{\text{minimize }}& \quad
\sum_{k=1}^N J_k(w_k)+\eta \Ex\|a_k\tran w_k -\b_k(i)\|^2 \label{appl-flow-penalty} \\
\text{subject to }& \quad
w \in \mathbb{W}_1 \cap \cdots \cap \mathbb{W}_N \nonumber \end{align} for some finite large penalty $\eta>0$. } \qd \end{example} \subsection{Contribution and Related Work} There has been extensive work in the literature on solving problems of the type \eqref{glob1}, which would therefore be applicable to problem \eqref{glob2} albeit by going through the costly step of extending the local vectors, as explained before. For example, incremental strategies have been used in \cite{6,34,35,36}, consensus strategies in \cite{23,24,25,29,33}, diffusion strategies in \cite{5,7,28,8,27}, and ADMM strategies in \cite{26,37,38,39}. For sparse networks with a large number of parameters to estimate, it is much more efficient to devise distributed techniques that solve \eqref{glob2} {\em directly} rather than transform \eqref{glob2} into the form in \eqref{glob1} via vector extension. It is shown in the simulations in \cite{54} and \cite{12} that this extension technique not only increases complexity but it often degrades convergence performance as well, which we show analytically in this work.
Therefore, it is desirable to address the solution of problem \eqref{glob2} directly. Problems of this type have received less attention in the literature. For example, in deterministic formulations, ADMM techniques have been used to solve $\eqref{glob2}$ or its special case \eqref{glob-application} in \cite{21,10,12,32}. In particular, the work \cite{21} applies an ADMM method to solve a distributed power system state estimation problem of the form \eqref{glob2}, while the work \cite{12} solves \eqref{glob2} by employing an extended ADMM method to reduce communications at the expense of some stronger assumptions. Likewise, in the model predictive control literature, most of the methods used are specific for the special case \eqref{glob-application} \cite{52}. For example, to solve \eqref{glob-application} in \cite{10} another ADMM method is proposed, while \cite{32} uses an inexact fast alternating minimization algorithm; this second method is equivalent to an inexact accelerated proximal-gradient method applied to a dual problem \cite{48}. In all of these methods, a second auxiliary (sub-minimization) problem needs to be solved at each iteration, which requires an inner iteration unless a closed form solution exists. In the stochastic optimization literature, some special cases of \eqref{glob2} have also been considered. For example, the work \cite{4} focuses on multi-task unconstrained quadratic problems and employs game-theoretic techniques. In \cite{1,2} another quadratic problem is solved, where every agent has their own variable $w^k$ (i.e, $L=N$) and the agents are coupled through linear constraints with neighboring node's variables $\{w^\ell\}_{\ell \in \cN_k}$. Moreover, it is further assumed that the agents involved in a constraint are fully connected, i.e., they can communicate directly.
Since in problem \eqref{glob2} different agents are influenced by different block vectors $w^\ell$, the network will be divided into overlapping clusters and each cluster $\ell$ will involve the agents that need to agree on $w^\ell$ -- see \eqref{cluster}. Similar clustering was used in \cite{12} for the deterministic problem where the ADMM method was employed with identical penalty factors across all clusters. In our previous work \cite{54}, we studied the same deterministic case but developed instead a first-order method for solving \eqref{glob2} without constraints by relying on the exact diffusion strategy from \cite{45,55}, which unlike ADMM does not require inner minimization steps.
There are also works that deal with problem \eqref{glob1} where all agents need to agree on the same $w$, however to reduce communication, different agents transmit different blocks $\{w^\ell\}$ at each time instant \cite{arablouei2014distributed,notarnicola2017distributed}. In this case, each cluster involves different agents at each time instant and, over time, all agents will be involved in all clusters. Note also that the group diffusion algorithm used in \cite{47} deals with the problem where each agent is interested in its own minimizer $ w^{\bullet}_k=\argmin_{w} J_k(w)$ and can solve the problem individually but cooperation is used since the estimation accuracy can be enhanced by cooperation if part of the minimizers $\{w^{\bullet}_k\}$ are common across neighbors. To take advantage of this overlap, each agent assigns different weights to different blocks $w^\ell$.
In this work, we will solve problem \eqref{glob2} under a {\em stochastic} environment where agents do not necessarily know the exact gradient information but are subject to noisy perturbations. We will also employ {\em constant} step-size learning in order to endow the resulting recursions with adaptation abilities to drift in the models. It was shown in \cite{40,41} that under such scenarios, diffusion strategies have superior performance to consensus strategies and primal-dual methods over adaptive networks. The superiority is due to some inherent asymmetries that exist in the updates of these latter methods, and which cause degradation in performance when the algorithms are required to learn continually from streaming data; the details and the origin of this behavior are explained in \cite{40,41}. Additionally, it is explained in \cite{45} that diffusion strategies can be motivated by optimizing penalized costs. For these reasons, we shall employ in this work penalized diffusion methods to solve problem \eqref{glob2}. We prove that the algorithm converges linearly to an $O(\mu)$ neighborhood of the solution of a penalized cost, which can be made arbitrarily close to the solution of the original problem. One important conclusion from our analysis is that it clarifies the effect of the clustering step on the convergence rate. This was observed in \cite{12,54} through simulation and is explained analytically in this work.
While it is common in the literature to employ projection methods to solve constrained problems of the form \eqref{glob1}, by continuously projecting the iterated estimates onto the convex constraint sets, these methods nevertheless require the projection sets to be geometrically simple so that the projections can be computed efficiently. When this is not the case, penalty-based methods become more efficient. For example, in large-scale Markov decision problems (MDPs) exact dynamic programming techniques are not feasible since the computational complexity scales quadratically with the number of states. As such, in \cite{44} a stochastic penalty based method is suggested to reduce the computational cost for large MDPs. Penalty-based methods are also attractive when the constraints are stochastic in nature, as happens in statistical estimation \cite{30} and stochastic minimum-cost flow problems \cite{1}. They are also useful when the constraints are soft in that they need not be satisfied exactly or when the constraints are used to encode prior information about the unconstrained optimal value.
\noindent {\bf Notation}. We use boldface letters to denote random quantities and regular font to denote their realizations or deterministic variables. All vectors are column vectors unless otherwise stated. We use ${\rm col}\{x_j\}_{j=1}^{N}$ to denote a column vector formed by stacking $x_1, ... , x_N$ on top of each other, $\text{diag}\{x_j\}_{j=1}^{N}$ to denote a diagonal matrix consisting of diagonal entries $x_1, ... , x_N $, and $\text{blkdiag}\{X_j\}_{j=1}^{N}$ to denote a block diagonal matrix consisting of diagonal blocks $X_1, ... , X_N $. The notation $x \prec y$ ($x \preceq y$) means that each entry of the vector $x$ is less than (or equal to) the corresponding entry of the vector $y$. We use $O(\alpha)$ to indicate values of the order of the scalar $\alpha$ (i.e., $O(\alpha)=c\alpha$ for some constant $c$ independent of $\alpha$). For any set $\cZ=\{z_1,z_2,\cdots,z_r\}$, where $z_1<z_2<\cdots<z_r$ are integers, we let $U=[G_{mn}]_{m,n \in \cZ}$ denote the $r \times r$ matrix with $(i,j)$-th entry equal to $G_{z_i z_j}$. \section{Problem Formulation} \subsection{Penalized Formulation} In what follows we adjust the formulation from \cite{5} to problem \eqref{glob2}. One key difference in relation to what was studied in \cite{5} is that problem \eqref{glob2} allows individual agents to depend on {\em different} subsets of the parameter vector. That is, {\em coupling} now exists between the individual cost functions through the sharing of common sub-vectors. In contrast, in the formulation studied in \cite{5}, all agents were assumed to share the {\em same} parameter vector $w$ and were interested in reaching agreement about it. Here, instead, agents share {\em different} components of the larger vector $w$ and, through cooperation, they need to arrive at agreement. We shall develop a distributed scheme that enables this coupled learning objective and analyze its performance. 
In addition, the analysis in \cite{5} assumes each individual cost is twice-differentiable and strongly convex. Unlike \cite{5}, we do not impose any convexity assumption on the individual costs. We only require the aggregate cost to be strongly convex and Assumption \ref{cost-assump} to be satisfied. As a result the analysis becomes more challenging and is substantially different from the techniques used in \cite{5}.
One of the initial steps used in \cite{5} is to replace the constrained problem \eqref{glob2} by an unconstrained problem through the introduction of a penalty term; the purpose of this term is to penalize deviations from the constraints. Thus, we start by relaxing problem \eqref{glob2} by the following penalized form parametrized by a scalar $\eta > 0$: \begin{align}
\underset{w}{\text{minimize }}& \quad
J^{\rm glob}_{\eta}(w) \triangleq \sum_{k=1}^N J_{k,\eta}(w_k) \label{penalized_cost} \end{align} where the individual costs on the right-hand side incorporate a penalty term, as follows: \eq{ J_{k,\eta}(w_k) \triangleq J_k(w_k)+\eta \hspace{1mm} p_k(w_k)} with each penalty function given by \eq{
p_k(w_k)\triangleq \sum_{u=1}^{U_k} \delta^{\rm EP}\big(h_{k,u}(w_k)\big) +\sum_{v=1}^{V_k}\delta^{\rm IP}\big(g_{k,v}(w_k)\big) } Here, the symbols $\delta^{\rm EP}(x)$ and $\delta^{\rm IP}(x)$ denote differentiable convex functions chosen by the designer in order to penalize the violation of the constraints, namely, they are chosen to satisfy the requirements:
\eq{ \label{penalty-f} \delta^{\rm EP}(x)= \left\{\scalemath{0.9}{ \begin{array}{c}
0 ,\hspace{5.5 mm} x=0\\
>0 ,\hspace{2 mm} x\neq0 \end{array},} \right.\quad
\delta^{\rm IP}(x)= \left\{\scalemath{0.9}{ \begin{array}{c}
0 ,\hspace{5 mm} x\leq0\\
>0 ,\hspace{2 mm} \text{otherwise} \end{array}} \right.} For example, the following two continuous, convex, non-decreasing, and twice differentiable choices that satisfy \eqref{penalty-f} are given in \cite{5}: \eq{ \delta^{\rm EP}(x)= x^2, \quad \delta^{\rm IP}(x)= \max\bigg(0, {x^3\over \sqrt{x^2+\rho^2}}\bigg) } for some parameter $\rho>0$. \begin{assumption} \label{penalty-assump} (\textrm{\bf{Penalty functions}}): The penalty function $p_k(w_k)$ is convex and differentiable with a Lipschitz continuous gradient: \eq{
\big\|\grad_{w_k}p_{k}(x_k)-\grad_{w_k}p_{k}(y_k)\big\| \leq \delta_{p,k} \|x_k-y_k\| \label{penalty-cost-1} } for some positive scalar $\delta_{p,k}$.\qd \end{assumption} Since $J^{\rm glob}(w)$ is strongly convex and each $p_k(w_k)$ is convex, the cost $J^{\rm glob}_{\eta}(w)$ is also strongly convex and, thus, a unique solution exists for problem \eqref{penalized_cost}, which we denote by: \eq{ w^\star= {\rm col}\{w^{1,\star}, \cdots ,w^{L,\star} \} \define \argmin_{w^1,\cdots ,w^L} J^{\rm glob}_{\eta}(w) \label{optimal-penalized} } \subsection{Centralized Solution} We first show how to solve problem \eqref{penalized_cost} in a centralized manner, assuming the presence of a central processor with knowledge of each individual cost $J_{k,\eta}(.)$. We define: \eq{ p^{\rm glob}(w) \define \sum_{k=1}^N p_k(w_k) }
Then, the gradient vector of \eqref{penalized_cost} relative to $w = {\rm col}\{w^1, w^2, ... , w^L\}$ is given by: \eq{ \grad_w J_{\eta}^{\rm glob}(w)&=\grad_w J^{\rm glob}(w) +\eta \grad_w p^{\rm glob}(w) \nonumber \\ &= \begin{bmatrix}
\sum\limits_{k=1}^{N} \grad_{w^1}J_{k}(w_k) \\ \vdots \\
\sum\limits_{k=1}^{N} \grad_{w^L}J_{k}(w_k) \end{bmatrix} + \eta \begin{bmatrix}
\sum\limits_{k=1}^{N} \grad_{w^1}p_{k}(w_k) \\ \vdots \\
\sum\limits_{k=1}^{N} \grad_{w^L}p_{k}(w_k) \end{bmatrix} \label{centralized-gradient} } Using a gradient descent algorithm, we can solve \eqref{penalized_cost} iteratively as follows: \eq{ w_i=w_{i-1} - \mu D \grad_w J_{\eta}^{\rm glob}(w_{i-1}), \quad i \geq 0 \label{centralized-recursion} } where $\mu>0$ is a small step-size parameter and for generality, we are introducing a diagonal matrix $D={\rm diag}\{d_{\ell} I_{M_\ell}\}_{\ell=1}^L \in \real^{M \times M}$, with $\{d_{\ell}\}$ being strictly positive (the choice $D=I$ is a special case). The initialization $w_{-1}$ is arbitrary. Since the gradient vector on the right-hand side of \eqref{centralized-recursion} is the sum of two separate gradient vectors shown in \eqref{centralized-gradient}, we can split the update into two incremental steps and write \cite{5,46}: \begin{subequations} \eq{ \psi_i &\ =\ w_{i-1} - \mu \eta D \ \grad_w p^{\rm glob}(w_{i-1}) \label{central-inc1}\\ w_i&\ =\ \psi_{i} - \mu D \ \grad_w J^{\rm glob}(\psi_i) \label{central-inc2} } \label{Centralized-sol} \end{subequations}
\hspace{-2.5mm} In \eqref{central-inc1}--\eqref{central-inc2}, the vector $w_i$ is the estimate of the extended parameter $w$ at iteration $i$, while $\psi_i$ is an intermediate estimate for the same $w$. Note that the order of the incremental steps can be switched. However, in this work we consider penalizing first. \subsection{Problem Reformulation for Distributed Solution} In order to solve \eqref{penalized_cost} in a distributed manner, we first need to adjust the notation to account for one additional degree of freedom. Recall that the costs of two different agents, say, agents $k$ and $s$, may depend on the same sub-vector, say, $w^{\ell}$. Since these two agents will be learning $w^{\ell}$ over time, each one of them will have its own local estimate for $w^{\ell}$. While we expect these estimates to agree with each other over time, they nevertheless evolve separately and we need to use different notation to distinguish between them in the analysis. We do so by referring to the estimate of $w^{\ell}$ at agent $k$ by $w^{\ell}_k$ and to the estimate of $w^{\ell}$ at agent $s$ by $w^{\ell}_{s}$. In other words, we create virtual copies of the same sub-vector, $w^{\ell}$, with one copy residing at each agent. In this way, agent $k$ will evaluate iterates $w^{\ell}_{k,i}$ for $w^{\ell}$ over time $i$, and agent $s$ will evaluate iterates $w^{\ell}_{s,i}$ for the same $w^\ell$ over time $i$. As time evolves, we will show that these iterates will approach each other so that the subset of agents influenced by $w^\ell$ will reach agreement about it. With this in mind, recall that we denoted the collection of all sub-vectors that influence agent $k$ by $w_k$; defined earlier in \eqref{w_I_k}. In that definition, the sub-vectors $\{w^{\ell}\}$ influencing $J_k(\cdot)$ were used to construct $w_k$. 
In view of the new notation using virtual copies, we now redefine the same $w_k$ using the local copies instead, namely, we now write \be w_k \ \define\ \col\left\{w_k^{\ell}\right\}_{\ell \in{\cal I}_k}\;\in\real^{Q_k} \label{wk} \ee where $w_k^\ell \in \mathbb{R}^{M_\ell}$ is the local copy of the variable $w^\ell$ at agent $k$. We further let $\mathcal{C}_\ell$ denote the cluster of nodes that contains the variable $w^\ell$ in their costs:
\eq{\mathcal{C}_\ell= \{k \ |\ \ell \in \mathcal{I}_k\} \label{cluster}} We can view the cluster ${\cal C}_{\ell}$ as a smaller network (or sub-graph) where all agents in this sub-network are interested in the same parameter $w^{\ell}$. To require all local copies $w_{k}^{\ell}$ to coincide with each other, we need to introduce the constraint \be w_k^{\ell} = w_{s}^{\ell},\;\;\forall \ k,s\in{\cal C}_{\ell} \label{consensus} \ee Using relations \eqref{wk} and \eqref{consensus}, we rewrite problem \eqref{penalized_cost} as \begin{align}
\underset{w_{1},....,w_{N}}{\text{minimize }}& \quad J^{\rm glob}_{\eta}(w_1,....,w_N) \triangleq \sum_{k=1}^N J_{k,\eta}(w_k) \label{penalized_cost2} \\
\text{subject to }& \quad
w^\ell_k = w^\ell_s, \hspace{2 mm} \forall \hspace{2 mm} k, s \in \mathcal{C}_\ell, \hspace{1mm}\forall \hspace{1mm} \ell \nonumber \end{align} \noindent The following example illustrates the above construction. \\
\noindent \begin{example}{\rm Consider the network with $5$ agents shown in Figure \ref{fig:illustrative(a)}. \begin{figure}
\caption{A 5-agent network to illustrate the setting of problem \eqref{penalized_cost2}.}
\label{fig:illustrative(a)}
\label{fig:illustrative(c)}
\end{figure}
In this network, we have $w = \col\{w^1, w^2, w^3,w^4\}$, $\cI_1 = \{1,2\}, \cI_2 = \{1\}$, $\cI_3=\{1,3\}$, $\cI_4=\{1,3,4\}$, and $\cI_5=\{1,4\}$. Consider further the penalized problem: \eq{\label{xcwbsn} \hspace{-2mm}\underset{\{w^1, w^2, w^3,w^4\}}{\mathrm{min.}}\, &J_{1,\eta}(w^1, w^2) + J_{2,\eta}(w^1) + J_{3,\eta}(w^1,w^3)+\nonumber \\ &J_{4,\eta}(w^1,w^3,w^4)+J_{5,\eta}(w^1,w^4) } To design a fully distributed algorithm, we introduce $w_k^{\ell}$ as the local copy of $w^{\ell}$ at agent $k$, and rewrite problem \eqref{xcwbsn} as: \eq{\label{zw3nsdn} {\text{minimize}}\quad &J_{1,\eta}(w_1^1, w_1^2) + J_{2,\eta}(w_2^1) + J_{3,\eta}(w_3^1,w_3^3)+\nonumber \\ & J_{4,\eta}(w_4^1,w_4^3,w_4^4)+J_{5,\eta}(w_5^1,w_5^4), \nonumber\\ \text{subject\ to} \quad & w_1^1 = w_2^1=w_3^1=w_4^1=w_5^1 \nonumber \\
\quad &w_3^3 = w_4^3 \nonumber \\
\quad &w_4^4 = w_5^4 } If we introduce \eq{ w_1 &\define \col\{w_1^1, w_1^2\},\ w_2 \define \col\{w^1_2\},\ w_3 \define \col\{w_3^1, w_3^3\}, \nonumber \\ w_4 &\define \col\{w^1_4,w^3_4,w^4_4\},\ w_5 \define \col\{w_5^1, w_5^4\} \label{238sdh} } and organize the network into $L=4$ clusters as shown in Figure \ref{fig:illustrative(c)} with $\cC_1 = \{1,2,3,4,5\}$, $\cC_2=\{1\}$, $\cC_3 = \{3,4\}$, and $\cC_4 = \{4,5\}$, then problem \eqref{zw3nsdn} becomes equivalent to \eq{ \underset{w_1, w_2, w_3,w_4}{\mbox{minimize}}& \quad \sum_{k=1}^{N} J_{k,\eta}(w_k), \nonumber \\ \mbox{subject to} &\quad w_k^\ell = w_s^\ell,\ \forall \hspace{1mm} k,s \in \cC_\ell,\ \ell = 1, 2, 3,4. } \qd} \end{example} \section{Coupled Diffusion Strategy} \subsection{Cluster Combination Weights} \label{section_network_model}
To solve \eqref{penalized_cost2}, we associate weights $\{a_{\ell,sk}\}_{s,k \in \cC_\ell}$ with each cluster $\cC_\ell$ and these weights are chosen to satisfy: \eq{ &\sum_{s \in \cC_\ell} a_{\ell,sk} = 1, \quad a_{\ell,sk}=0, \ \text{if } s \notin \cN_k \cap \cC_\ell \label{cluster_left_stochastic} } \begin{assumption} (\textrm{\bf{Each cluster is a connected sub-graph}}): \label{assump:connected} The neighboring agents can communicate in both directions. Moreover, each ${\cal C}_{\ell}$ is connected. This implies that for any two arbitrary agents in cluster $\cC_\ell$, there exists at least one path with nonzero weights $\{a_{\ell,sk}\}_{s,k \in \cC_\ell}$ linking one agent to the other. We also assume that at least one self weight $\{a_{\ell,kk}\}_{k \in \cC_\ell}$ is nonzero. \qd
\end{assumption}
\noindent We remark that two agents are coupled if they share the same variable $w^\ell$ and we are only requiring the coupled agents to be connected. If all agents share the entire $w$, then all agents are coupled by $w$ and the above assumption translates into requiring the network to be strongly-connected. Assumption \ref{assump:connected} is satisfied for most networks of interest. For example, all applications that fit into problem \eqref{glob-application} given in Remark \ref{remark-useful-case} naturally satisfy this assumption, including but not limited to applications in distributed power system monitoring, distributed control, and maximum-flow --- see \cite{21,1,10}. This is due to the construction of problem \eqref{glob-application}: there exist $L=N$ clusters, where $w^\ell$ affects the neighborhood of agent $k=\ell$, and hence $\cC_k= \cN_k$ forms a star shaped graph (i.e., all agents $s \in \cC_k$ are connected through agent $k$), and hence this cluster is connected. Moreover, multitask applications satisfy this assumption \cite{4,14,15,16}.
We emphasize that Assumption \ref{assump:connected} is not limited to the case described in Remark \ref{remark-useful-case} since it can be satisfied for any connected network, as we further clarify. To begin with, independently of the clusters, let us assume that the entire network is connected. Now, if some cluster ${\cal C}_{\ell}$ happens to be unconnected, we can embed it into a larger {\em connected} cluster $\cC_{\ell}^\prime$ such that $\cC_{\ell} \subset \cC_{\ell}^\prime$. For example, consider the network shown in Figure~\ref{fig:illustrative-23hb}. \begin{figure}
\caption{\small A five-agent network with unconnected $\cC_2$ and $\cC_3$.}
\label{fig:illustrative-23hb}
\end{figure} In this network, we have \eq{ \cC_1 = \{1,2,3,4,5\}, \ \cC_2 = \{2, 4\}, \ \cC_3 = \{1, 3\}, \ \cC_4=\{5\} } In these clusters, we find that $\cC_4$ is a singleton. Therefore, $w^4$ will be optimized solely and separately by agent $5$, and no communication is needed for that variable. Cluster $\cC_1$ is connected, and agents $\{1,2,3,4,5\}$ cooperate in order to optimize $w^1$, with each agent sharing its estimate with neighbors. However, clusters ${\cal C}_2$ and $\cC_3$ have disconnected graphs. This implies that agents $2$ and $4$ cannot communicate to optimize and reach consensus on $w^2$. Likewise, for agents $\{1,3\}$ regarding the variable $w^3$. To circumvent this issue, we redefine $J_{1,\eta}(w^1, w^3)$ and $J_{2,\eta}(w^1,w^2)$ as: \eq{ J_{1,\eta}'(w^1, w^2, w^3) &\define J_{1,\eta}(w^1, w^3) + 0 \cdot w^2 \\ J_{2,\eta}'(w^1, w^2, w^3) &\define J_{2,\eta}(w^1, w^2) + 0 \cdot w^3 } By doing so, the augmented costs $J_{1,\eta}'(w^1, w^2, w^3)$ and $J_{2,\eta}'(w^1, w^2, w^3)$ now involve $w^2$ and $w^3$, respectively, and the new clusters become \eq{ \cC_2^\prime = \{1, 2, 4\}, \quad \cC_3^\prime = \{1, 2, 3\} } which are connected and satisfy $\cC_2 \subset \cC_2^\prime$ and $\cC_3 \subset \cC_3^\prime$. Therefore, in this scenario, agents $\{1,2,4\}$ will now cooperate to optimize $w^2$ with agent $1$ acting as a connection that allows information about $w^2$ to diffuse in the cluster. Likewise, for agents $\{1,2,3\}$, with agent $2$ allowing information about $w^3$ to diffuse in the cluster. A second extreme approach would be to extend each local variable $w_k$ to the global variable $w$, which reduces problem \eqref{glob2} to the formulation \eqref{glob1}. This way of embedding the clusters into larger connected clusters can be done in a distributed fashion -- see for example \cite{12}. 
\subsection{Coupled Diffusion Development} Let $N_\ell$ denote the cardinality of cluster $\cC_\ell$ and further introduce the $N_\ell \times N_\ell$ matrix $A_{\ell}$ that collects the coefficients $\{a_{\ell,sk}\}_{s,k \in \mathcal{C}_{\ell}}$, namely, \eq{ A_{\ell}\define [a_{\ell,sk}]_{s,k \in \mathcal{C}_{\ell}} \label{A_l} }
Under condition \eqref{cluster_left_stochastic} and Assumption \ref{assump:connected}, the combination matrix $A_{\ell}$ will be left stochastic and primitive, i.e., $A_{\ell}\tran \one=\one$ and there exists a large enough $j_0$ such that the elements of $A_{\ell}^{j_0}$ are strictly positive. It follows from the Perron--Frobenius Theorem \cite[Lemma~F.4]{9} that the matrix $A_{\ell}$ has a simple eigenvalue at one and all other eigenvalues are strictly less than one in magnitude. Moreover, the right eigenvector corresponding to the eigenvalue at one (the Perron vector), which we denote by $r_{\ell}$, \eq{ A_{\ell}r_\ell=r_\ell, \quad \one\tran r_\ell=1 } is such that all its entries are positive and they are normalized to add up to one.
To facilitate the derivation that follows, we shall assume for the time being that $A_{\ell}$ is a locally balanced matrix \cite{45}, namely, that: \eq{ R_\ell A_\ell\tran= A_\ell R_\ell \label{locally_balanced} } where $R_\ell \define {\rm diag}\{r_\ell(k)\}_{k \in \cC_\ell}$ is a diagonal matrix constructed from the Perron vector $r_\ell$ with $r_\ell(k)$ denoting the entry corresponding to agent $k \in \cC_\ell$. Condition \eqref{locally_balanced} is only used here to motivate the algorithm. Once derived, we will actually show that the algorithm is still convergent even when $A_\ell$ is only left stochastic but not necessarily locally balanced. To derive our proposed algorithm we state the following auxiliary result proven in \cite{45}. \begin{lemma} \label{null_v0}
Let $R$ be a diagonal matrix constructed from the Perron vector of a left stochastic matrix $A \in \mathbb{R}^{Q \times Q}$. If $A$ is a locally balanced matrix, i.e., $R A\tran= A R$, then it holds that $R- R A\tran$ is symmetric and positive semi-definite. Moreover, if we introduce the eigen-decomposition $\frac{1}{2}(R- R A\tran)=U \Sigma U\tran$, the symmetric square-root matrix $Y\triangleq U \Sigma^{1/2} U\tran$ and let:
\eq{
\cR=R \otimes I_{M}, \quad \sa=A \otimes I_{M}, \quad \mathcal{Y} = Y \otimes I_M
}
then, for primitive $A$ and any block vector $\ssx=\text{\em col}\{x^1,...,x^{Q}\}$ in the nullspace of $\cR-\cR \sa\tran $ with entries $x^q \in \mathbb{R}^{M}$ it holds that:
\eq{
\mathcal{Y}\ssx=0 \iff (\cR-\cR \sa\tran )\ssx=0 \iff x^1=x^2=...=x^{Q}
\label{nullspace}} \qd \end{lemma} \noindent Lemma \ref{null_v0} allows us to rewrite \eqref{penalized_cost2} in an equivalent form that is amenable to distributed implementations. First, we introduce \eq{\label{sadfads}
\sw^{\ell} &\triangleq \text{col}\{w^{\ell}_k\}_{k \in \mathcal{C}_{\ell}} \in \real^{N_\ell M_\ell},
} which is the collection of all the local copies of $w^\ell$ across the agents in cluster $\mathcal{C}_\ell$. With this notation, we rewrite the cost function in problem \eqref{penalized_cost2} as \eq{\label{equivalent-cost-function}
\cJ(\sw^1, \sw^2, \cdots, \sw^L)+ \ \eta \cP(\sw^1, \sw^2, \cdots, \sw^L) } where \eq{ \cJ(\sw^1, \sw^2, \cdots, \sw^L) &\define \sum_{k=1}^{N} J_{k}(w_k) \label{cost-cal-J} \\ \cP(\sw^1, \sw^2, \cdots, \sw^L) &\define \sum_{k=1}^{N} p_{k}(w_k) \label{cost-cal-P} } Next we use Lemma \ref{null_v0} to rewrite the constraints of problem \eqref{penalized_cost2} in an equivalent manner. We appeal to Lemma \ref{null_v0} to decompose \eq{{1\over2} (R_\ell-R_\ell A_{\ell}\tran )=U_{\ell} \Sigma_{\ell} U_{\ell}\tran. } If we let \eq{
Y_{\ell} \triangleq U_{\ell} \Sigma_{\ell}^{1/2} U_{\ell}\tran, \quad \mathcal{Y}_{\ell} \triangleq Y_{\ell} \otimes I_{M_{\ell}}, } then using Lemma \ref{null_v0} and the definition of $\sw^\ell$ in \eqref{sadfads} we have \eq{\label{equi-constraint}
w_k^\ell = w_s^\ell,\ \forall \ k,s \in \cC_\ell \Longleftrightarrow \cY_\ell \sw^\ell = 0,\quad \forall \hspace{1mm} \ell. } Using relations \eqref{equivalent-cost-function} and \eqref{equi-constraint}, we can rewrite problem \eqref{penalized_cost2} equivalently as \begin{align} \underset{\ssw^1,....,\ssw^L}{\text{minimize }}& \quad \cJ(\sw^1, \cdots, \sw^L)+\eta \cP(\sw^1, \cdots, \sw^L) \label{glob_exact0} \\ \text{subject to }& \quad \mathcal{Y}_{\ell}\sw^{\ell}=0, \hspace{1 mm} \forall \hspace{1 mm} \ell\nonumber \end{align} To rewrite problem \eqref{glob_exact0} more compactly, we introduce \eq{ \sw &\define \text{col}\{\sw^{\ell}\}_{\ell=1}^{L} \in \real^Q \label{wcaligraphic} \\ \mathcal{Y} &\define \text{blkdiag}\{\mathcal{Y}_{\ell}\}_{\ell=1}^L, \
\\
\cJ(\sw) &\define \cJ(\sw^1, \cdots, \sw^L) \label{network-cost-J} \\
\cP(\sw) &\define \cP(\sw^1, \cdots, \sw^L) \label{network-cost-P} } where $Q\triangleq \sum\limits_{\ell=1}^L N_{\ell}M_{\ell}$. Then, problem \eqref{glob_exact0} becomes: \begin{align} \underset{\ssw}{\text{minimize }} \quad \cJ(\sw)+\eta \cP(\sw),\hspace{1mm} \label{glob_exact} \text{s.t. } \mathcal{Y}\sw=0 \end{align} Instead of solving the constrained problem \eqref{glob_exact}, we relax it and solve the penalized version: \begin{align} \boxed{ \underset{\ssw}{\text{minimize }} \quad
\cJ(\sw)+\eta \cP(\sw)+{1\over \mu}\|\cY \sw\|^2} \label{glob_exact_penalized} \end{align} with $\mu>0$. In \eqref{glob_exact_penalized} we see that ${1 \over \mu}$ is the penalty factor used for the consensus constraint \eqref{equi-constraint} and thus the smaller the value of $\mu$ is, the closer the solutions of problem \eqref{glob_exact} and \eqref{glob_exact_penalized} become to each other \cite{42,43}. We now note: \eq{\cY^2=\text{blkdiag}\{\mathcal{Y}_{\ell}^2\}_{\ell=1}^L={1\over2}(\cR-\cR\sa\tran) } where \eq{ \cR &\define {\rm blkdiag}\{\cR_{\ell}\}_{\ell=1}^L, \quad &\cR_\ell &\define R_\ell \otimes I_{M_\ell}
\label{calblockR} \\ \sa &\define {\rm blkdiag}\{\sa_{\ell}\}_{\ell=1}^L, \quad &\sa_\ell &\define A_\ell \otimes I_{M_\ell}
\label{calblockA} }
Applying three diagonally weighted incremental gradient descent steps to problem \eqref{glob_exact_penalized}, we get: \begin{equation} \begin{cases} \begin{aligned}
\zeta_i&=\sw_{i-1} - \mu \eta \cR^{-1} \grad_{\ssw} \cP(\sw_{i-1}) \\
\psi_i&= \zeta_i -\mu \cR^{-1} \grad_{\ssw} \cJ(\zeta_{i}) \\
\sw_i&=\psi_i - \mu \cR^{-1}\left({2\over \mu} \cY^2\right)\psi_i=\sa\tran \psi_i \end{aligned} \end{cases} \label{coupled-diff-network} \end{equation} where $\mu$ is the step size. Using the definition of $\cJ(\sw)$ and $\cP(\sw)$ from \eqref{network-cost-J}--\eqref{network-cost-P}, we have: \eq{ &\scalemath{0.9}{ \grad_{\ssw} \cJ(\sw)= \begin{bmatrix} \grad_{\ssw^1} \cJ(\sw) \\ \vdots \\ \grad_{\ssw^L} \cJ(\sw) \end{bmatrix} }, \ \scalemath{0.9}{ \grad_{\ssw^\ell} \cJ(\sw)} = \text{col}\{\grad_{w_k^\ell} J_k(w_k)\}_{k \in \mathcal{C}_\ell} \label{gradient-network-cost}\\ &\scalemath{0.9}{ \grad_{\ssw} \cP(\sw)= \begin{bmatrix} \grad_{\ssw^1} \cP(\sw) \\ \vdots \\ \grad_{\ssw^L} \cP(\sw) \end{bmatrix} }, \ \scalemath{0.9}{ \grad_{\ssw^\ell} \cP(\sw)} = \text{col}\{\grad_{w_k^\ell} p_k(w_k)\}_{k \in \mathcal{C}_\ell} \label{gradient-network-penalty} } Therefore, using the definition of $\sa$ from \eqref{calblockA} and $R_\ell = {\rm diag}\{r_\ell(k)\}_{k \in \cC_\ell}$, recursion \eqref{coupled-diff-network} can be rewritten more explicitly in distributed form as listed below. \begin{subequations} \label{Coupled diffusion} \eq{ \zeta_{k,i}&=w_{k,i-1}- \mu \eta \Omega_k \grad_{w_k} p_k(w_{k,i-1}) \label{Coupled diffusion-(a)}\\ \psi_{k,i}&=\zeta_{k,i}-\mu \Omega_k \grad_{w_k} J_k(\zeta_{k,i}) \label{Coupled diffusion-(b)}\\
w^\ell_{k,i}&= \sum_{\substack{s \in \mathcal{N}_k \cap \mathcal{C}_\ell}} a_{\ell,sk}\psi^\ell_{s,i}, \hspace{2mm} \forall \hspace{1mm} \ell \in \cI_k \label{Coupled diffusion-(c)} } \end{subequations} where \eq{ \Omega_k={\rm diag} \left\{ I_{M_\ell}/ r_\ell(k) \right\}_{\ell \in \cI_k} \label{Omega_step-size} }
and $r_\ell(k)$ denotes the entry of the Perron vector $r_\ell$ corresponding to agent $k \in \cC_\ell$. In this description, the variables $\{\zeta_{k,i}, \psi_{k,i}\}$ are intermediate estimates for the vector $w_k$, which contains all the parameters that influence agent $k$. These vectors have dimension $Q_k\times 1$ each. On the other hand, the variable $w_{k,i}^{\ell}$ has size $M_{\ell}\times 1$ and is an estimate for the specific parameter of index $\ell$, i.e., $w_k^{\ell}$. Thus, note that in steps \eqref{Coupled diffusion-(a)}--\eqref{Coupled diffusion-(b)}, a traditional diagonally weighted gradient-descent step is applied by each agent using the gradients of the corresponding penalty and risk functions; these steps generate the intermediate $Q_k$-dimensional iterates $\{\zeta_{k,i},\psi_{k,i}\}$. The last step \eqref{Coupled diffusion-(c)} is a convex combination step, where each agent $k$ combines the iterates of index $\ell$ from its neighbors to construct $w_{k,i}^{\ell}$. More specifically, for every $\ell \in \cI_k$, each agent $k$ combines its entry $\psi_{k,i}^{\ell}$ with the neighboring entries $\{\psi_{s,i}^\ell \ | \ s \in \cN_k \cap \cC_\ell\}$ using weights $\{a_{\ell,sk}\}_{s \in \cN_k \cap \cC_\ell}$. It should be noted that each agent $k$ gets to choose its own combination weights. For example, let $n_{\ell,k}= \big|\mathcal{N}_k \cap \mathcal{C}_\ell \big|$ denote the number of agents that belong to $\cC_\ell$ and are neighbors of agent $k$ (including agent $k$). Then, we can use the Metropolis rule to construct the combination weights $\{a_{\ell,sk}; \ s \in \cN_k \cap \mathcal{C}_\ell,\ \ell \in \cI_k\}$ as follows \cite{9}: \begin{equation} a_{\ell,sk} = \begin{cases} \begin{aligned}
&{1 \over \max\{n_{\ell,k},n_{\ell,s}\}} ,& \quad& \text{if } s \in \mathcal{N}_k \cap \mathcal{C}_\ell,\ s \neq k \\ &1-\sum_{e \in \cN_k \cap \mathcal{C}_\ell \backslash \{k\}} a_{\ell,ek},& \quad& s=k, \\ &0, & \quad& \text{otherwise.} \end{aligned} \end{cases} \label{Metropolis} \end{equation} or we can use the averaging rule \cite{9}: \begin{equation} a_{\ell,sk} = \begin{cases} \begin{aligned}
&{1 \over n_{\ell,k}} ,& \quad& \text{if } s \in \mathcal{N}_k \cap \mathcal{C}_\ell, \\ &0, & \quad& \text{otherwise.} \end{aligned} \end{cases} \label{averaging_rule} \end{equation}
\noindent \begin{remark} {\rm \textbf{(Step-size)}\label{remark-step-size} Due to the use of left stochastic matrices, and in order for the algorithm to converge to the minimizer of the sum of costs, the step-sizes used for each entry $w^\ell_{k,i-1}$ in $w_{k,i-1}$ need to be divided by the entry of the Perron eigenvector $r_\ell(k)$ corresponding to cluster $\ell$ for agent $k$ as in \eqref{Coupled diffusion-(a)}--\eqref{Coupled diffusion-(b)}. Otherwise, the algorithm will converge to a neighborhood of a different limit point that satisfies: \eq{ \sum_{k \in \mathcal{C}_\ell} r_\ell(k) \grad_{w_k^\ell}J_{k,\eta}(w_k)=0 , \quad \forall \ \ell } For the case of $L=1$ and $\cI_k=\{L\}$, this will correspond to a Pareto optimal solution \cite{7,9}. We further remark that for many combination rules, the entries $r_\ell(k)$ are known. For example, for the Metropolis rule $r_\ell(k)=(N_\ell)^{-1}$, while for the averaging rule it is $r_\ell(k)=n_{\ell,k} \left(\sum_{s \in \cC_\ell}n_{\ell,s}\right)^{-1}$. This is not an issue since the common part can be absorbed into $\mu$. For example, for the averaging rule we have $\mu /r_\ell(k) =\mu' n_{\ell,k}^{-1} $, where now the agents need to agree on $\mu'=\mu \left(\sum_{s \in \cC_\ell}n_{\ell,s}\right)$ instead of $\mu$. For general left stochastic matrices, results already exist in the literature that can estimate the Perron entries in a distributed fashion \cite{45}. \qd } \end{remark} \section{Stochastic Analysis Setup} As mentioned before in Remark \ref{remark-special-case}, we will also allow for the possibility of stochastic risks, in which case the true gradient vectors are not available. Therefore, we introduce the gradient noise vector for each agent at time $i$: \eq{
\v_{k,i}(\bzeta_{k,i}) & \define \grad_{w_k} J_k(\bzeta_{k,i}) -\grad_{w_k} Q(\bzeta_{k,i};\x_{k,i}) \nonumber \\ & \hspace{1mm}= \grad_{w_k} J_k( \bzeta_{k,i}) - \widehat{\grad_{w_k} J}_k( \bzeta_{k,i}) \label{gradient-model}
}
that is required to satisfy certain conditions given in Assumption \ref{noisemodel:assump}. \begin{assumption} \label{noisemodel:assump} (\textrm{\bf{Gradient noise model}}): Conditioned on the past history of iterates $\cf_{i} \triangleq \{ \w_{k,j-1} : k=1 , ... , N \text{ and } j \leq i \}$, the gradient noise $\v_{k,i}(\bzeta_k)$ is assumed to satisfy: \eq{ \Ex\{\v_{k,i}(\bzeta_k)\mid \cf_{i} \} &= 0 \label{noise-model(a)} \\
\Ex\{\|\v_{k,i}(\bzeta_k)\|^2\mid \cf_{i} \} &\leq \bar{\alpha}_k \|\bzeta_k\|^2 + \bar{\sigma}_k^2 \label{noise-model(b)} } for some nonnegative constants $\bar{\alpha}_k$ and $\bar{\sigma}_k^2$.
\qd \end{assumption} We again emphasize that in this work we account for noisy gradients like \eqref{gradient-model} and, therefore, we shall incorporate the presence of the gradient noise into the analysis. In the presence of stochastic gradient constructions, the coupled diffusion algorithm \eqref{Coupled diffusion} becomes the one listed in \eqref{GM Stochastic Diffusion}. Note that we are now using boldface letters in \eqref{GM Stochastic Diffusion} to highlight the fact that the variables are stochastic in nature due to the randomness in the gradient noise component. \begin{algorithm}[H] \caption{(Coupled diffusion strategy)} {\bf Setting:} Let $\Omega_k={\rm diag} \left\{I_{M_\ell} / r_\ell(k) \right\}_{\ell \in \cI_k}$ and $\w_{k,-1}$ arbitrary. \\ {\bf For every agent $k$, repeat for $i\geq 0$:} \begin{subequations} \label{GM Stochastic Diffusion} \eq{ \bzeta_{k,i}&= \w_{k,i-1}-\mu\eta \Omega_k \grad_{w_k} p_k(\w_{k,i-1}) \label{stochastic-diff(a)}\\ \bpsi_{k,i}&=\bzeta_{k,i}-\mu \Omega_k \widehat{\grad_{w_k} J}_k( \bzeta_{k,i}) \label{stochastic-diff(b)}\\
\textrm{\bf For}& \ \textrm{\bf every block entry $\ell \in \cI_k$, combine:} \nonumber \\ & \w^\ell_{k,i}= \sum_{\substack{s \in \mathcal{N}_k \cap \mathcal{C}_\ell}} a_{\ell,sk}\bpsi^\ell_{s,i} \label{stochastic-diff(c)} } \end{subequations} \end{algorithm}
We will measure the performance of the distributed strategy by examining the mean-square-error between the random iterates $\boldsymbol \w^\ell_{k,i}$ and the corresponding optimal component from \eqref{optimal-original}, denoted by $w^{\ell,o}$. For this purpose, we first note that in terms of the optimal solution $w^{\ell,\star}$ for the penalized problem \eqref{optimal-penalized}, we can write: \eq{
&\limsup \limits_{i\rightarrow \infty}\hspace{1mm} \Ex \|w^{\ell,o} - \w^\ell_{k,i} \|^2 \nonumber \\ & \quad =\limsup \limits_{i\rightarrow \infty}
\Ex \|w^{\ell,o} -w^{\ell,\star}+w^{\ell,\star}- \w^\ell_{k,i} \|^2 \nonumber \\
&\quad \leq 2 \underbrace{\|w^{\ell,o} -w^{\ell,\star}\|^2}_{\text{Approximation Error}} +2 \limsup \limits_{i\rightarrow \infty} \Ex \|w^{\ell,\star}- \w^\ell_{k,i} \|^2 \label{difference to optimal} } The following result is proven in \cite{5} concerning the size of the first component as the size of the penalty factor becomes unbounded. \begin{theorem}(\textrm{\bf{Approaching Optimal Solution}}): Under Assumptions \ref{feasible assump}--\ref{penalty-assump}, it holds that: \eq{ \lim \limits_{\eta \rightarrow \infty}
\|w^o -w^\star\| = 0 \label{th-approachoptiml}} \label{theorem-approachoptimal} \qd \end{theorem}
\noindent Therefore, to assess \eqref{difference to optimal} we will characterize the second term $\Ex \|w^{\ell,\star}- \w^\ell_{k,i} \|^2$ and show that we can drive it to arbitrarily small values. In order to carry out the analysis we need to examine the error dynamics of the algorithm more closely.
For ease of reference we collect all the main symbols into the following table. \begin{table}[h]
\centering \caption{A listing of the main symbols and their interpretation.}
\begin{tabular}{ | c || l | }
\hline \hline
\cellcolor{gray!25} \bf Symbol & \multicolumn{1}{c|}{ \bf \cellcolor{gray!25} Meaning} \\
\hline
$\cI_k$ & The set of variable indices that influence the cost of agent $k$.
\\ \hline
$w^\ell_{k}$ & Local copy of $w^\ell$ at agent $k$. \\
\hline
$w_{k}$ & Stacks the parameters influencing agent $k$, $w_{k} \triangleq \text{col}\{w^{\ell}_k\}_{\ell \in \mathcal{I}_k}$ \\
\hline
$\mathcal{C}_\ell$ & Cluster of nodes that is influenced by the variable $w^\ell$.\\
\hline
$\sw^\ell$ & Stacks all local copies of $w^{\ell}$ across ${\cal C}_{\ell}$,
$\sw^{\ell} \triangleq \text{col}\{w^{\ell}_k\}_{k \in \mathcal{C}_{\ell}}$ \\
\hline
$\sw$ & Stacks $\sw^{\ell}$ for all parameters, $\sw\triangleq \text{col}\{\sw^{\ell}\}_{\ell=1}^L$\\
\hline
$\cJ(\sw)$ & Global risk, $\cJ(\sw)\triangleq \sum_{k=1}^N J_k(w_k)$ \\
\hline
$\cP(\sw)$ & Global penalty, $\cP(\sw)\triangleq \sum_{k=1}^N p_k(w_k)$\\
\hline \hline
\end{tabular}
\label{table-notation} \end{table} \section{Error Dynamics} \subsection{Network Error Recursion} We start by expanding \eqref{gradient-model} into its individual components: \eq{
\v_{k,i}^\ell(\bzeta_{k,i}) = \grad_{w_k^\ell} J_k( \bzeta_{k,i}) -\widehat{\grad_{w_k^\ell} J}_k( \bzeta_{k,i}) , \ \ell \in \cI_k \label{gradient-model-components}
} where $\v_{k,i}^\ell(\bzeta_{k,i})$ is the part of the gradient noise related to approximating $\grad_{w_k^\ell} J_k(\bzeta_{k,i})$. We collect the noise terms $\{\v_{k,i}^\ell(\bzeta_{k,i})\}_{k \in \cC_\ell}$ across all agents and clusters into block vectors: \eq{
\v_{i}^\ell \define {\rm col}\{\v_{k,i}^\ell(\bzeta_{k,i})\}_{k \in \cC_\ell}, \quad \v_{i} \define {\rm col}\left\{\v_i^\ell \right\}_{\ell=1}^L \label{transform-vector-noise} } Similarly, motivated by \eqref{sadfads} and \eqref{wcaligraphic} we define the network vectors: \eq{ \bsw_i^{\ell} \define \text{col}\{\w^{\ell}_{k,i}\}_{k \in \mathcal{C}_{\ell}}, \quad \bsw_i \define \text{col}\{\bsw_i^{\ell}\}_{\ell=1}^{L}, }
Incorporating the gradient noises \eqref{transform-vector-noise} into \eqref{coupled-diff-network} we obtain the network recursion of \eqref{GM Stochastic Diffusion}:
\begin{subequations} \eq{ \bzeta_i&=\bsw_{i-1} - \mu \eta \cR^{-1}\grad_{\ssw} \cP(\bsw_{i-1}) \label{network-recur-noise(a)}\\
\bpsi_i&= \bzeta_i -\mu \cR^{-1} \grad_{\ssw} \cJ(\bzeta_i)+\mu \cR^{-1} \v_i \label{network-recur-noise(b)}\\
\bsw_i&=\sa\tran \bpsi_i \label{network-recur-noise(c)}
}
\end{subequations} Now recall that problems \eqref{penalized_cost} and \eqref{glob_exact} are equivalent and, therefore, the optimal solution to \eqref{glob_exact} is given by \eq{ \sw^\star \define {\rm col}\{\sw^{\ell,\star}\}_{\ell=1}^L, \quad \sw^{\ell,\star} \define \one_{N_\ell} \otimes w^{\ell,\star} } Subtracting $\sw^\star$ from both sides of \eqref{network-recur-noise(a)}--\eqref{network-recur-noise(c)} we get: \begin{subequations} \eq{ \tzeta_i&=\tsw_{i-1} + \mu \eta \cR^{-1} \grad_{\ssw} \cP(\bsw_{i-1}) \label{zeta-error} \\ \tpsi_{i} &= \tzeta_{i} + \mu \cR^{-1} \grad_{\ssw} \cJ(\bzeta_i) - \mu \cR^{-1} \v_i \label{psi-error} \\ \tsw_{i} &= \sa\tran \tpsi_{i} \label{Werror} } \end{subequations} where $\tsw_{i-1}\define \sw^\star-\bsw_{i-1}$ denotes the error at time $i-1$ and similarly for $\{\tzeta_i,\tpsi_{i}\}$. Combining \eqref{zeta-error}, \eqref{psi-error}, and \eqref{Werror} we arrive at the following statement. \begin{lemma}(\textrm{\bf{Network error recursion}}). The network error vector evolves according to the following dynamics: \eq{ \scalemath{0.95}{ \tsw_{i}= \sa\tran \bigg(\tsw_{i-1} + \cR^{-1} \big( \mu \eta \grad_{\ssw} \cP(\bsw_{i-1})+ \mu \grad_{\ssw} \cJ(\bzeta_i) - \mu \v_i \big) \bigg) } \label{error1} } where \eq{ \scalemath{0.95}{ \bzeta_i=\bsw_{i-1} - \mu \eta \cR^{-1} \grad_{\ssw} \cP(\bsw_{i-1}) } } \qd \end{lemma} \section{Transformed Network Error Dynamics} \subsection{Similarity Transformation I} The convergence analysis of recursion \eqref{error1} is facilitated by transforming it to a convenient basis. Since each $A_{\ell}$ is left-stochastic and primitive, it admits a Jordan decomposition of the form \cite{9}: \eq{ A_\ell &\triangleq V_{\ell} J_\ell V_{\ell}^{-1} } where \eq{
V_{\ell} = \begin{bmatrix}
r_\ell & V_{1,\ell} \end{bmatrix}, \ \ J_\ell &= \begin{bmatrix}
1 & 0 \\
0 & \check{J}_{\ell}
\end{bmatrix} , \ \
V_{\ell}^{-1} = \begin{bmatrix}
\one_{N_\ell}\tran \\
V_{2,\ell}\tran
\end{bmatrix} \label{decomposition-A_l} } and the matrices $V_{1,\ell}$ and $V_{2,\ell}$ have dimensions $N_\ell \times (N_\ell-1)$. Moreover, the matrix $\check{J}_{\ell}$ is $(N_\ell-1) \times (N_\ell-1)$ and consists of Jordan blocks, with each one having the generic form (with a positive scalar $\epsilon$ replacing the usual unit entries in the lower diagonal):
\eq{ \scalemath{0.9}{ \begin{bmatrix} \lambda & &\\ \epsilon & \lambda & \\ & \ddots & \ddots & \\ & & \epsilon& \lambda \end{bmatrix}} } If we define \eq{ \mathcal{V}_{\ell} &\triangleq V_{\ell} \otimes I_{M_\ell}=\begin{bmatrix} r_\ell \otimes I_{M_\ell} & V_{1,\ell}\otimes I_{M_\ell} \end{bmatrix} \label{calV_ell}\\
\mathcal{J}_\ell &\triangleq J_\ell \otimes I_{M_\ell} = \begin{bmatrix}
I_{M_\ell} & 0 \\
0 & \check{J}_{\ell} \otimes I_{M_\ell}
\end{bmatrix} \label{calJ_ell} } then we can decompose $\sa$ from \eqref{calblockA} as: \eq{ \sa =& \underbrace{\text{ blkdiag}\{\mathcal{V}_{\ell} \}_{\ell=1}^L}_{\define \cV} \underbrace{\text{ blkdiag}\{\mathcal{J}_\ell \}_{\ell=1}^L}_{\define \mathcal{J}} \underbrace{\text{ blkdiag}\{\mathcal{V}_{\ell}^{-1} \}_{\ell=1}^L}_{\define \cV^{-1}} } We now multiply both sides of the error recursion \eqref{error1} from the left by $\cV\tran$: \eq{ \cV\tran \tsw_{i}&= \cJ\tran \bigg( \cV\tran \tsw_{i-1} + \mu \eta \cV\tran \cR^{-1} \grad_{\ssw} \cP(\bsw_{i-1}) \nonumber \\ & \quad + \mu \cV\tran \cR^{-1} \grad_{\ssw} \cJ(\bzeta_i) - \mu \cV\tran \cR^{-1} \v_i\bigg)
\label{trans-error}
} and denote the individual block entries of the various quantities in \eqref{trans-error} by \begin{align} \cV\tran \tsw_{i} ={\rm col}\left\{\begin{bmatrix} (r_\ell\tran \otimes I_{M_\ell}) \tsw_{i}^\ell \\ (V_{1,\ell}\tran \otimes I_{M_\ell}) \tsw_{i}^\ell
\end{bmatrix}\right\}_{\ell=1}^L
&\triangleq {\rm col}\left\{\begin{bmatrix} \bar{\w}_{i}^\ell \\ \cw_{i}^\ell
\end{bmatrix} \right\}_{\ell=1}^L \label{w-cal-bar-check} \\
\mu \cV\tran \cR^{-1} \grad_{\ssw} \cJ(\bzeta_i)
&\triangleq {\rm col}\left\{\begin{bmatrix} \bar{\g}^\ell(\bzeta_i) \\ \check{\g}^{\ell}(\bzeta_i)
\end{bmatrix} \right\}_{\ell=1}^L \label{g-bar-check} \\
\mu \eta \cV\tran \cR^{-1} \grad_{\ssw} \cP(\bsw_{i}) & \triangleq {\rm col}\left\{\begin{bmatrix} \bar{\f}^\ell(\bsw_{i}) \\ \check{\f}^\ell(\bsw_{i})
\end{bmatrix} \right\}_{\ell=1}^L \label{f-bar-check}
\\
-\mu \cV\tran \cR^{-1} \v_i &\triangleq {\rm col}\left\{\begin{bmatrix} \bar{\v}_{i}^\ell \\ \check{\v}_{i}^\ell
\end{bmatrix} \right\}_{\ell=1}^L \label{v-bar-check} \end{align}
Note that the quantities $\{\bar{\w}_{i}^\ell,\bar{\g}^\ell(\bzeta_{i}),\bar{\f}^\ell(\bsw_{i}),\bar{\v}_{i}^\ell\}$ are vectors of dimension $M_\ell \times 1$ while the quantities $\{\cw_{i}^\ell,\check{\g}^\ell(\bzeta_{i}),\check{\f}^{\ell}(\bsw_{i}),\check{\v}_{i}^\ell\}$ are vectors of dimension $M_\ell (N_\ell-1) \times 1$. Using these transformations, we can rewrite the previous recursion \eqref{trans-error} as:
\eq{ \scalemath{0.96} { {\rm col}\left\{\begin{bmatrix} \bar{\w}_{i}^\ell \\ \cw_{i}^\ell
\end{bmatrix} \right\} = \mathcal{J}\tran \ {\rm col}\left\{\begin{bmatrix} \bar{\w}_{i-1}^\ell+\bar{\g}^\ell(\bzeta_{i})+\bar{\f}^\ell(\bsw_{i-1})+\bar{\v}_{i}^\ell \\ \cw_{i-1}^\ell+\check{\g}^\ell(\bzeta_{i})+\check{\f}^{\ell}(\bsw_{i-1})+\check{\v}_{i}^\ell
\end{bmatrix} \right\} } \label{errorJ} } With a slight abuse of notation we dropped $\ell$ from ${\rm col}\{.\}_{\ell=1}^L$ and wrote it as ${\rm col}\{.\}$, we continue to do so in the following unless it is not clear from the presentation. \subsection{Similarity Transformation II} If we examine the structure of the entries in the transformed vector \eqref{w-cal-bar-check}, we observe that \eq{ \bar{\w}_{i}^\ell =\sum_{k \in \cC_\ell} r_\ell(k)\tw_{k,i}^\ell \label{centroid_error} }
is the centroid of the errors relative to $w^{\ell,\star}$ across all agents in $\cC_\ell$. Moreover, we note that \eq{ \tsw_i^\ell=(\cV_\ell^{-1})\tran \begin{bmatrix} \bar{\w}_{i}^\ell \\ \cw_{i}^\ell
\end{bmatrix} = \one_{N_\ell} \otimes \bar{\w}_{i}^\ell + (V_{2,\ell} \otimes I_{M_\ell}) \cw_{i}^\ell \label{clustererr=avg+dev} } so that we can write each $\tw_{k,i}^\ell$ as \eq{ \tw_{k,i}^\ell = \bar{\w}_{i}^\ell + \left([V_{2,\ell}]_k \otimes I_{M_\ell} \right) \cw_{i}^\ell \label{erro-ind-av+dev}} where $\left([V_{2,\ell}]_k \otimes I_{M_\ell} \right)$ are the rows of $\left(V_{2,\ell} \otimes I_{M_\ell} \right)$ corresponding to the position of $\tw_{k,i}^\ell$ in the cluster vector $\tsw_i^\ell$. From \eqref{erro-ind-av+dev} we see that $\{([V_{2,\ell}]_k \otimes I_{M_\ell}) \cw_{i}^\ell\}$ is the deviation of the individual errors $\{\tw_{k,i}^\ell\}$ from the weighted centroid error $\bar{\w}_{i}^\ell$ -- see Figure \ref{fig:relationerr}. \begin{figure}
\caption{Geometric relation between the error $\tw^\ell_{k,i}$ and the transformed parts $\bar{\w}_{i}^\ell$ and $\cw_{i}^\ell$.}
\label{fig:relationerr}
\end{figure} In the following we will show that we can drive $\bar{\w}_{i}^\ell$ and $\cw_{i}^\ell$ to arbitrarily small values. We first re-order the elements in \eqref{w-cal-bar-check}, so that iterates that correspond to the weighted centroids $\{\bar{\w}_{i}^\ell\}$ appear stacked together. For this purpose, we introduce a permutation matrix ${\cal T}$ such that \eq{\st {\rm col}\left\{\begin{bmatrix} \bar{\w}_{i}^\ell \\ \cw_{i}^\ell
\end{bmatrix} \right\}_{\ell=1}^L =\begin{bmatrix} {\rm col}\{ \bar{\w}_{i}^\ell
\}_{\ell=1}^L \\ {\rm col}\{ \cw_{i}^\ell \}_{\ell=1}^L
\end{bmatrix} \define \begin{bmatrix} \bw_i \\ \cw_i
\end{bmatrix} \label{permutaion}
}
We then transform the error recursion \eqref{errorJ} by multiplying both sides by $\mathcal{T}$ on the left to get:
\eq{ \begin{bmatrix} \bw_i \\ \cw_i
\end{bmatrix}= & \st \mathcal{J}\tran \st\tran \begin{bmatrix} \bw_{i-1}+\bar{\g}(\bzeta_{i})+\bar{\f}(\bsw_{i-1})+\bar{\v}_{i} \\ \cw_{i-1} +\check{\g}(\bzeta_{i})+\check{\f}(\bsw_{i-1})+\check{\v}_{i}
\end{bmatrix} \label{errorT}} where \eq{ \begin{bmatrix} \bar{\g}(\bzeta_{i}) \\ \check{\g}(\bzeta_{i})
\end{bmatrix} &\define \scalemath{0.95}{ \st {\rm col}\left\{\begin{bmatrix} \bar{\g}^\ell(\bzeta_{i}) \\ \check{\g}^\ell(\bzeta_{i})
\end{bmatrix} \right\} = \begin{bmatrix} {\rm col}\big\{ \bar{\g}^\ell(\bzeta_{i})
\big\} \\ {\rm col}\big\{ \check{\g}^\ell(\bzeta_{i}) \big\}
\end{bmatrix} } \\
\begin{bmatrix} \bar{\f}(\bsw_{i-1}) \\ \check{\f}(\bsw_{i-1})
\end{bmatrix} &\define \scalemath{0.95}{\st {\rm col}\left\{\begin{bmatrix} \bar{\f}^{\ell}(\bsw_{i-1}) \\ \check{\f}^{\ell}(\bsw_{i-1})
\end{bmatrix} \right\} = \begin{bmatrix} {\rm col}\big\{ \bar{\f}^{\ell}(\bsw_{i-1})
\big\} \\ {\rm col}\big\{ \check{\f}^{\ell}(\bsw_{i-1}) \big\}
\end{bmatrix} }\\
\begin{bmatrix} \bar{\v}_{i} \\ \check{\v}_{i}
\end{bmatrix} &\define \st {\rm col}\left\{\begin{bmatrix} \bar{\v}_{i}^\ell \\ \check{\v}_{i}^\ell
\end{bmatrix} \right\} = \begin{bmatrix} {\rm col}\left\{ \bar{\v}_{i}^\ell
\right\} \\ {\rm col}\left\{ \check{\v}_{i}^\ell \right\}
\end{bmatrix} \label{calT-noise} }
Since $\cJ$ is block diagonal, the operation $\st \mathcal{J}\tran \st\tran $ performs a similar reordering with respect to the diagonal blocks -- Figure \ref{fig:TXT} illustrates this operation visually. \begin{figure}
\caption{A visual illustration of transformations $\st \mathcal{J}\tran \st\tran$ for $L=3$ blocks represented by different colors.}
\label{fig:TXT}
\end{figure} \noindent Thus, \eq{ \st \mathcal{J} \st\tran &=\begin{bmatrix} I_M & 0 \\ 0 & {\rm blkdiag}\{\check{J}_\ell \otimes I_{M_\ell} \} \end{bmatrix} } plugging into \eqref{errorT}, we arrive at the following conclusion. \begin{lemma} {\bf (Transformed error recursion)}: Following similarity and permutation transformations, the error recursion \eqref{error1} can be transformed into the following form \eq{ \begin{bmatrix} \bw_{i} \\ \cw_i
\end{bmatrix} &= \begin{bmatrix} \bw_{i-1}+\bar{\g}(\bzeta_{i})+\bar{\f}(\bsw_{i-1})+\bar{\v}_{i} \\ \check{\cJ}\big(\cw_{i-1} +\check{\g}(\bzeta_{i})+\check{\f}(\bsw_{i-1})+\check{\v}_{i} \big)
\end{bmatrix} \label{errorT-scaled}} where \eq{ \check{\cJ} \define {\rm blkdiag}\{\check{J}_\ell \otimes I_{M_\ell} \} } \qd \end{lemma}
\noindent We are now ready to state the main result regarding the coupled diffusion algorithm \eqref{GM Stochastic Diffusion}. \subsection{Mean-Square Convergence} \begin{theorem}(\textrm{\bf{Mean-square convergence}}): Under Assumptions \ref{feasible assump}--\ref{noisemodel:assump}, the coupled diffusion algorithm \eqref{GM Stochastic Diffusion} converges in the mean-square-error sense for sufficiently small step-sizes $\mu$ (see \eqref{step-size(all)}), namely, it holds that for $i \geq 0$ \eq{ \begin{bmatrix}
\Ex \|\bw_{i}\|^2 \\
\Ex \|\cw_{i}\|^2
\end{bmatrix} \preceq \Gamma \begin{bmatrix}
\Ex \|\bw_{i-1}\|^2 \\
\Ex \|\cw_{i-1}\|^2
\end{bmatrix} +\begin{bmatrix} c_1\\ c_2
\end{bmatrix} \label{theorem-meansquare_gamma} } where $\Gamma$ is a stable matrix and $\{c_1,c_2\}$ are independent of time (see \eqref{gamma_c_expressions}). It follows that, for every agent $k$, \eq{ \limsup\limits_{i\rightarrow \infty}
\Ex \|w^{\ell,\star}- \w^\ell_{k,i} \|^2 &\leq O(\mu)+O(\mu^2 \eta^4) \label{theorem-meansquare-agent-k-var-l}} for all $\ell \in \cI_k$. \qd \label{theorem-meansquare} \end{theorem}
Proof: See Appendix \ref{appendix-proof}.
Theorem \ref{theorem-meansquare} means that the expected squared distance between $\w_{k,i}^\ell$ and $w^{\ell,\star}$ is upper bounded by some value on the order of $\mu$ or $\mu^2 \eta^4$, whichever is larger. This implies that we can get arbitrarily close to the optimal penalized solution $w^\star={\rm col}\{w^{\ell,\star}\}_{\ell=1}^L$ by choosing $\mu$ arbitrarily small. Moreover, from Theorem \ref{theorem-approachoptimal}, we can get arbitrarily close to the original problem \eqref{glob2} by choosing $\eta$ arbitrarily large. From the step size condition \eqref{step-size(all)} we see that $\mu<O(1/\eta^2)$, therefore we can choose $\eta={c / \mu^\theta}$ for some constant $c$ and $0<\theta<0.5$. This way the problem will depend on $\mu$ only and as $\mu \rightarrow 0$, the iterates $\{\w_{k,i}^\ell\}_{k \in \cC_\ell}$ approach the optimizer of the original problem $w^{\ell,o}$ asymptotically. Another conclusion from Theorem \ref{theorem-meansquare} is that the convergence rate is upper bounded by the spectral radius of the matrix $\Gamma$ (from \eqref{conv_rate} and ignoring the $\eta$ terms): \eq{ \rho(\Gamma) = \max \{1-\mu \nu +O(\mu^2),\lambda(2)+O(\mu)\} }
where $\lambda(2)=\max_{ \ell \in \{1,\cdots,L\}} | \lambda_{\ell}(2)|$ and $\lambda_{\ell}(2)$ is the second largest eigenvalue in magnitude of the combination matrix $A_\ell$ (the largest eigenvalue is equal to one). The smaller $\lambda_{\ell}(2)$ is, the more connected $\cC_\ell$ is. Apart from reducing communication and memory allocation, this result shows the importance of solving \eqref{glob2} directly and how the clusters affect the convergence rate, i.e., the convergence rate is directly affected by the connectivity of the clusters instead of the network.
\begin{remark}\label{remark-tighter_bounds}{ \rm Note that the $O(\mu)$ term in \eqref{theorem-meansquare-agent-k-var-l} is due to the persistent gradient noise component. In adaptive systems, constant step sizes are used to allow the algorithm to track changing minimizers. For example, when the distribution of the streaming data changes, the minimizer also changes and if a decaying step size is used then the algorithm will lose track of the minimizer as the step size approaches zero. This means that in practice we only need to choose a sufficiently small step size and sufficiently large penalty factor. For example, in the flow problem given in \eqref{appl-flow-penalty}, the penalty factor $\eta$ is set to be large enough so that under constant step size and slowly varying flows $\{\b_k(i)\}$, the algorithm is still able to track the changing minimizer. In general, the penalized problem optimizer approaches the minimizer of the original problem as $\eta$ approaches infinity. Under some additional assumptions, an exact differentiable penalty function can be constructed \cite{huyer2003new} and, therefore, there exist scenarios such that the minimizers of problems \eqref{glob2} and \eqref{penalized_cost} approach each other for large enough $\eta< \infty$. \qd } \end{remark} \begin{remark}\label{remark-th}{ \rm
In practice, the $O(\mu^2 \eta^4)$ term in \eqref{theorem-meansquare-agent-k-var-l} is tighter and $\mu$ can be chosen to satisfy $\mu <O(1/\eta)$ instead of $\mu<O(1/\eta^2)$. Although unnecessary for the convergence analysis, if desired, this can be tightened to $O(\mu^2 \eta^2)$ by calling upon the following observation. The optimality condition of \eqref{penalized_cost} is: \eq{
0&=\sum_{k \in \mathcal{C}_\ell} \big( \grad_{w^\ell}J_{k}(w_k^\star)+\eta \grad_{w^\ell}p_{k}(w_k^\star) \big) \nonumber \\
&=\sum_{k \in \mathcal{C}_\ell} \bigg( \grad_{w^\ell}J_{k}(w_k^\star) + \sum_{u=1}^{U_k} \eta \grad\delta^{{\rm EP}}(h_{k,u}(w_k^\star)) \grad_{w^\ell}h_{k,u}(w_k^\star) \nonumber \\
& \quad +\sum_{v=1}^{V_k} \eta \grad\delta^{{\rm IP}}(g_{k,v}(w_k^\star)) \grad_{w^\ell}g_{k,v}(w_k^\star) \bigg), \ \forall \ \ell \label{lagrangian} } Assume the optimal value $w^o$ is a regular point for the constraints, meaning that the gradients of the equality constraints and the active inequality constraints $\{\grad_{w} h_{k,u}(w^o),\grad_{w} g_{k,v'}(w^o)\}$ are linearly independent (where an active constraint means that $g_{k,v'}(w_k^o)=0$ for some $v'$ where $w_k^o={\rm col}\{w^{\ell,o}\}_{\ell \in \cI_k}$). Then, it is shown in \cite[pp.~479-481]{43} (see also \cite[pp.~392-393]{bertsekas1999nonlinear}): \eq{ \eta \grad\delta^{{\rm EP}}(h_{k,u}(w_k^\star)) \rightarrow y^o_{k,u}, \
\eta \grad\delta^{{\rm IP}}(g_{k,v}(w_k^\star)) \rightarrow z^o_{k,v} }
as $\eta \rightarrow \infty$. Here, the variables $y^o_{k,u}$ and $z^o_{k,v}$ correspond to the optimal unique dual variables of problem \eqref{glob2} associated with the constraints $h_{k,u}(.)$ and $g_{k,v}(.)$. Thus, it holds that each $\eta \grad_{w^\ell}p_{k}(w_k^\star)$ converges. We know that any convergent sequence is bounded and, hence, $\|\eta \grad_{\ssw}\cP(\sw^\star)\|$ is bounded by some constant independent of $\eta$. This observation can be used in the proof of Theorem \ref{theorem-meansquare} to tighten the bound in \eqref{theorem-meansquare-agent-k-var-l} to $O(\mu)+O(\mu^2 \eta^2)$ (see Remark \ref{remark-proof}). \qd } \end{remark} \begin{figure*}
\caption{ \small (a) Network MSD for two different step-sizes. (b) Network MSD for $\mu=0.001$ and $\eta=100$ showing the adaptive coupled diffusion algorithm where in iteration $2000$ the minimizer $w^\star$ changes by randomly regenerating the constraints. (c) Average steady-state MSD for different values of $\mu$ and $\eta$.}
\label{fig:simulation_unconstrained}
\label{fig:simulation_adaptive_constrained}
\label{fig:eta_mu}
\end{figure*} \section{Example and Simulation Results} In this section, we illustrate the performance of the coupled diffusion strategy \eqref{stochastic-diff(a)}--\eqref{stochastic-diff(c)} for a least-squares model fitting problem under streaming data.
Let $w={\rm col}\{w^1,\cdots,w^L\} \in \real^{M}$ and consider $N$ agents with distributed cost: \eq{
\scalemath{0.96} {J^{{\rm glob}}(w)=\Ex \left\|\H_{i}w-\y_i\right\|^2 =\sum_{k=1}^N \Ex \big(\h_{k,i}\tran w - \y_k(i)\big)^2 } \label{sim-glob}} where $\h_{k,i} \in \real^{M}$ and $\y_k(i) \in \real$ are the $k$-th row of $\H_{i} \in \real^{N \times M}$ and the $k$-th element of $\y_i \in \real^{N}$, respectively. Assume $\H_{i} \in \real^{N \times M}$ and $\y_i \in \real^{N}$ are related via the linear model: \eq{ \y_i=\H_{i}w^\bullet+ \v_i } with $\v_i \in \real^{N}$ representing a random Gaussian noise independent of $\H_{i}$ with covariance $\Sigma={\rm diag}\{\sigma_{v,k}(i)\}_{k=1}^N$. Assume the features $\{\h_{k,i}\}$ are sparse in the sense that they are zero at the location of $\{w^\ell\}$ if $\ell \notin \cI_k$ where $\cI_k$ represent the indices where the features are nonzero. Specifically, if we divide each feature as $\h_{k,i}=[\h_{k,i}^1,\cdots,\h_{k,i}^L]$, then we assume $\h_{k,i}^\ell=0_{M_\ell}$ if $\ell \notin \cI_k$. Thus, if we let \eq{ w_{k} \define{\rm col}\{w^\ell\}_{\ell \in \cI_k}, \quad \overline{\h}_{k,i}\define {\rm col}\{\h_{k,i}^\ell\}_{\ell \in \cI_k} } then, we can rewrite \eqref{sim-glob} as \eq{ J^{{\rm glob}}(w)=\sum_{k=1}^N \Ex \left(\overline{\h}_{k,i}\tran w_k - \y_k(i)\right)^2 \label{sim-glob2}} Problems with a global function of the type \eqref{sim-glob2} naturally arises when different subsets of data are dispersed over $N$ processors (or agents) \cite[pp.~53]{26}. Also, in robust power system state estimation as in \eqref{power-system-estim} with streaming data instead. \subsection{Unconstrained Case} We first show the performance of the proposed algorithm for an unconstrained case by minimizing the cost \eqref{sim-glob2}. In our simulation we consider the network of $N=20$ agents shown in Figure \ref{fig:simulation_network}. The global vector $w$ is of size $M=25$. The number of sub-vectors is $L=5$ each of size $5 \times 1$ and each cluster is given in Figure \ref{fig:simulation_network}. 
The model parameter $w^\bullet$ is chosen randomly and normalized to one. The noise variances $\{\sigma_{v,k}\}$ are chosen uniformly at random between $-20$ and $-30$ dB. The covariance matrix $\Ex \overline{h}_{k,i} \overline{h}_{k,i}\tran=R_{h,k} \in \real^{Q_k \times Q_k}$ is generated as $R_{h,k}=U_k \Lambda_k U_k\tran$ where $U_k$ is a randomly generated orthogonal matrix and $\Lambda_k$ is a diagonal matrix with each diagonal entry uniformly chosen between $1$ and $3$. To compare the performance with other algorithms, we simulate a linearized version of the ADMM approach from \cite{26}. The algorithm from \cite{26} is: \eq{
w_{k,i+1}&=\argmin_{w_k} \left(J_k(w_k)+y_{k,i}\tran w_k+{\rho \over 2}\|w_k-z_{k,i}\|^2 \right)\label{admm1} \\ z^\ell_{i+1}&={1 \over N_\ell} \sum_{k \in \cC_\ell} (w_{k,i+1}^\ell + {1 \over \rho} y_{k,i}^\ell), \ \forall \ \ell=1,\cdots,L \label{admm2}\\ y_{k,i+1}&=y_{k,i}+\rho (w_{k,i+1}-z_{k,i+1})\label{admm3} } where $y_k={\rm col}\{y_k^\ell\}_{\ell \in \cI_k}$, $z_{k}=\{z^\ell\}_{\ell \in \cI_k}$, and $\rho>0$. Now note that unlike the coupled diffusion strategy, this algorithm is not a first order algorithm and step \eqref{admm1} requires an inner iteration loop unless a closed form solution exists. Moreover, under adaptive networks it is not possible to solve step \eqref{admm1} using for example gradient descent with constant step-size due to the unknown cost $J_k(w_k)$, and a decaying step-size is required, which only converges asymptotically. Therefore, in our simulation for comparison we linearize this step by employing one stochastic gradient step with constant step-size $\mu$. Note also that step \eqref{admm2} requires global knowledge. We also simulate the centralized recursion \eqref{central-inc1}--\eqref{central-inc2} using stochastic gradient and with diagonal scaling $D={\rm diag}\{{1 \over N_\ell} I_{M_\ell}\}_{\ell=1}^L$ used to make the convergence rate practically the same. \begin{figure}
\caption{Network topology and clusters used in the simulation.}
\label{fig:simulation_network}
\end{figure}
The simulation result is shown in Figure \ref{fig:simulation_unconstrained}, which plots the instantaneous network MSD \eq{
{\rm MSD}(i)=\sum\limits_{\ell=1}^L{1\over N_\ell}\sum\limits_{k \in \cC_\ell} \Ex \|w^{\ell,\star}-\w^{\ell}_{k,i}\|^2 } for different step sizes. The combination matrices $A_\ell$ are chosen using the Metropolis rule \eqref{Metropolis}. We see that the coupled diffusion algorithm outperforms its ADMM counterpart even though the ADMM uses global information in step \eqref{admm2}, which indicates that primal-dual methods do not necessarily perform well under adaptive networks as shown in \cite{41}. We also notice that the smaller the step-size is, the smaller is the steady-state MSD, and, moreover, the closer the coupled diffusion becomes to the centralized.
\subsection{Constrained Case}
We now add constraints to the previous setting where now the network is interested in solving a linearly constrained least-squares with the cost given in \eqref{sim-glob2} and $L$ linear constraints $ Gw=b $, where $G \in \real^{L \times M}$ and $b \in \real^{L}$. Let $g_\ell\tran$ and $b_\ell$ denote the $\ell$-th row of $G$ and the $\ell$-th element in $b$, then we assume that only one agent $k_c$ in cluster $\cC_\ell$ is aware of the $\ell$-th constraint, which has the form: \eq{ g_\ell\tran w = g_{\ell,k_c}\tran w_{k_c} = b_\ell } where $g_{\ell,k_c} \in \real^{Q_k}$. This means that $g_\ell$ has zero entries at the location of $w^\ell$ if $\ell \notin \cI_{k_c}$. This situation occurs for example when agent $k_c$ is the decision-making agent regarding variable $w^\ell$ with knowledge about the constraint concerning $w^\ell$, while the other agents are just observation agents that collect data related to $w^\ell$.
In our simulation, $\{g_{\ell,k_c}\}$ are generated using the standard Gaussian distribution and normalized to one and $\{b_\ell\}$ are generated uniformly between $-1$ and $1$. The agents that have knowledge about the constraints $\ell=\{1,2,3,4,5\}$ are $k_c=\{2,10,16,5,17\}$, respectively. We used the quadratic penalty $\delta^{\rm EP}(x)= x^2$ to penalize the constraints. Figure \ref{fig:simulation_adaptive_constrained} shows the MSD for the coupled diffusion algorithm with $\mu=0.001$ and $\eta=100$. In order to illustrate that the algorithm is capable of tracking a changing minimizer, we randomly regenerate $\{g_{\ell,k_c}\}$ and $\{b_\ell\}$ at $i=2000$ so that the constraints change and, thus, $w^\star$ changes. We see that the algorithm is capable of tracking a changing minimizer. In Figure \ref{fig:eta_mu}, we plot the steady-state MSD to the constrained problem minimizer $w^o$ for different values of step-sizes $\mu$ and penalty parameters $\eta$. It is observed that for relatively small penalty parameter $\eta=10$, the MSD becomes unaffected by how small $\mu$ is. This is because for small $\eta$, the penalized optimal vector $w^\star$ is not a good approximation of $w^o$. However, for large penalties, we notice that the smaller the step-size $\mu$ is, the smaller the MSD becomes. This is because from Theorem \ref{theorem-approachoptimal} we know that for sufficiently large $\eta$, the penalized optimal $w^\star$ becomes close to $w^o$ and from Theorem \ref{theorem-meansquare}, we know that the smaller the value of $\mu$ is, the closer the coupled diffusion iterates become to $w^\star$.
\section{Concluding Remarks} In this work, we developed a distributed optimization algorithm that solves a general sum of local cost functions subject to the intersection of all local constraints, where each local cost and constraint may contain partial blocks of the global variable. We proved convergence under constant-step sizes in stochastic scenarios, where we show that we can get arbitrarily close to the optimal solution. Constant-step sizes are important in adaptation and learning where the optimizer location may drift with time. We also showed how the coupling across agents affects the convergence rate of the designed algorithm. \begin{appendices}
\section{Useful Bounds} \label{appendix-bounds}
In this appendix we give bounds that will be used in Appendix \ref{appendix-proof} for the proof of the main convergence Theorem \ref{theorem-meansquare}.
\begin{lemma} Under Assumptions \ref{feasible assump}--\ref{penalty-assump} and if \eq{ \mu < {1 \over \nu+N(\delta+\eta \delta_p) } \label{step-size(a)} } then it holds that: \eq{
\|\widetilde{w}_{i-1}+\mu \grad_w J^{\rm glob}_{\eta}(w_{i-1})\|^2 \leq (1-\mu \nu)^2 \|\widetilde{w}_{i-1}\|^2
\label{lemma_inequal_3}} where $\widetilde{w}_{i-1}\define w^\star-w_{i-1}$ and \eq{
\delta \define \max_{k} \delta_{k} , \quad
\delta_{p} \define \max_{ k } \delta_{p,k}
} \end{lemma} \noindent {\bf Proof:} The proof follows from \cite[Lemma~10]{qu2017harnessing} with the Lipschitz constant set to $N(\delta+\eta \delta_p)$. We now show that the Lipschitz constant of $J^{\rm glob}_{\eta}(\cdot)$ is upper bounded by $N(\delta+\eta \delta_p)$. Under Assumptions \ref{cost-assump}--\ref{penalty-assump} and for any $x$ and $y$ of the same structure as $w$, it holds that: \eq{
& \big\|\grad_w J^{\rm glob}_{\eta}(x)-\grad_w J^{\rm glob}_{\eta}(y) \big\| \nonumber \\
& = \left\| \sum_{k =1}^N \big( \grad_{w}J_{k,\eta}(x_{k})- \grad_{w} J_{k,\eta}(y_{k}) \big)\right\| \nonumber \\
& \overset{(a)}{\leq} \sum_{k =1}^N \big\| \grad_{w_k}J_{k,\eta}(x_k)- \grad_{w_k} J_{k,\eta}(y_k) \big\| \nonumber \\
& \leq \sum_{k =1}^N (\delta_{k}+\eta \delta_{p,k}) \big\| x_k- y_k \big\| \leq N (\delta+\eta \delta_p) \big\| x- y \big\| } where in step (a) we used the triangle inequality and the fact that the block entries $\{\grad_{w^\ell} J_{k,\eta}(.)\}$ are zero if $k \notin \cC_\ell$.
\qd
\noindent The following simple fact will be useful for the proof of the next lemma.
\noindent {\bf Fact 1.}
{\em Let $x_{k}={\rm col}\{z_{k}^\ell\}_{\ell \in \cI_k}$, $\ssx^\ell={\rm col}\{z_{k}^\ell\}_{k \in \cC_\ell}$ , and $\ssx={\rm col}\{\ssx^\ell\}_{\ell=1}^L$. Then, it holds that:
\eq{
\|\ssx\|^2=\sum_{\ell=1}^L \sum_{k \in \cC_\ell} \|z^\ell_k\|^2 = \sum_{k=1}^N \sum_{\ell \in \cI_k} \|z^\ell_k\|^2 = \sum_{k=1}^N \|x_k\|^2 \label{fact}
}
} {\bf Proof:} The proof follows from the fact that the squared norm of a vector is equal to the sum of the squared norms of its individual blocks.
\qd
For the statement and proof of the next Lemma we introduce the centroid vectors:
\eq{ \w_{c,i} &\define{\rm col}\left\{ \w^{\ell}_{c,i} \right\}_{\ell=1}^L \in \real^{M} \\ \w'_{k,i} &\define{\rm col}\left\{ \w^{\ell}_{c,i} \right\}_{\ell \in \cI_k} \in \real^{Q_k} \label{average_itertes}} where $\w^{\ell}_{c,i} \define \sum_{s \in \cC_\ell} r_\ell(s) \w_{s,i}^\ell \in \real^{M_\ell}$ is the centroid of the estimates $\{\w_{s,i}^\ell\}_{s \in \cC_\ell}$ of the variable $w^\ell$.
\begin{lemma}
Under Assumptions \ref{cost-assump} and \ref{penalty-assump}, the following bounds hold: \eq{
& \big\| \bar{\g}(\bsw_{i-1})+ \bar{\f}(\bsw_{i-1})-\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1})\big\|^2 \nonumber \\
& \quad \leq \mu^2 (\delta+\eta \delta_p)^2 v_1 N_x \|\cw_{i-1} \|^2
\label{lemma_inequal_1}}
and
\eq{
&\big\|\bar{\g}(\bzeta_{i})-\bar{\g}(\bsw_{i-1})\big\|^2 \leq 2 \mu^4 \eta^2 \delta^2 N_x n \|\grad_{\ssw} \cP(\sw^\star)\|^2 \nonumber \\
& \quad +
2 \mu^4 \eta^2 \delta^2 \delta_p^2 v_2 N_x n ( \|\bw_{i-1}\|^2+\|\cw_{i-1}\|^2)
\label{lemma_inequal_2}}
where $v_1 = \max_\ell \|V_{2,\ell} \otimes I_{M_\ell}\|^2$, $v_2\define\|\cV^{-1}\|^2$, $N_x=\max_\ell\{N_\ell\}$, and $n=\|\cR^{-1}\|^2$.
\end{lemma} \noindent {\bf Proof:} We first show \eqref{lemma_inequal_1}. Note that from \eqref{g-bar-check}: \eq{ \bar{\g}^\ell(\bsw_{i-1}) &= \mu (r_\ell\tran R_\ell^{-1} \otimes I_{M_\ell}) \grad_{\ssw^\ell} \cJ(\bsw_{i-1}) \nonumber \\ &= \mu(\one_{N_\ell}\tran \otimes I_{M_\ell}) \grad_{\ssw^\ell} \cJ(\bsw_{i-1}) \nonumber \\ &=\mu \sum_{k \in \cC_\ell} \grad_{w_k^\ell}J_{k}(\w_{k,i-1}) } Likewise for $\bar{\f}^\ell(\bsw_{i-1})$. Thus, we have: \eq{ &\bar{\g}^\ell(\bsw_{i-1})+\bar{\f}^\ell(\bsw_{i-1}) =\mu \sum_{k \in \cC_\ell} \grad_{w_k^\ell}J_{k,\eta}(\w_{k,i-1}) } and, therefore, \eq{
& \big\|\bar{\g}(\bsw_{i-1})+ \bar{\f}(\bsw_{i-1})-\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1}) \big\|^2 \nonumber \\
& =\sum_{\ell=1}^L \left\| {\mu N_\ell \over N_\ell} \sum_{k \in \cC_\ell} \left( \grad_{w_k^\ell}J_{k,\eta}(\w_{k,i-1})- \grad_{w^\ell} J_{k,\eta}(\w'_{k,i-1}) \right)\right\|^2 \nonumber \\
& \leq \mu^2 N_x \sum_{\ell=1}^L \sum_{k \in \cC_\ell} \left\| \grad_{w_k^\ell}J_{k,\eta}(\w_{k,i-1})- \grad_{w^\ell} J_{k,\eta}(\w'_{k,i-1}) \right\|^2 \nonumber \\
& \overset{\eqref{fact}}{=} \mu^2 N_x \sum_{k =1}^N \left\| \grad_{w_k}J_{k,\eta}(\w_{k,i-1})- \grad_{w_k} J_{k,\eta}(\w'_{k,i-1}) \right\|^2 \nonumber \\
& \overset{(a)}{\leq} \mu^2 N_x(\delta+\eta \delta_p)^2 \sum_{k =1}^N \left\|\w_{k,i-1}- \w'_{k,i-1} \right\|^2 \nonumber \\
& \overset{\eqref{fact}}{=} \mu^2 N_x (\delta+\eta \delta_p)^2 \left\|\bsw_{i-1}-{\rm col} \{\one_{N_\ell} \otimes \w^{\ell}_{c,i-1}\} \right\|^2 \nonumber \\
& \overset{\eqref{clustererr=avg+dev}}{=} \mu^2 (\delta+\eta \delta_p)^2 N_x \big\|{\rm diag}\{V_{2,\ell} \otimes I_{M_\ell}\}\cw_{i-1} \big\|^2 \nonumber \\
& \leq \mu^2 (\delta+\eta \delta_p)^2 N_x v_1 \|\cw_{i-1} \|^2
\label{pppp_lemma00}
} Step (a) holds because of conditions \eqref{indv-cost-1} and \eqref{penalty-cost-1}. We now show the validity of inequality \eqref{lemma_inequal_2}. It holds that: \eq{
& \| \mu \eta \grad_{\ssw} \cP(\bsw_{i-1})\|^2 \nonumber \\
& = \mu^2 \eta^2 \|\grad_{\ssw} \cP(\bsw_{i-1})-\grad_{\ssw} \cP(\sw^\star)+\grad_{\ssw} \cP(\sw^\star)\|^2 \nonumber \\
& \overset{\eqref{penalty-cost-1}}{\leq} 2 \mu^2 \eta^2 \delta_p^2 \|\tsw_{i-1}\|^2+2 \mu^2 \eta^2 \|\grad_{\ssw} \cP(\sw^\star)\|^2 \label{pppp_lemma0} } Using an argument similar to the first five steps in \eqref{pppp_lemma00} we get: \eq{
&\|\bar{\g}(\bzeta_{i})-\bar{\g}(\bsw_{i-1})\|^2 \leq \mu^2 \delta^2 N_x \|\bzeta_{i}-\bsw_{i-1}\|^2 \nonumber \\
& \overset{\eqref{network-recur-noise(a)}}{=} \mu^2 \delta^2 N_x \|- \mu \eta \cR^{-1} \grad_{\ssw} \cP(\bsw_{i-1})\|^2 \nonumber \\
& \overset{\eqref{pppp_lemma0}}{\leq} 2 \mu^4 \eta^2 \delta^2 N_x n \left(\delta_p^2 \|\tsw_{i-1}\|^2+ \|\grad_{\ssw} \cP(\sw^\star)\|^2 \right) \label{pppp_lemma} } Note that: \eq{
\|\tsw_{i-1}\|^2&=\big\|(\cV\tran)^{-1}\cV\tran\tsw_{i-1} \big\|^2 \leq v_2( \|\bw_{i-1}\|^2+\|\cw_{i-1}\|^2) \label{bound_wbar_wcheck} } Substituting the previous bound into \eqref{pppp_lemma} gives \eqref{lemma_inequal_2}. \qd
\begin{lemma} The noise terms are bounded by: \eq{
&\Ex \|\bv_{i}\|^2+\Ex \|\cv_{i}\|^2 \leq \bar{\alpha} \left( \Ex \|\bw_{i-1}\|^2+\Ex \|\cw_{i-1}\|^2 \right) + \bar{\sigma}^2 \label{lemma_noise_bound} } where \begin{subequations} \eq{ \bar{\alpha} &\define \mu^2 \alpha v_2 v_3(2+4\mu^2 \eta^2 \delta_p^2 n)\\
\bar{\sigma}^2 &\define 4 \mu^4 \eta^2 v_3 n \|\grad_{\ssw} \cP(\sw^\star)\|^2+\mu^2 v_3 \sigma^2
} \label{alpha_sigma}
\end{subequations}
$v_3\define\|\cV\tran \cR^{-1}\|^2$, $\alpha \define \max_{k} \alpha_k$, $\alpha_k \define 2\bar{\alpha}_k$, $\sigma^2 \define \sum\limits_{k=1}^N \sigma_k^2$, and $\sigma_k^2=\bar{\sigma}_k^2+2\bar{\alpha}_k \|w_k^\star\|^2$. \end{lemma} \noindent {\bf Proof:} We first note that: \eq{
& \Ex \|\bv_{i}\|^2 +\Ex \|\cv_{i}\|^2 =\Ex \left\| \begin{bmatrix} \bar{\v}_{i} \\ \check{\v}_{i}
\end{bmatrix} \right\|^2 \overset{\eqref{calT-noise}}{=} \Ex \left\|\st {\rm col}\left\{ \begin{bmatrix} \bar{\v}_{i}^\ell \\ \check{\v}_{i}^\ell
\end{bmatrix} \right\} \right\|^2 \nonumber \\
& =\Ex \left\|{\rm col}\left\{\begin{bmatrix} \bar{\v}_{i}^\ell \\ \check{\v}_{i}^\ell
\end{bmatrix} \right\} \right\|^2 \overset{\eqref{v-bar-check}}{=} \Ex \big\|\mu \cV\tran \cR^{-1} \v_i \big\|^2 \leq \mu^2 v_3 \Ex \| \v_i \|^2
\label{s-bar-check} }
Using \eqref{noise-model(b)}, it can be easily confirmed that: \eq{
\Ex\{\|\v_{k,i}(\bzeta_k)\|^2\mid \cf_{i} \} \leq \alpha_k \|\tzeta_k\|^2 + \sigma_k^2 \label{noise-error-model(b)} } If we take the expectation of \eqref{noise-error-model(b)} we get: \eq{
\Ex \|\v_{k,i}(\bzeta_k)\|^2 \leq \alpha_k \Ex\|\tzeta_k\|^2 + \sigma_k^2 \label{noise-error-model(bb)} }
Therefore, we can bound $\Ex \|\v_i\|^2$ as follows: \eq{
\Ex \|\v_i\|^2
&\overset{(a)}{=} \sum_{k=1}^N \Ex \| \v_{k,i}(\bzeta_{k,i})\|^2
\leq \sum_{k=1}^N ( \alpha_k \Ex \|\tzeta_{k,i}\|^2+\sigma_k^2) \nonumber \\
&\leq \alpha \sum_{k=1}^N \Ex \|\tzeta_{k,i}\|^2+\sigma^2 \overset{\eqref{fact}}{=} \alpha \Ex \| \tzeta_{i}\|^2 + \sigma^2
\label{noise-bound-zeta}}
Step (a) holds because of \eqref{transform-vector-noise} and \eqref{fact}. Now note that: \eq{
\Ex \| \tzeta_{i}\|^2 &\overset{\eqref{zeta-error}}{=}\Ex \big\|\tsw_{i-1} + \mu \eta \cR^{-1} \grad_{\ssw} \cP(\bsw_{i-1})\big\|^2 \nonumber \\
&\leq (2+4\mu^2 \eta^2 \delta_p^2 n) \Ex \|\tsw_{i-1}\|^2+4 \mu^2 \eta^2 n \|\grad_{\ssw} \cP(\sw^\star)\|^2 \label{zeta-bound-w} } where in the last step we used Jensen's inequality and \eqref{pppp_lemma0}. Therefore, by substituting \eqref{zeta-bound-w} into \eqref{noise-bound-zeta} and using \eqref{bound_wbar_wcheck} we conclude that \eq{
&\Ex \|\v_i\|^2 \overset{\eqref{bound_wbar_wcheck}}{\leq} \alpha' \left( \Ex \|\bw_{i-1}\|^2+\Ex \|\cw_{i-1}\|^2 \right) + \sigma'^2 \label{noise-check-bar-sum-bound} }
where $\alpha' \define \alpha v_2(2+4\mu^2 \eta^2 \delta_p^2 n)$ and $\sigma'^2 \define 4 \mu^2 \eta^2 n \|\grad_{\ssw} \cP(\sw^\star)\|^2+ \sigma^2$. Substituting inequality \eqref{noise-check-bar-sum-bound} into \eqref{s-bar-check} gives the bound \eqref{lemma_noise_bound}.
\qd
\section{Proof of Theorem 2} \label{appendix-proof} For ease of reference, we rewrite \eqref{errorT-scaled}: \eq{ \bw_{i}&=\bw_{i-1}+\bar{\g}(\bzeta_{i})+\bar{\f}(\bsw_{i-1})+\bar{\v}_{i} \label{expanded-1}\\ \cw_{i} &= \check{\cJ}\big(\cw_{i-1} +\check{\g}(\bzeta_{i})+\check{\f}(\bsw_{i-1})+\check{\v}_{i}\big) \label{expanded-2} } We first bound \eqref{expanded-1}. Adding and subtracting $ \bar{\g}(\bsw_{i-1})$ from the right hand side of \eqref{expanded-1}: \eq{ \bw_{i}&=\bw_{i-1}+\bar{\g}_{\eta}(\bsw_{i-1})+\bar{\g}(\bzeta_{i})-\bar{\g}(\bsw_{i-1})+\bar{\v}_{i} } where $\bar{\g}_{\eta}(\bsw_{i-1})\define \bar{\g}(\bsw_{i-1})+ \bar{\f}(\bsw_{i-1})$. Similarly, adding and subtracting $\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1})$: \eq{ \bw_{i}&=\bw_{i-1}+\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1}) + \bar{\h}_i+\bar{\v}_{i} \label{bar_expand} } where we introduced \eq{ \bar{\h}_i\define \bar{\g}_{\eta}(\bsw_{i-1})-\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1})+\bar{\g}(\bzeta_{i})-\bar{\g}(\bsw_{i-1}) \label{bar_h_def}} Conditioning both sides of \eqref{bar_expand} on $\cf_{i}$, and computing the conditional second-order moments, we get: \eq{
&\Ex \|\bw_{i}\mid \cf_{i}\|^2 = \bigg\|\bw_{i-1}+\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1}) +\bar{\h}_i \bigg\|^2 \nonumber \\
& \quad + \Ex \| \bv_{i} \mid \cf_{i}\|^2 \label{bar_expand_1} } where the cross term with the noise component is zero because of the gradient noise conditions given in Assumption \ref{noisemodel:assump}. Note that $\bw_{i-1}=w^\star-\w_{c,i-1}$. Appealing to Jensen's inequality we have: \eq{
& \bigg\|\bw_{i-1}+\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1}) +\bar{\h}_i \bigg\|^2 \nonumber \\
& =\bigg\|{t\over t}\bigg(\bw_{i-1}+\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1})\bigg) +{1-t\over 1-t}\bar{\h}_i \bigg\|^2 \nonumber \\
& \leq {1\over t} \bigg\|\bw_{i-1}+\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1}) \bigg\|^2 +{1\over (1-t)}\|\bar{\h}_i\|^2 \nonumber \\
& \overset{\eqref{lemma_inequal_3}}{\leq} {1\over t} (1-\mu \nu)^2 \|\bw_{i-1}\|^2 +{1\over (1-t)}\|\bar{\h}_i\|^2 } for any $t \in (0,1)$. Note that: \eq{
&\|\bar{\h}_i\|^2 \nonumber \\
&\overset{\eqref{bar_h_def}}{=}\big\|\bar{\g}_{\eta}(\bsw_{i-1})-\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1})+\bar{\g}(\bzeta_{i})-\bar{\g}(\bsw_{i-1})\big\|^2 \nonumber \\
&\leq 2 \big\|\bar{\g}_{\eta}(\bsw_{i-1})-\mu \grad_w J^{\rm glob}_{\eta}(\w_{c,i-1})\big\|^2+2\|\bar{\g}(\bzeta_{i})-\bar{\g}(\bsw_{i-1})\|^2 \nonumber \\
&\leq 2 \mu^2 N_x \bigg( (\delta+\eta \delta_p)^2 v_1 +2 \mu^2 \eta^2 \delta^2 \delta_p^2 v_2 n \bigg) \|\cw_{i-1} \|^2 \nonumber \\
& \quad +4 \mu^4 \eta^2 \delta^2 \delta_p^2 v_2 N_x n \|\bw_{i-1}\|^2+4 \mu^4 \eta^2 \delta^2 N_x n \|\grad_{\ssw} \cP(\sw^\star)\|^2 } where in the last step we used \eqref{lemma_inequal_1}--\eqref{lemma_inequal_2}. Letting $t=1-\mu \nu$ and substituting the previous two bounds into \eqref{bar_expand_1}: \eq{
&\Ex \|\bw_{i}\mid \cf_{i}\|^2 \leq \left(1-\mu \nu+{4\over \nu} \mu^3 \eta^2 \delta^2 \delta_p^2 v_2 N_x n \right) \|\bw_{i-1}\|^2 \nonumber \\
& \quad +{2 N_x \mu \over \nu} \bigg( v_1(\delta+\eta \delta_p)^2 +2 \mu^2 \eta^2 \delta^2 \delta_p^2 v_2 n \bigg) \|\cw_{i-1} \|^2 \nonumber \\
& \quad +{4 \mu^3 \eta^2 \delta^2 N_x n \over \nu} \|\grad_{\ssw} \cP(\sw^\star)\|^2+ \Ex \| \bv_{i} \mid \cf_{i}\|^2 \label{z-bar} } We repeat a similar argument for the second relation \eqref{expanded-2}. Thus, appealing to Jensen's inequality we have: \eq{
&\Ex \|\cw_{i} \mid \cf_{i}\|^2 \nonumber \\
&\leq \|\check{\sj}\tran \|^2 \big\|\cw_{i-1} +\check{\g}(\bzeta_{i})+\check{\f}(\bsw_{i-1})\big\|^2 +\|\check{\sj}\tran \|^2 \Ex \|\check{\v}_{i} \mid \cf_{i}\|^2 \nonumber \\
&\leq {\|\check{\sj}\tran \|^2 \over t} \|\cw_{i-1}\|^2+{ \|\check{\sj}\tran\|^2 \over 1-t}\big\|\check{\g}(\bzeta_{i})+\check{\f}(\bsw_{i-1})\big\|^2 \nonumber \\
& \quad +\|\check{\sj}\tran \|^2 \Ex \|\check{\v}_{i} \mid \cf_{i}\|^2 \label{checkder0}}
for any $t \in (0,1)$. Now note that $\| \check{\sj}\tran \|^2$ is equal to the spectral radius of $\check{\sj} \check{\sj}^*$ (where $(.)^*$ denotes complex-conjugate transposition), and if we use the property that the spectral radius is upper bounded by any matrix norm, we have \cite[Ch. 9]{9}: \eq{
\scalemath{0.99}{\| \check{\sj}\tran \|^2 \leq \|\check{\sj} \check{\sj}^*\|_1
= \max_{\ell}\|\check{J}_{\ell} \check{J}_{\ell}^*\|_1 =(\lambda(2)+\epsilon)^2 } }
where $\lambda(2)=\max_\ell | \lambda_{\ell}(2)|$, and $\lambda_{\ell}(2)$ is equal to the second largest eigenvalue in magnitude in $\check{J}_{\ell}$, which does not depend on $\epsilon$ and is strictly less than one in magnitude. From this we conclude that $\| \check{\sj}\tran\| < 1$ for any $\epsilon < 1-\lambda(2)$. We select $t=\|\check{\sj}\tran\| \triangleq \rho_\epsilon $, and rewrite \eqref{checkder0} as: \eq{
\Ex \|\cw_{i} \mid \cf_{i}\|^2
&\leq \rho_{\epsilon} \|\cw_{i-1}\|^2+{ 1 \over 1-\rho_{\epsilon}} \big\|\check{\g}(\bzeta_{i})+\check{\f}(\bsw_{i-1}) \big\|^2 \nonumber \\
& \quad +\rho_{\epsilon}^2 \Ex \|\check{\v}_{i} \mid \cf_{i}\|^2 \label{checkder1}} Note that: \eq{
&\big\|\check{\g}(\bzeta_{i})+\check{\f}(\bsw_{i-1})\big\|^2 =\sum_{\ell=1}^L \big\|\check{\g}^\ell(\bzeta_{i})+\check{\f}^\ell(\bsw_{i-1})\big\|^2 \nonumber \\
& \overset{(a)}{=}\sum_{\ell=1}^L \bigg\|(V_{1,\ell}\tran \otimes I_{M_\ell})\cR_\ell^{-1} \big(\mu \grad_{\ssw^\ell} \cJ(\bzeta_i) +\mu \eta \grad_{\ssw^\ell} \cP(\bsw_{i-1})\big)\bigg\|^2 \nonumber \\
&\leq v_4 \big\|\mu \grad \cJ(\bzeta_i) + \mu \eta \grad \cP(\bsw_{i-1})\big\|^2 \nonumber \\
&\leq 2v_4 \mu^2 \big\| \grad \cJ(\bzeta_i)\|^2 + 2v_4 \|\mu \eta \grad \cP(\bsw_{i-1})\big\|^2 \nonumber \\
&\overset{\eqref{pppp_lemma0}}{\leq} 2v_4 \mu^2 \| \grad \cJ(\bzeta_i)\|^2 \nonumber \\
& \quad + 4 v_4 \mu^2 \eta^2 \delta_p^2 \|\tsw_{i-1}\|^2+4v_4 \mu^2 \eta^2 \|\grad_{\ssw} \cP(\sw^\star)\|^2 }
where $v_4 =\max_\ell \|(V_{1,\ell}\tran \otimes I_{M_\ell}) \cR_\ell^{-1}\|^2$ and step (a) holds from \eqref{g-bar-check}--\eqref{f-bar-check}. Note also that: \eq{
&\| \grad \cJ(\bzeta_i)\|^2=\| \grad \cJ(\bzeta_i)-\grad \cJ(\sw^\star)+\grad \cJ(\sw^\star)\|^2 \nonumber \\
& \leq 2\| \grad \cJ(\bzeta_i)-\grad \cJ(\sw^\star)\|^2+2\|\grad \cJ(\sw^\star)\|^2 \nonumber \\
& \overset{\eqref{indv-cost-1}}{\leq} 2\delta^2\| \tzeta_i\|^2+2\|\grad \cJ(\sw^\star)\|^2 \nonumber \\
& \overset{\eqref{zeta-bound-w}}{\leq} 4\delta^2(1+2\mu^2 \eta^2 \delta_p^2 n) \Ex \|\tsw_{i-1}\|^2+8 \delta^2 \mu^2 \eta^2 n \|\grad_{\ssw} \cP(\sw^\star)\|^2 \nonumber \\
& \quad +2\|\grad \cJ(\sw^\star)\|^2} Substituting the last two bounds into \eqref{checkder1} and using \eqref{bound_wbar_wcheck} gives: \eq{
& \Ex \|\cw_{i} \mid \cf_{i}\|^2
\leq \bigg(\rho_{\epsilon}+{ 4 v_2v_4 \mu^2 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+a )\bigg) \|\cw_{i-1}\|^2 \nonumber \\
& \quad +{ 4 v_2v_4 \mu^2 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+a ) \|\bw_{i-1}\|^2 + { 4v_4\mu^2 \over 1-\rho_{\epsilon}}\|\grad \cJ(\sw^\star)\|^2 \nonumber \\
& \quad + { 4 v_4 \mu^2 \eta^2 \over 1-\rho_{\epsilon}}\left(1+4\mu^2 \delta^2 n \right) \|\grad_{\ssw} \cP(\sw^\star)\|^2 +\rho_{\epsilon}^2 \Ex \|\check{\v}_{i} \mid \cf_{i}\|^2 \label{checkder2}} where $a\define 2\delta^2+4\mu^2 \eta^2 \delta^2 \delta_p^2 n $. If we introduce the scalar coefficients: \begin{subequations} \label{gamma_c_expressions} \eq{ \gamma_{11}&=1-\mu \nu+{4\over \nu} \mu^3 \eta^2 \delta^2 \delta_p^2 v_2 N_x n+\bar{\alpha} \\ \gamma_{12}&= {2 N_x \mu \over \nu} \bigg( v_1(\delta+\eta \delta_p)^2 +2 \mu^2 \eta^2 \delta^2 \delta_p^2 v_2 n \bigg) +\bar{\alpha} \\ \gamma_{21}&= { 4 v_2v_4 \mu^2 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+a )+\rho_{\epsilon}^2 \bar{\alpha} \\ \gamma_{22}&= \rho_{\epsilon}+{ 4 v_2v_4 \mu^2 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+a )+\rho_{\epsilon}^2\bar{\alpha} \\
c_1 &= {4 \mu^3 \eta^2 \delta^2 N_x n \over \nu} \|\grad_{\ssw} \cP(\sw^\star)\|^2 +\bar{\sigma}^2 \\
c_2&= { 4v_4 \mu^2 \eta^2 \over 1-\rho_{\epsilon}}\left(1+4\mu^2 \delta^2 n\right) \|\grad_{\ssw} \cP(\sw^\star)\|^2 \nonumber \\
& \quad+{ 4v_4\mu^2 \over 1-\rho_{\epsilon}}\|\grad \cJ(\sw^\star)\|^2 +\bar{\sigma}^2 } \end{subequations} where $\bar{\alpha}$ and $\bar{\sigma}^2$ are defined in \eqref{alpha_sigma}. Then, by taking the expectation of \eqref{z-bar} and \eqref{checkder2} and using these parameters along with the gradient noise bound \eqref{lemma_noise_bound}, we can combine \eqref{z-bar} and \eqref{checkder2} into a single compact inequality as follows:
\eq{ \begin{bmatrix}
\Ex \|\bw_{i}\|^2 \\
\Ex \|\cw_{i}\|^2
\end{bmatrix} \preceq \underbrace{ \begin{bmatrix}
\gamma_{11} & \gamma_{12} \\ \gamma_{21} & \gamma_{22}
\end{bmatrix}}_{\define \Gamma} \begin{bmatrix}
\Ex \|\bw_{i-1}\|^2 \\
\Ex \|\cw_{i-1}\|^2
\end{bmatrix} +\begin{bmatrix} c_1\\ c_2
\end{bmatrix} \label{gamma-recursion} } Note $\Gamma$, $c_1$, and $c_2$ have entries of the following form: \eq{ \Gamma &= \scalemath{1}{ \begin{bmatrix}
1-O(\mu) & O(\mu)+O(\mu \eta^2) \\ O(\mu^2 \eta^2) & \rho_\epsilon +O(\mu^2 \eta^2)
\end{bmatrix} }
\label{gammaorder} \\ \scalemath{1}{ \begin{bmatrix} c_1\\ c_2
\end{bmatrix}} &= \begin{bmatrix} O(\mu^3 \eta^2)+O(\mu^2)\\ O(\mu^2 \eta^2 )
\end{bmatrix} \label{c1c2order} }
\begin{remark}\label{remark-proof}{\rm The property referred to in the earlier Remark \ref{remark-th} would have given $ \|\eta\grad_{\ssw} \cP(\sw^\star)\|^2=O(1)$ so that $c_1=O(\mu^2)$ and $c_2=O(\mu^2)$.}
\qd
\end{remark} We continue with $c_2=O( \mu^2 \eta^2)$. Now we show that we can choose a sufficiently small $\mu$ such that $\Gamma$ is a stable matrix (i.e., the spectral radius is strictly less than one, $\rho(\Gamma) < 1$). Since the value of $\gamma_{11}$ is nonnegative under condition \eqref{step-size(a)} and the values of $\{\gamma_{12},\gamma_{21},\gamma_{22}\}$ are nonnegative, we invoke the property that the spectral radius of a matrix is upper bounded by any of its norms, and use the $1$-norm to conclude that: \eq{ \rho(\Gamma) \leq \max\{&\gamma_{11}+\gamma_{21},\gamma_{12}+\gamma_{22} \} \label{conv_rate} } Therefore, to find sufficient conditions that ensure the stability of $\Gamma$, the step-size $\mu$ is chosen such that: \eq{ &1-\mu \nu+{4\over \nu} \mu^3 \eta^2 \delta^2 \delta_p^2 v_2 N_x n + { 4 v_2v_4 \mu^2 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+a ) \nonumber \\ & +(1+\rho_{\epsilon}^2 ) \mu^2 \alpha v_2 v_3(2+4\mu^2 \eta^2 \delta_p^2 n) < 1 \label{sumgamma1} } and \eq{ & \rho_{\epsilon}+{ 4 v_2v_4 \mu^2 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+a ) \nonumber \\ & + {2 N_x \mu \over \nu} \bigg( v_1(\delta+\eta \delta_p)^2 +2 \mu^2 \eta^2 \delta^2 \delta_p^2 v_2 n \bigg) \nonumber \\ & +(1+\rho_{\epsilon}^2 ) \mu^2 \alpha v_2 v_3(2+4\mu^2 \eta^2 \delta_p^2 n) < 1 \label{sumgamma2} }
We first find conditions such that \eqref{sumgamma1} holds. Note that under condition \eqref{step-size(a)} it holds that:
\eq{
\mu \eta \delta_p < {1 \over N} \label{stricter}
}
Using this and subtracting one from both sides of \eqref{sumgamma1} we get the stricter inequality: \eq{ &-\mu \nu+{4\over N \nu} \mu^2 \eta \delta^2 \delta_p v_2 N_x n +{ 4 v_2v_4 \mu^2 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+2\delta^2 a' ) \nonumber \\ & +2(1+\rho_{\epsilon}^2 )\mu^2 \alpha v_2 v_3 a' < 0 \label{sumgamma1-(b)} } where $a' \define 1+{2n \over N^2} $ and we used $a=2\delta^2+4\mu^2 \eta^2 \delta^2 \delta_p^2 n < 2\delta^2 a' $ due to \eqref{stricter}. Therefore, a sufficient condition to satisfy \eqref{sumgamma1} is to choose $\mu$ such that \eqref{sumgamma1-(b)} is satisfied, which gives: \eq{\scalemath{1}{ \mu < \frac{\nu}{ \tau_1+{4\over N \nu} \eta \delta^2 \delta_p v_2 N_x n } }\label{step-size(c)}} where \eq{ \tau_1 &\define { 4 v_2v_4 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+2\delta^2 a' )+2(1+\rho_{\epsilon}^2 ) \alpha v_2 v_3 a' \label{tau1} } We now find conditions such that \eqref{sumgamma2} holds. Similarly, equation \eqref{sumgamma2} can be replaced by the stricter inequality: \eq{ & { 4 v_2v_4 \mu^2 \over 1-\rho_{\epsilon}}(\eta^2\delta_p^2+2 \delta^2 a' ) + {2 \mu N_x \over \nu} \left( v_1 (\delta+\eta \delta_p)^2 +{2 \delta^2 v_2 n \over N^2} \right) \nonumber \\ & +2(1+\rho_{\epsilon}^2 )\mu^2 \alpha v_2 v_3 a'< 1-\rho_{\epsilon} \label{sumgamma2-(b)} } To simplify the expression, we let \eq{ \tau_2 \define {2 N_x \over \nu} \left( v_1 (\delta+\eta \delta_p)^2 +{2 \delta^2 v_2 n \over N^2} \right) \label{tau2} } and by using \eqref{tau1}, we can rewrite the previous inequality as: \eq{ & \tau_2 \mu + \tau_1 \mu^2 < (1-\rho_\epsilon) } or, equivalently, by: \eq{ - \big(\tau_2 \mu - \tau_1 \mu^2\big)
+ 2\tau_2 \mu <(1-\rho_\epsilon) \label{232} } Now consider a generic inequality of the form $-(a\mu-b\mu^2)+2a\mu \leq c$. Then, we can guarantee this inequality by selecting $\mu$ to satisfy $(a\mu-b\mu^2)>0 \iff \mu < a/b$, and by also selecting $\mu$ to satisfy $2a \mu \leq c \iff \mu < c/(2a)$. Applying this conclusion to \eqref{232} we find that a sufficient condition for it to hold is to select \eq{ \mu < \min \bigg\{\frac{\tau_2}{\tau_1 },\frac{(1-\rho_\epsilon)}{2 \tau_2} \bigg\} \label{step-size(d)} }
Therefore, we combine conditions \eqref{step-size(a)}, \eqref{step-size(c)}, and \eqref{step-size(d)} into the following sufficient condition: \eq{ \mu < \scalemath{0.95}{ \min \bigg\{{1 \over \nu+N(\delta+\eta \delta_p) },\frac{\nu}{ \tau_1+{4\over N \nu} \eta \delta^2 \delta_p v_2 N_x n },\frac{\tau_2}{\tau_1 },\frac{(1-\rho_\epsilon)}{2 \tau_2} \bigg\} } \label{step-size(all)} } Therefore, under \eqref{step-size(all)} and for sufficiently small step-sizes, it holds that (where in the denominator we are only considering the $O(\mu)$ terms): \eq{ \limsup\limits_{i\rightarrow \infty}
\begin{bmatrix}
\Ex \|\bw_{i}\|^2 \\
\Ex \|\cw_{i}\|^2
\end{bmatrix} &\preceq (I-\Gamma)^{-1} \begin{bmatrix} c_1\\ c_2
\end{bmatrix} \nonumber \\
&= \frac{ \begin{bmatrix}
1-\gamma_{22} & \gamma_{12} \\ \gamma_{21} & 1-\gamma_{11}
\end{bmatrix}}{(1-\gamma_{11})(1-\gamma_{22})-\gamma_{12}\gamma_{21}} \begin{bmatrix} c_1\\ c_2
\end{bmatrix}
\nonumber \\
&= \begin{bmatrix} O(\mu)+O(\mu^2 \eta^4 )\\ O(\mu^2)+O(\mu^2 \eta^2)
\end{bmatrix} } for sufficiently small step-sizes. From which we conclude that \eq{ &\limsup\limits_{i\rightarrow \infty}
\Ex \|\tsw_i\|^2 = \limsup\limits_{i\rightarrow \infty}
\Ex \left\| (\cV^{-1})\tran \mathcal{T}\tran \begin{bmatrix}
\bw_{i} \\ \cw_{i}
\end{bmatrix} \right\|^2 \nonumber \\
&\leq \big\|(\cV^{-1})\tran \big\|^2 \limsup\limits_{i\rightarrow \infty}
\bigg[\Ex \|\bw_{i}\|^2 +\Ex \|\cw_{i}\|^2\bigg] \nonumber \\ &=O(\mu)+O(\mu^2 \eta^4 ) } for sufficiently small step-sizes.
\end{appendices}
\end{document} | arXiv | {
"id": "1712.08817.tex",
"language_detection_score": 0.6473122835159302,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\date{}
\title{The Tur\'an density of tight cycles in three-uniform hypergraphs}
\begin{abstract}
\setlength{\parskip}{0pt}
\setlength{\parindent}{0pt}
\noindent
The \emph{Tur\'an density} of an $r$-uniform hypergraph $\HH$, denoted $\pi(\HH)$, is the limit of the maximum density of an $n$-vertex $r$-uniform hypergraph not containing a copy of $\HH$, as $n \to \infty$.
Denote by $\C_{\ell}$ the $3$-uniform tight cycle on $\ell$ vertices. Mubayi and R\"odl conjectured that the Tur\'an density of $\C_5$ is $2\sqrt{3} - 3 \approx 0.464$, and gave an ``iterated blow-up'' construction showing that this is tight. Their construction also does not contain $\C_{\ell}$ for larger $\ell$ not divisible by $3$, which suggests that it might be the extremal construction for these hypergraphs as well.
Here, we determine the Tur\'an density of $\C_{\ell}$ for all large $\ell$ not divisible by $3$, showing that indeed $\pi(\C_{\ell}) = 2\sqrt{3} - 3$.
To our knowledge, this is the first example of a Tur\'an density being determined where the extremal construction is an iterated blow-up construction.
A key component in our proof, which may be of independent interest, is a $3$-uniform analogue of the statement ``a graph is bipartite if and only if it does not contain an odd cycle''.
\end{abstract}
\section{Introduction}
For an $r$-uniform hypergraph $\mathcal H$, the \emph{Tur\'an number} of $\mathcal H$, denoted $\ex(n, \mathcal H)$, is defined as the maximum number of edges an $n$-vertex $r$-uniform hypergraph can have without containing a copy of $\mathcal H$ as a subgraph. For ($2$-uniform) graphs, we have a fairly good understanding of Tur\'an numbers.
The first theorem proved about them is Mantel's theorem \cite{mantel1907problem}, which says that, for the triangle, we have $\ex(n, K_3)= \floor{n^2/4}$. This was generalised by Tur\'an \cite{turan1941extremal} who showed that $\ex(n,K_r) \approx (1-\frac{1}{r-1})\binom{n}{2}$.
For non-complete graphs we know less, and usually only know what the Tur\'an number of a graph is asymptotically, up to $o(n^2)$ terms.
Because of this, we study the \emph{Tur\'an density} of an $r$-uniform hypergraph $\mathcal H$, denoted $\pi(\HH)$, and defined as $\pi(\mathcal H)=\lim_{n\to \infty} \frac{\ex(n,\mathcal H)}{\binom n r}$. This limit is known to exist, and, moreover, it is clear that $\pi(\mathcal H)\in [0,1]$ for every $\mathcal H$.
The Tur\'an densities of ($2$-uniform) graphs were completely determined by Erd\H{o}s and Stone~\cite{erdos1946structure}, who showed that every graph $H$ satisfies $\pi(H)=1-\frac{1}{\chi(H)-1}$.
The special case of the Erd\H{o}s--Stone theorem for bipartite graphs can be generalised to higher uniformities, as follows (see \cite{erdos1971extremal}): every $r$-partite $r$-uniform hypergraph $\HH$ satisfies $\pi(\HH) = 0$. (An $r$-uniform hypergraph $\HH$ is said to be \emph{$r$-partite} if its vertices can be $r$-coloured so that every edge has one vertex of each colour.)
Nevertheless, in general, our understanding of Tur\'an numbers in higher uniformities is very limited, and there is only a small number of hypergraphs whose Tur\'an densities are known; see Keevash \cite{keevash11} for a comprehensive survey of the topic listing a number of such hypergraphs.
A notable, relatively early example is a result of de Caen and F\"uredi \cite{de2000maximum} showing that the Tur\'an density of the Fano plane is $3/4$ (see also \cite{furedi2005triple, keevash2005turan}). More recently, the impactful computer-assisted ``flag-algebra'' technique has been used to obtain a number of sharpest known upper bounds on Tur\'an densities (see \cite{baber2011new, keevash11, razborov2013flag} and the references therein).
Given the sporadicity of hypergraphs whose Tur\'an densities are known, it is unsurprising that there are many conjectures about Tur\'an densities of specific hypergraphs. The most famous of these is Tur\'an's conjecture \cite{turan1961research}, that the Tur\'an density of the \textit{tetrahedron} $K_4^{(3)}$ is $5/9$. Frankl and F\"uredi \cite{ff84} conjectured that the Tur\'an density of the 3-edge subgraph of $K_4^{(3)}$ (usually denoted $K_4^-$) is $2/7$.
A particularly relevant conjecture for us concerns tight cycles. The \emph{$r$-uniform tight cycle} of length $\ell$, denoted $\mathcal C_{\ell}^r$, is defined to be the hypergraph with vertex set $\{1, \dots, \ell\}$ and hyperedges all sets of the form $\{x, x+1,\dots, x+r-1 \!\pmod {\ell}\}$. The following conjecture, usually attributed to Mubayi and R\"odl, appears for instance in \cite{falgas2012turan,mubayi2011hypergraph}.
\begin{conjecture}
\label{Conjecture_Mubayi_Rodl}
$\pi(\mathcal C_5^3)=2\sqrt 3-3$.
\end{conjecture}
The lower bound $\pi(\mathcal C_5^3) \geq 2\sqrt 3-3 \approx 0.464$ was found by Mubayi and R\"odl (see \Cref{ex:iterated-blow-up} below for a description of their example) and the best upper bound is due to Razborov~\cite{razborov20103}, who showed $\pi(\mathcal C_5^3)\leq 0.468$.
One basic reason why hypergraphs are more difficult than graphs is that the extremal $\mathcal H$-free hypergraphs can be much more complicated than the extremal graphs. In the 2-uniform case, the Erd\H{o}s--Stone theorem shows that all optimal graphs are close to being complete multipartite. For higher-uniformity hypergraphs there have been numerous papers discovering more complicated possible extremal hypergraphs, for instance~\cite{brown83,ff84,blm11}, as well as Conjectures~\ref{Conjecture_Mubayi_Rodl} and~\ref{conj:c5-minus}. For some hypergraphs, such as $K_4^{(3)}$, the conjectured extremal constructions are even non-unique and very different from each other~\cite{brown83,kostochka84,flaass88,razborov11}.
One class of extremal examples, which does not occur for graphs, is an ``iterated blow-up construction''. The conjectured extremal example for \Cref{Conjecture_Mubayi_Rodl} is an instance of such a construction.
\begin{example}[Iterated blow-up construction with no copies of $\C_{5}^3$] \label{ex:iterated-blow-up}
Consider nested vertex sets $V_1\supseteq \ldots \supseteq V_t$ with $|V_i|=x_i$. Let $\mathcal H(x_1, \ldots, x_t)$ be a 3-uniform hypergraph on the vertex set $V_1$, where $xyz$ is an edge whenever $x,y\in V_i$ and $z\in V_{i+1}$ for some $i$ (see Figure~\ref{Figure_extremal_hypergraph}).
We claim that there is no copy of $\C_5^3$. To see this, say that an edge with two vertices in $V_i \setminus V_{i+1}$ and one vertex in $V_{i+1}$ has \emph{type $i$}, and observe that if two edges $e$ and $f$ intersect in two vertices, they are of the same type. Thus, if $C = (u_1 \ldots u_5)$ is a cycle, then its edges all have the same type, say $i$. Without loss of generality, $u_1 \in V_i \setminus V_{i+1}$ and $u_2, u_3 \in V_{i+1}$. It follows that $u_4 \in V_i \setminus V_{i+1}$, and thus $u_5 \in V_{i+1}$. But then $u_4 u_5 u_1$ is not an edge of $\HH(x_1, \ldots, x_t)$, a contradiction.
Thus $\pi(\mathcal C_5^3)\geq e(\mathcal H(x_1, \ldots, x_t))/\binom n3$ for all choices of $x_1, \ldots, x_t$. Let $f(n)$ denote the maximum number of edges that such a hypergraph on $n$ vertices can have, i.e., $f(n):=\max\{e(\mathcal H(x_1, \ldots, x_t)):x_t\leq\dots\leq x_1=n\}$.
It is possible to show $\lim_{n\to \infty} f(n)/\binom n3=2\sqrt 3-3$ (see Section~\ref{sec:optimal} for details), which gives $\pi(\mathcal C_5^3)\geq 2\sqrt 3-3$.
Let $\G_n = \HH(x_1, \ldots, x_t)$ for a choice of $n = x_1 \ge \ldots \ge x_t$ such that $e(\G_n) = f(n)$.
\end{example}
\begin{figure}
\caption{An illustration of the hypergraph $\mathcal H(x_1, x_2, x_3, x_4)$. }
\label{Figure_extremal_hypergraph}
\end{figure}
Note that in the above construction, $\mathcal G_n$ has no tight cycles of lengths $\ell\equiv 1$ or $2 \!\pmod 3$ either. So it is plausible that Conjecture~\ref{Conjecture_Mubayi_Rodl} could be strengthened to say that $\pi(\mathcal C_\ell^3)=2\sqrt 3-3$ for all $\ell\geq 5$ with $\ell\equiv 1$ or $2 \pmod 3$ (notice that $\C_4^3 = K_4^{(3)}$, and there are known examples of $K_4^{(3)}$-free $3$-uniform graphs with density at least $5/9 > 2\sqrt{3} - 3$). The main result of our paper is to show that this is true for sufficiently large $\ell$.
\begin{restatable}{theorem}{thmSingleCycle}\label{thm:single-cycle}
Let $\ell$ be sufficiently large with $\ell\equiv 1$ or $2 \!\pmod 3$. Then $\pi(\mathcal C_\ell^3)=2\sqrt 3-3$.
\end{restatable}
To our knowledge, this is the first example of a Tur\'an density being determined where the extremal construction is an iterated blow-up construction, and could be a step towards Conjecture~\ref{Conjecture_Mubayi_Rodl}.
This is also one of the few examples of hypergraphs with irrational Tur\'an densities. Such hypergraphs were recently found by Yan and Peng~\cite{yp22}, as well as Wu~\cite{wu22}, motivated by the work of Chung and Graham~\cite{cg98}, Baber and Talbot~\cite{baber2011new}, and Pikhurko~\cite{pikhurko2014possible}.
We remark that Conjecture~\ref{Conjecture_Mubayi_Rodl} would imply Theorem~\ref{thm:single-cycle} via \Cref{theorem_blow_up} below (using the same argument as in the proof of Theorem~\ref{thm:single-cycle} in Section~\ref{sec:diameter}).
One of our main tools, which may be of independent interest, is a 3-uniform analogue of the statement ``a graph is bipartite if and only if it does not contain an odd cycle''; see \Cref{thm:good-colouring}. Thus, we characterise 3-uniform hypergraphs $\HH$ which do not contain \textit{homomorphic images} of cycles $\C^{3}_\ell$ with $3 \nmid \ell$, in terms of certain colourings of $V(\HH)^2$, as explained in the proof overview.
Throughout the paper we will informally refer to 3-uniform cycles of length $\ell\equiv 1$ or $2 \pmod 3$ as ``odd cycles'', and we will often refer to $3$-uniform hypergraphs as 3-graphs.
\subsection*{Related results}
As we mentioned, there are very few hypergraphs with a known Tur\'an density, but let us state some recent results on Tur\'an-type problems for tight cycles.
A well-studied hypergraph parameter is the so-called \emph{uniform Tur\'an density}, the infimum over all $d$ for which any
sufficiently large hypergraph with the property that all its linear-size subhypergraphs have density at
least $d$ contains $\HH$. This line of research was initiated by Erd\H{o}s and S\'os
\cite{erdHos1982ramsey} and, parallel to the classical Tur\'an densities, the motivating questions in the area are determining the uniform Tur\'an densities of the tetrahedron $K_4^{(3)}$ and its 3-edge subgraph $K_4^-$. The latter was found to be $1/4$ by Glebov, Kr\'a\v{l}, and Volec~\cite{gkv16} and later by Reiher, R\"odl, and Schacht~\cite{rrs18} with a different proof. In 2022, Buci\'c, Cooper, Kr{\'a}\v{l}, Mohr, and Munha Correia showed that for $\ell \geq 5$ and not divisible by 3, the uniform Tur\'an density of $\C_{\ell}^3$ is $\frac{4}{27}$~\cite{bckmm21}.
Another question that has attracted a lot of interest in the last few years is this: what is the extremal number of tight cycles (the maximum number of edges in an $n$-vertex $r$-uniform hypergraph containing no tight cycles)? For $r=2$, the answer is of course $n-1$, but it turns out that the behaviour is rather different for $r\geq 3$. More specifically, after a series of results~\cite{hm19,janzer21,st22,letzter2021hypergraphs}, we know that the extremal number of tight $r$-uniform cycles lies between $\Omega\left(n^{r-1} \log n / \log \log n\right)$ and $O\left(n^{r-1} \log^5 n \right)$.
\section{Proof overview} \label{sec:overview}
For an $r$-uniform hypergraph $\HH$, the \emph{$t$-blow-up} of $\HH$, denoted $\HH[t]$, is defined to be the $r$-uniform hypergraph with vertex set $V(\HH)\times [t]$ and edges all $r$-tuples $\{(x_1, i_1), \dots, (x_r, i_r)\}$ with $\{x_1, \dots, x_r\}\in E(\HH)$.
The starting point of our proof is the following theorem, which asserts that the blow-up of a hypergraph $\HH$ has the same Tur\'an density as $\HH$.
\begin{theorem}[\cite{keevash11}, Theorem 2.2] \label{theorem_blow_up}
Let $t$ be an integer and let $\HH$ be an $r$-uniform hypergraph. Then $\pi(\HH[t]) = \pi(\HH)$.
\end{theorem}
It shows that, rather than focusing on the Tur\'an density $\pi(\mathcal C_k^3)$ for an odd cycle $\mathcal C_k^3$, we can instead work out the Tur\'an density $\pi(\HH)$ for any hypergraph $\HH$ whose blow-up $\HH[t]$ contains $\mathcal C_k^3$ for some $t$. We refer to such hypergraphs $\HH$ as pseudocycles, and they can be equivalently defined as follows.
\begin{definition} \label{def:pseudocycle}
A \emph{pseudocycle} of length $\ell$ in a 3-uniform hypergraph $\HH$ is a sequence of (not necessarily distinct) vertices $v_1, \ldots, v_{\ell}$, such that for each $i \in [\ell]$, we have that $\{v_i, v_{i+1\!\pmod{\ell}}, v_{i+2\!\pmod{\ell}}\}$ is an edge of $\HH$.
A \emph{pseudopath} of order $\ell$ is defined analogously.
\end{definition}
It is easy to show that for a hypergraph $\HH$, the properties ``$\HH[t]$ contains a $\mathcal C_k^3$ for some $t$'' and ``$\HH$ contains a length $k$ pseudocycle'' are equivalent.
Thus, the next question is --- what is the maximum number of edges that a 3-uniform hypergraph can have without containing an odd pseudocycle? To understand our approach to this, consider the analogous question about graphs --- what is the maximum number of edges in a (2-uniform) graph with no odd circuits? This is easy to answer using K\H{o}nig's characterisation of bipartite graphs --- a graph has no odd circuit if, and only if, it is bipartite. The maximum number of edges in an $n$-vertex bipartite graph is $\floor{\frac{n^2}{4}}$.
Our approach to the 3-uniform case is analogous to this. We first find the relevant generalisation of bipartite graphs, and then maximise the number of edges over this class of graphs. To define this generalisation, recall
that a graph is bipartite if, and only if, it has a proper 2-vertex-colouring. In our context, we will be colouring the shadow of a 3-uniform hypergraph. The \emph{shadow} of a hypergraph $\HH$, denoted $\partial \HH$, is the graph on vertices $V(\HH)$ whose edges are pairs $xy$ that are contained in an edge in $\HH$.
\begin{definition} \label{def:good-col}
A \emph{good colouring} of a 3-uniform hypergraph $\HH$ is a colouring of its shadow, such that each edge $xy$ in the shadow is either coloured blue or coloured red and given an orientation, and every edge $e$ in $\HH$ can be written as $xyz$ where $xy$ and $xz$ are red and directed from $x$ and $yz$ is blue.
\end{definition}
The key first step of our proof is to show that the notion of ``good colouring'' is exactly equivalent to $\HH$ not containing an odd pseudocycle.
\begin{restatable}{theorem}{thmGoodColouring}\label{thm:good-colouring}
A 3-uniform hypergraph $\HH$ has a good colouring if, and only if, $\HH$ has no pseudocycle of length $\ell$ with $3 \nmid \ell$.
\end{restatable}
\def \mcherry {m_{\mathrm{cherry}}}
\def \mgood {m_{\mathrm{good}}}
This theorem is proved in Section~\ref{sec:good-col}.
Having established the above theorem, we next wish to maximise the number of edges in a hypergraph with a good colouring. To this end, we define a \emph{coloured graph} to be a complete graph whose edges are either coloured blue or coloured red and oriented.
A \emph{cherry} in a coloured graph $G$ is a triple $xyz$ such that $xy$ and $xz$ are red and directed from $x$ and $yz$ is blue. Denote by $c(G)$ the number of cherries in $G$.
Notice that if we have a good colouring of the shadow of $\HH$ (and the remaining vertex pairs can be coloured arbitrarily), then all edges of $\HH$ will be cherries in the resulting coloured graph. Thus, let $\mcherry(n)$ be the maximum number of cherries in an $n$-vertex coloured graph. The quantity $\mcherry(n)$ has been studied before by Falgas-Ravry and Vaughan \cite{falgas2012turan}, who used flag algebras to show that $\lim_{n\to \infty} \mcherry(n)/\binom n3=2\sqrt 3-3$. Huang \cite{huang2014maximum} worked on the area further and determined the maximum number of ``induced out-stars'' of size $t$ in an $n$-vertex coloured graph. The following special case of their results is relevant for us.
\begin{theorem}[Falgas-Ravry--Vaughan~\cite{falgas2012turan}; Huang~\cite{huang2014maximum}]
\label{theorem_intro_falgas_ravry}
Every coloured graph on $n$ vertices contains at most $f(n)$ cherries.
\end{theorem}
Combining Theorem~\ref{thm:good-colouring} and \Cref{theorem_intro_falgas_ravry} already yields the following weakening of Theorem~\ref{thm:single-cycle}.
\begin{restatable}{corollary}{corLongPseudocylces} \label{corollary_intro_long_pseudocycles}
If $\HH$ is a $3$-uniform hypergraph on $n$ vertices which contains no pseudocycles of order $\ell$ with $3 \nmid \ell$, then $e(\HH) \le f(n)$.
\end{restatable}
Thus, the next goal is to prove a version of the above corollary which holds when forbidding \emph{short pseudocycles}. This is done by controlling the diameter of the hypergraph $\HH$.
\begin{definition}
The \emph{diameter} of a hypergraph $\HH$ is the minimum $\ell$ such that the following holds: for every $x, y, z, w \in V(\HH)$ (where $x, y$ are distinct and $z, w$ are distinct) whenever there is a pseudopath from $xy$ to $zw$, there is such a pseudopath of order at most $\ell$.
\end{definition}
In Section~\ref{sec:diameter}, we show that, for $\ell\gg \epsilon^{-1}$, every 3-uniform hypergraph $\HH$ contains a subhypergraph $\HH'$ with $e(\HH')\geq e(\HH)-\epsilon n^3$ such that $\HH'$ has diameter at most $\ell$ (see Proposition~\ref{prop:small-diameter}). Then we show that in every 3-uniform hypergraph of diameter $\ell$, if there is some odd pseudocycle, then there is also an odd pseudocycle of length at most $4\ell$ (see Proposition~\ref{prop:diam-cyc}). Combining these with Corollary~\ref{corollary_intro_long_pseudocycles} shows the following
\begin{restatable}{corollary}{corWeak} \label{cor:weak-main}
Let $1/n \ll 1/\ell \ll \epsilon \ll 1$, and let $\HH$ be an $n$-vertex hypergraph with no odd pseudocycles of length at most $\ell$. Then $e(\HH) \le f(n) + \epsilon n^3$.
\end{restatable}
Note that this is still not strong enough to combine with Theorem~\ref{theorem_blow_up} to yield Theorem~\ref{thm:single-cycle}. The issue is that the length of the cycle $\ell$ depends on $\epsilon$ --- therefore, when combined with Theorem~\ref{theorem_blow_up}, we would only get that $\lim_{m\to \infty}\pi(\mathcal C_m^3)=2\sqrt 3-3$. To go further, we prove a ``stability version'' of Theorem~\ref{theorem_intro_falgas_ravry}. We show that if a coloured graph $D$ on $n$ vertices contains more than $f(n)-\epsilon n^3$ cherries, then $D$ must have a very constrained structure similar to the iterated blow-up construction (see Theorem~\ref{thm:stability} for the precise statement). Once we have this, we can obtain the following strengthening of Corollary~\ref{cor:weak-main}
\begin{restatable}{theorem}{thmPseudocycle} \label{thm:pseudocycles}
There exists $L > 0$ such that the following holds. If $\HH$ is a $3$-uniform hypergraph on $n$ vertices which contains no pseudocycles of length $\ell$, with $\ell \leq L$ and $3 \nmid \ell$, then $e(\HH) \le f(n) + O(1)$.
\end{restatable}
This theorem easily combines with Theorem~\ref{theorem_blow_up} in order to give our main result, Theorem~\ref{thm:single-cycle} (see \Cref{sec:diameter}).
\section{Finding a good colouring} \label{sec:good-col}
Recall that a \emph{pseudocycle} of order $m$ (or \emph{$m$-pseudocycle}) is a sequence $v_1 \ldots v_m$ such that $v_i v_{i+1} v_{i+2}$ is an edge for $i \in [m]$ (indices taken mod $m$). A \emph{pseudopath} of order $m$ is defined analogously.
A hypergraph is called \textit{tightly connected} if there is a pseudopath between any two edges. Given vertices $x, y, z, w$ (not necessarily distinct), a pseudopath from $xy$ to $zw$ (where $xy$ and $zw$ are ordered pairs) is a pseudopath whose first two vertices are $x$ and $y$ (in this order) and the last two vertices are $z$ and $w$.
The \emph{shadow} of a hypergraph $\HH$, denoted $\partial \HH$, is the graph on vertices $V(\HH)$ whose edges are pairs $xy$ that are contained in an edge in $\HH$.
Recall that a \emph{good colouring} of a hypergraph $\HH$ is a colouring of its shadow, such that each edge $xy$ in the shadow is either coloured blue or coloured red and given an orientation, and every edge $e$ in $\HH$ can be written as $xyz$ where $xy$ and $xz$ are red and directed from $x$ and $yz$ is blue. Such an edge is called a \emph{cherry}, and the vertex $x$ is called its \emph{apex}.
\begin{figure}
\caption{A cherry $xyz$ with apex $x$}
\end{figure}
In this section, we will prove \Cref{thm:good-colouring}, restated here.
\thmGoodColouring*
It is easy to see that a hypergraph with a good colouring has no pseudocycles of length $\ell$ with $3 \nmid \ell$, so the main effort will be put into proving the ``if'' direction.
Namely, we need to show that every hypergraph with no odd pseudocycles has a good colouring. Before specifying such a colouring, let us give some intuition. Any proper path (that is, with no repetitions) $v_1 \ldots v_k$ has a good colouring, and this colouring is unique given the colour of $v_1 v_2$ (see \Cref{fig:path} for the three good colourings of a path of order 9, and notice that each such colouring colours each edge in the shadow differently).
\begin{figure}
\caption{The three good colourings of a path of order $9$}
\label{fig:path}
\end{figure}
A proper cycle has a good colouring if and only if it is tripartite (i.e.~the number of vertices is divisible by 3). See \Cref{fig:cycle} for a good colouring of a cycle of length 18 and notice that every third vertex is an apex.
\begin{figure}
\caption{A good colouring of a cycle whose length is divisible by $3$}
\label{fig:cycle}
\end{figure}
Moreover, if there is a path $P = xy \ldots yx$, then the order of $P$ uniquely determines the colour of $xy$. This fact will be used to construct a good colouring of our hypergraph $\HH$ -- we will start from a specific pair $xy$ and extend the colouring uniquely along pseudopaths. The difficulty is to show that this colouring is well defined, so the actual colouring definition will involve some more formalism.
For a pseudopath $P = v_1 \ldots v_k$, define $\tilde{P}$ by
\begin{equation} \label{eqn:tilde}
\tP := v_{k-1} v_k v_{k-2} v_{k-1} v_{k-3} \dots v_4 v_2 v_3 v_1 v_2;
\end{equation}
note that $\tP$ is a pseudopath from $v_{k-1}v_k$ to $v_1 v_2$ of order $2k-2$ (because every vertex but $v_1$ and $v_k$ appears twice).
\begin{proof}[Proof of \Cref{thm:good-colouring}]
Whenever we talk about a path or cycle in this proof, we mean a pseudopath or pseudocycle.
As we said above, it is easy to show that a hypergraph with a good colouring has no odd cycles, so it suffices to show that if $\HH$ has no odd cycles then $\HH$ has a good colouring.
Note that we may assume that $\HH$ is tightly connected.
Let $P_0$ be a shortest path with the property that its first two vertices are the same as the last two but in reversed order, if such a path exists. Write $P_0 := v_0 \ldots v_k$ and denote $x := v_0 = v_k$ and $y := v_1 = v_{k-1}$.
Define $\sigma$ as follows.
\begin{equation} \label{eq:sigma-def}
\sigma \modthree{2k}.
\end{equation}
Intuitively, $\sigma$ is defined so that if $P_0$ has a good colouring, then the apexes in this colouring are at index $\sigma \pmod{3}$.
If $P_0$ does not exist, define $\sigma = 2$.
Let $\{z,w\}$ be an edge in the shadow of $\HH$, and let $P = xy v_2 v_3 \ldots v_k$ be a path from $xy$ whose last three vertices contain $z$ and $w$. Let $i_w \in \{k-2,k-1, k\}$ be the index of $w$ (namely, $v_{i_w} = w$), and define $i_z$ analogously. Define the index
$$\eta(P, \{z,w\}) = \begin{cases}
w, & i_w \modthree{\sigma}, \\
z, &i_z \modthree{\sigma},\\
*, &\text{otherwise}.
\end{cases}$$
In particular, this defines $\eta(xy, \{x, y\})$.
We claim that $\eta(P, \{z,w\})$ is independent of the choice of the path $P$.
\begin{claim} \label{claim:consistent}
Let $z, w \in V(\HH)$ be distinct.
Let $P = v_0 \ldots v_p$ and $Q = u_0 \ldots u_q$ be two paths
starting at $xy$ such that $z$ and $w$ are among their last three vertices. Then
$$\eta(P, \{z, w\}) = \eta(Q, \{z, w\}).$$
\end{claim}
\begin{proof}
Let $i_z \in \{p-2, p-1, p\}$ such that $v_{i_z} = z$, and define $j_z$ similarly with respect to $Q$.
It suffices to prove that $i_z \modthree{\sigma}$ if and only if $j_z \modthree{\sigma}$. For, then the same equivalence holds for $w$. But this implies that $\eta(P, \{z, w\}) = *$ if and only if $ \eta(Q, \{z, w\})= *$.
First, we modify $P$ so as to assume that $P$ ends with $zw$ or $wz$. If this is not the case, then up to swapping $z$ and $w$ we have that $P$ ends with either $zw*$ or $z*w$. In the former case remove the last vertex of $P$, and in the latter case append $z$ to $P$. It is easy to see that the statement of the claim holds for the original $P$ if and only if it holds for the modified path. Similarly, we may assume that $Q$ ends with $zw$ or $wz$.
Assume first that $P$ and $Q$ both end with $zw$. Then $\tilde{Q}$ (defined as in \eqref{eqn:tilde}) is a path from $zw$ to $xy$ of order $2(q+1)-2 = 2q$. Hence $v_2 v_3 \ldots v_{p-2} \tilde{Q}$ is a cycle, and by assumption its order is divisible by 3. That is, $p-3+2q \modthree{0}$, and thus $p \modthree{q}$. Since $i_z = p-1$ and $j_z = q-1$, this proves \Cref{claim:consistent}. The same argument holds when $P$ and $Q$ both end with $wz$.
Secondly, assume that $P$ is a path from $xy$ to $zw$ and $Q$ is a path from $xy$ to $wz$. Note that this case only arises if $P_0$ is defined, as $v_0 v_1 \ldots v_{p-2} zw u_{q-2}\dots u_0$ is a path from $xy$ to $yx$. Then consider the cycle $v_2 \ldots v_{p-2} zw u_{q-2} \ldots u_2 \tilde{P_0}$. This is indeed a cycle because $u_1 u_0 = yx$, $v_0 v_1 = xy$ and $\tilde{P_0}$ is a path from $yx$ to $xy$. The order of this cycle is $p-3+2+q-3+2k \modthree{p+q+2+\sigma}$, using~\eqref{eq:sigma-def}. Now substitute $p = i_z+1$ and $q = j_z$. We have $i_z + j_z +\sigma \modthree{0}$, so $i_z \modthree{\sigma}$ if and only if $j_z \modthree{\sigma}$.
\end{proof}
Note that for every edge $\{z,w\}$ in the shadow of $\HH$ there is a path $P$ starting at $xy$ whose last three vertices contain $z$ and $w$. Indeed, as $\HH$ is tightly connected, there is a path $Q$ such that $x$ and $y$ are among its first three vertices and $z$ and $w$ among its last three vertices. Using a modification as in the proof of \Cref{claim:consistent} we may assume that $Q$ starts with $xy$ or $yx$. If it starts with $xy$ we are done, and otherwise the reverse of the path $\tQ$ satisfies the requirements.
Given an edge $\{z,w\}$ in the shadow of $\HH$, define $\eta(zw) = \eta(P, \{z,w\})$, where $P$ is any path from $xy$ whose last three vertices contain $z$ and $w$ (which exists by the previous paragraph). This parameter is well defined by \Cref{claim:consistent}. Now define $\chi$ as follows: let $zw$ be blue if $\eta(zw) = *$, and let it be red and oriented away from $\eta(zw)$ otherwise.
Finally, we show that $\chi$ is a good colouring. To see this, consider an edge $uvw$ of $\HH$.
Let $P$ be a path from $xy$ whose last three vertices are $u, v, w$ (in some order); such a path exists by the paragraph above. Write $P:=xy v_2 v_3 \ldots v_{p-2}v_{p-1}v_p$, let $i \in \{p-2, p-1, p \}$ with $i \modthree{\sigma}$, and we may assume that $v_i =u$. Then $\eta(uv)=\eta(uw) =u$ and $\eta(vw) = *$, which implies that $uvw$ is a cherry with apex $u$.
\end{proof}
\begin{remark}
Our proof actually shows that if $\HH$ does not contain a path $P_0$ starting and ending with $xy$ and $yx$ respectively, then the hypergraph is tripartite.
Notice that a good colouring of $\HH$ can be extended from the shadow of $\HH$ to $K_n$ with no restrictions. Thus, in what follows it will suffice to analyse colourings of complete graphs by blue edges and red oriented edges (we will call such graphs \emph{coloured graphs}).
\end{remark}
\section{Maximising the number of cherries} \label{sec:optimal}
The results of the previous section establish a connection between maximising the number of edges in an odd-pseudocycle-free hypergraph and maximising the number of \textit{cherries} in colourings of $K_n$ (formally defined below). It will turn out that both problems have the same extremal construction which yields the maximum $f(n)$. Recall that we have defined $f(n)$ as the maximum number of edges in a hypergraph $\HH(x_1, \ldots, x_k)$ with $\sum_i{x_i} = n$. An explicit expression for $f$ is
\begin{equation} \label{eqn:f}
f(n) = \max_{k \ge 1} \max_{\substack{x_1, \ldots, x_k \ge 1 : \\ x_1 + \ldots + x_k = n}} \left\{\sum_{1 \le i < j \le k} \binom{x_i}{2} \cdot x_j\right\}.
\end{equation}
Equivalently, we have the recursive characterisation
\begin{align} \label{eqn:k}
\begin{split}
& f(1) = 0,\\
& f(n) = \max_{k \in [n-1]} \binom{k}{2} (n-k) + f(n-k) \,\,\text{ for $n \ge 2$}.
\end{split}
\end{align}
Write
\begin{equation} \label{eq:alphabeta}
\beta = \frac{3 - \sqrt{3}}{2} \approx 0.634 \qquad \text{and} \qquad
\alpha = \frac{\beta(1 - \beta)}{2(3 - 3\beta + \beta^2)} = \frac{\sqrt{3}}{3} - \frac{1}{2} \approx 0.077.
\end{equation}
The following proposition will be proved in \Cref{subsec:calculus}.
\begin{proposition} \label{prop:alpha}
$f(n) = \alpha n^3 + o(n^3)$.
\end{proposition}
We remark that the density of the corresponding hypergraph $\HH_n$ is $6\alpha = 2\sqrt{3}-3$, as already noted by Mubayi and R\"odl~\cite{mubayi2002turan}.
As in Section~\ref{sec:overview}, we call a graph $G$ \emph{coloured} if it is a complete graph whose edges are either coloured blue or coloured red and oriented.
A \emph{cherry} in a coloured graph $G$ is a triple $xyz$ such that $xy$ and $xz$ are red and directed from $x$ and $yz$ is blue. Denote by $c(G)$ the number of cherries in $G$. Theorem~\ref{theorem_intro_falgas_ravry} states that $c(G) \leq f(n)$ for any $n$-vertex coloured graph $G$. Recall that this was originally proved by Falgas-Ravry and Vaughan \cite{falgas2012turan} (who used flag algebras and also proved a similar result for out-directed stars on four vertices) and by Huang \cite{huang2014maximum} (who used a symmetrisation argument, and proved a similar result for out-directed stars on $k$ vertices, for every $k \ge 3$). Nevertheless, we provide a proof, both for completeness and because we need most of the groundwork to prove a stability version of Theorem~\ref{theorem_intro_falgas_ravry}.
As mentioned in the proof overview (\Cref{sec:overview}), \Cref{corollary_intro_long_pseudocycles}, which is a weak version of our main result and is restated here, follows directly from \Cref{theorem_intro_falgas_ravry} (proved in the next section) and \Cref{thm:good-colouring} (proved in the previous section).
\corLongPseudocylces*
\section{Stability with symmetrisation} \label{sec:symm}
Most of the work in this section will go into proving the following lemma, providing a stability version of Theorem~\ref{theorem_intro_falgas_ravry}.
It will then be iterated to prove a stability result about cherries in coloured graphs; recall that $\beta = (3 - \sqrt{3})/2$ (see \eqref{eq:alphabeta}).
We point out that this stability result is somewhat similar to a general result due to Liu--Pikhurko--Sharifzadeh--Staden \cite{liu2020stability} which allows one to obtain stability versions of a class of extremal results that can be proved using a symmetrisation argument. However, while we indeed prove the extremal result in Theorem~\ref{theorem_intro_falgas_ravry} using a symmetrisation argument, the result in \cite{liu2020stability} does not apply to automatically convert it into a stability result.
\begin{lemma} \label{lem:ind-step}
Let $ 1/n \ll \varepsilon \ll 1$ and let $G$ be a coloured graph on $n$ vertices satisfying $c(G) \ge f(n) - \varepsilon^2 n^3$.
Then there is a coloured graph $G'$ on $V(G)$ satisfying: $c(G') \ge c(G)$; the graphs $G$ and $G'$ differ on at most $800\varepsilon^{1/2} n^2$ edges; moreover, there is a set $Q \subseteq V(G)$ satisfying $\big| |Q| - \beta n \big| \le 100\varepsilon n$; $Q$ is a blue clique in $G'$; and all other edges in $G'$ that are incident with $Q$ are red and oriented towards $Q$.
\end{lemma}
The proof consists of two main parts: first we show that $G$ has a blue almost-clique on a vertex set $Q'$ of size roughly $\beta n$. Then we show that most $(V \setminus Q', Q')$ edges are red and point towards $Q'$. In both parts, we make use of a ``symmetrisation procedure'' which builds blue cliques without decreasing the number of cherries.
A \emph{blue clone-clique} in a coloured graph $G$ is a set of vertices $Q$ such that $Q$ is a blue clique in $G$, and for any $v \notin Q$, either all edges between $v$ and $Q$ are blue, or they are all red and have the same orientation (namely, they all point towards $v$ or all point away from $v$). A \emph{full blue clone-clique} is a blue clone-clique $Q$ such that all $(V \setminus Q, Q)$ edges are red.
The symmetrisation procedure, which will be described in detail in the next section, receives as input a vertex $x$ in a graph $G$, and produces a graph $G'$ on the same vertex set, which has at least as many cherries as $G$ and has a full blue clone-clique $Q$ in $G'$ that contains $x$.
The symmetrisation procedure can be applied repeatedly to a coloured graph $G$ to find a coloured graph $G'$ with at least as many cherries as $G$, and whose vertices can be partitioned into full blue clone-cliques. Some calculus (detailed in \Cref{subsec:calculus}) will show that such a $G'$ contains a full blue clone-clique $Q'$ of size approximately $\beta n$.
To proceed we need two lemmas (\Cref{lem:Q-red,lem:Q-blue}; see \Cref{subsec:Q-lemmas}) that together tell us the following. Suppose that a symmetrisation procedure on $G$ resulted in a full blue clone-clique $Q$, of size approximately $\beta n$. Then (even before performing symmetrisation) almost all edges in $G[Q, V \setminus Q]$ are red and point towards $Q$, and almost all edges in $G[Q]$ are blue.
Applying these lemmas to the previously found blue clone-clique $Q'$, we conclude first that $G[Q']$ is almost fully blue.
We then show that there is a particular instance of the symmetrisation procedure that results in a graph $G'$ and full blue clone-clique $Q$ such that $Q$ and $Q'$ differ on only few vertices. Lemma~\ref{lem:Q-red} implies that almost all $G[Q', V \setminus Q']$ edges are red and point towards $Q'$. This essentially completes the proof. This part is detailed in \Cref{subsec:proof}.
In \Cref{subsec:iteration}, we iterate \Cref{lem:ind-step} to prove the following result.
\begin{theorem}\label{thm:stability}
Let $1/n \ll \varepsilon_1 \ll \varepsilon_2 \ll 1$.
Let $G$ be a coloured graph on $n$ vertices satisfying $c(G) \ge f(n) - \varepsilon_1 n^3$.
Then there exists a coloured graph $G'$ on the same vertex set, satisfying:
\begin{enumerate}[label = \rm(\alph*), ref = \rm (\alph*)]
\item \label{itm:stability-a}
$c(G') \ge c(G)$,
\item \label{itm:stability-b}
$G$ and $G'$ differ on at most $\varepsilon_2 n^2$ edges,
\item \label{itm:stability-c}
the vertices of $G'$ can be partitioned into $Q_1, \ldots, Q_t$ such that:
\begin{enumerate}[label = \rm(\roman*), ref = c\rm(\roman*)]
\item \label{itm:stability-c1}
$|Q_1| \ge \ldots \ge |Q_t|$,
\item \label{itm:stability-c2}
all edges in $Q_i$ are blue, for $i \in [t]$,
\item \label{itm:stability-c3}
all edges in $(Q_i, Q_j)$ are red and directed towards $Q_i$, for $1 \le i < j \le t$,
\item \label{itm:stability-c4} $\big||Q_i| - \beta \cdot |Q_i \cup \ldots \cup Q_t| \big| \le \varepsilon_2 n$ for $i \in [t]$.
\end{enumerate}
\end{enumerate}
\end{theorem}
\def \Nm {N^-}
\def \Np {N^+}
In a coloured graph $G$, let $\Nm_G(x)$ be the red in-neighbourhood of $x$ and let $\Np_G(x)$ be the red out-neighbourhood of $x$ (we sometimes omit the subscript $G$).
\subsection{The symmetrisation procedure} \label{subsec:symmproc}
Given $x \in V(G)$, the \emph{symmetrisation procedure} $S_G(x)$ (or $S(x)$ in short) builds a blue clone-clique containing $x$; see \Cref{fig:symmetrisation} for a detailed description. The result of the procedure depends on the choice of $x_{k+1}$ in step \ref{step:4}, but we suppress this dependence in the notation $S_G(x)$.
\begin{figure}
\caption{Description of the symmetrisation process $S_G(x)$}
\label{step:3}
\label{step:4}
\label{fig:symmetrisation}
\end{figure}
We now show that the procedure $S_G(x)$ does not decrease the number of cherries. In fact, we prove a stronger quantitative claim.
\begin{claim} \label{claim:nhoods}
Let $x_1, \ldots, x_t$, $y_1, \ldots, y_t$ and $G_1, \ldots, G_t$ be sequences produced by $S_G(x)$, let $k \in [t-1]$, and use $\Nm(u)$ as a shorthand for $\Nm_{G_k}(u)$. Then, one of the following holds.
\begin{enumerate}[label = \rm(\roman*)]
\item \label{itm:nhoods1}
$y_{k+1} = x_1$ and $c(G_{k+1})-c(G_k) \geq \frac{k+1}{4} \cdot \big| \Nm(x_1) \,\triangle\, \Nm(x_{k+1})\big|$,
\item \label{itm:nhoodsk}
$y_{k+1} = x_{k+1}$ and $c(G_{k+1})-c(G_k) \geq \frac{k(k+1)}{4} \cdot \big| \Nm(x_1) \, \triangle \, \Nm(x_{k+1})\big|$.
\end{enumerate}
In particular, $c(G_{k+1}) \ge c(G_k)$.
\end{claim}
\begin{proof}
Let $c(x_i)$ denote the number of cherries in $G_k$ containing $x_i$ and no other vertices in $x_1, \ldots, x_k$.
Recall that for $y \in \{x_1, \ldots, x_{k+1}\}$ the graph $G_{k+1}(y)$ is obtained from $G_k$ by replacing $x_1, \ldots, x_{k+1}$ by copies of $y$ that form a blue clique. Write $\Delta_j := c(G_{k+1}(x_j)) - c(G_k)$.
Then
\begin{equation*}
\Delta_j =
(k+1)c(x_j) - \sum_{i \in [k+1]} c(x_i) + \binom{k+1}{2}\big|N^-(x_j)\big| - \frac 12 \sum_{i_1 \neq i_2} \big|N^-(x_{i_1}) \cap N^-(x_{i_2})\big|.
\end{equation*}
Summing over $j \in [k+1]$, we obtain
\begin{align*}
\sum_j \Delta_j
& = \binom{k+1}{2}\sum_j \big|N^-(x_j)\big| - \frac{k+1}{2}\sum_{i \neq j}\big|N^-(x_i) \cap N^-(x_j)\big| \\
& = \frac{k+1}{2}\sum_{i \neq j} \big|N^-(x_j) \setminus N^-(x_i)\big|.
\end{align*}
In particular, since $\{x_1, \dots x_k\}$ is a blue clone-clique,
\begin{align*}
k \Delta_1 + \Delta_{k+1}
& = \frac{k(k+1)}{2} \cdot \left( \big|N^-(x_{k+1}) \setminus N^-(x_1)\big| + \big|N^-(x_{1}) \setminus N^-(x_{k+1})\big|\right) \\
& = \frac{k(k+1)}{2} \cdot \big|\Nm(x_1) \,\triangle\, \Nm(x_{k+1})\big|.
\end{align*}
Now, $\max(k \Delta_1, \Delta_{k+1})$ is at least one half of the RHS. Thus, if $y_{k+1} = x_{k+1}$ then $\Delta_{k+1} \ge k \Delta_1$ and so $\Delta_{k+1} = \max(k \Delta_1, \Delta_{k+1}) \ge \frac{k(k+1)}{4} \cdot \big|\Nm(x_1) \,\triangle\, \Nm(x_{k+1})\big|$, and if $y_{k+1} = x_1$ then $k \Delta_1 > \Delta_{k+1}$ and so
$\Delta_1 = \max(k\Delta_1, \Delta_{k+1}) \geq \frac{k+1}{4} \cdot \big|\Nm(x_1) \,\triangle\, \Nm(x_{k+1})\big|$.
\end{proof}
\Cref{theorem_intro_falgas_ravry} follows easily from the above claim.
\def \Gf {G_{\final}}
\begin{proof}[Proof of Theorem~\ref{theorem_intro_falgas_ravry}]
Let $G$ be a coloured graph on $n$ vertices. Run the following process: starting with $G' = G$, as long as there is a vertex $x$ which is not in a full blue clone-clique in $G'$, run $S_{G'}(x)$ and replace $G'$ by the resulting graph. Let $\Gf$ be the graph $G'$ at the end of the process (notice that the process will indeed end, because $S_{G'}(x)$ keeps full blue clone-cliques intact). Then $c(\Gf) \ge c(G)$ by \Cref{claim:nhoods}, and the vertices of $\Gf$ can be partitioned into full blue clone-cliques $Q_1, \ldots, Q_t$; for convenience suppose that $|Q_1| \ge \ldots \ge |Q_t|$. Replace $\Gf$ by the graph $\Gf'$ obtained by directing the red edges between $Q_i$ and $Q_j$ towards $Q_i$, for $1 \le i < j \le t$. It is straightforward to verify that $c(\Gf') \ge c(\Gf)$, as the number of cherries in $Q_i \cup Q_j$ is larger when the arcs in $(Q_i, Q_j)$ point towards the larger clique. Finally, denoting $q_i := |Q_i|$, observe that $c(\Gf') = \sum_{i < j} \binom{q_i}{2}q_j \le f(n)$ (see \eqref{eqn:f}). Thus $c(G) \le f(n)$, as claimed.
\end{proof}
\subsection{Optimising the clique size} \label{subsec:calculus}
Before proceeding to analyse the symmetrisation procedure, we prove the following lemma regarding the structure of a graph whose vertices are partitioned into full blue clone-cliques, mostly using calculus; recall that $\alpha$ and $\beta$ are defined in \eqref{eq:alphabeta}.
\begin{lemma} \label{lem:theta}
Let $1/n \ll \varepsilon \ll 1$.
Let $G$ be a coloured graph on $n$ vertices whose vertices can be partitioned into full blue clone-cliques, and suppose that $c(G) \ge f(n) - \varepsilon^2 n^3$. Then $G$ has a full blue clone-clique $Q$ satisfying $\big| |Q| - \beta n \big| \le 100 \varepsilon n$.
\end{lemma}
Define a function $g:[0,1] \to \mathbb{R}$ as follows.
\begin{equation} \label{eqn:g}
g(x) = \frac{x(1-x)}{2(3-3x+x^2)}.
\end{equation}
It will be convenient to note the following equation.
\begin{equation} \label{eqn:g-var}
\left(1 - (1 - x)^3\right) \cdot g(x) = \frac{1}{2} \cdot x^2(1 - x).
\end{equation}
One can check that $g'$ is decreasing and $g'(\beta) = 0$, showing that
\begin{equation} \label{eqn:g-beta}
g(x) \le g(\beta) = \alpha \qquad \text{for $x \in [0,1]$}.
\end{equation}
We first prove \Cref{prop:alpha} regarding the value of $f(n)$.
\begin{proof}[Proof of \Cref{prop:alpha}]
We show that $f(n) \leq \alpha n^3$ by induction on $n$. This is true for $n=1$. Suppose that $f(m)\leq \alpha m^3$ for $m< n$.
Given $k \in [n-1]$ that maximises the RHS in \eqref{eqn:k}, write $x = k/n$. The recursive definition of $f$ implies that $ \frac{f(n)}{n^3} \leq \frac 12 \cdot x^2(1-x) + \alpha(1-x)^3$. Subtracting $\alpha$ and using \eqref{eqn:g-var}, we obtain
\begin{equation*}
\frac{f(n)}{n^3}- \alpha \leq \frac 12 \cdot x^2(1-x) - \alpha(1-(1-x)^3) = (1-(1-x)^3)(g(x)-\alpha) \le 0,
\end{equation*}
as required.
To verify that $f(n) \geq (\alpha + o(1))n^3$, set $x_i =\lfloor \beta (1-\beta)^i n \rfloor$ in~\eqref{eqn:f}.
\end{proof}
\begin{proof}[Proof of \Cref{lem:theta}]
Let $Q_1, \ldots, Q_t$ be the full blue clone-cliques in $G$, arranged in descending order according to their sizes. Let $G'$ be obtained from $G$ by orienting the $(Q_i, Q_j)$ (red) edges towards $Q_i$, for $1 \le i < j \le t$. As explained before and by assumption on $G$, $c(G') \ge c(G) \ge f(n) - \varepsilon^2 n^3$.
Notice that $|Q_1| \ge 0.01 n$, because otherwise $c(G) \le n^2|Q_1| \le 0.01 n^3 < f(n) - \varepsilon^2 n^3$ (recall that $f(n) \approx 0.077n^3$, by \Cref{prop:alpha}).
Write $|Q_1| = \theta n$. Then, using $f(n) = \alpha n^3 + o(n^3) = g(\beta) n^3 + o(n^3)$ (which follows from \Cref{prop:alpha} and the definition of $\alpha$ in \eqref{eq:alphabeta}),
\begin{equation*}
c(G') \le \binom{|Q_1|}{2}(n - |Q_1|) + f(n - |Q_1|)
\le \frac{1}{2} \theta^2 (1 - \theta)n^3 + g(\beta)(1 - \theta)^3n^3 + o(n^3).
\end{equation*}
Thus, using \eqref{eqn:g-var},
\begin{align*}
\varepsilon^2 \ge \frac{f(n) - c(G')}{n^3}
& \ge g(\beta) - g(\beta)(1 - \theta)^3 - \frac{1}{2}\theta^2(1 - \theta) + o(1) \\
& = \theta \cdot (3 - 3\theta + \theta^2) \cdot (g(\beta) - g(\theta)) + o(1) \\
& \ge 0.02 \cdot (g(\beta) - g(\theta)) + o(1).
\end{align*}
For the last inequality we used $\theta \ge 0.01$, which implies $\theta(3-3\theta+\theta^2) \ge 0.02$.
By bounding the $o(1)$ term by $\varepsilon^2/2$ and using \Cref{claim:calculus} below, we get
\begin{equation*}
100\varepsilon^2 \ge g(\beta) - g(\theta) \ge \min\{0.05(\beta - \theta)^2, 0.005\}.
\end{equation*}
Since $\varepsilon$ is very small, we get $100 \varepsilon^2 \ge 0.05(\beta - \theta)^2$, which implies $|\beta - \theta| \le 100\varepsilon$.
\end{proof}
\begin{claim} \label{claim:calculus}
For $x \in [0,1]$,
\begin{equation}
g(\beta) - g(x) \geq \min\{0.05(\beta-x)^2, 0.005\}.
\end{equation}
\end{claim}
\begin{proof}
We use the following facts that can be checked easily.
\begin{itemize}
\item
The function $g(x)$ is increasing on $[0,\beta]$ and decreasing on $[\beta, 1]$. In particular, its maximum is attained at $\beta$, and $g'(\beta) = 0$.
\item
$g(\beta) - g(0.5) \ge 0.005$.
\item
The second derivative $g''(x)$ (which is $\frac{-x(2x^2 - 9x + 9)}{(x^2 - 3x + 3)^3}$) is non-positive and decreasing on $[0,1]$. In particular $g''(x) \le g''(0.5) \le -0.4$ for $x \in [0.5, 1]$.
\item
By Taylor's expansion: $g(x) = g(\beta) + g'(\beta)(x - \beta) + \frac{1}{2}g''(c_x)(x - \beta)^2$ for every $x \in [0,1]$ and some $c_x$ between $x$ and $\beta$.
\end{itemize}
By the first and second items (using $\beta > 0.5$), if $x \in [0, 0.5]$ then
\begin{equation*}
g(\beta) - g(x) \ge g(\beta) - g(0.5) \ge 0.005.
\end{equation*}
By the first, third and fourth items, if $x \in [0.5, 1]$, then
\begin{equation*}
g(\beta) - g(x) \ge \frac{0.4}{6}(x - \beta)^2 \ge 0.05(x - \beta)^2.
\end{equation*}
The two inequalities prove the claim.
\end{proof}
\subsection{Blue clone-cliques before and after symmetrisation} \label{subsec:Q-lemmas}
The next two lemmas show that if a symmetrisation procedure on $G$ produces a full blue clone-clique $Q$ of size approximately $\beta n$, then almost all edges in $G[Q, V \setminus Q]$ are red and oriented towards $Q$ and almost all edges in $G[Q]$ are blue.
\begin{lemma} \label{lem:Q-red}
Let $1/n \ll \varepsilon \ll 1$.
Let $G = (V, E)$ be a coloured graph on $n$ vertices with at least $f(n) - \varepsilon^2 n^3$ cherries. Suppose that $G'$ and $Q$ are the output
of a procedure $S_G(x)$, and suppose that $|Q| \ge 0.55n$.
Then all but at most $10\varepsilon n^2$ edges in $G[Q, V \setminus Q]$ are red and directed towards $Q$.
\end{lemma}
\begin{proof}
\def \Vi{V_{\inn}}
\def \Vo{V_{\out}}
Set $U := V \setminus Q$, let $\Vi$ be the set of vertices $u$ in $U$ for which $uq$ is a red arc in $G'$ for every $q \in Q$, and let $\Vo := U \setminus \Vi$.
We will show that $\Vo$ is small, and that not many pairs incident to $\Vi$ were recoloured during the symmetrisation procedure $S_G(x)$.
First, we show $|\Vo| \leq 40\varepsilon^2 n$.
Let $G''$ be obtained from $G'$ by reorienting the edges in $G'[Q, \Vo]$ to point towards $Q$. Notice that the cherries in $G'$ that contain an edge in $(Q, \Vo)$ consist of one vertex in $Q$ and two in $\Vo$, and thus their number is at most $|Q|\binom{|\Vo|}{2}$. Also, every set consisting of two vertices in $Q$ and one in $\Vo$ is a cherry in $G''$ but not in $G'$. Thus, using $|Q| \ge 0.55 n$ which implies $|Q| - |\Vo| \ge 0.1n$,
\begin{align*}
c(G'') - c(G')
& \ge \binom{|Q|}{2}|\Vo| - \binom{|\Vo|}{2}|Q|
= \frac 12 |Q||\Vo|(|Q| - |\Vo|) \\
& \ge \frac{1}{2} \cdot \frac{n}{2} \cdot \frac{n}{10} \cdot |\Vo|
= \frac{n^2}{40} \cdot |\Vo|.
\end{align*}
Recall that $c(G) \ge f(n) - \varepsilon^2 n^3$ by assumption, $c(G') \ge c(G)$ by \Cref{claim:nhoods}, and $c(G'') \le f(n)$ by Theorem~\ref{theorem_intro_falgas_ravry}. Altogether, this implies $c(G'') - c(G') \le \varepsilon^2 n^3$ and thus $|\Vo| \le 40\varepsilon^2 n$, as claimed.
Let $R$ be the set of edges $qv$ in $(Q, V \setminus Q)$ that are red and oriented towards $Q$ in $G'$ but not in $G$.
We now upper-bound $|R|$. Notice that each such edge in $R$ was recoloured to a red arc oriented towards $Q$ at some point during $S_G(x)$ (possibly more than once).
Let $G = G_1, \ldots, G_{t} = G'$ be the graphs obtained during the symmetrisation process on $Q$ and let $x_1, \ldots, x_t$ be the corresponding sequence of vertices.
For each $v \in V$ and $k \in [t]$, let $A_k(v)$ be the set of ordered pairs $vq$ which changed to red arcs in step $k$ (so they were recoloured from $G_{k-1}$ to $G_k$).
We claim that $\sum_{k \geq \varepsilon n} \sum_{v \in \Vi} |A_k(v)| \leq 4 \varepsilon n^2$.
To see this, fix $k \geq \varepsilon n$ and consider the $k$-th step. If $y_k = x_1$, then $A_k(v) = \{vx_k\}$ for $v \in \Nm(x_1) \setminus \Nm(x_k)$ and $A_k(v) = \emptyset$ otherwise, where $\Nm(\cdot)$ refers to the in-neighbourhood with respect to $G_{k-1}$. Thus, using \Cref{claim:nhoods}~\ref{itm:nhoods1},
\begin{equation*}
\sum_{v \in \Vi} |A_k(v)|
\leq \big|N^-(x_1) \setminus N^-(x_k)\big|
\leq \frac{4}{k} \cdot \big(c(G_k) - c(G_{k-1})\big).
\end{equation*}
If $y_k = x_k$, then $A_k(v) = \{vx_1, \ldots, vx_{k-1}\}$ for $v \in \Nm(x_k) \setminus \Nm(x_1)$ and $A_k(v) = \emptyset$ otherwise. Thus, by \Cref{claim:nhoods}~\ref{itm:nhoodsk},
\begin{equation*}
\sum_{v \in \Vi} |A_k(v)|
\leq (k-1) \cdot \big|N^-(x_1) \setminus N^-(x_k)\big|
\leq \frac{4}{k} \cdot \big(c(G_k) - c(G_{k-1})\big).
\end{equation*}
In either case, we get that for $k \geq \varepsilon n$,
\begin{equation*}
\sum_{v \in \Vi} |A_k(v)| \leq \frac{4}{\varepsilon n}\big(c(G_k) - c(G_{k-1})\big).
\end{equation*}
Summing over $k \geq \varepsilon n$, we obtain the required inequality
\begin{equation*}
\sum_{k \ge \varepsilon n} \sum_{v \in \Vi} |A_k(v)| \leq \frac{4}{\varepsilon n}\big(c(G') - c(G_{\varepsilon n})\big) \leq 4 \varepsilon n^2,
\end{equation*}
where the last inequality holds since $c(G') - c(G_{\varepsilon n}) \leq \varepsilon^2 n^3$.
Note that $|R| \le \varepsilon n^2 + \sum_{k \ge \varepsilon n} \sum_{v \in \Vi} |A_k(v)| \le 5\varepsilon n^2$.
In total, all but at most $(40 \varepsilon^2 + 5\varepsilon)n^2 \leq 10 \varepsilon n^2$ pairs in $(Q, V \setminus Q)$ are red and oriented towards $Q$.
\end{proof}
\begin{lemma} \label{lem:Q-blue}
Let $1/n \ll \varepsilon \ll 1$.
Let $G$ be a coloured graph on $n$ vertices with at least $f(n) - \varepsilon^2 n^3$ cherries. Suppose that $G'$ and $Q$ are the graph and full blue clone-clique produced by the procedure $S_G(x)$, and suppose that $0.55n \le |Q| \le 0.65n$.
Then all but $1200\varepsilon n^2$ edges in $G[Q]$ are blue.
\end{lemma}
\begin{proof}
Let $F$ and $F'$ be the graphs obtained from $G$ and $G'$ by colouring all $(Q, V \setminus Q)$ edges red and orienting them towards $Q$. Notice that $F'$ can be obtained from $F$ by colouring all edges in $Q$ blue.
We will first derive an upper bound on $c(F') - c(F)$. By \Cref{lem:Q-red}, the graphs $G$ and $F$ differ on at most $10\varepsilon n^2$ edges and thus $|c(G) - c(F)| \le 10\varepsilon n^3$. Similarly, $|c(G') - c(F')| \le 10\varepsilon n^3$ (the lemma is still applicable, as $S_{G'}(x)$ does not change the graph $G'$). By assumption on $G$ we also have $c(G') - c(G) \le \varepsilon^2 n^3$. Altogether,
\begin{equation} \label{eqn:upper}
c(F') - c(F) \le c(G') - c(G) + 20\varepsilon n^3 \le (\varepsilon^2 + 20\varepsilon)n^3 \le 30 \varepsilon n^3.
\end{equation}
\def \dpp {d^+}
We now obtain a lower bound on the same quantity.
Let $e$ be the number of red edges in $G[Q]$.
The number of cherries in $F$ that are not cherries in $F'$ is at most $\sum_{q \in Q} \binom{\dpp(q)}{2}$, where $\dpp(q)$ denotes the red in-degree of $q$ in $F[Q]$. Notice that
\begin{equation*}
\sum_{q \in Q} \binom{\dpp(q)}{2} \le \frac{1}{2} \sum_{q \in Q}(\dpp(q))^2 \le \frac 12 e|Q|,
\end{equation*}
because $\dpp(q) \le |Q|$ and $e = \sum_q \dpp(q)$.
On the other hand, the number of cherries in $F'$ that are not cherries in $F$ is exactly $e(n - |Q|)$. Thus,
\begin{equation} \label{eqn:lower}
c(F') - c(F)
\ge e(n - |Q|) - \frac 12 e|Q|
= e\cdot \big(n - \frac{3}{2}|Q|\big)
\ge \frac{en}{40},
\end{equation}
using $|Q| \le 0.65 n$.
By \eqref{eqn:upper} and \eqref{eqn:lower}, we have $e \le 1200 \varepsilon n^2$, as claimed.
\end{proof}
\subsection{Proof of \Cref{lem:ind-step}} \label{subsec:proof}
Finally, we start with the actual proof of \Cref{lem:ind-step}. The first step is to find a set $Q'$ of the right size almost all of whose edges in $G$ are blue.
\begin{lemma} \label{lem:beta-clique}
Let $1/n \ll \varepsilon \ll 1$.
Let $G$ be a coloured graph on $n$ vertices, satisfying $c(G) \ge f(n) - \varepsilon^2 n^3$.
Then there is a set $Q' \subseteq V(G)$ such that $\big||Q'| - \beta n\big| \le 100\varepsilon n$ and all but at most $1200\varepsilon n^2$ edges in $G[Q']$ are blue.
\end{lemma}
\begin{proof}
Similarly to the proof of Theorem~\ref{theorem_intro_falgas_ravry}, start with $G' = G$, and, as long as $G'$ has a vertex $x$ which is not in a full blue clone-clique, run the symmetrisation procedure $S_{G'}(x)$, and replace $G'$ by the resulting graph. Denote by $\Gf$ the graph at the end of the process (as before, the process is guaranteed to end). Then the vertices of $\Gf$ can be partitioned into full blue clone-cliques $Q_1, \ldots, Q_t$.
Let $Q'$ be the vertex set of the largest clone-clique. By \Cref{lem:theta}, we have $\big||Q'| - \beta n\big| \le 100\varepsilon n$. In particular $|Q'| \in [0.55n, 0.65n]$.
Let $F_1$ be the graph created just before the symmetrisation procedure was started on an element of $Q'$, and let $F_2$ be the graph just after $Q'$ was built. Notice that $c(F_2) \ge c(F_1) \ge c(G) \ge f(n) - \varepsilon^2 n^3$.
By \Cref{lem:Q-blue}, all but at most $1200\varepsilon n^2$ edges in $F_1[Q']$ are blue. Notice that during the above process, the edges in $Q'$ remain untouched until right before a symmetrisation process is started on an element of $Q'$. It follows that all but at most $1200 \varepsilon n^2$ edges in $G[Q']$ are blue.
\end{proof}
Now, we can complete the proof by running a symmetrisation procedure that generates a blue clique $Q$ which will be close to $Q'$.
\begin{proof}[Proof of Lemma~\ref{lem:ind-step}]
Apply \Cref{lem:beta-clique} to find $Q'$ such that $\big||Q'| - \beta n\big| \le 100\varepsilon n$ and $G[Q']$ has at most $\delta^2 n^2$ red edges (with $\delta^2= 1200\varepsilon$).
\begin{claim}
We can run a symmetrisation procedure on $G$ which results in a graph $G'$ and a full blue clone-clique $Q$ satisfying $|Q' \setminus Q| \leq 3 \delta n$.
\end{claim}
\begin{proof}
Let $A$ be the set of vertices in $Q'$ with more than $\delta n$ red (in- or out-) neighbours in $G[Q']$. The bound on the number of red edges in $Q'$ gives $|A| < 2\delta n$. Define $Q'' := Q' \setminus A$.
We will run a symmetrisation procedure on $G$, but with a specific ordering of vertices.
We start with $x_1 \in Q''$ (chosen arbitrarily). Assuming that $\{x_1, \ldots, x_k\}$ are defined and contained in $Q''$, if possible we pick $x_{k+1}$ to also be in $Q''$ (we can do this as long as there is a vertex in $Q'' \setminus \{x_1, \ldots, x_k\}$ whose edges to $\{x_1, \ldots, x_k\}$ are blue). Once this is no longer possible, we continue with the symmetrisation procedure using an arbitrary order of vertices. Let $Q$ be the full blue clone-clique built by this procedure.
Let $k$ be largest such that $\{x_1, \ldots, x_k\} \subseteq Q''$. It is easy to see that throughout the procedure, until at least step $k$, every vertex in $Q''$ has at most $\delta n$ non-blue neighbours in $Q'' \setminus \{x_1, \ldots, x_k\}$. Thus $k \ge |Q''| - \delta n \ge |Q'| - 3\delta n$, as otherwise we could find a suitable $x_{k+1}$ in $Q''$, contradicting the choice of $k$. It follows that $|Q' \setminus Q| \le 3\delta n$.
\end{proof}
Let $G'$ and $Q$ be as in the above Claim. We claim that $|Q| \le (\beta + 100\varepsilon)n$. Indeed, this follows from \Cref{lem:theta} by running symmetrisation procedures repeatedly, starting from $G'$, until the vertices can be partitioned into full blue clone-cliques (one of which is $Q$). It follows that $|Q \setminus Q'| \le 3\delta n + |Q| - |Q'| \le (3\delta + 200\varepsilon)n \le 5\delta n$. In particular, the number of red edges in $G[Q]$ is at most the number of red edges in $G[Q']$ plus the number of edges incident with $Q \setminus Q'$, which amounts to a total of at most $(\delta^2 + 5\delta)n^2 \le 10\delta n^2$ red edges in $G[Q]$.
By \Cref{lem:Q-red}, all but at most $10\varepsilon n^2$ edges in $G[Q, V \setminus Q]$ are red and oriented towards $Q$, and similarly for $G'[Q, V \setminus Q]$.
\def \Vi{V_{\inn}}
\def \Vo{V_{\out}}
Since $Q$ is a full blue clone-clique, the vertices in $V \setminus Q$ can be partitioned into $\Vi$ and $\Vo$, where $vq$ is a red arc for every $v \in \Vi$ and $q \in Q$ and $qv$ is a red arc for $v \in \Vo$ and $q \in Q$. Thus, by the previous paragraph and because $|Q| \ge n/2$, $|\Vo| \le 20\varepsilon n$.
Let $G''$ be obtained from $G'$ by reorienting all $(Q, V \setminus Q)$ edges towards $Q$. Then
\begin{equation*}
c(G'') - c(G')
\ge \binom{|Q|}{2}{|\Vo|} - \binom{|\Vo|}{2}|Q|
= \frac{1}{2}|Q||\Vo| \cdot (|Q| - |\Vo|) \ge 0.
\end{equation*}
It follows that $c(G'') \ge c(G') \ge c(G)$. Moreover, $G''$ and $G'$ differ on at most $|\Vo|n \le 20\varepsilon n^2$ edges, and thus $G$ and $G''$ differ on at most $(20\varepsilon + 10\varepsilon + 10\delta)n^2 \le 20\delta n^2$ edges.
Since $G''$ has the required structure, this proves \Cref{lem:ind-step}.
\end{proof}
\subsection{Full stability result} \label{subsec:iteration}
\begin{proof}[Proof of \Cref{thm:stability}]
Let $\varepsilon_1 \ll \eta \ll \varepsilon_2$.
The idea is simply to iterate \Cref{lem:ind-step}.
We will find graphs $G_1, \ldots, G_s$ and sets $Q_1, \ldots, Q_s$, satisfying the following conditions, for $k \in [s]$ (for convenience set $G_0 := G$, $Q_0 := \emptyset$ and $V := V(G)$).
\begin{enumerate}[label = \rm(\arabic*)]
\item \label{itm:full-stab-1}
$G_k$ is a coloured graph on vertex set $V \setminus (Q_1 \cup \ldots \cup Q_{k-1})$.
\item \label{itm:full-stab-2}
$Q_k$ is a blue clique in $G_k$, all other edges incident with $Q_k$ in $G_k$ are red and point towards $Q_k$.
\item \label{itm:full-stab-3}
$\big||Q_k| - \beta|G_k|\big| \le \eta|G_k|$.
\item \label{itm:full-stab-4}
$G_k$ and $G_{k-1} \setminus Q_{k-1}$ differ on at most $\eta |G_{k}|^2$ edges.
\item \label{itm:full-stab-5}
$c(G_k) \ge c(G_{k-1} \setminus Q_{k-1})$.
\item \label{itm:full-stab-6}
$c(G_k \setminus Q_k) \ge f(|G_k \setminus Q_k|) - \varepsilon_1 n^3$.
\end{enumerate}
To see how such a sequence can be built, suppose that $G_1, \ldots, G_{k-1}$ and $Q_1, \ldots, Q_{k-1}$ are defined and satisfy the above conditions. If $|G_{k-1} \setminus Q_{k-1}| \le \eta n$, we stop the process and set $s := k-1$. Otherwise, we apply \Cref{lem:ind-step} to the graph $G_{k-1} \setminus Q_{k-1}$. Notice that by \ref{itm:full-stab-6} and the assumption on $|G_{k-1} \setminus Q_{k-1}|$, we have $c(G_{k-1} \setminus Q_{k-1}) \ge f(|G_{k-1} \setminus Q_{k-1}|) - \varepsilon_1 \eta^{-3} |G_{k-1} \setminus Q_{k-1}|^3$. Since $\varepsilon_1 \eta^{-3} \ll \eta$, the lemma is applicable. The lemma produces a graph $G_k$ on vertex set $V(G_{k-1}) \setminus Q_{k-1} = V \setminus (Q_1 \cup \ldots \cup Q_{k-1})$ satisfying items \ref{itm:full-stab-1} to \ref{itm:full-stab-5}. It remains to verify \ref{itm:full-stab-6}.
Note that
\begin{align*}
c(G_k) = \binom{|Q_k|}{2} \cdot |G_k \setminus Q_k| + c(G_k \setminus Q_k).
\end{align*}
Also
\begin{align*}
c(G_k)
\ge c(G_{k-1} \setminus Q_{k-1})
& \ge f(|G_{k-1} \setminus Q_{k-1}|) - \varepsilon_1 n^3 \\
& = f(|G_k|) - \varepsilon_1 n^3 \\
& \ge \binom{|Q_k|}{2}|G_k \setminus Q_k| + f(|G_k \setminus Q_k|) - \varepsilon_1 n^3,
\end{align*}
where the last inequality follows from the definition of $f$. The two inequalities imply \ref{itm:full-stab-6}.
To finish, run a symmetrisation procedure on $G_s \setminus Q_s$ repeatedly, to obtain a graph $H$ whose vertices are partitioned into full blue clone-cliques $Q_{s+1}, \ldots, Q_t$ (arranged in decreasing size); the edges between any two of them point towards the larger clique; and $c(H) \ge c(G_s \setminus Q_s)$. Let $G'$ be the graph on vertex set $V$, such that $Q_1, \ldots, Q_t$ are blue cliques and the edges between any two of them are red and point towards the larger clique (note that $Q_1, \ldots, Q_t$ partition $V$).
To complete the proof of \Cref{thm:stability}, we need to show that properties \ref{itm:stability-a} to \ref{itm:stability-c} hold.
For \ref{itm:stability-a}, define $G_k'$ to be the graph on vertex set $V$, obtained from $G'$ by replacing $V \setminus (Q_1 \cup \ldots \cup Q_{k-1})$ by a copy of $G_k$ (this makes sense due to \ref{itm:full-stab-1}).
It is easy to see that $c(G_k') - c(G_{k-1}') = c(G_k) - c(G_{k-1} \setminus Q_{k-1}) \ge 0$ for $k \in [s]$, using \ref{itm:full-stab-5}. Similarly, $c(G') \ge c(G_s')$. Altogether, $c(G') \ge c(G_1') = c(G)$, as required for \ref{itm:stability-a}.
Before continuing, we derive an upper bound on $s$. By \ref{itm:full-stab-3} we have $|Q_k| \ge 0.55|G_k|$ for $k \in [s]$, so $|G_k| \le 2^{-(k-1)}n$. Since $|G_s| > \eta n$, this implies that $s \le 2\log(1/\eta) \le \eta^{-1/2}$, say.
By \ref{itm:full-stab-4} we find that $G'$ and $G$ differ on at most $(s \eta + \eta)n^2 \le 2\eta^{1/2}n^2 \le \varepsilon_2 n^2$ edges. Property \ref{itm:stability-b} follows.
Notice that the estimate $|Q_k| \ge 0.55|G_k|$, which follows from \ref{itm:full-stab-3} implies $|Q_1| \ge \ldots \ge |Q_s|$. Thus \ref{itm:stability-c1} to \ref{itm:stability-c3} clearly hold. Finally, \ref{itm:stability-c4} holds trivially for $k > s$ and, for $k \le s$, it follows from~\ref{itm:full-stab-3} and $\eta \leq \varepsilon_2$.
\end{proof}
\section{Hypergraphs with no short odd pseudocycles} \label{sec:diameter}
In this section we leverage the stability result about cherries, \Cref{thm:stability}, and the connection between hypergraphs with no odd pseudocycles and good colourings (\Cref{thm:good-colouring}) to prove the following result regarding the structure of a dense hypergraph with no short odd pseudocycles. In case of cycles and pseudocycles, the \textit{length} (number of edges) and order (number of vertices) coincide, so, since there is no danger of confusion, we prefer the term \textit{length}. Given vertex sets $X_1, X_2, X_3 \subset V(\HH)$, an \textit{$X_1X_2X_3$-triple} in $\HH$ is an (unordered) edge $x_1x_2x_3 \in E(\HH)$ with $x_i \in X_i$ for $i \in [3]$.
\begin{theorem} \label{thm:partition}
Let $n \gg \ell \gg 1$.
Let $\HH$ be a $3$-uniform hypergraph on $n$ vertices which contains no odd pseudocycles of length at most $\ell$, and which maximises the number of edges under these conditions.
Then there is a partition $\{A, B\}$ of the vertices of $\HH$ into non-empty sets such that all $AAB$ triples are edges of $\HH$ (and there are no $AAA$ and $ABB$ triples).
\end{theorem}
By iterating the above result, we prove Theorem~\ref{thm:pseudocycles}, restated here, which gives an upper bound on the number of edges in a hypergraph with no short odd pseudocycles.
\thmPseudocycle*
\begin{proof}[Proof of Theorem~\ref{thm:pseudocycles} using \Cref{thm:partition}]
Let $L$ and $n_0$ be such that \Cref{thm:partition} holds for $\ell = L$ and $n \ge n_0$. Denote by $g(n)$ the maximum number of edges in an $n$-vertex hypergraph with no odd pseudocycles of length at most $L$. Then for every $n \ge n_0$ there exists $a_n \in [n-1]$ such that $g(n) \le \binom{a_n}{2}(n-a_n) + g(n-a_n)$. Iterating this and recalling the definition of $f(n)$ implies that $g(n) \le f(n) + \binom{n_0}{3}$.
\end{proof}
Recall that \Cref{thm:pseudocycles} is tight, up to the additive $O(1)$ error term, as evidenced by $\HH(x_1, \ldots, x_k)$ for a suitable choice of $x_i$'s.
We next show how \Cref{thm:pseudocycles} implies our main result, \Cref{thm:single-cycle}, restated here.
\thmSingleCycle*
Recall that the \emph{$t$-blow-up} of an $r$-uniform hypergraph $\HH$, denoted $\HH[t]$, is the hypergraph with vertex set $V(\HH) \times [t]$ and edges all $r$-sets $\{(x_1, i_1), \ldots, (x_r, i_r)\}$ such that $\{x_1, \ldots, x_r\} \in E(\HH)$.
For a family $\Fc$ of hypergraphs, we denote by $\Fc[t]$ the family of $t$-blow-ups of members of $\Fc$. Recall that \Cref{theorem_blow_up} (whose proof can be found in \cite{keevash11}) asserts that taking the $t$-blow-up of a hypergraph does not change its Tur\'an density. The following generalisation for finite families of hypergraphs can be proved similarly.
\begin{theorem}[\cite{keevash11}, Theorem 2.2] \label{thm:ex-blowup}
Let $s$ and $t$ be integers, and let $\Fc$ be a family of $r$-graphs with $|\Fc|\leq s$. Then $\pi(\Fc[t]) = \pi(\Fc)$.
\end{theorem}
To prove \Cref{thm:single-cycle}, we will note that an odd cycle $C^{(3)}_m$ is contained in an \textit{$m$-blow-up} of any odd pseudocycle of length at most $m/2$, and apply the last theorem.
\begin{proof}[Proof of Theorem~\ref{thm:single-cycle} using \Cref{thm:pseudocycles}]
Let $m$ be an integer with $m \geq 2L$ and $3 \nmid m$, where $L$ is the constant from Theorem~\ref{thm:pseudocycles}. Recall that $f(n) = (2\sqrt{3} - 3 + o(1)) \binom n3$. Let $\varepsilon >0$ and let $\HH$ be an $n$-vertex 3-uniform hypergraph with
$e(\HH) \geq (2\sqrt{3} - 3 + \varepsilon) \binom n3$ and $n$ sufficiently large. We claim that $\HH$ contains a copy of $C^{(3)}_m$.
Theorem~\ref{thm:pseudocycles} and Theorem~\ref{thm:ex-blowup} imply that $\HH$ contains $F[m]$ for some $\ell$-pseudocycle $F$ with $\ell \leq L$ and $3 \nmid \ell$. It suffices to show that $F$ contains an $m$-pseudocycle, because then $C_m^{(3)}$ will be contained in $F[m]$. To see this, let $v_1 \dots v_\ell$ be an ordering of $V(F)$ such that $v_i v_{i+1} v_{i+2} \in E(F)$, with the indices taken modulo $\ell$.
In case $m \modthree{\ell}$, consider the sequence $$(v_1 v_2 v_3) ^{\frac{m-\ell}{3}}v_1 v_2 \dots v_\ell,$$
where $(v_1 v_2 v_3)^x$ stands for $x$ repetitions of the sequence $v_1 v_2 v_3$.
This is a sequence of order $m$ certifying that $F$ contains an $m$-pseudocycle.
Otherwise, if $m \modthree {2\ell}$, the same is certified for instance by the sequence
\begin{equation*}
(v_1 v_2 v_3) ^{\frac{m-2\ell}{3}} (v_1 v_2 \dots v_\ell) ^2. \qedhere
\end{equation*}
\end{proof}
All that remains now is to prove \Cref{thm:partition}. We will state and prove some preliminary results in the following subsection, and then prove the theorem in \Cref{subsec:proof-partition}.
\subsection{Preparation}
The \emph{diameter} of a hypergraph $\HH$ is the minimum $\ell$ such that the following holds: for every $x, y, z, w \in V(\HH)$ (where $x, y$ are distinct and $z, w$ are distinct) whenever there is a pseudopath from $xy$ to $zw$, there is such a pseudopath of order at most $\ell$.
We have already shown that $n$-vertex hypergraphs with no odd pseudocycles have at most $f(n)$ edges. To prove the same for pseudocycles of bounded length, we will pass to a subhypergraph with bounded diameter, which is the purpose of the following two propositions.
\begin{proposition} \label{prop:diam-cyc}
Let $\HH$ be a 3-uniform hypergraph of diameter $\ell \geq 4$. If $\HH$ has an odd pseudocycle, then it has an odd pseudocycle of length at most $4 \ell$.
\end{proposition}
\begin{proof}
Let $C$ be the shortest odd pseudocycle in $\HH$. Assuming that its length is at least $3\ell +4$, we may index it by $xy v_1 \ldots v_k ab u_1 \ldots u_t$ with $t \geq 2 \ell$, $k \geq \ell $. Note that the length of $C$ is $k+t+4 \not\equiv 0 \pmod{3}$.
Since $\HH$ contains a pseudopath from $xy$ to $ab$, it also contains such a pseudopath $P = xy w_1 \ldots w_r ab$ with $r \leq \ell -4$.
The pseudocycle $xy w_1 \ldots w_r ab u_1 \ldots u_t$ is shorter than $C$, so it must not be odd, that is, $r+t+4 \modthree{0}$.
Now consider the pseudocycle $C_1 = v_1 \ldots v_k \tilde{P}$. Recall that $\tP$ is a $(2r+2)$-vertex pseudopath from $ab$ to $xy$ (see \eqref{eqn:tilde}), so $C_1$ is indeed a pseudocycle.
The length of $C_1$ is $k + 2r +6 \equiv k-r \equiv k+t+4 \not\equiv 0 \pmod{3}$. Noting that $k+2r +6 \leq k + 2\ell - 2 \le k + t$, this contradicts the minimality of $C$.
\end{proof}
\begin{proposition} \label{prop:small-diameter}
Let $ 1/\ell \ll \varepsilon \ll 1$, and let $\HH$ be an $n$-vertex hypergraph.
Then there is a subgraph $\HH' \subseteq \HH$ with $e(\HH') \ge e(\HH) - \varepsilon n^3$ whose diameter is at most $\ell$.
\end{proposition}
\begin{proof}
First we form a subgraph $\HH' \subseteq \HH$ in which each vertex pair has codegree either $0$ or at least $\varepsilon n$, as follows. If there are vertices $u, v$ whose codegree in the \emph{current} hypergraph is smaller than $\varepsilon n$, delete all edges containing $uv$. Repeat this step until each pair has codegree either $0$ or at least $\varepsilon n$. Denote the resulting hypergraph by $\HH'$. Observe that the number of deleted edges is at most $\varepsilon n \cdot \binom n2$ since the edges containing each pair were removed at most once. Hence $e(\HH') \geq e(\HH)-\varepsilon n^3$.
Given ordered pairs $uv$ and $u'v'$ which have a pseudopath connecting them, let $P=uv x_0 x_1 \dots x_t u'v'$ be a shortest such pseudopath. For each $i$, let $B_i$ be the set of ordered pairs $ab$ such that $x_ix_{i+1}ab$ is a tight path. We claim that the sets $B_{10i}$ are mutually disjoint for $0\leq i < \frac{t}{10}$. Suppose not, and take $ab \in B_{10i} \cap B_{10j}$ for some $0 \le i<j < \frac{t}{10}$. Then $x_ix_{i+1}ab x_{j+1}a x_j x_{j+1}$ is a pseudopath with only five vertices between $x_i$ and $x_j$, which can be used to form a shorter pseudopath than $P$ connecting $uv$ and $u'v'$, contradiction.
Now since $|B_i| \geq \varepsilon^2 n^2/2$ for every $i$, using the codegree condition, we have
$$
\floor{\frac{t}{10}}\cdot \frac{\varepsilon^2 n^2}{2} \leq n^2,
$$
so $t \leq \frac{20}{\varepsilon^2}$.
Hence the diameter of $\HH'$ is at most $\ell:=\frac{20}{\varepsilon^2}+4$, as required.
\end{proof}
As alluded to in \Cref{sec:overview}, we can already prove \Cref{cor:weak-main}, restated here, which is a weakening of \Cref{thm:pseudocycles}, with only an asymptotic upper bound, which depends on $\ell$, on the number of edges.
\corWeak*
This bound will be used in the proof of \Cref{prop:max-deg}. Note that the analogous bound on the extremal number of proper odd tight cycles follows from \Cref{thm:ex-blowup}.
\begin{proof}[Proof of \Cref{cor:weak-main}]
Assume the opposite, that $e(\HH) \geq f(n) + \varepsilon n^3$. Applying Proposition~\ref{prop:small-diameter} with the parameters $\ell/4$ and $\varepsilon/2$, we obtain a hypergraph $\HH' \subseteq \HH$ with at least $f(n) + \varepsilon n^3 /2$ edges whose diameter is at most $\ell/4$. $\HH'$ contains no odd pseudocycles of length at most $\ell$, so by \Cref{prop:diam-cyc}, it contains no odd pseudocycles. Hence we may apply Theorem~\ref{thm:good-colouring} to obtain a good colouring of $\partial \HH'$ with $e(\HH') > f(n)$ cherries, contradicting \Cref{theorem_intro_falgas_ravry}.
\end{proof}
The following proposition shows that hypergraphs with no short odd cycles whose number of edges is large are close to being regular.
\begin{proposition} \label{prop:min-deg}
Let $1/n \ll 1/\ell \ll \varepsilon \ll 1$, and let $\HH$ be an $n$-vertex hypergraph with no odd pseudocycles of length at most $\ell$, which maximises the number of edges under these conditions. Then $d(u) \ge (3\alpha - \varepsilon)n^2$ for every vertex $u$.
\end{proposition}
\begin{proof}
Given vertices $u$ and $v$ in $\HH$, consider the hypergraph $\HH_{uv}$ obtained from $\HH$ by removing all edges containing $v$ and then adding the edge $e - u + v$, for each edge $e$ that contains $u$ but not $v$. Observe that $\HH_{uv}$ has no odd pseudocycles of length at most $\ell$; indeed, if there were such a cycle then we could replace each instance of $v$ by $u$ to obtain an odd pseudocycle of the same length in $\HH$ (whereby it is important that $\HH_{uv}$ has no edges containing both $u$ and $v$), a contradiction. Since $e(\HH_{uv}) \ge e(\HH) - d(v) + d(u) - n$ and by maximality of $\HH$, we have $d(v) \ge d(u) - n$. Since $u$ and $v$ were arbitrary, this implies that the maximum and minimum degrees of $\HH$ differ by at most $n$. In particular, using $e(\HH) \ge f(n) = \alpha n^3 + o(n^3)$, which follows from the maximality of $\HH$ and \Cref{prop:alpha},
\begin{equation*}
\delta(\HH)
\ge \frac{3e(\HH)}{n} - n \ge \frac{3f(n)}{n} - n \ge (3 \alpha - \varepsilon)n^2.
\qedhere
\end{equation*}
\end{proof}
Next, we prove a stability version of the previous proposition.
\begin{proposition} \label{prop:max-deg}
Let $1/n \ll 1/\ell \ll \varepsilon_1 \ll \varepsilon_2 \ll 1$, and let $\HH$ be an $n$-vertex $3$-uniform hypergraph with no odd pseudocycles of length at most $\ell$. If $e(\HH) \ge f(n) - \varepsilon_1 n^3$ then $d(u) \le (3\alpha + \varepsilon_2)n^2$ for every vertex $u$.
\end{proposition}
\begin{proof}
Let $\mu = \sqrt{\varepsilon_1} \leq \varepsilon_2/10$.
Let $X$ be the set of vertices $x$ with $d(x) \le 3(\alpha + \mu)n^2$. Then $e(\HH) \ge (n - |X|)(\alpha + \mu)n^2$. By \Cref{cor:weak-main} (and the properties of $f(n)$) we also have $e(\HH) \le (\alpha + \varepsilon_1)n^3$.
Putting the two inequalities together, we get
\begin{align*}
& (\alpha + \varepsilon_1)n^3 \ge (n - |X|)(\alpha + \mu)n^2 \\
\Longrightarrow \quad
& |X| \ge \frac{(\alpha + \mu)n - (\alpha + \varepsilon_1)n}{\alpha + \mu} = \frac{\mu - \varepsilon_1}{\alpha + \mu} \cdot n \ge \mu n.
\end{align*}
Let $u$ be a vertex of maximum degree in $\HH$, and let $X'$ be a subset of $X$ of size $t := \mu n$. We may assume $u \notin X'$ because otherwise $d_{\HH}(u) \le (3\alpha + 3\mu)n^2 \le (3\alpha + \varepsilon_2)n^2$, as required.
Now consider the hypergraph $\HH_1$ formed in two steps as follows. First, define $\HH_0 = \HH \setminus X'$; then $e(\HH_0) \ge e(\HH) - t \cdot 3(\alpha + \mu)n^2$ and $d_{\HH_0}(u) \ge d_{\HH}(u) - t n$. Second, let $\HH_1$ be the hypergraph obtained by adding $|X'|$ copies of $u$ to $\HH_0$. Then
\begin{align*}
e(\HH_1)
& \ge e(\HH_0) + t \cdot d_{\HH_0}(u) \\
& \ge e(\HH) - t \cdot 3(\alpha + \mu)n^2 + t \cdot (d_{\HH}(u) - tn) \\
& \ge f(n) - \varepsilon_1 n^3 + t \cdot (d_{\HH}(u) - tn - 3(\alpha + \mu)n^2) \\
& = f(n) - \varepsilon_1 n^3 + \mu n \cdot (d_{\HH}(u) - (3\alpha + 4\mu)n^2).
\end{align*}
Notice that $\HH_1$ has no odd pseudocycles of length at most $\ell$. Thus, by \Cref{cor:weak-main}, we have $e(\HH_1) \le f(n) + \varepsilon_1 n^3$. Hence, using $\mu = \sqrt{\varepsilon_1} \leq \varepsilon_2 / 10$,
\begin{equation*}
d_{\HH}(u)
\le (3\alpha + 4\mu)n^2 + (2\varepsilon_1/\mu)n^2
\le (3\alpha + \varepsilon_2)n^2,
\end{equation*}
as required.
\end{proof}
\subsection{The structure of odd-pseudocycle-free graphs} \label{subsec:proof-partition}
We now prove the main result in the section, \Cref{thm:partition}.
The starting point of the proof uses the relation between hypergraphs with no odd pseudocycles and good colourings of $K_n$, as well as the stability result about cherries from the previous section, to conclude the following: there is a coloured graph $G$ with a nice structure such that almost all cherries in $G$ are triples in $\HH$ and vice versa. This readily implies the existence of a partition $\{A, B\}$ of the vertices such that $|A| \approx \beta n$ and for almost every vertex $u$ in $\HH$ the following holds: almost all vertices in $A$ are joined to almost all $A \times B$ pairs, and almost all vertices in $B$ are joined to almost all $A^{(2)}$ pairs. The main difficulty of the proof lies in showing that there is such a partition for which \textit{every} vertex in $A$ is joined to almost all pairs in $A \times B$, and similarly for vertices in $B$. This is achieved in \Cref{claim:structure} and the main idea is to compare several graphs obtained by modifying the triples containing a given vertex. Given a partition as above, to conclude the proof, we argue (using the fact that $\HH$ has no short odd pseudocycles) that the number of $AAB$ ``non-edges'' exceeds the number of $AAA$ and $ABB$ edges, unless all of these numbers are 0. The maximality of $\HH$ implies that all these numbers are indeed $0$, meaning that $\HH$ has all $AAB$ edges and no $AAA$, $ABB$ edges.
\begin{proof}[Proof of \Cref{thm:partition}]
Let $\varepsilon_7 = 0.1$ and let $\varepsilon_1, \ldots, \varepsilon_6$, and $\ell$ satisfy
\begin{equation*}
0 < 1/\ell \ll \varepsilon_1 \ll \ldots \ll \varepsilon_7.
\end{equation*}
Let $\HH'$ be a subgraph of $\HH$ on the same vertex set with at least $e(\HH) - \varepsilon_1 n^3$ edges, that has diameter at most $\ell/4$; such $\HH'$ exists by \Cref{prop:small-diameter}. By \Cref{prop:diam-cyc}, $\HH'$ has no odd pseudocycles, so by \Cref{thm:good-colouring}, there is a good colouring of $\partial\HH'$.
Extending the good colouring of $\partial\HH'$ arbitrarily to also cover vertex pairs which are not in the shadow, we obtain a coloured graph (recall that this is a complete graph whose edges are either blue or oriented and red) $G'$ on vertex set $V := V(\HH)$, such that every edge in $\HH'$ is a cherry in $G'$.
By maximality of $\HH$, we have $c(G') \ge e(\HH') \ge e(\HH) - \varepsilon_1 n^3 \ge f(n) - \varepsilon_1 n^3$.
Thus, by \Cref{thm:stability}, there is a graph $G$ satisfying \ref{itm:stability-a}--\ref{itm:stability-c} in \Cref{thm:stability} on vertex set $V$. That is, $G$ has at least as many cherries as $G'$, all but at most $\varepsilon_2 n^3$ cherries in $G$ are cherries in $G'$, and $V$ can be partitioned into sets $X_1, \ldots, X_k$ such that: $G[X_i]$ is blue for $i \in [k]$; $|X_i| = (\beta \pm \varepsilon_2) \cdot (|X_{i}| + \ldots + |X_k|)$ for $i \in [k]$; and all $X_i \times X_j$ pairs in $G$ are red and oriented towards $X_i$, for $1 \le i < j \le k$. Recall that $\beta=\frac{3 - \sqrt{3}}{2}$ was defined in \eqref{eq:alphabeta}.
Define $X_{>i} := X_{i+1} \cup \ldots \cup X_k$, and define $X_{\ge i}$ analogously.
Let $H$ be the subgraph of $G$ whose edges are either pairs in $X_i \times X_i$ that are in at least $(|X_{i+1}| + \ldots + |X_k|) - \varepsilon_3 n$ triples in $(X_i \times X_i \times X_{>i}) \cap E(\HH)$, or pairs in $X_i \times X_j$, where $i < j$, that are in at least $|X_i| - \varepsilon_3 n$ triples in $(X_i \times X_i \times X_j) \cap E(\HH)$.
Denoting the number of non-edges in $H$ by $\be(H)$, we have that the number of cherries in $G$ that are not edges in $\HH$ is at least $\be(H) \cdot \varepsilon_3 n / 3$.
Recall that $e(\HH') \ge e(\HH) - \varepsilon_1 n^3 \ge f(n) - \varepsilon_1 n^3$ and that all edges in $\HH'$ are cherries in $G'$. But $c(G') \leq f(n)$ (by~Theorem~\ref{theorem_intro_falgas_ravry}), so all but $\varepsilon_1 n^3$ cherries in $G'$ are edges in $\HH'$ and thus in $\HH$. Since there are at most $\varepsilon_2 n^3$ cherries in $G$ that are not cherries in $G'$, it follows that all but at most $(\varepsilon_1 + \varepsilon_2)n^3 \le 2\varepsilon_2 n^3$ cherries in $G$ are edges in $\HH$.
Hence $\be(H) \cdot \varepsilon_3 n / 3 \le 2\varepsilon_2 n^3$, showing $\be(H) \le (6\varepsilon_2/ \varepsilon_3)n^2 \le \varepsilon_3 n^2$.
Let $k_0$ be the maximum $i$ such that $|X_i| \ge \varepsilon_4 n$.
Define subsets $X_i' \subseteq X_i$ as follows: if $i < k_0$ let $X_i'$ be the set of vertices in $X_i$ that have degree at least $|X_i| - \varepsilon_4 n$ in $H[X_i]$ and degree at least $|X_{>i}| - \varepsilon_4 n$ in $H[X_i, X_{>i}]$; if $i \ge k_0$, define $X_i' := \emptyset$.
Since $ (\varepsilon_4 n / 2)\sum_{i < k_0} |X_i \setminus X_i'| \le \be(H) \le \varepsilon_3 n^2$ and $|X_{\ge k_0}| \le 10 \varepsilon_4 n$ (using \ref{itm:stability-c4}), we have $$\sum_{i \in [k]} |X_i \setminus X_i'| \le 10\varepsilon_4 n + (2\varepsilon_3/\varepsilon_4)n \le 20 \varepsilon_4n.$$
Let $X := X_1' \cup \ldots \cup X_k'$ and $Y := V \setminus X$. We have seen that $|Y| \le 20\varepsilon_4 n \le \varepsilon_5 n$.
For $v \in V$, let $N(v)$ be the \emph{link} of $v$, namely the graph spanned by pairs $uw$ such that $uvw \in E(\HH)$.
Write $A := X_1'$ and $B := X \setminus X_1'$.
\begin{claim} \label{claim:structure}
One of the graphs $N(u)[A]$ and $N(u)[A, B]$ has at most $\varepsilon_6 n^2$ non-edges, for every $u \in V$.
\end{claim}
\begin{proof}
Let $\varepsilon_5 \ll \mu \ll \varepsilon_6$. Note that the claim holds for all $u \in X$, so it suffices to prove it for $u \in Y$. Fix such $u$.
Let $\F$ be the hypergraph on vertex set $X$ whose edges are all $X_i'X_i'X_j'$ triples with $1 \le i < j \le k_0$. We will construct two hypergraphs $\F_i^{+}$ (for $i \in \{1, 2\}$), that consist of $\F$ with one additional vertex $u_i$, which is a suitable modification of $u$, and that have no odd pseudocycles of length at most $\ell / 10$. We will argue that if both $N(u)[A]$ and $N(u)[A, B]$ have at least $\varepsilon_6 n^2$ non-edges then $d_{\F_i^+}(u_i) >(3\alpha + \mu)n^2$ for some $i \in [2]$, contradicting \Cref{prop:max-deg}.
Let $F_0$ be the graph on vertex set $X$ with edges $E(H) \cap E(N(u))$. Recall that vertices in $X_i'$ have at most $2\varepsilon_4 n$ non-neighbours in $H[X'_{>i}]$. Thus, using \Cref{prop:min-deg} for a lower bound on $d_{\HH}(u)$, we have $e(F_0) \ge d_{\HH}(u) - |Y| \cdot n - |X| \cdot 2\varepsilon_4 n \ge (3\alpha - 10\varepsilon_5)n^2$.
We modify $F_0$ as follows, while possible: remove each edge $xy$ satisfying: $x, y \in A$ and $x$ has degree $1$ in $A$; or $x \in A$, $y \in B$, and $x$ has degree $1$ into $B$ or $y$ has degree $1$ into $A$. Call the resulting graph $F$ and notice that $|E(F_0) \setminus E(F)| \le 2n$, implying that
\begin{equation} \label{eqn:deg-u}
e(F) \ge (3\alpha - 20 \varepsilon_5)n^2.
\end{equation}
Recall that $\F$ is the hypergraph on vertex set $X$ whose edges are all $X_i'X_i'X_j'$ triples with $1 \le i < j \le k_0$, and let $\Fp$ be the hypergraph obtained by adding the vertex $u$ to $\F$ along with all edges $uvw$ such that $vw \in E(F)$.
We argue that $\Fp$ has no odd pseudocycles of length at most $\ell/10$. To do so, we prove the following.
\begin{align} \label{eqn:F-to-H}
\begin{split}
&\text{Let $xy, vw \in E(H)$, and let $P$ be a pseudopath in $\F$ from $xy$ to $vw$ on $t$ vertices. Then} \\
&\text{there is a pseudopath $P'$ in $\HH$ from $xy$ to $vw$ of order $t$ (if $t = 2$) or $t + 3$ (otherwise).}
\end{split}
\end{align}
We prove \eqref{eqn:F-to-H} by induction on $t$. If $t = 2$ we can take $P' = P$. Suppose that $t = 3$, so $P = xyw$. Let $i_1, i_2, i_3$ be such that $x \in X_{i_1}'$, $y \in X_{i_2}'$ and $w \in X_{i_3}'$.
Since $xy \in E(H)$, we know that for almost every $a \in X_{i_3}'$ the following holds: $xya \in E(\HH)$ and $ya \in E(H)$; pick such an $a$ with $a \neq w$. Similarly, $yab$ and $ywb$ are edges in $\HH$ for almost every $b \in X_{i_1}'$; pick such $b$. The path $xyabyw$ satisfies the requirements.
Next, suppose that $t = 4$, so $P = xyvw$. Let $i_1, i_2, i_3, i_4$ be such that $x \in X_{i_1}'$, $y \in X_{i_2}'$, $v \in X_{i_3}'$ and $w \in X_{i_4}'$. As $xy \in E(H)$, almost all $a \in X_{i_3}'$ satisfy $xya \in E(\HH)$ and $ya \in E(H)$; fix such $a$. Similarly, almost all $c \in X_{i_2}'$ satisfy $cvw \in E(\HH)$, $cv \in E(H)$ and $ac \in E(H)$; fix such $c$. Finally, almost every $b \in X_{i_3}'$ satisfies $yab, abc, bcv \in E(\HH)$; fix such $b$. Then $P' = xyabcvw$ satisfies the requirements.
Finally, suppose that $t \ge 5$, and write $P = v_1 \ldots v_t$, so $x = v_1$, $y = v_2$, $v = v_{t-1}$ and $w = v_t$. Let $i_j$ be such that $v_j \in X_{i_j}'$ for $j \in [t]$. As usual, since $xy = v_1 v_2 \in E(H)$, almost all $a \in X_{i_3}'$ satisfy: $v_2 a \in E(H)$ and $v_1 v_2 a \in E(\HH)$. Let $Q = v_2 a v_4 \ldots v_t$. Then $Q$ is a pseudopath in $\F$ of order $t-1$ that starts and ends with edges in $H$. By induction, there is a pseudopath $Q'$ in $\HH$ from $v_2 a$ to $v_{t-1} v_t$ of order $t + 2$. Then we can take $P' = v_1 Q'$, completing the proof of \eqref{eqn:F-to-H}.
Now suppose that $C = v_1 \ldots v_t$ is a pseudocycle in $\Fp$, where $t \le \ell/10$. We need to show that $t$ is divisible by $3$. If $C$ does not go through $u$, then $C$ is in $\F$, implying that $t$ is indeed divisible by $3$. So we may assume that $C$ goes through $u$ at least once. This shows that $C$ can be written as $u P_1 u \ldots u P_k$, where $P_i$ is a pseudopath in $\F$ whose first two vertices and last two vertices form edges in $F$. It follows from \eqref{eqn:F-to-H} that for each $i \in [k]$ there is a pseudopath $P_i'$ in $\HH$ whose first two vertices and last two vertices match those of $P_i$ and whose order satisfies $|P_i'| - |P_i| \in \{0, 3\}$. Then $C' := u P_1' u \ldots u P_k'$ is a pseudocycle in $\HH$ with $|C'| \le |C| + 3k \le 4|C| \le \ell$ and $|C'| \modthree{|C|}$. By the properties of $\HH$, we have that $|C'|$ is divisible by $3$, implying that $|C|$ is divisible by $3$, as required.
Let $A_0$ and $A_1$ be the sets of vertices in $A$ incident with $AA$ and $AB$ edges in $F$, respectively (that is, $a_0 \in A_0$ if $F$ contains an edge $a_0x$ with $x \in A$).
We claim that $A_0$ and $A_1$ are disjoint. Indeed, if $a_1 \in A_0 \cap A_1$, then there are vertices $b_0 \in B$ and $a_0, a_2 \in A - a_1$ such that $a_0a_1b_0a_2$ is a path in $F$. Let $a_3$ and $b_1$ be arbitrary vertices in $A$ and $B$, respectively (distinct from previously chosen vertices). Then $a_0 a_1 u b_0 a_2 a_3 b_1$ is a pseudocycle of length 7 in $\Fp$, a contradiction.
Let $B_1$ be the set of vertices in $B$ incident with $AB$ edges in $F$. Using an argument similar to that of the above paragraph, we will show that $B_1$ is independent in $F$. Indeed, otherwise there is a path $a_1b_1b_2a_2$ in $F$. Choosing $a_3, a_4 \in A$ and $b_3 \in B$ to be arbitrary unused vertices, we obtain a pseudocycle $a_1b_1ub_2a_2 a_3 b_3 a_4$ of length 8 in $\Fp$ and reach a contradiction.
Let $F_1$ and $F_2$ be graphs on vertex set $X$, defined as follows: $E(F_1) = A \times B$ and $E(F_2) = A^{(2)} \cup E(F[B])$. Now define $\Fp_i$ to be the graph obtained from $\F$ by adding a new vertex $u_i$ and edges $u_i vw$ such that $vw \in E(F_i)$, for $i \in [2]$. Thus $\Fp_i$ and $\Fp$ differ only on edges touching $u_i$ or $u$.
We claim that $\Fp_i$ has no odd pseudocycles of length at most $\ell/10$. Indeed, this is easy to see for $i = 1$, because we can think of $\Fp_1$ as obtained by extending $X_1'$ by one vertex. To see that this also holds for $i = 2$, notice that in $\Fp_2$, the $AAB$ and $BBB$ triples are in different strong components, so any pseudocycle $C$ in $\Fp_2$ is either a pseudocycle in $\Fp$ or consists only of edges containing exactly two vertices from $A$.
Notice that $e(\Fp_i) \ge c(G) - |Y|n^2 \ge f(n) - (\varepsilon_1 + \varepsilon_5) n^3 \ge f(n) - 2\varepsilon_5 n^3$, because all cherries in $G$ that do not touch $Y$ are edges in $\F$ and $c(G) \ge c(G') \ge f(n) - \varepsilon_1 n^3$. Using this lower bound and the fact that $\Fp_i$ has no odd pseudocycles of length at most $\ell/10$, \Cref{prop:max-deg} implies that $d_{\Fp_i}(u_i) \le (3\alpha + \mu)n^2$.
Since $d_{\Fp}(u) = e(F) \ge (3\alpha - 20\varepsilon_5)n^2$ (see \eqref{eqn:deg-u}), we have $e(F_i) - e(F) = d_{\Fp_i}(u_i)-d_{\Fp}(u) \le (\mu + 20\varepsilon_5)n^2 \le 2\mu n^2$ for $i \in [2]$.
To finish, suppose first that $|A_0| \ge |B_1|$. Recalling that $F$ and $F_2$ coincide on $B$, and that $F$ has no edges in $(A_1 \cup B_1) \times A_0$ or $A_1^{(2)}$, we have
\begin{align*}
2\mu n^2 \ge e(F_2)-e(F)
& \ge - |A_1||B_1| + |A_0||A_1| + \binom{|A_1|}{2} + \be(F[A_0]) \\
& \ge \frac{|A_1|^2}{2} + \be(F[A_0]) + O(n).
\end{align*}
It follows that $|A_1| \le 5\mu^{1/2} n$ and $\be(F[A_0]) \le 5\mu n^2$. Altogether $\be(F[A]) \le |A_1| \, n + \be(F[A_0]) \le 10 \mu^{1/2} n^2 \le \varepsilon_6 n^2$. Since $F[A] \subseteq N(u)[A]$, \Cref{claim:structure} is proved in this case.
Now we consider the remaining case, namely that $|A_0| \le |B_1|$. Let $B_0 = B \setminus B_1$, and recall that $F$ has no edges in $B_1^{(2)}$ or in $A_0 \times B_1$. Using $|A| \ge |B| = |B_0| + |B_1|$,
\begin{align*}
2\mu n^2 & \ge e(\Fp_1) - e(\Fp) \\
& \ge -\binom{|A_0|}{2} - \binom{|B_0|}{2} - |B_0||B_1| + |A||B_0| + |A_0||B_1| + \be(F[A_1, B_1]) \\
& \ge |A_0|(|B_1| - |A_0|) + |B_0|(|A| - |B_0| - |B_1|) + \frac{|A_0|^2}{2} + \frac{|B_0|^2}{2} + \be(F[A_1, B_1]) + O(n) \\
& \ge \frac{|A_0|^2}{2} + \frac{|B_0|^2}{2} + \be(F[A_1, B_1]) + O(n).
\end{align*}
Thus, we have $|A_0|, |B_0| \le 5 \mu^{1/2} n$ and $\be(F[A_1, B_1]) \le 5\mu n^2$.
This implies that $\be(F[A, B]) \le |A_0|\, n + |B_0|\, n + \be(F[A_1, B_1]) \le \varepsilon_6 n^2$, proving \Cref{claim:structure}.
\end{proof}
Let $\As$ be the set of vertices $u$ such that $N(u)[A, B]$ has at most $\varepsilon_6 n^2$ non-edges, and let $\Bs := V \setminus \As$. Note that $A \subseteq \As$, and by \Cref{claim:structure}, for every $u \in \Bs$ the graph $N(u)[A]$ has at most $\varepsilon_6 n^2$ non-edges.
Let $t_1$ be the number of $\As\As\As$ triples in $\HH$, let $t_2$ be the number of $\As\Bs\Bs$ triples in $\HH$, and let $s$ be the number of $\As\As\Bs$ triples that are not edges in $\HH$.
Let $\Hs$ be the hypergraph obtained from $\HH$ by removing all $\As\As\As$ and $\As\Bs\Bs$ triples and adding all missing $\As\As\Bs$ triples. Then $\Hs$ has no odd pseudocycle of length at most $\ell$; this follows from observing that every pseudocycle in $\Hs$ is either a pseudocycle in $\HH$ or each of its edges has exactly two vertices in $\As$. Moreover, $e(\Hs) - e(\HH) = s - (t_1 + t_2)$. By maximality of $\HH$ we have $s \le t_1 + t_2$.
\begin{claim}
$t_1 \le \varepsilon_7 s$.
\end{claim}
\begin{proof}
Let $\varepsilon_6 \ll \mu \ll \varepsilon_7$.
We first show that for every distinct $u, v \in \As$, there are at most $\mu n$ vertices $w \in \As$ such that $uvw \in E(\HH)$.
Suppose there exist $u, v \in \As$ violating this. Let $W$ be the set of vertices $w \in \As$ such that $uvw \in E(\HH)$, so $|W|\geq \mu n$.
Consider the graph $(N(u) \cap N(v))[W, B]$; its edges are pairs $wb$ such that $w \in W$, $b \in B$, and $uwb, vwb \in E(\HH)$. This graph has at most $2\varepsilon_6 n^2$ non-edges, by \Cref{claim:structure}. Thus there exists $b \in B$ with at least $\frac 12 \mu n$ neighbours in the aforementioned graph; denote its set of neighbours by $W'$. Now, by \Cref{claim:structure}, $b$ is adjacent in $\HH$ to all but at most $\varepsilon_6 n^2$ pairs in $W'$, so there exists a triple $w_1w_2b \in E(\HH)$ with $w_1, w_2 \in W'$. Thus $uvw_1bw_2$ is a pseudocycle of length 5, contradiction.
To finish the argument, we count the four-tuples
\begin{equation*}
Q := \{ \{u, v, w, z \}: u, v, w \in \As, z \in \Bs, uvw \in E(\HH), uvz \notin E(\HH)\}
\end{equation*}
in two different ways. For each vertex $b \in \Bs$ and $\As\As\As$ triple $uvw \in E(\HH)$, at least one of the triples $uvb, uwb, vwb$ is not in $E(\HH)$, so $|Q| \geq t_1|\Bs|$. On the other hand, it follows from the above paragraph that any $\As\As\Bs$ triple $uvz \notin E(\HH)$ extends to at most $\mu n$ elements of $Q$, so $|Q| \leq s \mu n$. Hence
\begin{equation*}
t_1 \leq \frac{|Q|}{|\Bs|} \le \frac{s \mu n}{|\Bs|} \leq \varepsilon_7 s,
\end{equation*}
as claimed.
\end{proof}
\begin{claim}
$t_2 \le 2s/3$.
\end{claim}
\begin{proof}
Let $\varepsilon_6 \ll \mu \ll \varepsilon_7$.
To begin with, we show that if $uvw$ is an $\As \Bs \Bs$ triple in $\HH$ (with $u \in \As$) then one of the pairs $uv$ and $uw$ is in at most $\mu n$ triples of the form $\As\As\Bs$ in $\HH$. Fix an $\As\Bs\Bs$ triple $uvw \in E(\HH)$.
Let $W'$ (resp.\ $V'$) be the set of vertices $a \in \As$ such that $uwa \in E(\HH)$ (resp.\ $uva \in E(\HH)$). Suppose that $|W'|, |V'| \geq \mu n$. Consider the graph $(N(v) \cap N(w))[W', V']$. By~\Cref{claim:structure}, this graph contains an edge $a_1a_2$, i.e.\ we have~$a_1a_2w$, $a_1a_2v \in E(\HH)$. By definition of $W'$ and $V'$, the triples $uwa_1$ and $uva_2$ are in $\HH$. Hence $uwa_1a_2v$ is a pseudocycle of length 5, a contradiction.
Let $F$ be an auxiliary bipartite graph with parts $\As$ and $\Bs$ such that $uv$ is an edge of $F$ whenever (i) there is an $\As\Bs\Bs$ triple in $\HH$ containing $uv$, and (ii) the number of $\As\As\Bs$ triples containing $uv$ is at most $\mu n$. By the previous paragraph, each $\As\Bs\Bs$ triple in $\HH$ contains an edge of $F$, so
\begin{equation*}
t_2 \leq |\Bs| \cdot e(F) \leq 0.4n \cdot e(F).
\end{equation*}
Moreover, we claim that $d_F(v) \le \mu n$ for every $v \in \Bs$. Indeed, by (ii), the graph $N(v)[\As]$ has at least $d_F(v)(|\As| - \mu n)/2$ non-edges. If $d_F(v)>\mu n$ then this quantity is larger than $2\varepsilon_6 n^2$, contradicting \Cref{claim:structure}. Also using (ii), we conclude that
\begin{equation*}
s \ge \sum_{v \in \Bs} d_F(v) \cdot (|\As| - d_F(v) - \mu n) \ge 0.6 n \cdot e(F).
\end{equation*}
It follows that $t_2 \le 2s/3$, as claimed.
\end{proof}
The last two claims, and the choice $\varepsilon_7=0.1$, say, show that $(t_1 + t_2) \le 0.8 s$. Since $s \le t_1 + t_2$ this implies that $t_1 = t_2 = s = 0$. That is, all $\As\As\Bs$ triples are edges in $\HH$ (and there are no $\As\As\As$ or $\As\Bs\Bs$ edges). This proves \Cref{thm:partition}.
\end{proof}
\section{Open problems}
There are two natural extensions of our result. Firstly, one could prove~\Cref{Conjecture_Mubayi_Rodl}, or perhaps determine the density of $C_\ell^{(3)}$ for smaller values of $\ell$, say $\ell \leq 100$. Although we do not state our bound on $\ell$ explicitly, this would not be too cumbersome, since it is a polynomial in $\varepsilon_7$, and we set $\varepsilon_7 = 0.1$.
The second direction is determining the Tur\'an density of $r$-uniform tight cycles for $r \geq 4$. For this, we do not even know of a conjectured optimal construction. Moreover, our characterisation of odd-pseudocycle-free hypergraphs (\Cref{thm:good-colouring}) does not have an obvious extension, as the straightforward extension of Definition~\ref{def:good-col} is too strong.
As mentioned in the introduction, there are many other specific 3-uniform hypergraphs for which determining the Tur\'an density would be very interesting. Let us point out one conjecture which is perhaps less well known, and which can be found for instance in \cite{mubayi2011hypergraph}.
\begin{conjecture} ~\label{conj:c5-minus}
Let $\C_5^{-}$ be the 3-uniform hypergraph obtained from the tight 5-cycle $\C_5^3$ by removing one edge. The Tur\'an density of $\C_5^{-}$ is $\frac 14$.
\end{conjecture}
As in our case, one conjectured extremal hypergraph is an iterated construction; one may take a complete 3-partite 3-uniform hypergraph and then repeat the same construction recursively within each of the three parts.
\end{document} | arXiv | {
"id": "2209.08134.tex",
"language_detection_score": 0.766010582447052,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{New Perspectives on the Erlang-A Queue}
\begin{abstract}
The non-stationary Erlang-A queue is a fundamental queueing model that is used to describe the dynamic behavior of large scale multi-server service systems that may experience customer abandonments, such as call centers, hospitals, and urban mobility systems. In this paper, we develop novel approximations to all of its transient and steady state moments, the moment generating function, and the cumulant generating function. We also provide precise bounds for the difference of our approximations and the true model. More importantly, we show that our approximations have \emph{explicit stochastic representations as shifted Poisson random variables}. Moreover, we are also able to show that our approximations and bounds also hold for non-stationary Erlang-B and Erlang-C queueing models under certain stability conditions. Finally, we perform numerous simulations to support the conclusions of our results. \end{abstract}
\noindent \textbf{Keywords:} Multi-Server Queues, Abandonment, Dynamical Systems, Asymptotics, Time-Varying Rates, Moments, Fluid Limits, Erlang-A Queue, Functional Forward Equations, Moment Generating Function, Cumulant Moment Generating Function.
\section{Introduction}
Markov processes are important modeling tools that help researchers describe real-world phenomena. Thus, it comes as no surprise that the Erlang-A model, which is a Markovian and multi-server queueing model that incorporates customer abandonments, is an important modeling tool in a multitude of application settings. Some of the more prominent applications include telecommunications, healthcare, urban mobility and transportation, and more recently cloud computing. See for example the following work by \citet{mandelbaum2002queue, massey2002analysis, yom2014erlang, pender2016law}. Despite its importance in many different applications, the Erlang-A queueing model has remained very difficult to analyze and understand. Even the analysis of the moments of the Erlang-A queue beyond the fourth moment has remained an important topic for additional study.
It is well known that the stationary setting of the Erlang-A is much easier to analyze than its non-stationary counterpart. Some common approaches used to analyze non-stationary and state dependent queueing models include asymptotic methods such as heavy traffic limit theory and strong approximations theory, see for example \citet{halfin1981heavy, mandelbaum1998strong}. Uniform acceleration is extremely useful for approximating the transition probabilities and moments such as the mean and variance of Markov processes. Moreover, the strong approximation methods are useful for analyzing the sample path behavior of the Markov process by showing that the sample paths of properly rescaled queueing processes converge to deterministic dynamical systems and Gaussian process limits.
However, there are two main drawbacks of these asymptotic methods. The first is that the method is asymptotic as a function of the model parameters and the results really only hold when the rates are large and are nearly infinite. Thus, the quality of the approximations depends significantly on the size of the model parameters and these asymptotic methods have been shown to be quite inaccurate for moderate sized model parameter settings, see for example \citet{massey2011poster, massey2013gaussian}. The second main drawback is that the asymptotic methods do not generate any important insights for the moments or cumulant moments beyond order two since the limits are based on Brownian motion. Since Brownian motion has symmetry, its cumulants are all zero beyond the second order. Thus, Brownian approximations are limited in their power to capture asymmetries in higher moments or even the dynamics of the moment generating function, cumulant generating function, or Fourier transform. Moreover, it has been shown recently by \citet{pender2014gram, engblom2014approximations} that the Erlang-A and its variants have non-trivial amounts of skewness and excess kurtosis, which implies that the Erlang-A is not nearly Gaussian for moderate sized queues. These results also demonstrate that it is important to capture the behavior of the Erlang-A model beyond its second moment as this information can be used in staffing decisions, see \citet{massey2017performance}.
One common approximation method that is used in the stochastic networks, queueing, and chemical reactions literature is a \emph{moment closure approximation}. Moment closure approximations are used to approximate the moments of the queueing process with a surrogate distribution. It is often the case that the set of moment equations for a large number of queueing models are not closed, see for example \citet{matis2001transient, pender2014laguerre}. Thus, the closure approximation helps approximate the moments with a closed system using the surrogate distribution. One such method used by \citet{pender2016sampling, pender2017approximations} is to use Hermite polynomials for approximating the distribution of the queue length process. In fact, they show that using a quadratic polynomial works quite well. Since the Hermite polynomials are orthogonal to the Gaussian distribution, which has support on the entire real line, these Hermite polynomial approximations do not take into account the discreteness of the queueing process and the fact that the queueing process is non-negative. However, they show that Hermite polynomials are natural to analyze since they are orthogonal with respect to the Gaussian distribution and the heavy traffic limits of multi-server queues are Gaussian.
In this paper, we perform an in-depth analysis of the moments and the moment generating function of the non-stationary Erlang-A queue. As the Erlang-B and Erlang-C queueing models are special cases of the Erlang-A model, we are able to obtain similar results for those models. Our approach is to use convexity and exploit Jensen's and the FKG inequality to obtain bounds on the moments and moment generating function of the Erlang-A queue. What we find even more exciting is that we are able to provide a stochastic representation of our approximations and bounds as Poisson random variables with a constant shift. This shifted Poisson was observed in peer to peer networks by \citet{ferragut2012content}; however, as we will show in the sequel, this novel representation will allow us to view our bounds and approximations in a new way.
\subsection{Main Contributions of the Paper}
The main contributions of this work can be summarized as follows: \begin{itemize} \item We provide new approximations for the moments, moment generating function, and cumulant generating function for the nonstationary Erlang-A queue exploiting FKG and Jensen's inequalities. \item We derive a novel stochastic interpretation and representation of our approximations as shifted Poisson random variables or $M/M/\infty$ queues, depending on the context. This sheds new light on the complexity of queues in heavy traffic or critically loaded regimes. \item We prove precise error bounds for our approximations and we also prove new upper and lower bounds for the nonstationary Erlang-A queue that become exact in certain parameter settings. \end{itemize}
\subsection{Organization of the Paper}
The remainder of this paper is organized as follows. Section~\ref{secQMod} introduces the nonstationary Erlang-A queueing model and its importance in stochastic network theory. In Section~\ref{MeanJensen}, we provide approximations for the moments of the Erlang-A system and use these to bound the true values. In Section~\ref{MGFsec} we derive approximations for the moment generating function and cumulant moment generating function of the Erlang-A queue. We again bound the true values by these approximations, and we also find a representation for our approximations in terms of Poisson random variables or $M/M/\infty$ queues, depending on the context.
\section{The Erlang-A Queueing Model}\label{modelsec}
\label{secQMod}
The Erlang-A queueing model is a fundamental queueing model in the stochastic processes literature. The work of \citet{mandelbaum1998strong}, shows that the $M(t)/M/c+M$ queueing system process $ Q\equiv \{ Q(t) | t \geq 0 \} $ is represented by the following stochastic, time changed integral equation: $$ Q(t) = Q(0) + \mathit{\Pi}_1 \left(\int^{t}_{0} \lambda(s) ds \right) - \mathit{\Pi}_2 \left(\int^{t}_{0} \mu \cdot (Q(s) \wedge c )ds \right) - \mathit{\Pi}_3 \left(\int^{t}_{0} \theta \cdot (Q(s) - c )^+ds \right) , $$
where $ \mathit{\Pi}_i \equiv \{ \mathit{\Pi}_i(t) | t \geq 0 \} $ for $ i \in \{1,2,3\} $ are i.i.d.\ standard (rate 1) Poisson processes. Thus, we can write the sample path dynamics of the Erlang-A queueing process in terms of three independent unit rate Poisson processes. A deterministic time change for $\mathit{\Pi}_1 $ transforms it into a non-homogeneous Poisson arrival process with rate $ \lambda(t)$ that counts the customer arrivals that occurred in the time interval $[0,t)$. A random time change for the Poisson process $\mathit{\Pi}_2$ gives us a departure process that counts the number of serviced customers. We implicitly assume that the number of servers is $c \in \mathbb{Z}^+$ and that each server works at rate $\mu$. Finally, the random time change of $ \mathit{\Pi}_3$ gives us a counting process for the number of customers that abandon service. We also assume that the abandonment distribution is exponential and the rate of abandonments is equal to $\theta$.
One of the main reasons that the Erlang-A queueing model has been studied so extensively is because several important queueing models are special cases of it. One special case is the infinite server queue. The infinite server queue can be derived from the Erlang-A queue in two ways. The first way is to set the number of servers to infinity. This precludes any abandonments since the abandonment rate $\theta \cdot (Q(t) - c)^+$ is always equal to zero when the number of servers is infinite. The second way to derive the infinite server queue is to set the service rate $\mu$ equal to the abandonment rate $\theta$. When $\mu = \theta$, this implies that the sum of the service and abandonment departure processes is equal to a linear function i.e. $\mu \cdot (Q(t) \wedge c) + \theta \cdot (Q(t) - c )^+ = \mu \cdot Q(t) = \theta \cdot Q(t)$. Thus, the Erlang-A queueing model becomes an infinite server queue.
One of the main and important insights of \citet{halfin1981heavy} is that for multi-server queueing systems, it is natural to scale up the arrival rate and the number of servers simultaneously. This scaling is known as the \emph{Halfin-Whitt} scaling and has been an important modeling technique for modeling call centers in the queueing literature. Since the $M(t)/M/c+M$ queueing process is a special case of a single node \emph{Markovian service network}, we can also construct an associated, \emph{uniformly accelerated} queueing process where the new arrival rate $\eta\cdot\lambda(t)$ and the new number of servers $\eta\cdot c$ are both scaled by the same factor $\eta>0$. Thus, using the \emph{Halfin-Whitt} scaling for the Erlang-A model, we arrive at the following sample path representation for the queue length process as
\begin{eqnarray*} Q^{\eta}(t) &=& Q^{\eta}(0) + \mathit{\Pi}_1 \left(\int^{t}_{0} \eta \cdot \lambda(s) ds \right) - \mathit{\Pi}_2 \left(\int^{t}_{0} \mu \cdot (Q^{\eta}(s) \wedge \eta \cdot c )ds \right) \\ &&- \mathit{\Pi}_3 \left(\int^{t}_{0} \theta \cdot (Q^{\eta}(s) - \eta \cdot c )^+ds \right) \\ &=& Q^{\eta}(0) + \mathit{\Pi}_1 \left( \int^{t}_{0} \eta \cdot \lambda(s) ds \right) - \mathit{\Pi}_2 \left(\int^{t}_{0} \eta \cdot \mu \cdot \left( \frac{Q^{\eta}(s)}{\eta} \wedge c \right)ds \right) \\ &&- \mathit{\Pi}_3 \left(\int^{t}_{0} \eta \cdot \theta \cdot \left( \frac{Q^{\eta}(s)}{\eta} - c \right)^+ds \right) . \end{eqnarray*}
The \emph{Halfin-Whitt} scaling is defined by simultaneously scaling up the rate of customer demand (which is the arrival rate) with the number of servers. In the context of call centers this is scaling up the number of customers and scaling up the number of agents to answer the phones. In the context of hospitals or healthcare this might be scaling up the number of patients with the number of beds or nurses. Taking the following limits gives us the \emph{fluid} models of \citet{mandelbaum1998strong}, i.e.
\begin{equation} \lim_{\eta\to\infty} \frac{1}{\eta} Q^{\eta}(t) = q(t) \hspace{3mm} \mathrm{a.s.} \end{equation} where the deterministic process $q(t)$, the \emph{fluid mean}, is governed by the one dimensional ordinary differential equation (ODE) \begin{equation} \label{fldmean}
\shortdot{q}(t) = \lambda(t) - \mu \cdot (q(t) \wedge c) - \theta \cdot (q(t) - c)^+ . \end{equation} Moreover, if one takes a diffusion limit i.e.
\begin{equation} \lim_{\eta\to\infty} \sqrt{\eta} \left( \frac{1}{\eta} Q^{\eta}(t) - q(t) \right) \Rightarrow \tilde{Q}(t) \hspace{3mm} \end{equation} one gets a diffusion process where the variance of the diffusion is given by the following ODE \begin{eqnarray} \label{diffvar}
\updot{\mathrm{Var}}\left[ \tilde{Q}(t) \right]
& =& \lambda(t) + \mu \cdot (q(t) \wedge c) + \theta \cdot (q(t) - c)^+ \nonumber \\ &&- 2 \cdot \mathrm{Var}\left[\tilde{Q}(t) \right] \cdot \left( \mu \cdot \{ q(t) < c \} + \theta \cdot \{ q(t) \geq c \} \right) . \end{eqnarray}
\subsection{Mean Field Approximation is Identical to the Fluid Limit}
In addition to using strong approximations to analyze the queue length process, one can also use the functional Kolmogorov forward equations as outlined in \citet{massey2013gaussian}. The functional forward equations for the Erlang-A model are derived as \begin{eqnarray} \label{FOREQN}
\updot{\mathrm{E}} [f(Q(t)) ] &\equiv& \frac{d}{dt} \mathrm{E}[ f(Q(t)) | Q(0) = q(0) ] \\ &=& \lambda \cdot \mathrm{E}\sqparen{f(Q(t)+1)-f(Q(t))} + \mathrm{E}\sqparen{\delta\paren{Q(t), c}\cdot\paren{f(Q(t)-1)-f(Q(t))}} , \end{eqnarray} for all appropriate functions $f$ and where $ \delta\paren{Q(t), c} = \mu \cdot (Q(t) \wedge c) + \theta \cdot (Q(t) - c)^+ $. For the special case where $f(x) = x$, we can derive an ode for the mean queue length process as
\begin{eqnarray}\label{meanforeqn}
\updot{\mathrm{E}} [Q(t)] &=&\lambda(t) - \mu \cdot \mathrm{E}[ ( Q(t) \wedge c ) ] - \theta \cdot \mathrm{E}[ ( Q(t) - c )^+ ] .
\end{eqnarray}
The first thing to note is that this equation is not autonomous and one needs to know the distribution of $Q(t)$ a priori in order to compute the expectations on the right-hand side of Equation \ref{meanforeqn}. To know the distribution a priori is impossible except in some special cases like the infinite server setting. However, it is easy to derive simple approximations for the mean queue length by making some assumptions on the queue length process. This is known as a closure approximation and one common closure approximation method is to simply take the expectations from outside the function to inside the function. This implies that the expectation $\mathrm{E}[f(X)]$ becomes $f(\mathrm{E}[X])$. This method is known as a mean field approximation in physics and is also known as the deterministic mean approximation of \citet{massey2013gaussian}. By applying the mean field approximation to Equation \ref{meanforeqn}, we can show that the resulting differential equation is given by the following autonomous ODE
\begin{eqnarray} \label{fluidmean}
\updot{\mathrm{E}} [Q_{f}(t)] &=&\lambda(t) - \mu \cdot ( \mathrm{E}[Q_{f}] \wedge c ) - \theta \cdot (\mathrm{E}[Q_{f}] - c )^+ .
\end{eqnarray} By careful inspection, one can observe that the ODE given by the mean field approximation is identical to the fluid limit of Equation \ref{fldmean}. Moreover, if one simulates the queueing process and compares it to the mean field limit, one notices an ordering property. For example, on the left of Figure \ref{Fig1}, we simulate the Erlang-A queue and compare to the fluid model. We observe that when $\theta < \mu$, the simulated mean is larger than the fluid mean. This is precisely what our results predict. Moreover, on the right of Figure \ref{Fig1}, we simulate the Erlang-A queue and compare to the fluid model when $\theta > \mu$ and observe that the simulated queue length is smaller than the fluid limit.
\begin{figure}
\caption{$\lambda(t) = 10 + 2 \cdot \sin (t) $, $\mu = 1$, $Q(0) = 0$, $c=10$. \\
$\theta = 0.5$ (Left) and $\theta = 2 $ (Right). }
\label{Fig1}
\end{figure}
Our goal in this work is to explain the behavior that we observe in Figure~\ref{Fig1}, which we will do in the following section. Before concluding our overview of the Erlang-A queueing model, we make a brief remark for notational clarity.
\begin{rem}\label{notationremark} Throughout the remainder of this work, we use $Q(t)$ to represent the true queueing process and $Q_f(t)$ to represent the fluid approximation of it. This fluid approximation is a stochastic process that will be fully described in this work. In fact, in Section~\ref{MGFsec} we characterize the fluid approximations and use insight from these representations to bound the true queue length from above and below. \end{rem}
\section{Inequalities for the Moments of the Erlang-A Queue} \label{MeanJensen}
In this section, we prove when the true moments of the Erlang-A queue either dominate or are dominated by their corresponding fluid limits. We find that the relationship between the service rate and the abandonment rate determines whether or not the moment is dominated by the fluid limit. This section is organized as follows. In Subsection~\ref{meanineq}, we derive inequalities for the true mean of the Erlang-A and its fluid approximation. In Subsection~\ref{MomentJensen} we extend these inequalities to analogous results for the $m^\text{th}$ moment of the queueing system. Finally, in Subsection~\ref{momentnumerical} we provide figures from numerical experiments that demonstrate these findings.
\subsection{Inequalities for the Mean}\label{meanineq}
We begin with analysis of the mean of the Erlang-A queue. Before we proceed, we first establish a lemma for comparisons of ordinary differential equations that will be fundamental to our approach to the results.
\begin{lemma}[A Comparison Lemma] Let $f: \mathbb{R}^2 \to \mathbb{R}$ be a continuous function in both variables. If we assume that the initial value problem \begin{equation} \shortdot{x}(t) = f(t, x(t)), \ x(0) = x_0 \end{equation} has a unique solution for the time interval $[0,T]$ and \begin{equation} \shortdot{y}(t) \leq f(t, y(t)) \quad \mathrm{for} \ t \in [0,T] \ \mathrm{and} \ y(0) \leq x_0 \end{equation} then $ x(t) \geq y(t) $ for all $t \in [0,T]$. \begin{proof} The proof of this result is given in \citet{hale2013introduction}. \end{proof} \end{lemma}
With this lemma in hand, we can now derive relationships for the fluid limit and the true mean. As seen in the proof, these results follow from the application of this differential equation comparison lemma and the convexity seen in the fluid approximation.
\begin{theorem}\label{jensen1} For the Erlang-A queue, if $Q(0) = Q_{f}(0)$, then the true mean dominates the fluid limit when $\theta < \mu$, the fluid limit dominates the true mean when $\theta > \mu$, and the two means are equal when $\theta = \mu$.
\begin{proof} Recall that the true mean satisfies the following differential equation
\begin{eqnarray*}
\updot{\mathrm{E}} [Q(t)] &=&\lambda(t) - \mu \cdot \mathrm{E}[ ( Q \wedge c ) ] - \theta \cdot \mathrm{E}[ ( Q - c )^+ ]
\end{eqnarray*}
and the fluid limit satisfies the following differential equation
\begin{eqnarray*}
\updot{\mathrm{E}} [Q_{f}(t)] &=& \lambda(t) - \mu \cdot ( \mathrm{E}[Q_{f}] \wedge c ) - \theta \cdot (\mathrm{E}[Q_{f}] - c )^+ .
\end{eqnarray*}
We can simplify both equations by observing that $( X \wedge c ) + ( X - c )^+ = X $ for any random variable $X$. Thus, we have the following two equations for the true mean and the fluid limit
\begin{eqnarray*}
\updot{\mathrm{E}} [Q(t)] &=&\lambda(t) - \theta \cdot \mathrm{E}[Q] + (\theta - \mu) \cdot \mathrm{E}[ ( Q \wedge c ) ] \\
\updot{\mathrm{E}} [Q_{f}(t)] &=& \lambda(t) - \theta \cdot \mathrm{E}[Q_{f}] + (\theta - \mu) \cdot (\mathrm{E}[Q_{f}] \wedge c ) .
\end{eqnarray*}
If we take the difference of the two equations, we obtain the following
\begin{eqnarray*}
\updot{\mathrm{E}} [Q(t)] - \updot{\mathrm{E}} [Q_{f}(t)] &=& \lambda(t) - \theta \cdot \mathrm{E}[Q] + (\theta - \mu) \cdot \mathrm{E}[ ( Q \wedge c ) ] \\
&-& \lambda(t) + \theta \cdot \mathrm{E}[Q_{f}] - (\theta - \mu) \cdot (\mathrm{E}[Q_{f}] \wedge c ) \\
&=& \theta \cdot \left( \mathrm{E}[Q_{f}] - \mathrm{E}[Q] \right) + ( \theta - \mu) \cdot \left( \mathrm{E}[ ( Q \wedge c ) ] - ( \mathrm{E}[Q_{f}]\wedge c ) \right) .
\end{eqnarray*}
Now since the minimum function $(Q \wedge c)$ is a concave function, we have that
\begin{eqnarray*}
\left( \mathrm{E}[ ( Q \wedge c ) ] - ( \mathrm{E}[Q] \wedge c ) \right) &\leq& 0
\end{eqnarray*}
for any random variable $Q$. Thus, we have that for $\theta < \mu$
\begin{eqnarray*}
\updot{\mathrm{E}} [Q(t)] - \updot{\mathrm{E}} [Q_{f}(t)] &\geq& 0,
\end{eqnarray*}
and for $\theta > \mu$
\begin{eqnarray*}
\updot{\mathrm{E}} [Q(t)] - \updot{\mathrm{E}} [Q_{f}(t)] &\leq& 0.
\end{eqnarray*} Finally, for $\theta = \mu$, we have that
\begin{eqnarray*}
\updot{\mathrm{E}} [Q(t)] - \updot{\mathrm{E}} [Q_{f}(t)] &=& 0
\end{eqnarray*}
since both differential equations are initialized with the same value and the origin is an equilibrium point for the difference. This completes the proof. \end{proof} \end{theorem}
As discussed in Section~\ref{modelsec}, the Erlang-A model is quite versatile in its relation to other queueing systems of practical interest. In the two following corollaries, we find that Theorem~\ref{jensen1} can be applied to the Erlang-B and Erlang-C models.
\begin{corollary} For the Erlang-B queueing model, if $Q(0) = Q_{f}(0)$, then $\mathrm{E}[Q(t)] \leq \mathrm{E}\left[Q_{f}(t) \right]$ for all $t \geq 0$.
\begin{proof} This is obvious after noticing that the Erlang-B queue is a limit of the Erlang-A queue by letting $\theta \to \infty$. \end{proof} \end{corollary}
\begin{corollary} For the Erlang-C queueing model, if $Q(0) = Q_{f}(0)$, then $\mathrm{E}[Q(t)] \geq \mathrm{E}\left[Q_{f}(t) \right]$ for all $t \geq 0$.
\begin{proof} This is obvious after noticing that the Erlang-C queue is an Erlang-A queue with $\theta=0$. Since $\mu $ is assumed to be positive, then we fall into the case where $\theta < \mu$ and this completes the proof. \end{proof} \end{corollary}
\begin{rem} Given that we use Jensen's inequality and the FKG inequality later on in the paper, we find it important to differentiate them. Here we give an example that sets the two apart. If we have the following function $Q^n$, then Jensen's inequality implies that $\mathbb{E}[Q^n] \geq \mathbb{E}[Q]^n $. However, FKG implies that $\mathbb{E}[Q^n] \geq \mathbb{E}[Q^{n-1}] \cdot \mathbb{E}[Q] $. We find it interesting that by iterating the FKG inequality $n-2$ more times, it yields Jensen's inequality for the moments of random variables. \end{rem}
\subsection{Inequalities for the $m^{th}$ Moment}\label{MomentJensen}
In this subsection we will now extend the previous findings for the mean to higher moments of the queueing system. Like the result for the mean, this is again built through observation of the convexity in the differential equation of the fluid approximation.
\begin{theorem}\label{mMoment} For the Erlang-A queue and $m \in \mathbb{Z}^+$, if $Q(0) = Q_f(0)$, then $\E{Q^m(t)} \geq \E{Q_f^m(t)}$ when $\theta < \mu$, $\E{Q^m(t)} \leq \E{Q_f^m(t)}$ when $\theta > \mu$, and $\E{Q^m(t)} = \E{Q_f^m(t)}$ when $\theta = \mu$. \begin{proof} We will use proof by induction. For the base case we can apply Theorem \ref{jensen1}. Now, suppose that the statement holds for $j \in \{1,2, \dots, m -1\}$. Recall that the $m^\text{th}$ moment satisfies \begin{align*} \updot{ \mathrm{E}} \left[Q^m(t)\right] &= \lambda(t) \E{\sum_{j=0}^m {m \choose j} Q^j(t) - Q^m(t)} \\&\quad + \E{\left(\sum_{j=0}^m {m \choose j} (-1)^{m-j}Q^j(t) - Q^m(t)\right)\big(\theta Q(t) - (\theta - \mu)(Q(t) \wedge c)\big)} \\ &= \lambda(t) \sum_{j=0}^{m-1} {m \choose j} \E{Q^j(t)} + \theta \sum_{j=0}^{m-1} {m \choose j} (-1)^{m-j} \E{Q^{j+1}(t)} \\&\quad
+
(\theta - \mu)\E{\left(\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}Q^{j+1}(t) \right) \wedge \left(c\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}Q^j(t) \right)} \end{align*} and the approximate autonomous version satisfies \begin{align*} \updot{ \mathrm{E}} \left[Q_f^m(t)\right] &= \lambda(t) \sum_{j=0}^{m-1} {m \choose j} \E{Q_f^j(t)} + \theta \sum_{j=0}^{m-1} {m \choose j} (-1)^{m-j} \E{Q_f^{j+1}(t)} \\&\quad
+
(\theta - \mu)\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}\left(\E{Q^{j+1}(t) } \wedge \E{c Q^j(t)} \right)
\\ &= \lambda(t) \sum_{j=0}^{m-1} {m \choose j} \E{Q_f^j(t)} + \theta \sum_{j=0}^{m-1} {m \choose j} (-1)^{m-j} \E{Q_f^{j+1}(t)} \\&\quad
+
(\theta - \mu)\left(\E{\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}Q^{j+1}(t) } \wedge \E{c\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}Q^j(t)} \right) \end{align*} Now by taking the difference, we have that \begin{align*} \updot{ \mathrm{E}} \left[Q^m(t)\right] - \updot{ \mathrm{E}} \left[Q_f^m(t)\right] &= \lambda(t) \sum_{j=0}^{m-1} {m \choose j} \E{Q^j(t) - Q_f^j(t)} + \theta \sum_{j=0}^{m-1} {m \choose j} (-1)^{m-j} \E{Q^{j+1}(t) - Q_f^{j+1}(t)} \\&\,\,
+
(\theta - \mu)
\Bigg(
\E{\left(\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}Q^{j+1}(t) \right) \wedge \left(c\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}Q^j(t) \right)}
\\&
-
\E{\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}Q^{j+1}(t) } \wedge \E{c\sum_{j=0}^{m-1} {m \choose j} (-1)^{m-1-j}Q^j(t)}
\Bigg). \end{align*} Because the minimum is a concave function, we have that for any $X$ and $Y$ with real means $\E{X \wedge Y} \leq \E{X} \wedge \E{Y}$. Thus, we have that for $\theta < \mu$, $$ \updot{ \mathrm{E}} \left[Q^m(t)\right] - \updot{ \mathrm{E}} \left[Q_f^m(t)\right] \geq 0, $$ if $\theta > \mu$, $$ \updot{ \mathrm{E}} \left[Q^m(t)\right] - \updot{ \mathrm{E}} \left[Q_f^m(t)\right] \leq 0, $$ and if $\theta = \mu$, $$ \updot{ \mathrm{E}} \left[Q^m(t)\right] = \updot{ \mathrm{E}} \left[Q_f^m(t)\right] = 0 $$ since both differential equations are initialized with the same value, the origin is an equilibrium point for the difference, and all the lower-power terms in the differential equations follow this structure, which we know from the inductive hypothesis. Therefore we see this holds for $m$, which completes the proof. \end{proof} \end{theorem}
Again as we have seen for the mean, we can exploit the versatility of the Erlang-A queue to extend these insights to the Erlang-B and Erlang-C models as well.
\begin{corollary} For the Erlang-B queueing model, if $Q(0) = Q_{f}(0)$, then $\mathrm{E}[Q^m(t)] \leq \mathrm{E}\left[Q_{f}^m(t) \right]$ for all $t \geq 0$ and $m \in \mathbb{Z}^+$.
\begin{proof} This is obvious after noticing that the Erlang-B queue is a limit of the Erlang-A queue by letting $\theta \to \infty$. \end{proof} \end{corollary}
\begin{corollary} For the Erlang-C queueing model, if $Q(0) = Q_{f}(0)$, then $\mathrm{E}[Q^m(t)] \geq \mathrm{E}\left[Q_{f}^m(t) \right]$ for all $t \geq 0$ and $m \in \mathbb{Z}^+$.
\begin{proof} This is obvious after noticing that the Erlang-C queue is an Erlang-A queue with $\theta=0$. Since $\mu $ is assumed to be positive, then we fall into the case where $\theta < \mu$ and this completes the proof. \end{proof} \end{corollary}
\subsection{Numerical Results} \label{momentnumerical}
In this section we describe numerical results for approximating the moments of the Erlang-A queue and examine them relative to our findings. In Figures \ref{fig:1to9pt1} and \ref{fig:1to9pt2}, we show the first four moments of the Erlang-A queue and their respective fluid approximations for cases of $\theta < \mu$ and $\theta > \mu$, respectively. In these plots, we take the arrival rate at time $t \geq 0$ to be $\lambda(t) = 10 + 2\sin(t)$. We initialize the queue as empty, and we assume that the queueing system has $c = 10$ servers each with exponential service rate $\mu = 1$. We test two different cases for the abandonment rate: $\theta = 0.5$ and $\theta = 2$. In these settings, we observe that when $\theta < \mu$ the fluid approximations are below their corresponding simulated stochastic values and that when $\theta > \mu$ the fluid values are greater than the simulations, and this matches the statements of Theorems \ref{jensen1} and \ref{mMoment}.\\
We observe the same relationships in Figures~\ref{fig:11to19pt1} and \ref{fig:11to19pt2}. For these plots we instead set $\lambda(t) = 100 + 20\sin(t)$ and $c = 100$ and otherwise use the same values as for Figures \ref{fig:1to9pt1} and \ref{fig:1to9pt2}. With this increase in the arrival intensity and the number of servers, we see that the gaps between the fluid approximations and the simulations are again present, albeit proportionally smaller.
\begin{figure}
\caption{{\small First Moment }}
\caption{{\small Second Moment }}
\caption{{\small Third Moment }}
\caption{{\small Fourth Moment }}
\label{fig:1}
\label{fig:2}
\label{fig:3}
\label{fig:4}
\label{fig:1to9pt1}
\end{figure}
\begin{figure}
\caption{{\small First Moment }}
\caption{{\small Second Moment }}
\caption{{\small Third Moment }}
\caption{{\small Fourth Moment}}
\label{fig:1-1to9pt2}
\label{fig:2-1to9pt2}
\label{fig:3-1to9pt2}
\label{fig:4-1to9pt2}
\label{fig:1to9pt2}
\end{figure}
\begin{figure}
\caption{{\small First Moment }}
\caption{{\small Second Moment }}
\caption{{\small Third Moment }}
\caption{{\small Fourth Moment }}
\label{fig:1-11to19pt1}
\label{fig:2-11to19pt1}
\label{fig:3-11to19pt1}
\label{fig:4-11to19pt1}
\label{fig:11to19pt1}
\end{figure}
\begin{figure}
\caption{{\small First Moment }}
\caption{{\small Second Moment }}
\caption{{\small Third Moment }}
\caption{{\small Fourth Moment }}
\label{fig:1-11to19pt2}
\label{fig:2-11to19pt2}
\label{fig:3-11to19pt2}
\label{fig:4-11to19pt2}
\label{fig:11to19pt2}
\end{figure}
\section{Inequalities and Characterizations for Generating Functions of the Erlang-A Queue} \label{MGFsec}
Building on what we have found for the moments of the Erlang-A, we can provide similar inequalities for the moment generating function and the cumulant generating function again through convexity in the differential equations for the fluid approximations. We provide these inequalities in Subsections~\ref{MGFJensen} and \ref{CGFJensen}, respectively. In doing so, we find forms for the fluid approximations that we can interpret in terms of expectations of other random quantities. Through these recognitions, we characterize the fluid approximations. We describe these representations for systems in steady-state in Subsection~\ref{steadychar} and for nonstationary systems in Subsection~\ref{nonstatchar}. We conclude this section with a variety of demonstrations of these results through empirical experiments in Subsection~\ref{mgfnumres}.
\subsection{An Inequality for the Moment Generating Function of the Erlang-A Queue} \label{MGFJensen}
Using the functional forward equations \citet{massey2013gaussian}, we can show that the moment generating function for the Erlang-A queue satisfies the following partial differential equation
\begin{eqnarray}\label{mgfpde}
\updot{\mathrm{E}} \left[e^{\alpha \cdot Q(t)} \right] &=& \lambda(t) \cdot (e^{\alpha} -1) \cdot \mathrm{E}\left[e^{\alpha \cdot Q(t)} \right] + \theta \cdot (e^{-\alpha} -1) \cdot \mathrm{E}\left[ Q(t) \cdot e^{\alpha \cdot Q(t)} \right] \\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \mathrm{E}\left[ ( Q(t) \wedge c) \cdot e^{\alpha \cdot Q(t)}\right] \\
&=& \lambda(t) \cdot (e^{\alpha} -1) \cdot \mathrm{E}\left[e^{\alpha \cdot Q(t)} \right] + \theta \cdot (e^{-\alpha} -1) \cdot \frac{\partial M(t,\alpha)}{\partial \alpha} \\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \mathrm{E}\left[ ( Q(t) \wedge c) \cdot e^{\alpha \cdot Q(t)}\right] .
\end{eqnarray}
Just like the non-autonomous differential equation for the mean in Equation \ref{meanforeqn}, we also cannot directly compute the moment generating function since we do not know the distribution of the queue length a priori. This is also true for numerical purposes. Unless we can compute the expectation that includes the minimum function it is impossible to know the moment generating function, except in special cases such as the infinite server queue and some cases of the Erlang-B queue. Thus, it is useful to obtain approximations that are explicit upper or lower bounds for the moment generating function. By using Jensen's inequality for concave functions, we can approximate the moment generating function with the following partial differential equation
\begin{eqnarray}
\updot{\mathrm{E}} \left[e^{\alpha \cdot Q_f(t)} \right] &=& \lambda(t) \cdot (e^{\alpha} -1) \cdot \mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right] + \theta \cdot (e^{-\alpha} -1) \cdot \frac{\partial M_f(t,\alpha)}{\partial \alpha} \nonumber\\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \left( \mathrm{E}\left[Q_f(t) \cdot e^{\alpha \cdot Q_f(t)}\right] \wedge \mathrm{E}\left[c \cdot e^{\alpha \cdot Q_f(t)} \right] \right) \\
\frac{\partial M_f(t,\alpha) }{\partial t} &=& \lambda(t) \cdot (e^{\alpha} -1) \cdot M_f(t,\alpha) + \theta \cdot (e^{-\alpha} -1) \cdot \frac{\partial M_f(t,\alpha)}{\partial \alpha} \nonumber\\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \left( \frac{\partial M_f(t,\alpha) }{\partial \alpha} \wedge c \cdot M_f(t,\alpha) \right) \label{fluidMGFdef}.
\end{eqnarray}
The following theorem determines exactly when $\mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right]$ is a lower or upper bound for the exact moment generating function of the Erlang-A queue.
\begin{theorem}\label{mgfineqthm} For the Erlang-A queue, if $Q(0) = Q_{f}(0)$, then $\mathrm{E}\left[e^{\alpha \cdot Q(t)} \right] \geq \mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right] $ when $\theta < \mu$, $\mathrm{E}\left[e^{\alpha \cdot Q(t)} \right] \leq \mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right] $ when $\theta > \mu$, and $\mathrm{E}\left[e^{\alpha \cdot Q(t)} \right] = \mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right] $ when $\theta = \mu$.
\begin{proof}
If we take the difference of the two partial differential equations, we obtain the following
\begin{eqnarray*}
\updot{\mathrm{E}} \left[e^{\alpha \cdot Q(t)} \right] - \updot{\mathrm{E}} \left[e^{\alpha \cdot Q_f(t)} \right] &=& \lambda(t) \cdot (e^{\alpha} -1) \cdot \mathrm{E}\left[e^{\alpha \cdot Q(t)} \right] + \theta \cdot (e^{-\alpha} -1) \cdot \mathrm{E}\left[ Q(t) \cdot e^{\alpha \cdot Q(t)} \right] \\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \mathrm{E}\left[ ( Q(t) \wedge c) \cdot e^{\alpha \cdot Q(t)}\right] \\ &&- \lambda(t) \cdot (e^{\alpha} -1) \cdot \mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right] - \theta \cdot (e^{-\alpha} -1) \cdot \mathrm{E}\left[Q_f(t) \cdot e^{\alpha \cdot Q_f(t)}\right] \\
&&+ ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \left( \mathrm{E}\left[Q_f(t) \cdot e^{\alpha \cdot Q_f(t)}\right] \wedge \mathrm{E}\left[c \cdot e^{\alpha \cdot Q_f(t)} \right] \right) \\
&=& \lambda(t) \cdot (e^{\alpha} -1) \cdot \left( \mathrm{E}\left[e^{\alpha \cdot Q(t)} \right] - \mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right] \right) \\
&&+ \theta \cdot (e^{-\alpha} -1) \cdot \left( \mathrm{E}\left[ Q(t) \cdot e^{\alpha \cdot Q(t)} \right] - \mathrm{E}\left[Q_f(t) \cdot e^{\alpha \cdot Q_f(t)}\right] \right) \\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \mathrm{E}\left[ ( Q(t) \wedge c) \cdot e^{\alpha \cdot Q(t)}\right] \\
&&+ ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \left( \mathrm{E}\left[Q_f(t) \cdot e^{\alpha \cdot Q_f(t)}\right] \wedge \mathrm{E}\left[c \cdot e^{\alpha \cdot Q_f(t)} \right] \right) .
\end{eqnarray*}
Now by exploiting the positive scalability property and the concavity of the minimum function, we have by Jensen's inequality that
\begin{eqnarray*}
\mathrm{E}\left[ ( Q(t) \wedge c) \cdot e^{\alpha \cdot Q(t)} \right] &=& \mathrm{E}\left[ \left( Q(t) \cdot e^{\alpha \cdot Q(t)} \wedge c \cdot e^{\alpha \cdot Q(t)} \right) \right] \\ & \leq & \left( \mathrm{E}\left[Q(t) \cdot e^{\alpha \cdot Q(t)}\right] \wedge \mathrm{E}\left[c \cdot e^{\alpha \cdot Q(t)} \right] \right) .
\end{eqnarray*}
Thus, we have when $\theta < \mu$ that
\begin{equation}
\updot{\mathrm{E}} \left[e^{\alpha \cdot Q(t)} \right] - \updot{\mathrm{E}} \left[e^{\alpha \cdot Q_f(t)} \right] \geq 0 ,
\end{equation}
when $\theta > \mu$
\begin{equation}
\updot{\mathrm{E}} \left[e^{\alpha \cdot Q(t)} \right] - \updot{\mathrm{E}} \left[e^{\alpha \cdot Q_f(t)} \right] \leq 0 ,
\end{equation}
and finally when $\theta = \mu$,
\begin{equation}
\updot{\mathrm{E}} \left[e^{\alpha \cdot Q(t)} \right] - \updot{\mathrm{E}} \left[e^{\alpha \cdot Q_f(t)} \right] = 0
\end{equation}
since they solve the same partial differential equation. This completes our proof. \end{proof} \end{theorem}
As with the moments, we can observe these relationships occurring in numerical experiments. We provide figures demonstrating this in Subsection~\ref{mgfnumres}.
\subsection{An Inequality for the Cumulant Moment Generating Function of the Erlang-A Queue}\label{CGFJensen}
As a consequence of the findings for the moment generating function, we can also provide similar inequalities for the cumulant moment generating function. Using Equation~\ref{mgfpde}, we have
\begin{eqnarray}
\updot{\log\left( \mathrm{E} \left[e^{\alpha \cdot Q(t)} \right] \right) } &\equiv& \frac{\partial}{\partial t} \log\left( \mathrm{E} \left[e^{\alpha \cdot Q(t)} \right] \right)
= \frac{\updot{\mathrm{E}} \left[e^{\alpha \cdot Q(t)} \right] }{ \mathrm{E} \left[e^{\alpha \cdot Q(t)} \right] } \\
&=& \lambda(t) \cdot (e^{\alpha} -1) + \theta \cdot (e^{-\alpha} -1) \cdot \frac{ \mathrm{E}\left[ Q(t) \cdot e^{\alpha \cdot Q(t)} \right] }{ \mathrm{E} \left[e^{\alpha \cdot Q(t)} \right] } \nonumber \\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \frac{ \mathrm{E}\left[ ( Q(t) \wedge c) \cdot e^{\alpha \cdot Q(t)}\right] }{ \mathrm{E} \left[e^{\alpha \cdot Q(t)} \right] }\\
&=& \lambda(t) \cdot (e^{\alpha} -1) + \theta \cdot (e^{-\alpha} -1) \cdot \frac{\partial G(t,\alpha)}{\partial \alpha} \nonumber \\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \frac{ \mathrm{E}\left[ ( Q(t) \wedge c) \cdot e^{\alpha \cdot Q(t)}\right] }{\mathrm{E} \left[e^{\alpha \cdot Q(t)} \right]} .
\end{eqnarray} Like for the MGF, we note that we cannot compute the cumulant moment generating function directly without knowing the distribution of the queue length. So, by again applying Jensen's inequality, we can describe the fluid approximation as follows.
\begin{eqnarray}
\updot{\log\left( \mathrm{E} \left[e^{\alpha \cdot Q_f(t)} \right] \right) } &=& \lambda(t) \cdot (e^{\alpha} -1) + \theta \cdot (e^{-\alpha} -1) \cdot \frac{\partial G_f(t,\alpha)}{\partial \alpha} \nonumber\\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \left( \frac{ \mathrm{E}\left[Q_f(t) \cdot e^{\alpha \cdot Q_f(t)}\right] \wedge \mathrm{E}\left[c \cdot e^{\alpha \cdot Q_f(t)} \right] }{ \mathrm{E} \left[e^{\alpha \cdot Q_f(t)} \right] } \right) \quad\\
\frac{\partial G_f(t,\alpha) }{\partial t} &=& \lambda(t) \cdot (e^{\alpha} -1) + \theta \cdot (e^{-\alpha} -1) \cdot \frac{\partial G_f(t,\alpha)}{\partial \alpha} \nonumber\\
&&- ( \theta - \mu) \cdot (e^{-\alpha} -1) \cdot \left( \frac{\partial G_f(t,\alpha) }{\partial \alpha} \wedge c \right) .\label{cumulantMGF}
\end{eqnarray}
Using this observation and our approach in finding the inequalities for the moment generating function, we find the equivalent inequalities for the cumulant moment generating function in the following corollary.
\begin{corollary} For the Erlang-A queue, if $Q(0) = Q_{f}(0)$, then $\log\left(\mathrm{E}\left[e^{\alpha \cdot Q(t)} \right]\right) \geq \log\left(\mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right]\right) $ when $\theta < \mu$, $\log\left(\mathrm{E}\left[e^{\alpha \cdot Q(t)} \right]\right) \leq \log\left(\mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right] \right)$ when $\theta > \mu$, and $\log\left(\mathrm{E}\left[e^{\alpha \cdot Q(t)} \right]\right) = \log\left(\mathrm{E}\left[e^{\alpha \cdot Q_f(t)} \right] \right)$ when $\theta = \mu$.
\begin{proof} The proof follows from the same argument that was given in Theorem~\ref{mgfineqthm} and the fact that the log function is strictly increasing. \end{proof} \end{corollary}
\subsection{Characterization of the Moment Generating Function in Steady-State}\label{steadychar}
From what we have observed for the moment generating function, we can derive an exact representation for the fluid approximation of the moment generating function in steady-state. We assume a stationary arrival rate $\lambda > 0$. We will investigate the stationary fluid approximation differential equations in a casewise manner based on the relationship of $\lambda$ and the system's service parameters. To do so, we begin with a lemma bounding the fluid approximation of the mean.
\begin{lemma}\label{cLemma} Suppose that $\lambda$ is constant. If $\lambda < c\mu $, then $E[Q_f(\infty)] < c $. Moreover, if $\lambda \geq c\mu $, then $E[Q_f(\infty)] \geq c $. \begin{proof} We will prove this by contradiction. For the first part, we assume that $E[Q_f(\infty)] \geq c $. Now by using the differential equation for the mean in steady state, we have that \begin{eqnarray*} 0 &=& \lambda - \mu \cdot (E[Q_f(\infty)] \wedge c ) - \theta \cdot ( E[Q_f(\infty)] - c)^+ \\ &=& \lambda - \mu \cdot c - \theta ( E[Q_f(\infty)] - c)^+ . \end{eqnarray*} Since we assumed that $E[Q_f(\infty)] \geq c $, then this yields the following inequality \begin{eqnarray*}
\lambda &\geq & c\mu , \end{eqnarray*} which yields a contradiction. For the second case, where we assume that $\lambda \geq c \mu$ and $E[Q_f(\infty)] < c $, then by the same differential equation we have that \begin{eqnarray*}
\lambda &=& \mu \cdot (E[Q_f(\infty)] \wedge c ) + \theta \cdot ( E[Q_f(\infty)] - c)^+ \\
&=& \mu \cdot (E[Q_f(\infty)] \wedge c ) \\
&=& c \mu + \mu \cdot (E[Q_f(\infty)] - c ) \\
&< & c \mu , \end{eqnarray*} which yields another contradiction. \end{proof} \end{lemma}
We now begin characterizing the fluid approximations with our first case, $\lambda \geq c\mu$, in the following proposition.
\begin{proposition}\label{fluidSteadyMGF} If $\lambda \geq c\mu$, then in steady-state we have that
\begin{eqnarray}
\frac{\partial M_f(\infty,\alpha)}{\partial \alpha} &=& \frac{\lambda \cdot (e^{\alpha} -1) + ( \theta - \mu) \cdot ( 1 - e^{-\alpha}) \cdot c }{\theta \cdot ( 1 - e^{-\alpha}) } \cdot M_f(\infty,\alpha)
\end{eqnarray} which yields a solution of
\begin{eqnarray}
M_f(\infty,\alpha) &=& e^{\frac{\alpha \cdot (\theta - \mu) \cdot c + \lambda \cdot (e^\alpha -1)}{\theta }}
\end{eqnarray}
for $\alpha \in \mathbb{R}$.
\begin{proof}
To find the partial differential equation, we use the functional cumulant bound for any non-decreasing function $h(\cdot)$ (which can be seen as a form of the FKG inequality),
\begin{equation}
\frac{\mathrm{E}[ h(X) \cdot e^{\alpha \cdot X}] }{\mathrm{E}[ e^{\alpha \cdot X}] } \geq \mathrm{E}[ h(X) ] .
\end{equation}
In the case that $\lambda \geq c\mu$ we have that $\E{Q_f(t)} \geq c$ in steady-state by Lemma~\ref{cLemma}, and so we know how to evaluate the minimum in the fluid equation. Thus, we have that the derivative of $G_f(\alpha) = \log(M_f(\infty, \alpha))$ with respect to $\alpha$ is \begin{align}\label{logMGFode} \frac{\mathrm{d}G_f(\alpha)}{\mathrm{d}\alpha} = \frac{\lambda(e^{\alpha} - 1 ) + c(\theta - \mu)(1 - e^{-\alpha})}{\theta(1 - e^{-\alpha})} = \frac{\lambda e^\alpha}{\theta} + \frac{c(\theta - \mu)}{\theta} \end{align} where here we have used the identity $e^x = \frac{e^x - 1}{1 - e^{-x}}$, which can be observed by multiplying each side of the equation by $1 - e^{-x}$. Because the MGF is equal to 1 when $\alpha = 0$, we also have that $G_f(0) = 0$. Using this initial condition and integrating left and right sides of Equation~\ref{logMGFode} with respect to $\alpha$, we find that $$ G_f(\alpha) = \frac{\lambda(e^\alpha - 1) + c\alpha(\theta - \mu)}{\theta} $$ and since $M_f(\infty, \alpha) = e^{G_f(\alpha)}$, we attain the stated result.
\end{proof} \end{proposition}
We can now observe that the fluid approximation is equivalent in distribution to a Poisson random variable shifted by $\gamma \equiv \frac{c(\theta - \mu)}{\theta}$, as the moment generating function for the Poisson distribution is $e^{\beta (e^{\alpha} - 1)}$, where $\beta$ is the rate of arrival and $\alpha$ is the space parameter of the MGF. This gives rise to the following.
\begin{theorem}\label{sandwichthm} For the Erlang-A queue with $\lambda \geq c\mu$ and $m \in \mathbb{Z}^+$, if $\theta > \mu$ $$ \E{(Q_f(\infty) - \gamma)^m} \leq \E{(Q(\infty))^m} \leq \E{(Q_f(\infty))^m} $$ and if $\theta < \mu$ $$ \E{(Q_f(\infty))^m} \leq \E{(Q(\infty))^m} \leq \E{(Q_f(\infty) - \gamma)^m} $$ where $\gamma = \frac{c(\theta - \mu)}{\theta}$. \begin{proof} From Proposition~\ref{fluidSteadyMGF}, we have that the fluid approximation of the MGF in steady-state is $$ M_f(\infty, \alpha) = e^{\frac{\lambda(e^\alpha - 1) + c\alpha(\theta - \mu)}{\theta}} = \E{e^{\alpha(\Gamma + \gamma)}} $$ where $\Gamma \sim \mathrm{Pois}\left(\frac{\lambda}{\theta}\right)$ and $\gamma = \frac{c(\theta - \mu)}{\theta}$. From the uniqueness of MGF's, we have that $$ \E{(Q_f(\infty))^m} = \E{(\Gamma + \gamma)^m} $$ for all $m \in \mathbb{Z}^+$. Now, recall that for an $M/M/\infty$ queue with arrival rate $\lambda$ and service rate $\theta$, the stationary distribution is that of a Poisson random variable with rate parameter $\frac{\lambda}{\theta}$. So, we can think of $\Gamma$ as representing the steady-state distribution of an infinite server queue with Poisson arrival rate $\lambda$ and exponential service rate $\theta$.\\
Suppose now that $\theta > \mu$. Then, by Theorem~\ref{mMoment} and our preceding observation, we have that $\E{(Q(\infty))^m} \leq \E{(\Gamma + \gamma)^m} $. Additionally, by comparing the steady-state infinite server queue representation of $\Gamma$ to $Q(\infty)$, we can further observe that $\E{(Q(\infty))^m} \geq \E{\Gamma^m}$, as for any state $j$ the service rate in $Q(\infty)$ is no more than the service rate in the same state in the $\Gamma$ queueing system. Thus we have that $$ \E{(Q_f(\infty) - \gamma)^m} = \E{\Gamma^m} \leq \E{(Q(\infty))^m} \leq \E{(\Gamma + \gamma)^m} = \E{(Q_f(\infty))^m} $$ for all $m \in \mathbb{Z}^+$ whenever $\theta > \mu$.
By symmetric arguments, we can also find that if $\mu > \theta$ then $$ \E{(Q_f(\infty))^m} = \E{(\Gamma + \gamma)^m} \leq \E{(Q(\infty))^m} \leq \E{\Gamma^m} = \E{(Q_f(\infty) - \gamma)^m} $$ for all $m \in \mathbb{Z}^+$, as in this case $\gamma = \frac{c(\theta - \mu)}{\theta} < 0$. \end{proof} \end{theorem}
\begin{rem} Note that in Theorem~\ref{mMoment}, we require that $Q(0) = Q_f(0)$ but in this case we have not assumed such a condition. This is because the inequalities in Theorem~\ref{mMoment} hold for all time, and we simply need the relationship to hold in steady-state, which can be seen to occur regardless of initial conditions. \end{rem}
By knowing the fluid form of moment generating function explicitly as a Poisson distribution, we can also provide exact expressions for the fluid moments and the fluid cumulant moments. These are given in the two following corollaries.
\begin{corollary} If $\lambda \geq c\mu$, then in steady-state we have that the first $n$ moments have the following steady-state expressions:
\begin{eqnarray} \mathrm{E} [Q_{f}^n(\infty)] &=& \sum^{n}_{j=0} { n \choose j} \cdot \left( \frac{c (\theta - \mu)}{\theta} \right)^j \cdot \mathcal{P}_{n-j} \left( \frac{\lambda}{\theta} \right) \end{eqnarray} where $\mathcal{P}_m \left( \frac{\lambda}{\theta} \right) $ is the $m^{th}$ Touchard polynomial with parameter $ \frac{\lambda}{\theta} $.
\begin{proof} This can be seen by direct use of the Poisson form of the fluid MGF. Let $\Gamma \sim \text{Pois}\left(\frac{\lambda}{\theta}\right)$ and let $\gamma = \frac{c(\theta - \mu)}{\theta}$. Then,
\begin{eqnarray*} \mathrm{E} [Q_{f}^n(\infty)] &=& \mathrm{E} [ (\Gamma + \gamma)^n] \\
&=& \sum^{n}_{j=0} { n \choose j } \cdot \gamma^j \cdot \mathrm{E} \left[ \Gamma^{n-j} \right] \\
&=& \sum^{n}_{j=0} { n \choose j} \cdot \gamma^j \cdot \mathcal{P}_{n-j} \left( \frac{\lambda}{\theta} \right) \\
&=& \sum^{n}_{j=0} { n \choose j} \cdot \left( \frac{c (\theta - \mu)}{\theta} \right)^j \cdot \mathcal{P}_{n-j} \left( \frac{\lambda}{\theta} \right) . \end{eqnarray*}
\end{proof}
\end{corollary}
\begin{corollary} If $\lambda \geq c\mu$, then in steady-state we have that
\begin{equation} \frac{\mathrm{d}G_f(\infty,\alpha)}{\mathrm{d}\alpha}
\Bigr|_{\alpha = 0} = \frac{\lambda }{\theta} + \frac{c(\theta - \mu)}{\theta} = \mathrm{E}[Q_f(\infty)] \end{equation} and for $n \in \mathbb{Z}^+$ with $n \geq 2$ \begin{equation} \frac{\mathrm{d}^n G_f(\infty,\alpha)}{\mathrm{d}^n \alpha}
\Bigr|_{\alpha = 0} = \frac{\lambda}{\theta} = \mathrm{C}^{(n)}[Q_f(\infty)] \end{equation} where $\mathrm{C}^{(n)}[Q_f(\infty)]$ is defined as the $n^{th}$ cumulant moment of $Q_f(\infty)$. \end{corollary}
We now consider the second case, which is $\lambda < c\mu e^{-\alpha}$. Note that this now also requires a relationship involving the space parameter of the moment generating function, $\alpha$. This is less general than the first case, but it allows us to derive Lemma~\ref{ifflemma}.
\begin{lemma}\label{ifflemma} For $\alpha \geq 0$, $$ \frac{\partial M_f(\infty, \alpha)}{\partial \alpha} < c M_f(\infty, \alpha) $$ if and only if $\lambda < c\mu e^{-\alpha}$. \begin{proof} To begin, suppose that $ \frac{\partial M_f(\infty, \alpha)}{\partial \alpha} < c M_f(\infty, \alpha). $ Using this information in conjunction with the steady-state form of the partial differential equation for the fluid MGF given in Equation~\ref{fluidMGFdef}, we have that $$ 0 = \lambda (e^{\alpha} -1) M_f(\infty,\alpha) + \theta (e^{-\alpha} -1) \frac{\partial M_f(\infty,\alpha)}{\partial \alpha}
- ( \theta - \mu) (e^{-\alpha} -1) \frac{\partial M_f(\infty, \alpha)}{\partial \alpha} $$ which simplifies to $$ \frac{\partial M_f(\infty,\alpha)}{\partial \alpha} = \frac{\lambda}{\mu}e^{\alpha} M_f(\infty, \alpha). $$ Using our assumption, we see that $$ \frac{\lambda}{\mu}e^{\alpha} M_f(\infty, \alpha) < c M_f(\infty, \alpha) $$ and this yields that $\lambda < c \mu e^{-\alpha}$, which shows one direction.\\ \\ \indent We now move to showing the opposite direction and instead assume that $\frac{\partial M_f(\infty, \alpha)}{\partial \alpha} \geq c M_f(\infty, \alpha)$. In this case, Equation~\ref{fluidMGFdef} is given by $$ 0 = \lambda (e^{\alpha} -1) M_f(\infty,\alpha) + \theta (e^{-\alpha} -1) \frac{\partial M_f(\infty,\alpha)}{\partial \alpha}
- c( \theta - \mu) (e^{-\alpha} -1) M_f(\infty,\alpha) $$ and this simplifies to $$ \frac{\partial M_f(\infty, \alpha)}{\partial \alpha} = \frac{\lambda(e^{\alpha} - 1 ) + c(\theta - \mu)(1 - e^{-\alpha})}{\theta(1 - e^{-\alpha})} M_f(\infty, \alpha) = \frac{\lambda e^{\alpha}+c(\theta - \mu)}{\theta} M_f(\infty, \alpha). $$ Again by use of this case's assumption, we have $$ \frac{\lambda e^{\alpha}+c(\theta - \mu)}{\theta} M_f(\infty, \alpha) \geq c M_f(\infty, \alpha) $$ and this now yields $$ \lambda \geq e^{-\alpha} \left(c \theta - c(\theta - \mu)\right) = c \mu e^{-\alpha}, $$ thus completing the proof. \end{proof} \end{lemma}
We can now use this lemma to find an explicit form for the fluid approximation of the steady-state moment generating function when $\lambda < c\mu e^{-\alpha}$.
\begin{proposition}\label{fluidSteadyMGF2} For $\alpha \geq 0 $, if $\lambda < c\mu e^{-\alpha}$, then in steady-state we have that
\begin{eqnarray} \label{sslessthan}
\frac{\partial M_f(\infty,\alpha)}{\partial \alpha} &=& \frac{\lambda \cdot e^{\alpha} }{\mu } \cdot M_f(\infty,\alpha)
\end{eqnarray} which yields a solution of
\begin{eqnarray}
M_f(\infty,\alpha) &=& e^{\frac{ \lambda \cdot (e^\alpha -1)}{\mu }} \label{lambdamumgf}
\end{eqnarray}
for $\alpha \in \mathbb{R}$.
\begin{proof} By Lemma~\ref{ifflemma} and our assumption that $\lambda < c\mu e^{-\alpha}$, we know that $\frac{\partial M_f(\infty, \alpha)}{\partial \alpha} < c M_f(\infty, \alpha)$. Thus, by observing this in the steady-state MGF equation, we easily obtain the result in Equation~\ref{sslessthan}. Moreover, the solution to Equation~\ref{sslessthan} can be verified by substituting the proposed expression and noting that it satisfies the differential equation; this solution is unique by the standard theory of linear ordinary differential equations.
\end{proof} \end{proposition}
\begin{rem} We now pause to note that the $\lambda \geq c\mu e^{-\alpha}$ case of Lemma~\ref{ifflemma} implies Proposition~\ref{fluidSteadyMGF} (and its following consequences) with a weaker assumption. However, because the condition $\lambda \geq c\mu$ does not depend on the choice of $\alpha$ it is more general, and thus we leave those results as stated with that assumption instead of $\lambda \geq c\mu e^{-\alpha}$. \end{rem}
Here we observe that Equation~\ref{lambdamumgf} is equivalent to the moment generating function of a Poisson random variable with parameter $\frac{\lambda}{\mu}$. Now, by recalling again that the steady-state distribution of an $M/M/\infty$ queue is a Poisson distribution with parameter equal to the arrival rate over the service rate, we find the following inequalities.
\begin{theorem}\label{sandwichthm2} Let $\lambda < c\mu$ and $m \in \mathbb{Z}^+$. Then, if $\theta > \mu$ \begin{align} \E{\Gamma_{\theta}^m} \leq \E{Q(\infty)^m} \leq \E{\Gamma_{\mu}^m}, \intertext{and if $\mu > \theta$} \E{\Gamma_{\mu}^m} \leq \E{Q(\infty)^m} \leq \E{\Gamma_{\theta}^m} \end{align} where $\Gamma_{x} \sim \mathrm{Pois}\left(\frac{\lambda}{x}\right)$ for $x > 0$.
\begin{proof} In each case, the inequality involving $\Gamma_{\mu} \sim \mathrm{Pois}\left(\frac{\lambda}{\mu}\right)$ follows directly from Proposition~\ref{fluidSteadyMGF2} and Theorem~\ref{mMoment} via the observation that the fluid form of the moment generating function is equivalent in distribution to that of $\Gamma_\mu$. Here we are using Proposition~\ref{fluidSteadyMGF2} with $\alpha = 0$, and by continuity we know this holds for some ball around 0. This validates the use of the derivatives of the steady-state MGF with respect to $\alpha$ evaluated at $\alpha = 0$ in finding the moments for the fluid approximation. Thus, we are left to prove the inequalities for $\Gamma_\theta \sim \mathrm{Pois}\left(\frac{\lambda}{\theta}\right)$.\\
To do so, let us first note that the stationary distribution of an $M/M/\infty$ queue with service rate $\theta$ is equivalent to that of $\Gamma_\theta$. Suppose now that $\theta > \mu$. Then, any state of such an $M/M/\infty$ queue has a larger rate of departure than the same state in the Erlang-A system. Thus, we have that $$ \E{\Gamma_{\theta}^m} \leq \E{Q(\infty)^m} \leq \E{\Gamma_{\mu}^m} $$ for all $m \in \mathbb{Z}^+$. By symmetric arguments in the $\theta < \mu$ case, we complete the proof. \end{proof} \end{theorem}
As we did for the case when $\lambda \geq c\mu$, we can use these findings to give explicit expressions for the fluid approximations of the moments and the cumulant moments.
\begin{corollary} If $\lambda < c\mu$, then in steady-state we have that
\begin{equation} \frac{\mathrm{d}G_f(\infty, \alpha)}{\mathrm{d}\alpha}
\Bigr|_{\alpha = 0} = \frac{\lambda }{\mu} = \mathrm{E}[Q_f(\infty)] \end{equation} and for $n \in \mathbb{Z}^+$, \begin{align} \frac{\mathrm{d}^n G_f(\infty,\alpha)}{\mathrm{d}^n \alpha}
\Bigr|_{\alpha = 0} &= \frac{\lambda}{\mu} = \mathrm{C}^{(n)}[Q_f(\infty)]\\ \frac{\mathrm{d}^n M_f(\infty,\alpha)}{\mathrm{d}^n \alpha}
\Bigr|_{\alpha = 0} &= \mathcal{P}_{n} \left( \frac{\lambda}{\mu} \right) = \E{Q_f(\infty)^n} \end{align} where $\mathrm{C}^{(n)}[Q_f(\infty)]$ is defined as the $n^{th}$ cumulant moment of $Q_f(\infty)$ and $\mathcal{P}_m \left( \frac{\lambda}{\mu} \right) $ is the $m^{th}$ Touchard polynomial with parameter $ \frac{\lambda}{\mu} $. \end{corollary}
\subsection{Characterization of the Nonstationary Moment Generating Function}\label{nonstatchar}
Many scenarios that feature customer abandonments may also feature an arrival process that is nonstationary. To incorporate this, we now introduce a point process that can be used to approximate any periodic mean arrival pattern, as discussed in \citet{eick1993mt}. Specifically, we define $\lambda(t)$ by a Fourier series: let $\lambda_0$ and $\{(a_k, b_k), k \in \mathbb{Z}^+\}$ be such that \begin{align}\label{lambdaNonStatDef} \lambda(t) = \lambda_0 + \sum_{k=1}^\infty a_k\sin(kt) + b_k\cos(kt). \end{align} We now take $\lambda(t)$ as the rate of arrivals at time $t$ in the Erlang-A model. Under this setting, we derive the following expression for the cumulant moment generating function of the fluid approximation and its corresponding partial differential equation whenever the arrival rate is sufficiently large. We do so through a series of technical lemmas. First, we bound the fluid mean when the arrival rate and initial value are sufficiently large. \begin{lemma}\label{cLemmaNS} Suppose that $\underline{\lambda} \equiv \inf_{t \geq 0} \lambda(t) > c\mu$ and that $\E{Q_f(0)} > c$. Then, $$ \E{Q_f(t)} > c $$ for all time $t \geq 0$. \begin{proof} We have seen that $\E{Q_f(t)}$ evolves according to
$$
\updot{\mathrm{E}}\left[Q_f(t)\right] = \lambda(t) - \mu (\E{Q_f(t)} \wedge c) - \theta(\E{Q_f(t)} - c)^+
$$
at all times $t$. Now, suppose that $\hat t > 0$ is a time such that $\E{Q_f(\hat t)} = c + \epsilon$ for some $\epsilon > 0$. Then, if $\epsilon < \frac{\underline{\lambda} - c\mu}{\theta}$ we have that
$$
\updot{\mathrm{E}}\left[Q_f(\hat t)\right] = \lambda(\hat t) - c \mu - \theta\epsilon \geq \underline{\lambda} - c \mu - \theta \epsilon > 0 .
$$
By the continuity of the fluid mean and the fact that $\E{Q_f(0)} = q(0) > c$, we see that $\E{Q_f(t)} > c$ for all time $t \geq 0$. \end{proof} \end{lemma} With this in hand, we now also provide the moment generating function for an $M/M/\infty$ queue with nonstationary arrival rate $\lambda(t)$, which we will use for comparison later in this section.
\begin{lemma}\label{mminfMGF} Let $Q_\infty(t)$ be an infinite server queue with nonstationary Poisson arrival rate $\lambda(t)$ and exponential service rate $\mu$ and initial value $Q_\infty(0) = q_0$. Then, $$ \E{e^{\alpha Q_\infty(t)}} = e^{(e^{\alpha} - 1)\left(\frac{\lambda_0}{\mu}(1 - e^{- \mu t}) + \sum_{k=1}^\infty \frac{(a_k\mu + b_k k)\sin(k t) + (b_k\mu - a_k k)(\cos(k t) - e^{- \mu t}) }{\mu^2 + k^2}\right)} \left(e^{- \mu t}(e^{\alpha} - 1) + 1\right)^{q_0} $$ for all $t \geq 0$ and $\alpha \in \mathbb{R}$. \begin{proof} To start, we have that the time derivative of the MGF is $$ \frac{\mathrm{d}\E{e^{\alpha Q_\infty(t)}}}{\mathrm{d} t} = \lambda(t)(e^\alpha - 1)\E{e^{\alpha Q_\infty(t)}} + \mu(e^{-\alpha} - 1)\E{Q_\infty(t) e^{\alpha Q_\infty(t)}} $$ where $\lambda(t)$ is as defined previously: $$ \lambda(t) = \lambda_0 + \sum_{k=1}^\infty a_k\sin(kt) + b_k\cos(kt). $$ This differential equation can be viewed as a partial differential equation when expressed as $$ \mu(1 - e^{-\alpha})\frac{\partial M(\alpha, t)}{\partial \alpha} + \frac{\partial M(\alpha, t)}{\partial t} = \lambda(t)(e^{\alpha} - 1)M(\alpha, t) $$ where $M(\alpha, t)$ is the moment generating function at time $t$ and space parameter $\alpha$. To simplify our effort, we instead consider the differential equation for the cumulant MGF, which is $G(\alpha , t) = \log(M(\alpha, t))$. This PDE is $$ \mu(1 - e^{-\alpha})\frac{\partial G(\alpha, t)}{\partial \alpha} + \frac{\partial G(\alpha, t)}{\partial t} = \lambda(t)(e^{\alpha} - 1) $$ with the initial condition that $$ G(\alpha, 0) = \log\left(\E{e^{\alpha Q_\infty(0)}}\right) = \log\left(e^{\alpha q_0}\right) = \alpha q_0. $$ Using the notation that $G_x = \frac{\partial G}{\partial x}$, we seek to solve the system $$ \begin{cases} \mu(1 - e^{-\alpha})G_\alpha + G_t = \lambda(t)(e^{\alpha} - 1)\\ G(\alpha, 0) = \alpha q_0 \end{cases} $$ and we do so via the method of characteristics. 
For this approach we introduce the characteristic variables $r$ and $s$ and establish the characteristic equations, which are ODE's, as \begin{align*} \frac{\mathrm{d}\alpha}{\mathrm{d}s}(r,s) &= \mu(1 - e^{-\alpha}),\\ \frac{\mathrm{d}t}{\mathrm{d}s}(r,s) &= 1,\\ \frac{\mathrm{d}g}{\mathrm{d}s}(r,s) &= \lambda(t)(e^{\alpha} - 1) \end{align*} with the initial conditions \begin{align*} \alpha(r,0) &= r,\\ t(r,0) &= 0,\\ g(r,0) &= rq_0. \end{align*} We can first see that the ODE's for $\alpha$ and $t$ solve to \begin{align*} \alpha(r,s) = \log(e^{c_1(r) + \mu s} + 1) &\longrightarrow \alpha(r,s) = \log\left((e^r - 1) e^{\mu s} + 1\right)\\ t(r,s) = s + c_2(r) &\longrightarrow t(r,s) = s
\end{align*} and so we can now use these to solve the remaining ODE. After substituting we have $$ \frac{\mathrm{d}g}{\mathrm{d}s}(r,s) = \lambda(s)(e^r - 1) e^{\mu s} $$ which gives a solution of \begin{align*} g(r,s) &= (e^r - 1)\left(\frac{\lambda_0}{\mu}(e^{\mu s} - 1) + \sum_{k=1}^\infty \frac{(a_k\mu + b_k k)\sin(k s)e^{\mu s} + (b_k\mu - a_k k)(\cos(k s)e^{\mu s} - 1) }{\mu^2 + k^2}\right) + rq_0. \end{align*} So, using $s = t$ and $r = \log\left(e^{- \mu t}(e^{\alpha} - 1) + 1\right)$, we have that \begin{align*} G(\alpha, t) &= g(\log\left(e^{- \mu t}(e^{\alpha} - 1) + 1\right), t) \\&= (e^{\alpha} - 1)\left(\frac{\lambda_0}{\mu}(1 - e^{- \mu t}) + \sum_{k=1}^\infty \frac{(a_k\mu + b_k k)\sin(k t) + (b_k\mu - a_k k)(\cos(k t) - e^{- \mu t}) }{\mu^2 + k^2}\right) \\&\qquad+ \log\left(e^{- \mu t}(e^{\alpha} - 1) + 1\right)q_0 \end{align*} and therefore by solving for $M(\alpha, t) = e^{G(\alpha, t)}$ we attain the stated result.
\end{proof} \end{lemma}
Now that we have established these lemmas we proceed with the analysis of the nonstationary Erlang-A. In the next theorem we give explicit forms for the fluid form of the cumulant MGF and its corresponding partial differential equation.
\begin{theorem}\label{fluidNSMGF} If $ \inf_{t \geq 0} \lambda(t) \equiv \underline{\lambda} > c\mu$ and $q(0) > c$, then for all $t \geq 0$ we have that
\begin{align}
\frac{\partial G_f(t,\alpha) }{\partial t} &= \lambda(t) \cdot (e^{\alpha} -1) + \theta \cdot (e^{-\alpha} -1) \cdot \frac{\partial G_f(t,\alpha)}{\partial \alpha} - c \cdot ( \theta - \mu) \cdot (e^{-\alpha} -1) \label{fluidNSMGFeq1}
\end{align} which gives a solution of
\begin{align}
G_f(t, \alpha)
&=
(e^\alpha - 1)\left(\frac{\lambda_0}{\theta}(1 - e^{-\theta t}) + \sum_{k=1}^\infty \frac{(a_k\theta + b_k k)\sin(k t) + (b_k\theta - a_k k)(\cos(k t) - e^{-\theta t}) }{\theta^2 + k^2}\right)
\nonumber
\\&\qquad
+
\frac{c(\theta - \mu)}{\theta}\alpha
+
\log((e^\alpha - 1)e^{-\theta t} + 1)\left( q(0) - \frac{c(\theta - \mu)}{\theta} \right)
\end{align}
for all $t \geq 0$ and all $\alpha \in \mathbb{R}$.
\begin{proof}
From Equation~\ref{cumulantMGF}, we have that the PDE for the fluid approximation's cumulant moment generating function is
$$
\frac{\partial G_f(t,\alpha) }{\partial t} = \lambda(t) (e^{\alpha} -1) + \theta (e^{-\alpha} -1) \frac{\partial G_f(t,\alpha)}{\partial \alpha}
- ( \theta - \mu) (e^{-\alpha} -1) \left( \frac{\partial G(t,\alpha) }{\partial \alpha} \wedge c \right) .
$$
Now, recall that $\frac{\partial G_f(t,\alpha) }{\partial \alpha} = \frac{\E{Q_f(t)e^{\alpha Q_f(t)}}}{\E{e^{\alpha Q_f(t)}}}$. Using the FKG inequality and our observation from Lemma \ref{cLemmaNS} that $\E{Q_f(t)} > c$, we have that
$$
\E{Q_f(t)e^{\alpha Q_f(t)}} \geq \E{Q_f(t)}\E{e^{\alpha Q_f(t)}} > c \E{e^{\alpha Q_f(t)}}
$$
and so $\left(\frac{\partial G_f(t,\alpha) }{\partial \alpha} \wedge c\right) = c$. Thus, we have the PDE given in Equation~\ref{fluidNSMGFeq1} and so now we seek to find its solution. We approach this via the method of characteristics. Because $G_f(0, \alpha) = \log(\E{e^{\alpha Q_f(0)}}) = \alpha q(0)$, we see that we seek to solve the following system
$$
\begin{cases}
\theta(1 - e^{-\alpha})G_{(\alpha)} + G_{(t)} = \lambda(t) (e^{\alpha} - 1) + c(\theta - \mu)(1 - e^{-\alpha})\\
G_f(0,\alpha) = \alpha q(0)
\end{cases}
$$
where $G_{(x)} = \frac{\partial G_f}{\partial x}$. Introducing characteristic variables $r$ and $s$, we have the characteristic ODE's as
\begin{align*}
\frac{\mathrm{d}\alpha}{\mathrm{d}s}(r,s) &= \theta (1 - e^{-\alpha})\\
\frac{\mathrm{d}t}{\mathrm{d}s}(r,s) &= 1\\
\frac{\mathrm{d}g}{\mathrm{d}s}(r,s) &= \lambda(t)(e^{\alpha} - 1) + c(\theta - \mu)(1-e^{-\alpha})
\end{align*}
with initial conditions $\alpha(r, 0) = r$, $t(r,0) = 0$, and $g(r,0) = rq(0)$. Then, we can solve the first two ODE's to see that
\begin{align*}
\alpha(r,s) &= \log((e^r - 1)e^{\theta s} + 1)\\
t(r,s) &= s
\end{align*}
and so we can use these to solve the remaining equation. Substituting in, we have the ODE as
$$
\frac{\mathrm{d}g}{\mathrm{d}s}(r,s) = \lambda(s)e^{\theta s}(e^r - 1) + c(\theta - \mu)\frac{e^{\theta s}(e^r - 1)}{e^{\theta s}(e^r - 1) + 1}
$$
and this now solves to
\begin{align*}
g(r,s)
&=
(e^r - 1)\left(\frac{\lambda_0}{\theta}(e^{\theta s} - 1) + \sum_{k=1}^\infty \frac{(a_k\theta + b_k k)\sin(k s)e^{\theta s} + (b_k\theta - a_k k)(\cos(k s)e^{\theta s} - 1) }{\theta^2 + k^2}\right)
\\&\qquad
+
\frac{c(\theta - \mu)}{\theta}\left(\log\left((e^r-1)e^{\theta s} + 1\right) - r\right)
+
r q(0) .
\end{align*}
Now, we can rearrange our solutions to find $s = t$ and $r = \log((e^\alpha - 1)e^{-\theta t} + 1)$. Then, we have that
\begin{align*}
G_f(t, \alpha)
&=
g(\log((e^\alpha - 1)e^{-\theta t} + 1), t)
\\
&=
(e^\alpha - 1)e^{-\theta t}\left(\frac{\lambda_0}{\theta}(e^{\theta t} - 1) + \sum_{k=1}^\infty \frac{(a_k\theta + b_k k)\sin(k t)e^{\theta t} + (b_k\theta - a_k k)(\cos(k t)e^{\theta t} - 1) }{\theta^2 + k^2}\right)
\\&\qquad
+
\frac{c(\theta - \mu)}{\theta}\left(\alpha - \log((e^\alpha - 1)e^{-\theta t} + 1)\right)
+
\log((e^\alpha - 1)e^{-\theta t} + 1) q(0)
\end{align*}
and this simplifies to the stated result.
\end{proof} \end{theorem}
Like the approach in our investigation of the steady-state scenario, we can now observe that the fluid approximation is equivalent in distribution to an infinite server queue shifted by $\gamma \equiv \frac{c(\theta - \mu)}{\theta}$. This gives rise to the following.
\begin{theorem} For the Erlang-A queue with nonstationary arrival rate $\lambda(t)$ such that $\underline{\lambda} \equiv \inf_{t \geq 0} \lambda(t) > c\mu$ and initial value $q(0) > c$, the fluid approximation of the MGF is equivalent to that of a shifted $M/M/\infty$ queue with arrival rate $\lambda(t)$, service rate $\theta$, initial value $q(0) - \frac{c(\theta - \mu)}{\theta}$, and linear shift $\frac{c(\theta - \mu)}{\theta}$. \begin{proof} Observe from Theorem~\ref{fluidNSMGF} that the fluid MGF for the Erlang-A under these conditions is \begin{align*} &M_f(t, \alpha) = e^{G_f(t , \alpha)} \\ &= e^{ (e^\alpha - 1)\left(\frac{\lambda_0}{\theta}(1 - e^{-\theta t}) + \sum_{k=1}^\infty \frac{(a_k\theta + b_k k)\sin(k t) + (b_k\theta - a_k k)(\cos(k t) - e^{-\theta t}) }{\theta^2 + k^2}\right) +
\frac{c(\theta - \mu)}{\theta}\alpha } \left((e^\alpha - 1)e^{-\theta t} + 1\right)^{ q(0) - \frac{c(\theta - \mu)}{\theta} } \end{align*} which is of a form that we can recognize. Comparing it to Lemma~\ref{mminfMGF}, we can see that $Q_f$ is of the form of a shifted $M/M/\infty$ queue with arrival rate $\lambda(t)$, service rate $\theta$, initial value $q(0) - \frac{c(\theta - \mu)}{\theta}$, and linear shift $\frac{c(\theta - \mu)}{\theta}$, thus enforcing that the fluid model does start at $q(0)$. \end{proof} \end{theorem}
This representation of the fluid approximation allows us to now provide upper and lower bounds for the moments of the Erlang-A system.
\begin{corollary} Let $Q(t)$ represent the Erlang-A queue with nonstationary arrival rate $\lambda(t)$ such that $\underline{\lambda} \equiv \inf_{t \geq 0} \lambda(t) > c\mu$ and initial value $q(0) > c$, and let $Q_f(t)$ represent the corresponding fluid approximation. Then, if $\theta > \mu$ $$ \E{\left(Q_f(t) - \gamma\right)^m} \leq \E{Q(t)^m} \leq \E{Q_f(t)^m} $$
and if $\theta < \mu$ $$ \E{Q_f(t)^m} \leq \E{Q(t)^m} \leq \E{\left(Q_f(t) - \gamma\right)^m} $$ for all time $t > 0$ and all $m \in \mathbb{Z}^+$, where $\gamma = \frac{c(\theta - \mu)}{\theta}$. \begin{proof} In each case, the bound involving the fluid approximation of the moment is a direct consequence of Theorem~\ref{mMoment} and so only the other two bounds remain to be shown. We now note that since we have characterized the fluid approximation as a shifted $M/M/\infty$ queue, the remaining bounds are from the unshifted version of this system and, by following the same arguments as in Theorems~\ref{sandwichthm} and~\ref{sandwichthm2} regarding the rates of departure in the corresponding states of the Erlang-A queue and the $M/M/\infty$ queue, this completes the proof. \end{proof} \end{corollary}
\subsection{Numerical Results}\label{mgfnumres}
In this subsection we describe various numerical experiments demonstrating these findings. We first have Figures~\ref{MGFFig3},~\ref{MGFFig4},~\ref{MGFFig1}, and~\ref{MGFFig2}, which compare simulated value of the moment generating function to their fluid approximations. In the first two figures, the arrival intensity is $\lambda(t) = 5 + \sin(t)$, the service rate is $\mu = 1$, and the number of servers is $c = 5$. The abandonment rates are the differing component of these plots, with $\theta = 0.5$ and $\theta = 2$ as the two respective values. These same comparisons are made in the latter two figures, however in this case the arrival rate is instead $\lambda(t) = 10 + 2\sin(t)$ and the number of servers is $c = 10$. \\
Through these plots one can observe that the true MGF dominates the fluid approximation when $\theta < \mu$ and that the fluid dominates the stochastic value when $\theta > \mu$. This is of course stated with the understanding that for small values of $\alpha$ or for times near 0 the values of the MGF and the approximation are quite close and so with numerical error the surfaces may overlap.
\begin{figure}
\caption{$\lambda(t) = 5 + \sin (t) $, $\mu = 1$, $\theta = 0.5$, $Q(0) = 0$, $c=5$. \\ Front view (left) and rear view (right). }
\label{MGFFig3}
\end{figure}
\begin{figure}
\caption{$\lambda(t) = 5 + \sin (t) $, $\mu = 1$, $\theta = 2$, $Q(0) = 0$, $c=5$. \\ Front view (left) and rear view (right). }
\label{MGFFig4}
\end{figure}
\begin{figure}
\caption{$\lambda(t) = 10 + 2 \cdot \sin (t) $, $\mu = 1$, $\theta = 0.5$, $Q(0) = 0$, $c=10$. \\ Front view (left) and rear view (right). }
\label{MGFFig1}
\end{figure}
\begin{figure}
\caption{$\lambda(t) = 10 + 2 \cdot \sin (t) $, $\mu = 1$, $\theta = 2$, $Q(0) = 0$, $c=10$. \\ Front view (left) and rear view (right). }
\label{MGFFig2}
\end{figure}
In Figure~\ref{limdists} we plot the limiting distribution for the steady-state Erlang-A. For these plots we take $\lambda = 20$ and $\mu = 1$, and then vary $\theta$ and $c$. For the three plots on the left we take the abandonment rate to be $\theta = 0.5$ and for those on the right we set $\theta = 2$. For the top two plots we set the number of servers as $c = 15$, in the middle two $c = 20$, and in the bottom two we make $c = 25$. We observe that the approximate distribution is quite close when $\lambda$ is not near $c\mu$ but the approximation is less accurate when $\lambda = c\mu$. This finding is consistent with much of the literature that focuses on finding novel approximations for queueing networks and optimal control of these networks, see for example \citet{hampshire2010dynamic, hampshire2009time, hampshire2009dynamic, pender2017approximations, niyirora2016optimal, qin2017dynamic}. We note here that these approximations are not all of the same form: recall that when $\lambda \geq c\mu$ the fluid approximation is equivalent in distribution to a shifted Poisson random variable with parameter $\frac{\lambda}{\theta}$, but when $\lambda < c\mu$ it is equivalent to a Poisson distribution with parameter $\frac{\lambda}{\mu}$.
\begin{figure}
\caption{{\small $\theta = 0.5$, $c = 15$}}
\caption{{\small $\theta = 2$, $c = 15$}}
\caption{{\small $\theta = 0.5$, $c = 20$}}
\caption{{\small $\theta = 2$, $c = 20$}}
\caption{{\small $\theta = 0.5$, $c = 25$}}
\caption{{\small $\theta = 2$, $c = 25$}}
\label{fig:limdists-a}
\label{fig:limdists-b}
\label{fig:limdists-c}
\label{fig:limdists-d}
\label{fig:limdists-e}
\label{fig:limdists-f}
\label{limdists}
\end{figure}
In Figure~\ref{singlelimdists} we examine the limiting distributions for the single server case. In these plots we set $\mu = 1$ and then vary the arrival rate and the abandonment rate. On all plots on the left we set $\theta = 0.5$ and on the right $\theta = 2$. Further, in the top pair we make $\lambda = 0.8$, in the middle we let $\lambda = 1$, and in the bottom pair $\lambda = 1.2$. As in Figure~\ref{limdists}, Figure~\ref{singlelimdists} shows that our approximations are quite good. Thus, we are able to capture single server dynamics as well as large-scale multi-server dynamics even though they are quite different. This is even more useful as our approximations are non-asymptotic and don't rely on scaling the number of servers.
\begin{figure}
\caption{{\small $\theta = 0.5$, $\lambda = 0.8$}}
\caption{{\small $\theta = 2$, $\lambda = 0.8$}}
\caption{{\small $\theta = 0.5$, $\lambda = 1$}}
\caption{{\small $\theta = 2$, $\lambda = 1$}}
\caption{{\small $\theta = 0.5$, $\lambda = 1.2$}}
\caption{{\small $\theta = 2$, $\lambda = 1.2$}}
\label{fig:singlelimdists-a}
\label{fig:singlelimdists-b}
\label{fig:singlelimdists-c}
\label{fig:singlelimdists-d}
\label{fig:singlelimdists-e}
\label{fig:singlelimdists-f}
\label{singlelimdists}
\end{figure}
In Figures~\ref{meanpoisfig},~\ref{MGFpoisfig1}, and~\ref{MGFpoisfig2}, we take the arrival rate as $\lambda(t) = 6.5 + \sin (t)$, the service rate as $\mu = 1$, and the number of servers as $c = 5$. Because $\inf_{t \geq 0} \lambda(t) > c \mu$, we use the characterization of the fluid approximation as a shifted $M/M/\infty$ queue and compare the simulated system, the fluid approximation, and the unshifted $M/M/\infty$. In the first figure we consider the mean for $\theta = 1.1$ and $\theta = 0.9$ and find that while the fluid approximation is quite close, the unshifted system is not near to the Erlang-A system, even for these relatively similar rates of service and abandonment. We find the same for the latter two figures, in which we plot the moment generating function for $\theta = 1.1$ and $\theta = 0.9$, respectively.
\begin{figure}
\caption{Queue Mean for $\lambda(t) = 6.5 + \sin (t) $, $\mu = 1$, $Q(0) = 6$, $c=5$. \\ $\theta = 1.1$ (left) and $\theta = 0.9$ (right). }
\label{meanpoisfig}
\end{figure}
\begin{figure}
\caption{MGF for $\lambda(t) = 6.5 + \sin (t) $, $\mu = 1$, $\theta = 1.1$, $Q(0) = 6$, $c=5$. \\ Front view (left) and rear view (right). }
\label{MGFpoisfig1}
\end{figure}
\begin{figure}
\caption{MGF for $\lambda(t) = 6.5 + \sin (t) $, $\mu = 1$, $\theta = 0.9$, $Q(0) = 6$, $c=5$. \\ Front view (left) and rear view (right). }
\label{MGFpoisfig2}
\end{figure}
\section{Conclusion}
In this paper we have investigated the Erlang-A queueing system through comparison to the fluid approximations of its moments and moment generating function as well as of its cumulants and cumulant moment generating function. Through recognizing the convexity in the differential equations describing these approximations, we have found fundamental relationships between the values of these quantities and their fluid counterparts: when the rate of abandonment is less than the rate of service the true value dominates the approximation, when the service rate is larger the approximation dominates the true value, and when the rates of abandonment and service are equal, the two are equivalent.
In forming these inequalities, we have found explicit representations of the fluid approximations through equivalences in distribution with Poisson random variables and infinite server queues, in the stationary and non-stationary cases, respectively. These characterizations both give insight into the approximations themselves and yield natural inequalities that complement those from the approximations. We have demonstrated the performance of these bounds through simulations. Through consideration of both these findings and the empirical experiments, we can identify interesting directions of future work.
For example, it would be of great interest to gain more explicit insights into the gap between the fluid approximations and the true values. This is a non-trivial endeavor, which stems from the non-differentiability and non-closure in the differential equations for the true expectations. The numerical experiments in this work indicate that the fluid approximations may often be quite close but not exact, and additional understanding would be useful in practice. Moreover, extending our results to more complicated queueing systems where the arrival and service processes follow phase type distributions is of interest given the new work of \citet{pender2017approximations, ko2016strong, ko2017diffusion}.
Additionally, it would be even more useful to gain a better understanding of the limiting distribution of the Erlang-A queue. As we discuss in the paper, the empirical experiments in Subsection~\ref{mgfnumres} indicate that the true limiting distributions closely resemble the shifted Poisson distributions that we have found as characterizations of our fluid approximations. In particular, the approximations seem quite close when $\lambda$ is not near $c\mu$. As a simple extension of this work, it can be observed that some sort of combination of the approximation when $\lambda < c\mu$ and of the approximation when $\lambda > c\mu$ could make a nice choice for approximation of the distribution when $\lambda = c\mu$. In some sense, it is not surprising that these approximations are similar to the true limiting distribution, as the Erlang-A appears to be an $M/M/\infty$ queue with service rate $\mu$ (the approximation when $\lambda < c\mu$), when only considering the states up to $c$, and it also resembles some sort of shifted $M/M/\infty$ queue with service rate $\theta$ (which also describes the approximation when $\lambda \geq c\mu$) for states $c + 1$ and beyond. Finally, it would be interesting to extend this to networks of Erlang-A queues as in \citet{pender2017approximating}; however, we would have to keep track of the routing probabilities carefully to preserve the convexity/concavity of the rate functions.
\end{document} | arXiv | {
"id": "1712.08445.tex",
"language_detection_score": 0.6950286626815796,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{A Probabilistic Approach to\\ Voting, Allocation, Matching, and Coalition Formation}
\author{Haris Aziz}
\institute{
Haris Aziz \at
Data61, CSIRO and UNSW
Sydney, Australia \\
Tel.: +61-2-9490\,59090 \\
Fax: +61-2-8306\,0405 \\
\email{haris.aziz@data61.csiro.au}
}
\date{Received: date / Accepted: date}
\maketitle
\begin{abstract}
Randomisation and time-sharing are some of the oldest methods to achieve fairness. I make a case that applying these approaches to social choice settings constitutes a powerful paradigm that deserves an extensive and thorough examination. I discuss challenges and opportunities in applying these approaches to settings including voting, allocation, matching, and coalition formation.
\end{abstract}
\keywords{Social choice, voting, matching, coalition formation, cooperative games, fairness, strategyproofness, Pareto optimality.}
\noindent \textbf{JEL Classification}: C70 $\cdot$ D61 $\cdot$ D63 $\cdot$ D71
\section{Introduction}
Suppose two agents have opposite preferences over the two possible social outcomes. What should be a fair resolution for this problem?
If the outcome is required to be deterministic, then it is patently unfair to one of the agents. However, one can regain fairness by at least three different approaches: (1) resort to randomisation so that each of the social outcomes has equal probability, (2) treat the outcomes as divisible and resort to time sharing where each social outcome has half of the time share, or (3) use a uniform frequency distribution if there will be multiple occurrences of the discrete outcomes. Mathematically, all three resolutions towards fairness are equivalent because the outcomes have equal probability, time-share, or frequency. In the rest of the article, when I describe a probabilistic approach to social choice, I will use it abstractly so as to model approaches (1), (2), and (3).
I argue that although a probabilistic approach has been applied in several social choice settings in both theory and practice~\citep[see e.g., ][]{Ston11a}, there is potential to revisit fundamental social choice settings such as voting, allocation, matching, and coalition formation with this powerful paradigm. Considering the natural aversion of many people to important decisions being based on the toss of the coin, such an approach may be especially useful for time and budget sharing scenarios. A related chapter in this book is by \citet{Bran17b}.
\section{A case for probabilistic social choice}
I first list some of the compelling reasons why probabilistic social choice is a powerful and useful approach.
\begin{enumerate}
\item \textbf{Modeling time-sharing} Some of the settings and their corresponding results in the literature ignore the possibility of time-sharing. For example, several results in voting suppose that only one alternative is selected. However, the voting could be about deciding the fraction of time different genres of music are played on radio. Similarly most of the results in matching and coalition formation suppose that agents form exclusively one coalition or set of partnerships~\citep{Manl13a}. Many of these results need to be re-examined when we allow the flexibility of time-sharing.
\item \textbf{Participatory Budgeting} Voting can also be used to decide on which projects get how much budget~(see e.g.,~\citep{FGM16a,ABM17a}). The approach is getting traction as grassroots participatory budgeting movements grow in stature~\citep{Caba04a}. In this context, a probabilistic social choice view is useful because the probability of an alternative can represent the fraction of the budget allocated to it.
\item \textbf{Achieving fairness} As explained in the example above, a probabilistic or time-sharing approach to social choice is geared towards achieving fairness.
In the example above, no deterministic mechanism can simultaneously be anonymous (treat agents symmetrically) and neutral (treat social outcomes symmetrically). On the other hand, a probabilistic approach easily overcomes this impossibility. Suppose that in the example, the two social outcomes are allocating one item each to the agents where one item is valuable to both and the second item is useless to both. Then the only way to avoid envy is to use a probabilistic approach in a broad sense.
\item \textbf{Incentivizing participation} Another reason to take a probabilistic approach is to provide participation incentives~\citep{BBH15b,ABM17a,ALR18a}. For the example described above, at least one of the agents appears to have no strict incentive to participate if the decision is made deterministically. On the other hand, probabilistic rules can be designed that give each voter the ability to at least make an epsilon difference (in expectation) to the outcome.
\item \textbf{Achieving strategyproofness without resorting to dictatorship} Some of the most striking results in social choice give the message that if one wants agents to have incentives to report truthfully, then one has to resort to dictatorship. In our running example it would mean selecting the preferred social outcome of one pre-specified agent. However, with a probabilistic or time-sharing approach, one can still achieve strategyproofness and also circumvent the prospect of a single agent with over-riding power~\citep{ABBM14a,CLPP12a,Proc10a,PrTe13a}.
\item \textbf{Achieving stability} In much of the social choice and cooperative game theory literature, a theme of results involves the lack of an outcome that satisfies an appropriate notion of stability. In voting, the most prominent result within this theme is Condorcet's theorem, which says that it can be possible that for any given social outcome, a majority of people prefer another outcome. However, Condorcet's cycles vanish when the probabilistic `maximal lottery' rule is used~\citep{ABBH12a,Bran13a}.\footnote{The argument for the existence of such a lottery invokes von Neumann's minimax theorem.} Similarly, core stable outcomes may not exist for general settings such as hedonic coalition formation in which agents have preferences over coalitions they are members of. However, if we allow for probabilistic outcomes or for time-sharing arrangements, there exist stable outcomes~\citep{AhFl03a}.
\item \textbf{Circumventing impossibility results} Social choice is at times notorious for some of the bleak impossibility results in its literature. Several results show that no apportionment method simultaneously satisfies basic monotonicity axioms~\citep{Youn94a}. However, these problems disappear if a bit of randomisation is used. Similarly, there are results pointing out that no deterministic voting rule satisfies some basic consistency properties. However, this is no longer the case if one uses the \emph{maximal lotteries} randomised voting rule~\citep{Bran13a}.\footnote{For further discussion on probabilistic approaches to circumvent impossibility results in voting, we refer to the survey by \citet{Bran17a}.}
\item \textbf{Better welfare guarantees}
When considering cardinal preferences over outcomes, a probabilistic approach may achieve better approximation welfare guarantees while simultaneously achieving other axiomatic properties~\citep{ABP15a,AnPo16a,Proc10a}. In some cases, randomization may allow for better welfare or ex ante Pareto improvement while satisfying stability constraints~\citep{Manj16a}. \end{enumerate}
\section{Research challenges and opportunities}
I outlined several advantages of considering a probabilistic approach.
At an abstract level, resorting to randomisation means that one can consider the full continuous space of outcomes. This can both be a challenge as well as an opportunity for new and exciting research.
\begin{enumerate}
\item \textbf{Formalizing and exploring a range of solution concepts and axioms}
When considering probabilistic or time-sharing approaches, there are several ways in which a solution concept for deterministic settings can be extended to probabilistic settings. Take for example pairwise stability for the classic stable matching problem in which we want to pair men and women in a way so that no man and woman not paired with each other want to elope. When considering probabilistic outcomes, there is a hierarchy of stability concepts that are all generalisations of deterministic stability~\citep{AzKl17a,DoYi16a,KeUn15a,Manj16a}. Understanding the nature and structure of these properties is already a significant research direction. More importantly, the potential to generalize important axioms based on stability, Pareto optimality, and fairness in several different ways gives useful creative leeway for institution designers for exploring the tradeoffs and compatibility between different levels of properties.
Similar to the potential of defining several levels of axiomatic properties, one can explore different levels of properties of mechanisms. A case in point is strategyproofness and participation incentives.
\item \textbf{Eliciting, representing, and reasoning about preferences}
In most voting or matching settings, agents express preferences over deterministic outcomes. As we move from deterministic to probabilistic settings, there is an interesting challenge to elicit and represent agents' risk attitudes towards different lotteries. One possible approach that involves compact preferences is to extend the agents' preferences over discrete outcomes to preferences over probabilistic outcomes by \emph{lottery extension} methods such as first order stochastic dominance~\citep{Bran13b,Bran17a,BoMo01a,ABBH12a,Cho15a}.
\item \textbf{Designing time-sharing mechanisms} Since modeling time-sharing is one of the most important motivations of probabilistic social choice, it is important to come up with compelling time-sharing mechanisms. Although voting has been studied for decades, a probabilistic perspective leads to interesting and meaningful new voting rules~\citep[see e.g.,\xspace][]{AzSt14a}.
When allowing for fractional outcomes, several well-known mechanisms such as Gale-Shapley Deferred Acceptance or Gale's Top Trading Cycles need to be generalized~\citep{KeUn15a,AtSe11a,BoMo04a}.
\item \textbf{Efficiency issues} When trying to achieve fairness via randomization, a straightforward approach is to uniformly randomize over reasonable deterministic outcomes or reasonable deterministic mechanisms. However, such a naive approach such as randomizing over Pareto optimal alternatives can lead to loss of ex ante efficiency. This phenomenon is starkly highlighted by the fact that random serial dictatorship that involves uniform randomization over the class of serial dictatorships can lead to unambiguous loss of welfare~\citep{BoMo01a,ABBH12a}. This issue motivates the need to design interesting new mechanisms that are not victim to such a phenomenon.
\item \textbf{Computational complexity} Generally speaking, optimising in continuous environments is computationally more tractable than in discrete environments. However, when considering time-sharing outcomes that are implicitly convex combinations of a potentially exponential number of discrete outcomes, computing the time shares can be a computationally arduous task~\citep{ABBM14a}. Therefore, when formulating time-sharing mechanisms for different social choice settings, computational complexity rears its head as a potential challenge as well as an opportunity for innovative algorithmic research.
\item \textbf{Instantiating a lottery} As mentioned earlier, uniformly randomizing over desirable outcomes can result in loss of efficiency or computational intractability. Therefore, mechanisms may first use an alternative way to find an \emph{expected} `fractional' outcome---say a weighted matching signifying probabilities for partnerships. If approach (1) is being used, then the expected outcome needs to be instantiated via a concrete lottery. Finding a suitable lottery is trivial in single-winner voting and easy for simple assignment settings\footnote{Any fractional bipartite matching can be represented as a convex combination of discrete matchings via Birkhoff's algorithm.} but can become a challenge for richer settings with more complex constraints~\citep{BCKM12a}. When instantiating a lottery, an interesting challenge that arises is to instantiate over deterministic outcomes that \emph{also} satisfy some weaker notions of stability, fairness, or other properties~(see e.g., \citep{TST01a,AkNi17a}).
\end{enumerate}
To conclude, a probabilistic approach to social choice in particular voting, allocation, matching, and coalition formation leads to several interesting research questions and directions.
\paragraph{Acknowledgements}
The author is supported by a Julius Career Award. He thanks Gabrielle Demange, J{\"{o}}rg Rothe, Nisarg Shah, Paul Stursberg, Etienne Billette De Villemeur, and Bill Zwicker for useful feedback. He thanks all of his collaborators on this topic in particular Florian Brandl, Felix Brandt, and Bettina Klaus for several insightful discussions. Finally he thanks Herv{\'{e}} Moulin and Bill Zwicker for encouraging him to write the chapter.
\begin{thebibliography}{32} \providecommand{\natexlab}[1]{#1} \providecommand{\url}[1]{\texttt{#1}} \expandafter\ifx\csname urlstyle\endcsname\relax
\providecommand{\doi}[1]{doi: #1}\else
\providecommand{\doi}{doi: \begingroup \urlstyle{rm}\Url}\fi
\bibitem[Aharoni and Fleiner(2003)]{AhFl03a} R.~Aharoni and T.~Fleiner. \newblock On a lemma of Scarf. \newblock \emph{Journal of Combinatorial Theory Series B}, 87:\penalty0 72--80,
2003.
\bibitem[Akbarpour and Nikzad(2017)]{AkNi17a} M.~Akbarpour and A.~Nikzad. \newblock Approximate random allocation mechanisms. \newblock Technical Report 422777, SSRN, 2017.
\bibitem[Anshelevich and Postl(2016)]{AnPo16a} E.~Anshelevich and J.~Postl. \newblock Randomized social choice functions under metric preferences. \newblock In \emph{Proceedings of the 25th International Joint Conference on
Artificial Intelligence (IJCAI)}, pages 46--59. AAAI Press, 2016.
\bibitem[Anshelevich et~al.(2015)Anshelevich, Bhardwaj, and Postl]{ABP15a} E.~Anshelevich, O.~Bhardwaj, and J.~Postl. \newblock Approximating optimal social choice under metric preferences. \newblock In \emph{Proceedings of the 29th AAAI Conference on Artificial
Intelligence (AAAI)}, pages 777--783, 2015.
\bibitem[Athanassoglou and Sethuraman(2011)]{AtSe11a} S.~Athanassoglou and J.~Sethuraman. \newblock House allocation with fractional endowments. \newblock \emph{International Journal of Game Theory}, 40\penalty0
(3):\penalty0 481--513, 2011.
\bibitem[Aziz and Klaus(2017)]{AzKl17a} H.~Aziz and B.~Klaus. \newblock A taxonomy of stability concepts for random matching under
priorities. \newblock Technical Report 1707.01231, arXiv.org, 2017.
\bibitem[Aziz and Stursberg(2014)]{AzSt14a} H.~Aziz and P.~Stursberg. \newblock A generalization of probabilistic serial to randomized social choice. \newblock In \emph{Proceedings of the 28th AAAI Conference on Artificial
Intelligence (AAAI)}, pages 559--565. AAAI Press, 2014.
\bibitem[Aziz et~al.(2013)Aziz, Brandt, and Brill]{ABBH12a} H.~Aziz, F.~Brandt, and M.~Brill. \newblock On the tradeoff between economic efficiency and strategyproofness in
randomized social choice. \newblock In \emph{Proceedings of the 12th International Conference on
Autonomous Agents and Multi-Agent Systems (AAMAS)}, pages 455--462. IFAAMAS,
2013.
\bibitem[Aziz et~al.(2014)Aziz, Brandt, Brill, and Mestre]{ABBM14a} H.~Aziz, F.~Brandt, M.~Brill, and J.~Mestre. \newblock Computational aspects of random serial dictatorship. \newblock \emph{ACM SIGecom Exchanges}, 13\penalty0 (2):\penalty0 26--30, 2014.
\bibitem[Aziz et~al.(2017)Aziz, Bogomolnaia, and Moulin]{ABM17a} H.~Aziz, A.~Bogomolnaia, and H.~Moulin. \newblock Fair mixing: the case of dichotomous preferences. \newblock 2017. \newblock Working paper.
\bibitem[Aziz et~al.(2018)Aziz, Luo, and Rizkallah]{ALR18a} H.~Aziz, P.~Luo, and C.~Rizkallah. \newblock {R}ank maximal equal contribution: a probabilistic social choice
function. \newblock In \emph{Proceedings of the 32nd AAAI Conference on Artificial
Intelligence (AAAI)}, 2018. \newblock Forthcoming.
\bibitem[Bogomolnaia and Moulin(2001)]{BoMo01a} A.~Bogomolnaia and H.~Moulin. \newblock A new solution to the random assignment problem. \newblock \emph{Journal of Economic Theory}, 100\penalty0 (2):\penalty0
295--328, 2001.
\bibitem[Bogomolnaia and Moulin(2004)]{BoMo04a} A.~Bogomolnaia and H.~Moulin. \newblock Random matching under dichotomous preferences. \newblock \emph{Econometrica}, 72\penalty0 (1):\penalty0 257--279, 2004.
\bibitem[Brandl(2013)]{Bran13b} F.~Brandl. \newblock Efficiency and incentives in randomized social choice. \newblock Master's thesis, Technische Universit{\"a}t M{\"u}nchen, 2013.
\bibitem[Brandl et~al.(2015)Brandl, Brandt, and Hofbauer]{BBH15b} F.~Brandl, F.~Brandt, and J.~Hofbauer. \newblock Incentives for participation and abstention in probabilistic social
choice. \newblock In \emph{Proceedings of the 14th International Conference on
Autonomous Agents and Multi-Agent Systems (AAMAS)}, pages 1411--1419.
IFAAMAS, 2015.
\bibitem[Brandl et~al.(2016)Brandl, Brandt, and Seedig]{Bran13a} F.~Brandl, F.~Brandt, and H.~G. Seedig. \newblock Consistent probabilistic social choice. \newblock \emph{Econometrica}, 84\penalty0 (5):\penalty0 1839--1880, 2016.
\bibitem[Brandt(2017)]{Bran17a} F.~Brandt. \newblock Rolling the dice: {R}ecent results in probabilistic social choice. \newblock In U.~Endriss, editor, \emph{Trends in Computational Social Choice},
chapter~1, pages 3--26. AI Access, 2017.
\bibitem[Brandt(2018)]{Bran17b} F.~Brandt. \newblock Collective choice lotteries: {D}ealing with randomization in economic
design. \newblock In J.-F. Laslier, H.~Moulin, R.~Sanver, and W.~S. Zwicker, editors,
\emph{The Future of Economic Design}. Springer-Verlag, 2018. \newblock Forthcoming.
\bibitem[Budish et~al.(2013)Budish, Che, Kojima, and Milgrom]{BCKM12a} E.~Budish, Y.-K. Che, F.~Kojima, and P.~Milgrom. \newblock Designing random allocation mechanisms: {T}heory and applications. \newblock \emph{American Economic Review}, 103\penalty0 (2):\penalty0 585--623,
2013.
\bibitem[Cabannes(2004)]{Caba04a} Y.~Cabannes. \newblock Participatory budgeting: a significant contribution to participatory
democracy. \newblock \emph{Environment and Urbanization}, 16\penalty0 (1):\penalty0
27--46, 2004.
\bibitem[Chen et~al.(2013)Chen, Lai, Parkes, and Procaccia]{CLPP12a} Y.~Chen, J.~K. Lai, D.~C. Parkes, and A.~D. Procaccia. \newblock Truth, justice, and cake cutting. \newblock \emph{Games and Economic Behavior}, 77\penalty0 (1):\penalty0
284--297, 2013.
\bibitem[Cho(2016)]{Cho15a} W.~J. Cho. \newblock Incentive properties for ordinal mechanisms. \newblock \emph{Games and Economic Behavior}, 95:\penalty0 168--177, 2016.
\bibitem[Dogan and Yildiz(2016)]{DoYi16a} B.~Dogan and K.~Yildiz. \newblock Efficiency and stability of probabilistic assignments in marriage
problems. \newblock \emph{Games and Economic Behavior}, 95:\penalty0 47--58, 2016.
\bibitem[Fain et~al.(2016)Fain, Goel, and Munagala]{FGM16a} B.~Fain, A.~Goel, and K.~Munagala. \newblock The core of the participatory budgeting problem. \newblock In \emph{Web and Internet Economics - 12th International Conference,
{WINE} 2016, Montreal, Canada, December 11-14, 2016, Proceedings}, pages
384--399, 2016.
\bibitem[Kesten and Unver(2015)]{KeUn15a} O.~Kesten and U.~Unver. \newblock A theory of school choice lotteries. \newblock \emph{Theoretical Economics}, pages 543--595, 2015.
\bibitem[Manjunath(2016)]{Manj16a} V.~Manjunath. \newblock Fractional matching markets. \newblock \emph{Games and Economic Behavior}, 100:\penalty0 321--336, 2016.
\bibitem[Manlove(2013)]{Manl13a} D.~F. Manlove. \newblock \emph{Algorithmics of Matching Under Preferences}. \newblock World Scientific Publishing Company, 2013.
\bibitem[Procaccia(2010)]{Proc10a} A.~D. Procaccia. \newblock Can approximation circumvent {G}ibbard-{S}atterthwaite? \newblock In \emph{Proceedings of the 24th AAAI Conference on Artificial
Intelligence (AAAI)}, pages 836--841. AAAI Press, 2010.
\bibitem[Procaccia and Tennenholtz(2013)]{PrTe13a} A.~D. Procaccia and M.~Tennenholtz. \newblock Approximate mechanism design without money. \newblock \emph{ACM Transactions on Economics and Computation}, 1\penalty0 (4),
2013.
\bibitem[Stone(2011)]{Ston11a} P.~Stone. \newblock \emph{The Luck of the Draw: {T}he Role of Lotteries in Decision
Making}. \newblock Oxford University Press, 2011.
\bibitem[Teo et~al.(2001)Teo, Sethuraman, and Tan]{TST01a} C-P. Teo, J.~Sethuraman, and W-P Tan. \newblock Gale-Shapley stable marriage problem revisited: Strategic issues and
applications. \newblock \emph{Management Science}, 49\penalty0 (9):\penalty0 1252--1267,
2001.
\bibitem[Young(1994)]{Youn94a} H.~P. Young. \newblock \emph{Equity: in Theory and Practice}. \newblock Princeton University Press, 1994.
\end{thebibliography}
\end{document} | arXiv | {
"id": "2002.10171.tex",
"language_detection_score": 0.7984715104103088,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Stability of Bounded Solutions for Degenerate Complex Monge-Amp\`ere Equations}
\author{S\l awomir Dinew\\ Jagiellonian University, Krak\'ow\\ Zhou Zhang\\ Department of Mathematics,\\ University of Michigan, at Ann Arbor} \date{} \maketitle
\begin{abstract} We show a stability estimate for the degenerate complex Monge-Amp\`ere operator that generalizes a result of Ko\l odziej \cite{koj}. In particular, we obtain the optimal stability exponent and also treat the case when the right hand side is a general Borel measure satisfying certain regularity conditions. Moreover our result holds for functions plurisubharmonic with respect to a big form generalizing thus the K\"ahler form setting in \cite{koj}. \end{abstract}
\section{Introduction and the Main Theorem}
In this work, we generalize and strengthen Ko\l odziej's stability result concerning bounded solutions for complex Monge-Amp\`ere equations, which is summarized in \cite{koj} (see also \cite{kojnotes}). The solutions are understood in the sense of pluripotential theory, i.e. we do not impose any other regularity than upper semicontinuity and boundedness. It is, however, a classical fact that the image of the Monge-Amp\`ere operator can be well defined as a Borel measure in this setting.
The equation is considered over a closed K\"ahler manifold $X$\ of complex dimension $n\geqslant 2 $ \footnote{When $n=1$, the manifold is a Riemann surface and Monge-Amp\`ere operator is just the Laplace operator.}.
Suppose $\omega$ is a real smooth closed semi-positive $(1,1)$-form over $X$, $\Omega$ is a positive Borel measure on $X$ and $f\in L^p(X)$ for some $p>1$ is non-negative, where the definition of the function space $L^p(X)$ is with respect to $\Omega$. The equation we consider is $$(\omega+\sqrt{-1}\partial\bar{\partial }u)^n=f\Omega.$$ Using $d=\partial+\bar{\partial }$\ and $d^c:=\frac{\sqrt{-1}}{2} (\bar{\partial}-\partial)$ we have $dd^c=\sqrt{-1}\partial\bar {\partial }$ and this convention is also often used in the literature.
As mentioned above, we require regularity of $u$ much less than what is needed to make pointwise sense for the left hand side. More specifically, we look for solutions in the function class $PSH_\omega(X)\cap L^\infty(X)$, where $u\in PSH_\omega(X)$ means that $\omega+\sqrt{-1}\partial \bar{\partial }u$ is non-negative in the sense of distribution theory.
Of course, there is an obvious condition for the existence of such a solution coming from global integration over $X$, i.e. $\int_X{\omega }^n=\int_Xf\Omega$. This condition follows from Stokes' theorem in the smooth case, and hence (by smooth approximation) in our case as well.
Ko\l odziej mainly studied the case when $\omega$ is a K\"ahler metric, or equivalently, $[\omega]$ is a K\"ahler class, and $\Omega$\ is a smooth volume form. The existence of bounded solution in this case is proved. In fact, even more general $f$'s than $L^p$ functions are treated in \cite{koj98}, but for our main concern, we restrict to $L^p$ functions. Further, in this case, the bounded solution is always continuous as proved in \cite{koj98}. So in the discussion of stability there, continuity of the solutions is naturally assumed.
The degeneration we want to consider in this note is in two places.
First we allow $\omega$\ to be just semi-positive instead of being K\"ahler; we are especially interested in the case when $\omega$ is the pullback of a K\"ahler metric under a holomorphic map preserving dimensions. The following theorem from \cite{zh} gives the precise picture of $\omega$ and the corresponding existence result. This result uses an argument very close to Ko\l odziej's. Both of them have found the notion of relative capacity, introduced in \cite{bed-tay}, extremely useful.
\begin{theo}\label{1.1}
Let $X$ be a closed K\"ahler manifold with (complex) dimension $n\geqslant 2$. Suppose we have a holomorphic map $F: X \to\mathbb{CP}^N$ with the image $F(X)$ of the same dimension as $X$. Let $\omega_M$ be any K\"ahler form over some neighbourhood of $F(X)$ in $\mathbb{CP}^N$. For the following equation of Monge-Amp\`ere type: \begin{equation} (\omega+\sqrt{-1}\partial\bar\partial u)^n=f\Omega,\nonumber \end{equation} where $\omega=F^*\omega_M$, $\Omega$ is a fixed smooth (non-degenerate) volume form over $X$ and $f$ is a nonnegative function in $L^p(X)$ for some $p>1$ with the correct total integral over $X$, i.e. $\int_X f\Omega=\int_X(F^*\omega_M)^n$, then we have the following:
(1) (A priori estimate) If $u$ is a weak solution in $PSH_{\omega}(X)\cap L^{\infty}(X)$ of the equation with the normalization $sup_X u=0$, then there is
a constant $C$ such that $\|u\|_{L^\infty}\le C\|f\|^n_{ L^p}$ where $C$ only depends on $F$, $\omega$ and $p$;
(2) (Existence of a bounded solution) There exists a bounded (weak) solution for this equation;
(3) (Continuity and uniqueness of bounded solution) If $F$ is locally birational, any bounded solution is actually the unique continuous solution.
\end{theo}
The a priori estimate was obtained independently in \cite{EGZ} (even for more general big forms), and later generalized to more singular right hand side in \cite{DP}. As for the continuity of the solution, despite serious effort, the situation is still a little bit unclear. It is not known whether continuity holds when $\omega$\ is a general semi-positive closed form with continuous (even smooth) potentials and positive total integral. This problem has attracted much interest recently, and for this reason we take the opportunity to present a detailed proof of the continuity in the situation above. Indeed, the argument in \cite{zh} \footnote{See also in \cite{thesis} where it is not so easily separated from the context.} is a bit too sketchy and therefore hard to follow. This will be done in Section~\ref{continuity}.
Regardless of that in our discussion of stability we do not impose
{\it a priori} continuity of the solutions. The methods we use are
independent of that assumption. So, theoretically, solutions might
be discontinuous in general, but uniformly close to each other if
we perturb the data a little. Needless to say, this is quite an artificial
situation. So our results strongly support (but in no way prove)
the common belief that continuity holds in general.
Our second degeneration is that we allow $\Omega$\ on the right hand side to be a Borel measure instead of smooth volume form. Then some restrictions must be imposed, since weak solutions for such an equation might not be bounded anymore (for example, if $\Omega$\ is the Dirac delta measure at some point). Worse yet, there are measures for which existence of solutions (bounded or not) is not known so far. Therefore we impose some seemingly natural conditions on $\Omega$\ that guarantee boundedness of the solutions.
\begin{defi}
We say that a Borel measure is {\it well dominated by capacity for $L^p$\ functions}, if there exist constants $\alpha>0$\ and $\chi>0$, such that for any compact $K\subset X$\ and any non-negative $f\in L^p(\Omega),\ p>1$ one has for some constant $C$\ independent of $K$, (but dependent on $f$) $$\Omega(K)\leq C cap_{\omega}(K)^{1+\alpha},\ \ \quad{\rm and}\quad \int_Kf \Omega\leq C cap_{\omega}(K)^{1+\chi}$$
\end{defi}
A very similar notion (only the first condition is imposed) is discussed in \cite{EGZ}. Both are variations of the so-called condition (A), introduced by S. Ko\l odziej in \cite{koj98}. These conditions (which actually are stronger than condition (A)) force boundedness for the solutions $u$\ of $$(\omega+\sqrt{-1}\partial\bar\partial u)^n=f\Omega$$ (see \cite{koj98} for the case $\omega$\ is K\"ahler, and \cite{EGZ} for the case $\omega$\ is merely semi-positive).
A few words on the second assumption. When $\Omega$\ is a smooth volume form it is known (again see \cite{koj98} and \cite{EGZ}) that the first condition is satisfied for every $\alpha>0$. Hence by an elementary application of the H\"older inequality the second condition is also satisfied (for every $\chi>0$). The same reasoning also shows that the second condition is a consequence of the first provided $p$\ is big enough (if $\frac{(1+\alpha)(p-1)}{p}>1$). Anyway, one has to impose some condition, since {\it a priori} $f\Omega$\ is more singular than $\Omega$.
Note that, as in \cite{koj98} or \cite{kojnotes}, the exponent $\chi>0$\ is used to construct an {\it admissible} function $Q$\ with proper polynomial growth and afterwards the function $\kappa$\ and its inverse $\gamma$\ (see below for a discussion). When the volume form is smooth one can take arbitrary $\chi>0$\ (of course the bigger $\chi$\ we take, the better). Using this, in \cite{koj} it was shown that one can produce a function $\gamma(t)$\ with growth like $t^{\epsilon},\ \forall \epsilon>0$\ near $0$. When $\chi$\ is bounded from above (i.e. we assume it is a fixed constant dependent on the measure $\mu$), calculations as in \cite{kojnotes} or \cite{koj} show that one can take $\gamma(t)\approx t^{\frac n{\chi}}$. In order to avoid too many technicalities throughout the note we shall work with the assumption that $\chi$\ can be taken arbitrarily large. At the end (see Remark \ref{rem4.1}) we will explain how to modify the argument in the case of fixed $\chi$ and obtain the stability exponent in this case as well.
As mentioned in the thesis of the second named author \cite{thesis}, Ko\l odziej's original argument is almost good enough for us except for two issues. One of them, about Comparison Principle, is doable using the regularizing result in \cite{blo-koj}. The other one, an inequality for mixed Monge-Amp\`ere measures, looks hard to justify for bounded functions. Recently, this has been treated by the first named author in \cite{dinew} for even more general class of functions.
Now let's state the main theorem.
\begin{theo}
In the same set-up as in the theorem above (we assume that $\Omega$\ is well dominated by capacity for $L^p$\ functions), for any non-negative $L^{p}(\Omega)$-functions $f$ and $g$ with $p>1$ which have the proper total integral over $X$, i.e., $\int_X f\Omega=\int_X g\Omega=\int_X {\omega} ^n$, suppose that $\phi$ and $\psi$ in $PSH_{\omega}\cap L ^\infty(X)$ satisfy ${\omega_{\phi}}^{n}=f{\omega}^{n}$ and ${\omega_{\psi} }^{n}=g {\omega}^{n}$ respectively and are normalized by the conditions $max_{X}\{\phi-\psi\}=max_{X}\{\psi-\phi\}$. Let also $\epsilon>0$\ be arbitrary.
If $\|f-g\|_{L^{1}}\leqslant\gamma (t)t^{n+\epsilon}$ for $\gamma(t)=C\kappa ^{-1}(t)$ with some proper non-negative constant $C$ depending only \footnote{The manifold $X$ and metric $\omega$\ also affect $C$.} on the $L^{p}$-norms of $f$ and $g$, where $\kappa^{-1}(t)$ is the inverse function of the following $\kappa$ function, $$\kappa(r)=C_{n}A^{\frac{1}{n}}\bigl(\int_{r^{-\frac{1}{n}}}^{\infty} y^{-1}(Q(y))^{-\frac{1}{n}}dy+\bigl(Q(r^{-\frac{1}{n}}) \bigr)^{-\frac{1}{n}}\bigr),$$ where $C_{n}$ is a positive constant only depending on the complex dimension $n$ and $Q$ is an increasing positive function with proper polynomial growth, then we can conclude that
$$\|\phi-\psi\|_{L^{\infty}}\leqslant Ct$$ for $t<t_{0}$ where $t_{0}>0$ depends on $\gamma$ and $C$ depends on the $L^{p}$-norms of $f$ and $g$.
\end{theo}
As a direct application, we obtain uniqueness of the bounded solution from Theorem \ref{1.1}.
Another corollary is the following stability estimate.
\begin{coro}\label{exp2}
In the same setting as above there exists a constant $c=c(p,\epsilon,c_0)$\ where $c_0$\ is an upper bound for $||f||_p$ and $||g||_p$\ such that
$$||\phi-\psi||_{\infty}\leq c||f-g||_1^{\frac 1{n+\epsilon}}$$
\end{coro}
\begin{rema}
The exponent in the last corollary is improved compared to \cite{koj}. As example \ref{exa} shows, the exponent we obtain is optimal.
\end{rema} \begin{rema} The Monge-Amp\`ere equation with $\omega$\ big instead of K\"ahler has been studied extensively in the recent years (see \cite{BGZ}, \cite{DP}, \cite{EGZ}). \end{rema}
The applications of the result above could go in two directions. The semi-positivity is particularly interesting in geometry, since the situation we have described above appears naturally in the study of algebraic manifolds of general type (or big line bundles in general) (see e.g. \cite{t-znote}). The degeneration of the measure on the right hand side, in turn, might be useful in complex dynamics and pluripotential theory. Complex dynamics often deals with such singular measures and it is an important question to obtain any regularity for the potential of such measures. The same question is crucial in pluripotential theory while studying extremal functions.
{\bf Acknowledgment.} The authors would like to thank professor S. Ko\l odziej for all the generous help in the formation of this work and beyond. His suggestion for such a joint work is also very important for beginners like us. This work was initiated during the second named author's visit at MRSI (Mathematical Sciences Research Institute) and he would like to thank the institute and the department of Mathematics at University of Michigan, at Ann Arbor, for the arrangement to provide such a wonderful opportunity.
\section{Stability for Nondegenerate Monge-Amp\`ere Equations}
For readers' convenience, Ko\l odziej's stability argument will be included here. We are going to use global version of the notions, for example, capacity for the closed manifold $X$.
Specifically, in this part all the plurisubharmonic functions with respect to the K\"ahler metric $\omega$ ($\omega$-$PSH$ for short) are continuous by definition. As explained before, this brings no difference in this case. So the Comparison Principle between them can be justified by Richberg's approximation as in \cite{koj}.
Basically, all the following argument is directly quoted from \cite{koj}.\\
Claim: Let $\phi, \psi\in PSH_\omega(X)$\ and satisfy $0\leqslant\phi\leqslant C$, then for $s<C+1$, we have $$Cap_{\omega}(\{\psi+2s<\phi\})\leqslant\bigl(\frac{C+1}{s}\bigr) ^{n}\int_{\{\psi+s<\phi\}}(\omega+\sqrt{-1}\partial\bar{\partial} \psi)^{n}.$$
\begin{proof}
Define $E(s):=\{\psi+s<\phi\}$. Take any $\rho\in PSH_{\omega}(X)$ valued in $[-1, 0]$. Set $V=\{\psi <\frac{s}{C+1}\rho+(1- \frac{s}{C+1})\phi-s\}$. Since $-s\leqslant\frac{s}{C+1}\rho-\frac{s}{C+1}\phi \leqslant 0$, we can easily deduce the following chain relation of sets: $$E(2s)\subset V\subset E(s).$$ Then we can have the following computation (with notation $\omega_\rho:=\omega+\sqrt{-1}\partial\bar{\partial}\rho$):
\begin{equation} \begin{split} (\frac{s}{C+1})^{n}\int_{E(2s)}(\omega+\sqrt{-1}\partial\bar{ \partial}\rho)^{n} &\leqslant \int_{V}(\frac{s}{C+1}\omega_{\rho}+(1-\frac{s}{C+ 1})\omega_{\phi})^{n}\\ &\leqslant \int_{V}{\omega_{\psi}}^{n} \leqslant \int_{E(s)}{\omega_{\psi}} ^{n}\nonumber \end{split} \end{equation} by the relation of sets above and applying comparison principle for the two functions appearing in the definition of the set $V$.
Finally we can conclude the result from the definition of $Cap_{\omega}$.
\end{proof}
Now we state the following version of stability result, which is slightly weaker than the result in \cite{kojnotes}.
\begin{theo}
In the same set-up as before, for any nonnegative $L^{p}$-functions $f$ and $g$ with $p>1$ which have the proper total integral over $X$, i.e., $\int_X f{\omega}^n=\int_X g{\omega}^n=\int_X {\omega}^n$, suppose that $\phi$ and $\psi$ in $PSH_{\omega}(X)$ satisfy ${\omega_{\phi}}^{n}=f{\omega} ^{n}$ and ${\omega_{\psi}}^{n}=g {\omega}^{n}$ respectively and are normalized by the condition $max_{X}\{\phi-\psi\}=max_{X}\{\psi -\phi\}$.
If $\|f-g\|_{L^{1}}\leqslant\gamma (t)t^{n+3}$ for $\gamma(t)=C\kappa ^{-1}(t)$ with some proper nonnegative constant $C$ depending only on the $L^{p}$-norms of $f$ and $g$ \footnote{The dependence on the manifold $X$ and K\"ahler metric $\omega$ should be clear.}, where $\kappa^{-1}(t)$ is the inverse function of the $\kappa$ function in the main theorem, then we can conclude that
$$\|\phi-\psi\|_{L^{\infty}}\leqslant Ct$$ for $t<t_{0}$ where $t_{0}>0$ depends on $\gamma$ and $C$ depends on the $L^{p}$-norms of $f$ and $g$.
\end{theo}
\begin{proof}
Suppose $\|f\|_{L^{p}}, \|g\|_{L^{p}}\leqslant A$. We will be careful about the fact that the constants in the argument will only depend on $A$ and the function $\gamma$.
For simplicity, let us normalize to have $\int_{X}{\omega}^{n}=1$. And in fact, we can also assume $max_{X}\{\phi-\psi\}=max_{X}\{ \psi-\phi\}>0$ since the case for $=0$ is trivial \footnote{In this case, we can have $\phi-\psi\leqslant 0$ and $ \psi-\phi\leqslant 0$, which says $\phi=\psi$. In other words, we have the compatible direction.}.
Without loss of generality, assume $\int_{\{\psi<\phi\}}(f+g) {\omega}^{n}\leqslant 1$, since $\int_{X}f{\omega}^{n}=\int_{X} g{\omega}^{n}=1$ and, if needed, one can interchange the roles of $\psi$\ and $\phi$.
Then by adding the same constant to $\phi$ and $\psi$ which obviously affects nothing, we can assume $0\leqslant\phi \leqslant a$ where "$a$'' is a positive constant only depending on $A$ from the boundedness result before.
Of course we can take a larger "$a$'', which we shall actually do below, as long as the dependence on $A$ is clear, or say finally we can still fix it to be some positive constant only dependent on $A$.
As $\lim_{t\to 0}\gamma(t)=0$ by definition and the property of the function $\kappa$, we can fix $0<t_{0}<1$ sufficiently small such that $\gamma(t_{0}){t_{0}}^{n+3}<\frac{1}{3}$, which will also hold for $0<t<t_{0}$ since $\gamma(t)t^{n+3}$ is obviously increasing in $t$.
Fix such a $t$ for now and set $E_{k}=\{\psi<\phi-k a t\}$ where the "$a$'' is from above, though its final value has not yet been fixed.
Clearly we have: $$\int_{E_{0}}g{\omega}^{n}=\frac{1}{2}\int_{E_{0}}\bigl((f+g)+ (g-f)\bigr){\omega}^{n}\leqslant\frac{1}{2}(1+\frac{1}{3})=\frac {2}{3}.$$
Now we construct a function $g_{1}$ which is equal to $\frac{3g} {2}$ over $E_{0}$ and some other nonnegative constant for the complement. By the above estimate, it is easy to see that one can choose a proper constant (in [0,1]) such that $g_{1} $ is still non-negative with $L^{p}$-norm bounded by $\frac{3A}{2} $, and more importantly it has the proper total integral over $X$.
So we can find a continuous solution $\rho\in PSH_{\omega}(X)$ as before by the approximation method such that $${\omega_{\rho}}^{n}=g_{1}{\omega}^{n}, ~~~max_{X}\rho=0$$ with lower bound of $\rho$ only dependent on $A$. \footnote{Notice we've used the existence of continuous solution at this point for the solution $\rho$.} By enlarging "$a$'' if necessary which clearly won't affect the set $E_{0}$, we can assume the lower bound of $\rho$ is $-a$. Now we can finally fix our constant "$a$'', and it clearly depends only on $A$ in an explicit way.
By noticing that $-2at\leqslant -t\phi+t\rho\leqslant 0$, it is easy to see $$E_{2}\subset E:=\{\psi<(1-t)\phi+t\rho\}\subset E_{0}.$$
Let's denote the set $\{f<(1-t^{2})g\}$ by $G$. Then over $E_{0}\setminus G$, we have: $$\bigl((1-t^{2})^{-\frac{1}{n}}\omega_{\phi}\bigr)^{n}\geqslant g{\omega}^{n}, ~~~\bigl((\frac{3}{2})^{-\frac{1}{n}}\omega_{\rho} \bigr)^{n}=g{\omega}^{n}.$$
Hence we can conclude, using an inequality for mixed Monge-Amp\`ere measures from \cite{koj}, that over $E_0\setminus G$, $$(\frac{3}{2})^{-\frac{n-k}{n}}(1-t^{2})^{-\frac{k}{n}}{\omega_ {\phi}}^{k}\wedge{\omega_{\rho}}^{n-k}\geqslant g{\omega}^{n}.$$
\begin{rema}
This is a rather trivial result in smooth case which is just a direct application of arithmetic-geometric mean value inequality. Then by approximation argument, it should also hold in our case here. For the conclusion above, there is no need to restrict ourselves to the set $E_{0}\setminus G$. We can work globally on $X$ and use $g\chi_{_{E_{0}\setminus G}}{\omega}^n$ for the right hand side.
Actually the rigorous approximation argument is local and uses nontrivial results about Dirichlet problem for Monge-Amp\`ere equation. The continuity of the functions is very involved in the proof which seems to be the main obstacle to carry over the whole argument in this part for merely bounded solutions.
This is the point where the recent result in \cite{dinew} is applied.
\end{rema}
Let's set $q=(\frac{3}{2})^{\frac{1}{n}}>1$, and rewrite the above inequality as: $${\omega_{\phi}^{k}\wedge{\omega_{\rho}}^{n-k}}\geqslant q^{n-k} (1-t^{2})^{\frac{k}{n}} g{\omega}^{n}$$ over $E_{0}\setminus G$. Now the following computation is quite obvious: \footnote{$t$ below can be taken to be sufficiently small, say $t<\frac{q-1}2$.} \begin{equation} \begin{split} {\omega_{t\rho+(1-t)\phi}}^{n} &\geqslant \bigl((1-t)(1-t^{2})^{\frac{1}{n}}+q t\bigr)^{n}g{\omega} ^{n}\\ &\geqslant \bigl((1-t)(1-t^{2})+qt\bigr)^{n}g{\omega}^{n}\\ &\geqslant \bigl(1+t(q-1)-t^{2}\bigr)g{\omega}^{n}\\ &\geqslant \bigl(1+\frac{t}{2}(q-1)\bigr)g{\omega}^{n}. \end{split} \end{equation}
From the definition of $G$ and assumption of the theorem, we also have: $$t^{2}\int_{G}g{\omega}^{n}\leqslant\int_{G}(g-f){\omega}^{n}\leqslant \gamma (t)t^{n+3}$$ which is just: \begin{equation} \int_{G}g{\omega}^{n}\leqslant\gamma (t)t^{n+1}. \end{equation}
Hence we can have the following inequalities: \begin{equation} \begin{split} \bigl(1+\frac{t}{2}(q-1)\bigr)\int_{E\setminus G}g{\omega}^{n} &\leqslant \int_{E}{\omega_{t\rho+(1-t)\phi}}^{n}~~~~~~~~~~(the ~measure~inequality~(2.1))\\ &\leqslant \int_{E}{\omega_{\psi}}^{n}~~~~~~~~~~~~~~~~~~~( comparison~principle)\\ &\leqslant \int_{E\setminus G}g{\omega}^{n}+\gamma (t)t^{n+1}~~ (the~integration~inequality~(2.2)).\nonumber \end{split} \end{equation} and arrive at: $$\frac{q-1}{2}\int_{E\setminus G}g{\omega}^{n}\leqslant\gamma (t)t^{n}.$$
Therefore by noticing $E_{2}\subset E$, we get: $$\frac{q-1}{2}(\int_{E_{2}}g{\omega}^{n}-\gamma (t)t^{n+1}) \leqslant\frac{q-1}{2}(\int_{E_{2}}g{\omega}^{n}-\int_{G}g{ \omega}^{n})\leqslant\frac{q-1}{2}\int_{E\setminus G}g{\omega }^{n}\leqslant\gamma(t)t^{n},$$ and so we have $$\int_{E_{2}}g{\omega}^{n}\leqslant (t+\frac{2}{q-1})\gamma (t)t^{n}\leqslant\frac{3}{q-1}\gamma (t)t^{n}$$ for $t$ small enough.
The claim proved before tells us: $$Cap_{\omega}(E_{4})\leqslant (\frac{a+1}{2at})^{n}\int_{E_{2}} g{\omega}^{n}.$$
Combining this with the previous inequality, we have: $$Cap_{\omega}(E_{4})\leqslant (\frac{a+1}{2a})^{n}\frac{3}{q-1} \gamma (t).$$
Thus if $E':=\{\psi<\phi-(4a+2)t\}$ is nonempty, by the argument for boundedness result before, we should have: $$2t\leqslant\kappa (Cap_{\omega}(E_{4}))\leqslant\kappa((\frac {a+1}{2a})^{n}\frac{3}{q-1}\gamma (t))=t.$$ Clearly this is a contradiction for $t>0$.
Anyway, we have from above that $\psi\geqslant\phi-(4a+2)t$.
Hence $max_{X}(\psi-\phi)=max_{X}(\phi-\psi)\leqslant (4a+2)t$, which will give the desired conclusion.
\end{proof}
Now from this stability result, it is easy to get uniqueness result for continuous plurisubharmonic solutions after normalization.
One can easily see the proof can be simplified a little if we only care about the uniqueness result. But this result above actually gives much better description of the variation of the solution under the perturbation of the right hand side of the equation (i.e, the measure).
Now in the same vein as in \cite{koj} one gets the following corollary:
\begin{coro}\label{exp} For any $\epsilon>0$, there exists $c=c(\epsilon,p,c_0)$,\ ($c_0$\ is an upper bound for $L^p$\ norms of $f$\ and $g$) such that
$$||\phi-\psi||_{\infty}\leq c||f-g||_1^{\frac{1}{n+3+\epsilon}}$$ provided $\phi$\ and $\psi$\ are normalised as before. \end{coro}
Before we proceed further we make a small improvement of the stability exponent in the last corollary.
Note that in the definition of the set $G=\{f<(1-t^{2})g\}$\ one can exchange $t^2$\ with $\frac{t}{b}$\ for a sufficiently big independent constant $b$, and the same argument still goes through, so $||f-g||_1\leq \gamma(t)t^{n+2}$\ implies $||\phi-\psi||_{\infty}\leq Ct.$ In particular the result in Corollary \ref{exp} holds with exponent $\frac1{n+2+\epsilon}$.
\section{Adjustment to Our Degenerate Case}
Now we begin to adjust Ko\l odziej's argument for the situation in our main theorem. All the places which need to be considered have been pointed out at the spot. Let us now treat them one by one.
\subsection{Comparison Principle}
In \cite{blo-koj}, the authors constructed a decreasing smooth approximation for bounded functions plurisubharmonic with respect to a K\"ahler metric. Using this, they obtained the following version of the Comparison Principle:
\begin{theo}
For $\phi, \psi\in PSH_{\omega }(X)\cap L^\infty (X)$, where $(X, \omega)$ is a closed K\"ahler manifold, one has $$\int_{\{\phi<\psi\} }(\omega+\sqrt{-1}\partial\bar{\partial }\psi)^n\leqslant\int_ {\{\phi<\psi\} }(\omega+\sqrt{-1}\partial\bar{\partial }\phi)^n.$$
\end{theo}
Though the result we want would be for some background form $\omega\geqslant 0$, it would follow from the version above as we can perturb it by $\epsilon\omega_0$ with $ \omega_0>0$ and the constant $\epsilon>0$, since $X$ is K\"ahler. Those functions plurisubharmonic with respect to $\omega$ would still be plurisubharmonic with respect to $\omega+\epsilon\omega_0$. Using the comparison principle above and letting $\epsilon\to 0$, we get the following version:
\begin{theo}
For $\phi, \psi\in PSH_{\omega }(X)\cap L^\infty (X)$, where $X$ is a closed K\"ahler manifold and $\omega\geqslant 0$ is a real smooth $(1,1) $-form over $X$, one has $$\int_{\{\phi<\psi\} }(\omega+\sqrt{-1}\partial\bar{\partial }\psi)^n \leqslant\int_{\{\phi<\psi\} }(\omega+\sqrt{-1}\partial\bar{\partial }\phi )^n.$$
\end{theo}
This is Comparison Principle for the adjusted argument for stability.
\subsection{Inequalities for Mixed Measures} Our first observation is that although we considered our equations of the form
$$\omega_{\psi}^n= f\omega^n,\ \ \omega_{\phi}^n= g\omega^n,$$ the volume form $\omega^n$ played no significant role in the proof. The only delicate point is the following inequality:
Suppose $\phi$\ and $\psi$ are continuous $\omega$-$PSH$ functions, and $f,\ g$\ are integrable functions on $X$. Suppose we have (locally or globally) the inequalities $$ \omega_{\psi}^n\geq f\omega^n,\ \ \omega_{\phi}^n\geq g\omega^n,$$ then (locally where we have those inequalities or globally) $$\forall_{ k\in\lbrace0, 1,\cdots, n\rbrace}\ \ \omega_{\psi}^k\wedge \omega_{\phi}^{n-k}\geq f^{\frac {k}{n}}g^{\frac{n-k}{n}}\omega^n.$$ In other words we want to generalize the above inequality, for more general measures and moreover for {\it bounded} (i.e. not necessarily continuous) functions $\phi$\ and $\psi$. The following theorem is essentially taken from \cite{dinew}: \begin{theo} Suppose the nonnegative Borel measure $\Omega$\ is well dominated by capacity, and let $\phi$\ and $\psi$\ be two bounded $\omega$-psh functions on a K\"ahler manifold. Suppose the following inequalities hold $$ \omega_{\psi}^n\geq f\Omega,\ \ \omega_{\phi}^n\geq g\Omega,$$ for some $f,\ g\in L^p(\Omega),\ p>1$. Then $$\forall_{ k\in\lbrace0, 1,\cdots, n\rbrace}\ \ \omega_{\psi}^k\wedge \omega_{\phi}^{n-k}\geq f^{\frac {k}{n}}g^{\frac{n-k}{n}}\Omega.$$ \end{theo}
In \cite{koj} (Lemma 1.2) this inequality was proved under the assumption that both $\phi$\ and $\psi$ are continuous and $\Omega=\omega^n$. The proof is local; it can be rephrased in a setting in a ball in ${\mathbb C}^n$. Then the argument goes via approximation for which a solution for the Dirichlet problem with boundary data is used. Since we deal with merely bounded functions (upper semicontinuous by the plurisubharmonicity assumption), one cannot expect continuity on the boundary of the ball in general. But as observed in \cite{dinew} we can line-by-line follow the approximation arguments from \cite{koj} whenever the measure on the right hand side is the Lebesgue measure. Indeed, approximants at the boundary will not converge uniformly towards discontinuous boundary data, but the sequence of approximate solutions is again decreasing. This implies convergence in capacity by \cite{bed-tay}, which is enough for the argument to go through. In the case when $\omega^n$\ is exchanged with a general measure well dominated in capacity one cannot rely only on the argument from \cite{koj}. But domination by capacity forces the measure $\Omega$\ to vanish on pluripolar sets, hence one can use the result from \cite{dinew} to conclude. We refer to \cite{dinew} for the details.
\section{Improvement on the Stability Exponent}
The exponent from Corollary \ref{exp} is quite important. In particular, since this inequality can be used to prove H\"older continuity for solutions of Monge-Amp\`ere equations with right hand side in $L^p$\ (see \cite{koj06}), the bigger the exponent in the inequality, the better H\"older exponent one can get.
Trying to improve the exponent, one has to follow the main steps of the original proof and improve points where there is an exponent loss. Our strategy will be to iterate the original argument, defining at each step a new function $\rho$\ and using the previous step to get estimates for $||\rho-\psi||_{\infty}$, which in turn will be used to choose the new set $E$\ in a ``better'' way.\\
The argument is divided into the following three parts.
The first part is the original argument quoted before with the improvement mentioned after Corollary \ref{exp}, which is the starting point for us. In the sequel the original argument will be often denoted as Step 1.
The second part, (i.e. Step 2), is the description of the iteration procedure. Since Step 1 differs slightly from all the others, we outline Step 2 below and sketch how to proceed throughout the next iterations.
The mechanism is based on the fact that
$||f-g||_1\leq\gamma(t)t^{\beta}$\ (in the improved original proof $\beta=n+2$) yields $\int_{\lbrace\psi+kt<\phi\rbrace}(\omega+\sqrt{-1}\partial{\bar\partial}\psi)^n\leq c_0t^n$ for some constant $k$\ and $c_0$\ (in what follows $c_i$\ denote constants independent of the relevant quantities). So we try to find $\beta$\ as small as possible for which this implication holds true with uniform control on $c_0$ and enlarging $k$\ if needed. Note that from now on instead of $\omega^n$\ we use the measure $\Omega$. It follows from the discussion above that Step $1$\ is not affected by that.
So assume $||f-g||_1\leq\gamma(t)t^{\beta},\ t<1$. Then if $l:=t^{\frac{\beta}{n+2}},\ \beta<n+2$, we obtain $||f-g||_1\leq\gamma(t)l^{n+2}$, so from Step $1$ we know that \begin{equation}\label{42} \int_{E_2}g\Omega\leq\gamma(t)l^n, \end{equation} where, as before, $E_k:=\{\psi<\phi-kat\}$. (Indeed, in Step $1$\ we have $t=l$, but one can check that the proof can be repeated in this situation). Hence \begin{equation}\label{43} \int_{E_2}g\Omega\leq c_1t^{\frac{\beta n}{n+2}},\ t\leq t_0 \end{equation} (recall $\gamma(t)$\ decreases to $0$, as $t\searrow 0$).
Now fix a small positive constant $\delta$\ to be chosen later on.
Consider the ``new'' function $$g_1(z)=\begin{cases} (1+\frac{t^{\delta}}{2})g(z),\ &z\in E_2\\c_2g(z),\ &z\in X\setminus E_2,\end{cases}$$ where $0\leq c_2\leq 1$\ is chosen such that $\int_Xg_1\Omega=1$. (The constant $\frac 12$\ is taken to assure that the integral over $E_2$\ is less than $1$. Note that despite the fact that the case $t$\ being small is of main interest, when $\delta$\ is also small the quantity $t^{\delta}$\ cannot be controlled by a constant smaller than $1$). As in Step $1$ we find a solution $\rho$ to the problem $(\omega_{\rho})^n=g_1\omega^n,\ max_X\rho=0.$ Again $\rho\geq-a$\ and we renormalize $\rho$\ by adding a constant so that $max_X(\psi-\rho)=max_X(\rho-\psi)$\ (this can be done in a uniform way).
Now by Step 1 \begin{align*}
&||\rho-\psi||_{\infty}\leq c_3||g-g_1||_1^{\frac 1{n+2+\epsilon}}=c_3([\int_{E_2}+\int_{X\setminus E_2}]|g-g_1|\Omega)^{\frac 1{n+2+\epsilon}}=\\ =&c_3(2t^{\delta}\int_{E_2}g\Omega)^{\frac 1{n+2+\epsilon}}\leq c_4t^{\frac{\delta+\frac{\beta n}{n+2}}{n+2+\epsilon}}. \end{align*} If $\delta$\ is sufficiently small the last exponent is less than $1$\ and we define $\alpha:=1-\frac{\delta+\frac{\beta n}{n+2}}{n+2+\epsilon}$. Then by the above estimate \begin{equation} E_s=\lbrace\psi+sat<\phi\rbrace=\lbrace(1-t^{\alpha})(\psi+sat)<(1-t^{\alpha})\phi \rbrace\subset \end{equation} \begin{align*} &\subset\lbrace\psi <(1-t^{\alpha})\phi+t^{\alpha}\rho+c_4t-sat(1-t^{\alpha})\rbrace=E\subset\\ &\subset\lbrace\psi <(1-t^{\alpha})\phi+t^{\alpha}\psi+2c_4t-sat(1-t^{\alpha})\rbrace=\\ &=\lbrace\psi+(sa-\frac{2c_4}{1-t^{\alpha}})t<\phi\rbrace\subset E_k, \end{align*} provided $s\geq 4c_4+k$, (we take $t<\frac 12$).
Consider the ``new'' set $$G_1:=\lbrace f<(1-\frac{t^{\alpha+3\delta}}{8n2^{\frac{n-1}{n}}})g\rbrace.$$
Using that $h(t)=(1+\frac{t^{\delta}}2)^{\frac 1n}-1-\frac1{4n2^{\frac{n-1}{n}}}t^{2\delta}$\ is increasing in $[0,1]$\ and hence nonnegative there, we conclude as in Step 1 that on $E_k\setminus G$ \begin{equation}\label{44} (\omega_{t^{\alpha}\rho+(1-t^{\alpha})\phi})^n\geq((1-t^{\alpha}) (1-\frac{t^{\alpha+3\delta}}{8n2^{\frac{n-1}{n}}})^{\frac 1n}+(1+\frac{t^{\delta}}{2})^{\frac 1 n}t^{\alpha})^ng\Omega\geq \end{equation} \begin{align*} &\geq ((1-t^{\alpha}) (1-\frac{t^{\alpha+3\delta}}{8n2^{\frac{n-1}{n}}}) +(1+\frac1{4n2^{\frac{n-1}{n}}}t^{2\delta})t^{\alpha})^ng\Omega\geq (1+\frac{t^{\alpha+2\delta}}{8n2^{\frac{n-1}{n}}})g\Omega. \end{align*}
As in Step 1 on $G$\ we have \begin{equation}\label{setG} \frac{t^{\alpha+3\delta}}{8n2^{\frac{n-1}{n}}}\int_Gg\Omega\leq\int_G(g-f) \Omega\leq\gamma(t)t^{\beta}, \end{equation} so, using (\ref{44}), (\ref{setG}) and the comparison principle we obtain \begin{equation} (1+\frac{t^{\alpha+2\delta}}{8n2^{\frac{n-1}{n}}})\int_{E_k\setminus G}g\Omega\leq\int_E(\omega_{(1-t^{\alpha})\phi+t^{\alpha}\rho})^n\leq \end{equation} \begin{equation*} \leq\int_{E_k}g\Omega\leq\int_{E_k\setminus G}g\Omega+c_5\gamma(t)t^{\beta-\alpha-3\delta}. \end{equation*}
Finally, as in Step 1, we obtain $$\int_{E_k\setminus G}g\Omega\leq c_6\gamma(t)t^{\beta-2\alpha-5\delta}$$ and $$\int_{E_s}g\Omega\leq c_7\gamma(t)t^{\beta-2\alpha-5\delta}.$$
If $\beta-2\alpha-5\delta=n$, we can proceed as in Step $1$\ to get $\max(\phi-\psi)=\max(\psi-\phi)\leq(2s+2)t$, and $||\phi-\psi||_{\infty}\leq C(\epsilon)||f-g||_1^{\frac{1}{\beta+\epsilon}},\ \forall\epsilon>0.$ Now $\beta-2\alpha-5\delta=n$\ yields $$\beta(1+\frac{\frac{2n}{n+2}}{n+2+\epsilon})=n+2+5 \delta-\frac{2\delta}{n+2+\epsilon}.$$ It is clear that if $\delta$\ is sufficiently small $\beta$\ is smaller than $n+2$, hence we get an improvement.
Now in the last part we iterate the argument.
Consider $||f-g||_1\leq\gamma(t)t^{\beta_{k+1}}$, then as before $l=t^{\frac{\beta_{k+1}}{\beta_k}}$, $\int_{E_r}g\Omega\leq Ct^{\frac{n\beta_{k+1}}{\beta_k}}$, (compare with (\ref{42}), $r$\ is now chosen so that we can use the estimate on appropriate sublevel set from the previous step).
Choosing $\delta_{k+1}$\ small enough and proceeding in the same way as in the previous step one gets $$\beta_{k+1}=n+2\alpha_{k+1}+5\delta_{k+1}.$$ ($\alpha_{k+1}=1-\frac{\delta_{k+1}+\frac{\beta_{k+1} n}{n+2}}{n+2+\epsilon}$). This yields \begin{equation}\label{requr} \beta_{k+1}(1+\frac{2n}{\beta_{k}(\beta_{k}+\epsilon)})= n+2+5\delta_{k+1}-2\frac{\delta_{k+1}}{\beta_{k}+\epsilon} \end{equation} If we choose $\lbrace\delta_{k}\rbrace$\ to be a sequence of sufficiently small numbers decreasing to $0$, one can obtain that $\lbrace\beta_{k}\rbrace$\ is decreasing (recall $n\geq 2$). If $A$\ is the limit of the sequence $\lbrace\beta_{k}\rbrace$\ one gets $$A(1+\frac{2n}{A(A+\epsilon)})=n+2\Rightarrow A=\frac{n+2-\epsilon+\sqrt{(n-2-\epsilon)^2+8\epsilon}}{2}$$ Now $\epsilon\rightarrow0^{+}\Rightarrow A\rightarrow n$, so $\beta_k$'s can be arbitrarily close to n for $k$\ big enough if we take small enough $\epsilon$.
Thus this argument yields in particular Corollary \ref{exp2}. \begin{rema}\label{rem4.1} In the case when the measure $\Omega$\ is well dominated by capacity for $L^p$\ functions but the constant $\chi$\ is fixed one can construct $Q(t)$\ and afterwards $\kappa(t),\ \gamma(t)$\ in such a way that $\gamma(t)\approx t^{\frac n{\chi}}$. Then one can use the same iteration technique as above with the exception that inequality (\ref{43}) should be improved to $$\int_{E_2}g\Omega\leq Ct^{\frac n{\chi}+\frac{\beta n}{n+2}}$$ (the factor $t^{\frac n{\chi}}$\ comes from the estimate of $\gamma$). The recurrence (\ref{requr}) now reads \begin{equation} \beta_{k+1}(1+\frac{2n}{\beta_{k}(\beta_{k}+\frac n{\chi})})= n+2-\frac{\frac{n}{\chi}}{n+\frac{n}{\chi}}+5\delta_{k+1}-2\frac{\delta_{k+1}}{\beta_{k}+\frac n{\chi}} \end{equation} Again this is a convergent sequence and it can be computed that $$\lim_{k\rightarrow\infty}\beta_k=n.$$ Hence the stability estimate in this case reads \begin{equation}
||\phi-\psi||_{\infty}\leq c(\epsilon,c_0,X,\mu)||f-g||_{L^1(d\mu)}^{\frac 1{n+\frac n{\chi}+\epsilon}} \end{equation}
\end{rema} The following example shows that the exponent we obtained is sharp: \begin{exam}\label{exa} Fix appropriate positive constants $B,\ D$\ such that $D<B$\ and $B2^{2\alpha}<\log2+ D$, for some fixed $\alpha\in(0,1)$\ (such constants clearly exist). Then the function
$$\widehat{\rho}(z):=\begin{cases} B||z||^{2\alpha},\ &||z||\leq 1\\
\max\{ B||z||^{2\alpha},\log(||z||)+D\},\ &1\leq||z||\leq 2\\
\log(||z||)+D,\ &||z||\geq2\end{cases} $$
is well defined, plurisubharmonic in ${\mathbb C}^n$\ and of logarithmic growth. One can smooth out $\widehat{\rho}$, so that the new function $\rho$\ is again of logarithmic growth, radial, smooth away from the origin and $\rho(z)=B||z||^{2\alpha}$\ for $||z||\leq\frac 34$.
Via the standard inclusion
$${\mathbb C}^n\ni z\longrightarrow[1:z]\in\mathbb P^n $$ one identifies $\rho(z)$\ with $$\overline{\rho}([z_0:z_1:,\cdots,:z_n]): =\rho(\frac{z_1}{z_0},\cdots,\frac{z_n}{z_0})-\frac12
\log(1+\frac{||z||^2}{|z_0|^2})\in PSH(\mathbb P^n,\omega_{FS})$$ (here $\omega_{FS}$\ is the Fubini-Study metric on $\mathbb P^n$, and the values of $\overline{\rho}$\ on the hypersurface $\{z_0=0\}$\ are understood as limits of values of $\overline{\rho}$\ when $z_0$\ approaches $0$.) It is clear that $\omega_{\overline{\rho}}^n=(dd^c\rho)^n$\ in the chart $z_0\neq 0$\ and in fact one can neglect what happens on the hypersurface at infinity.
Now for a vector $h\in{\mathbb C}^n$\ one can define $\rho_h(z):=\rho(z+h)$\ and analogously the corresponding $\overline{\rho}_h$. Note that when $||h||\rightarrow0,\ \overline{\rho}_h\rightrightarrows\overline{\rho}$.
One sees that \begin{equation}
\label{al} B||h||^{2\alpha}\leq||\overline{\rho}_h-\overline{\rho}||_{\infty} \end{equation}
The Monge-Amp\`ere measures of $\overline{\rho}$\ and $\overline{\rho}_h$\ are smooth functions except at the origin, and belong to $L^p(\omega_{FS}^n)$, for some $p>1$\ dependent on $\alpha$.
Now $\int_{\mathbb P^n}|\omega_{\overline{\rho}}^n-\omega_{\overline{\rho}_h}^n|
=\int_{{\mathbb C}^n}|(dd^c\rho)^n-(dd^c\rho_h)^n|$
To estimate the last term we divide ${\mathbb C}^n$\ into three pieces (we suppose $||h||$\ is small):
$$\int_{{\mathbb C}^n}|(dd^c\rho)^n-(dd^c\rho_h)^n|=
\int_{\{||z||\leq2||h||\}}+\int_{\{2||h||<||z||\leq \frac12\}}+\int_{\{||z||>\frac12\}}$$
Using the fact that $\rho$\ and $\rho_h$\ are smooth functions in a neighbourhood of $\{||z||>\frac12\}$\ one can easily estimate the last term by $||h||C_0$\ for some constant independent of $h$. For the first two terms we observe that $(dd^c\rho)^n=B^n||z||^{2n(\alpha-1)},\ (dd^c\rho_h)^n=B^n||z+h||^{2n(\alpha-1)}$.
Now we use a computation trick we found in \cite{KW}. \begin{align*}
&\int_{\{||z||\leq2||h||\}}|(dd^c\rho)^n-(dd^c\rho_h)^n| =\\
&=B^n\int_{\{||z||\leq2||h||\}}|||z||^{2n(\alpha-1)}-
||z+h||^{2n(\alpha-1)}|\leq\\
&\leq 2B^n\int_{\{||z||\leq3||h||\}}||z||^{2n(\alpha-1)}=C_1||h||^{2n\alpha} \end{align*} For the second term \begin{align*}
&\int_{\{2||h||\leq||z||\leq\frac12\}}|(dd^c\rho)^n-(dd^c\rho_h)^n|=\\
&=B^n\int_{\{2||h||\leq||z||\leq\frac12\}}|||z||^{2n(\alpha-1)}-
||z+h||^{2n(\alpha-1)}|\leq \\
&\leq B^n\int_{2||h||<||z||}|\int_0^1<\nabla||z+th||^{2n(\alpha-1)},h>dt|\leq\\
&\leq C_2||h||\int_{||h||<||z||}||z||^{2n(\alpha-1)-1}\leq C_3||h||^{2n\alpha} \end{align*}
provided $\alpha<\frac 1{2n}$, so that the integral is finite. Finally we obtain for small $||h||$ \begin{equation}\label{cont}
\int_{\mathbb P^n}|\omega_{\overline{\rho}}^n-\omega_{\overline{\rho}_h}^n|\leq C_1||h||^{2n\alpha}+C_3||h||^{2n\alpha}+C_0||h||\leq C_4||h||^{2n\alpha} \end{equation}
Suppose finally that we have a stability estimate $||\phi-\psi||_{\infty}\leq C_5||f-g||_1^{\frac 1 m}$. Then, combining \eqref{al} and \eqref{cont}, one gets
$$||h||^{2\alpha}\leq C_6(||h||^{2n\alpha})^{\frac 1 m},\ \alpha\in(0,\frac 1{2n})$$
If we let $||h||\rightarrow0$, this can hold only if $m\geq n$. \end{exam} \begin{rema} In \cite{EGZ} the authors show a stability estimate of another type: in the setting as above ($\Omega$\ is now equal to $\omega^n$) \begin{equation}
||\phi-\psi||_{\infty}\leq c(\epsilon,c_0,\omega)
||\phi-\psi||_{L^2(\omega^n)}^{\frac2{nq+2+\epsilon}} \end{equation} ($c_0$\ is a constant that controls $L^p$\ norms of Monge-Amp\`ere measures of $\phi$\ and $\psi$). Using the same reasoning as in \cite{EGZ} one can show more generally that \begin{equation}
||\phi-\psi||_{\infty}\leq c(\epsilon,c_0,\omega)
||\phi-\psi||_{L^s(\omega^n)}^{\frac s{nq+s+\epsilon}}, \forall s>0. \end{equation} Using the same example and similar estimates one can show that this exponent is also sharp, provided that $p<2$\ and $s>\frac{2np}{2-p}$\ (the reason for these obstructions is that the second integral we estimate as in the example would be divergent otherwise). It is, however, very likely that these exponents are sharp in general. \end{rema}
\section{Continuity of Solutions in the Case of a Pullback Form via a Locally Birational Map} \label{continuity}
We give below a more detailed proof of the continuity statement in Theorem \ref{1.1}. The arguments heavily rely on \cite{koj98} and in some places we just follow it line by line. This section is unrelated to the other ones in the note. Recall once again that this result is already known.
First of all we recall the geometrical background. Let $X$\ be the base closed K\"ahler manifold we work on, and $F:X\rightarrow \mathbb{CP}^N$\ a map with the property that the image $F(X)$\ has the same dimension and $F$\ is itself locally birational i.e.\ for every small enough neighbourhood $U$\ of any point on $F(X)$, each component of $F^{-1}(U)$\ is birational to $U$. A typical global example of this situation is obtained as follows: if $X$\ carries a big line bundle $L$, the linear series corresponding to $L^n$\ generate (for sufficiently big $n\in\mathbb N$) a birational morphism into $\mathbb{CP}^N$\ with the claimed properties. Note however that local and global birationality are different notions (see the example below) and if one has to deal with the global birationality one has to impose some additional assumptions for the argument to go through.
Consider now $Y:=F(X)$. By the Proper Mapping Theorem $Y$\ is a (singular in general) subvariety in $\mathbb{CP}^N$. It is also clear that $Y$\ is irreducible and locally irreducible variety (the latter follows from the local birationality). Recall that an upper semicontinuous function $u$\ on a singular variety $W$\ is called weakly plurisubharmonic if for every holomorphic disc $f:\Delta\rightarrow W$\ the function $u\circ f$ is a subharmonic function (see \cite{for-nar}). In that paper it is proved (in fact in a much more general situation of Stein spaces) that any such function $u$\ can be extended locally to the ambient space to a classical plurisubharmonic function i.e. for every $x\in Y$ there exists a small Euclidean ball $B$ in $\mathbb{CP}^N$, centered at $x$\ and a function $v\in PSH(B)$, such that $v|_{B\cap Y}=u$.
Now suppose $\phi$\ is a positive discontinuous solution of the Monge-Amp\`ere equation in question and let $d:=\sup(\phi-\phi_{*})>0$, where $\phi_{*}$\ denotes the lower semicontinuous regularization of $\phi$. Note that the supremum is attained, and if $E$\ is the closed set $\lbrace\phi-\phi_{*}=d\rbrace$, there exists a point $x_0$\ such that $\phi(x_0)=\min_E\phi$. Positivity is a technical assumption that can always be achieved by adding an appropriate constant since we already know that $\phi$\ is bounded.
By assumption there exist analytic sets $Z\subset X$\ and $W\subset Y=F(X)$\ such that $F|_{X\setminus Z}\rightarrow Y\setminus W$\ is a biholomorphism and moreover $S:=\lbrace \omega^n=0\rbrace\subset Z$. Note that in the general case of a big form $S$\ need not be contained in an analytic set- it may well happen that $S$\ is open in $X$.
Two possibilities might take place \begin{enumerate} \item $x_0\in X\setminus S$. In this case $\omega$\ is strictly positive in a small ball centered at $x_0$\ and repeating the argument from Section 2.4 in \cite{koj98} we obtain a contradiction. \item $x_0\in S$. Then we shall produce a domain $V$\ (not contained in a chart in general) and a potential $\theta$ of $\omega$\ in $V$\ with the property that $\inf_{\partial V}\theta>\theta(x_0)+b$, where $b$\ is a positive constant. \end{enumerate}
Consider $F(x_0)=z$\ and a neighbourhood $U$\ of $z$\ in $Y$, such that its preimages are birational to it. Choose the one $x_0$\ sits in. For the rest of the argument we restrict ourselves to $F|_{F^{-1}(U)\ni x_0}\rightarrow U$. Consider the pushforward function $$F_{*}\phi(z):=\begin{cases}\phi(w), & \text{if } z\in Y\setminus W,\ w\in X\setminus Z,\ F(w)=z,\\ \limsup_{X\setminus Z\ni \zeta\rightarrow z}F_{*}\phi(\zeta), & \text{otherwise,} \end{cases}$$ and a local potential $\eta$\ for the K\"ahler form on $U\cap\mathbb {CP}^N$.
Claim: $\eta+F_{*}\phi$\ is weakly subharmonic on $Y$.
Proof: Weak subharmonicity is a local property, hence it is enough to check it in a neighbourhood of any point on $Y$. For regular points of $Y$\ this is evident. However at singularities of $Y$\ one might a priori run into trouble as the example of a double point shows. Indeed, consider the following (classical) local example:
Let $$F:\mathbb C\ni t\rightarrow (t^2-1,t(t^2-1))\in\mathbb C^2$$
The image $F(\mathbb C)$\ sits in the variety $\lbrace(z_1,z_2)\in\mathbb C^2|z_1^2+z_1^3=z_2^3\rbrace$. Observe that $F$\ is a bijection onto its image, except for the points $1$\ and $-1$\ being mapped to $(0,0)$. But then it is clear that the pushforward of a subharmonic function $w$\ on $\mathbb C$\ cannot be weakly subharmonic on the image if $w(1)\neq w(-1)$. Note that $F$\ is not locally birational though.
Observe that local birationality forces the analytic set $Y$\ to be locally irreducible. Then there is a classical theorem (see \cite{De}, Theorem 1.7) stating that on a locally irreducible variety $Y$, for a locally bounded plurisubharmonic function $w$\ defined on $\Reg Y$ (the regular part of $Y$), the extension via the limsup technique $w(z):=\limsup_{\zeta\rightarrow z,\ \zeta\in \Reg Y}w(\zeta)$\ is a weak plurisubharmonic function. Moreover, it follows from the proof that for any $s\in Y$\ and any birational modification $G:Y^{'}\rightarrow Y$\ of $Y$\ the pulled-back function $G^{*}w$\ is constant on the fiber $G^{-1}(s)$.
Now if $\omega_M$\ is the K\"ahler metric which defines $\omega$\ (i.e. $\omega=F^{*} \omega_M$), fix $\rho$- a local potential of $\omega_M$\ near $z$\ (in $\mathbb{CP}^n$). First we shall modify $\rho$\ exactly as in \cite{koj98}:
In local coordinates in a ball $B^{''}$\ centered at $z$\ $\rho$\ is strictly plurisubharmonic smooth function and is expanded as \begin{equation} \begin{split} \rho(z+h) &= \rho(z)+2\Re (\sum_{j=1}^na_jh_j+\sum_{j,k=1}^nb_{jk}h_jh_k)+\sum_{j,k=1
}^nc_{j\bar{k}}h_j\bar{h}_k+o(|h|^2) \\
&= \Re P(h)+H(h)+o(|h|^2), \nonumber \end{split} \end{equation} where $P$ is a complex polynomial in $h$\ and $H$\ is the complex Hessian at $z$.
Proceeding exactly as in \cite{koj98} (Lemma 2.3.1) $\eta:=\rho-\Re P(\cdot-z)$\ is also a local potential for $\omega_M$, with the additional property that $\eta$\ has a strict local minimum at $z$\ (we use at this point that $H$\ is strictly positive definite). This means that for a smaller ball (which after possible shrinking we again denote by $B^{"}$) $\inf_{\partial B^{"}}\eta>\eta(z)+b^{"}$\ for some positive constant $b^{"}$. Adding a constant if necessary one can further assume that $\eta(z)>0$.
Now by the Fornaess-Narasimhan theorem we find a small Euclidean ball $B^{'}$\ in $\mathbb{CP}^n$\ centered at $z$\ and a function $\psi\in PSH(B^{'})$, such that $\psi|_{Y\cap B^{'}}=\eta+\phi$. On a neighbourhood of a slightly smaller ball $B$\ (everything is contained in $B^{'}$\ and $B^{"}$) $\psi$\ can be approximated by a sequence of smooth plurisubharmonic functions $\psi_j$\ decreasing towards it. Again (decreasing a bit $b^{"}$\ if necessary) one obtains $\inf_{\partial B}\eta>\eta(z)+b$\ for some positive constant $b$.
Now we pull back the ball and the regularizations: let $V:=F^{-1}(B^{"}\cap Y)$\ and $u_j:=\psi_j(F(w))$ ( $u_j$\ are assumed to be defined only on small neighbourhood of $V$). Of course these are continuous plurisubharmonic functions on $V$\ which decrease towards $u:=\eta\circ F+F^{*}(F_{*}\phi)=\eta\circ F+\phi$ (the equality is due to the fact that $\phi$\ has to be a constant on the fiber). Note that $V$\ need not be an Euclidean domain anymore (i.e. it need not be contained in a coordinate chart), nevertheless $\eta\circ F$\ is a global potential of $\omega$\ on this set. This is the essential difference between this special situation and the general case.
Next we state a lemma which is essentially contained in \cite{koj98} (Section 2.4). We include the proof for the sake of completeness. \begin{lemm} There exist $a_0>0,\ t>1$ such that the sets $$W(j,c):=\lbrace tu+d-a_0+c<u_j\rbrace$$ are non-empty and relatively compact in $V$\ for every constant $c$\ belonging to an interval which does not depend on $j>j_0$. \end{lemm} \begin{proof} Note that $E(0):=\lbrace u-u_{*}=d\rbrace\cap \overline{V}=E\cap \overline{V}$, since the potential is continuous. Also the sets $E(a):=\lbrace u-u_{*}\geq d-a\rbrace\cap \overline{V}$\ are closed and decrease towards $E(0)$. Hence if $c(a):=\phi(x_0)-\min_{E(a)}\phi$\ we have that $\limsup_{a\rightarrow 0^{+}}c(a)\leq 0$, for otherwise we would get a contradiction with the definition of $d$. Hence $$c(a)<\frac13 b$$ for $0<a<a_0<\min(\frac13 b, d)$. Let $A:=u(x_0)$. Note that $A>d$\ since the potential is greater than $0$\ at $x_0$, and $\phi$\ as a globally positive function has to be greater than $d$\ at $x_0$. One can choose $t>1$, such that it satisfies $$(t-1)(A-d)<a_0<(t-1)(A-d+\frac 2 3 b)$$ Now if $y\in\partial V\cap E(a_0)$\ one gets $$u_{*}(y)\geq \eta(F(x_0))+b+F^{*}F_{*}\phi(x_0)\geq A-d+\frac 23 b$$ Hence $u(y)\leq u_{*}(y)+d<tu_{*}(y)+d-a_0$. Note that this inequality extends to a neighbourhood of $\partial V\cap E(a_0)$. Taking another neighbourhood relatively compact in the first and applying a Hartogs type argument one obtains $$u_j<tu(y)+d-a_0,\ \ \forall j>j_1$$ For the remaining part of $\partial V$\ the same inequality holds if we take big enough $j_1$\ and the proof is even simpler, since $u-u_{*}$\ is less than $d-a_0$\ there. This proves the relative compactness of $W(j,c)$\ in $V$.
Note that from the left inequality defining $t$\ one gets $(t-1)u_{*}(x_0)<a_0$, hence $$tu_{*}(x_0)<u(x_0)-d-a_1+a_0<u_j(x_0)-d-a_1+a_0$$ for some constant $a_1>0$. This implies that the sets $W(j,c),\ c\in(0,a_1)$\ contain some points near $x_0$, hence they are non empty. \end{proof}
Now applying Lemma 2.3.1 from \cite{koj98} (one can verify that despite the fact that $V$\ can be a non Euclidean set the argument still goes through) one can bound the capacity $\mathrm{cap}(W(j,a_1),V)$\ from below by a uniform positive constant. On the other hand $W(j,a_1)\subset\lbrace u+(d-a_0+a_1)<u_j\rbrace$ and this contradicts the fact that the decreasing sequence $u_j$\ has to converge towards $u$ in capacity. This proves that $d=0$, hence $\phi$\ is continuous. \begin{rema} As we have seen the argument cannot be applied in the case of a (globally) birational map. Then a further assumption is needed to ensure that the pushforward is plurisubharmonic. A satisfactory additional assumption is that the fibers in the preimage have to be connected. Then the function has to be constant on any nontrivial connected fiber and this is enough to push it forward onto the image. \end{rema} \section{Remarks}
Complex Monge-Amp\`ere equations are of great interest in geometry. In \cite{thesis}, the following version of the Monge-Amp\`ere equation $$(\omega+\sqrt{-1}\partial\bar{\partial}u)^n=e^u\Omega$$ is the main focus. Of course anything new would be for a degenerate class $[\omega]$\ as in the setting of Theorem 1.1. And using the argument in \cite{koj}, we know that the main result in this work would also apply for it. To be precise, the following holds: \begin{theo} Let $\omega$\ be a big form and $u_1$,\ $u_2$\ be $\omega$-psh solutions for the following Monge-Amp\`ere equations: $$(\omega_{u_1})^n=e^{u_1}\Omega_1,\ \ (\omega_{u_2})^n=e^{u_2}\Omega_2,$$ where $\Omega_1$\ and $\Omega_2$\ are smooth volume forms such that
$$\int_X|\Omega_1-\Omega_2|\leq\gamma(t)t^{n+3}.$$
Then $$||u_1-u_2||_{\infty}\leq Ct.$$ \end{theo} \begin{proof}Since the comparison principle for big forms is available, the proof is entirely the same as in Theorem 5.2 in \cite{koj}. \end{proof} The following problems are related to the results in \cite{koj06} and \cite{EGZ}, stating that when $\omega$\ is a K\"ahler form on a compact K\"ahler manifold, the solutions of $$\omega_{\phi}^n=f\omega^n, f\in L^p(\omega^n),\ p>1$$
are H\"older continuous. In general the H\"older exponent depends on the manifold $X$, and on $n$\ and $p$ (\cite{koj06}). Under the additional assumption that X is {\it homogeneous} i.e. the automorphism group $\Aut(X)$\ acts transitively the exponent is independent of $X$ and is not less that $\frac 2{nq+2},\ q=\frac p{p-1}$\ (\cite{EGZ}). One can ask the following questions: \begin{enumerate} \item Is the solution continuous when $\omega$\ is semi-positive and big in general? \item If so, does the H\"older continuity hold in the case $\omega$\ is merely semi-positive and big? \item Does the H\"older exponent on general manifold do really depend on the manifold? In the corresponding result in the flat case (\cite{GKZ}) the H\"older exponent is uniform and independent of the domain. Moreover the proof in \cite{koj06} strongly depends on a regularization procedure for $\omega$-psh functions, which consists of patching local regularizations, and this is the point where the geometry of the manifold influences the exponent. In particular are there another regularization procedures of more global nature that are not so affected by the local geometry? \item Is the exponent for the homogeneous case sharp? Note that in the flat case in \cite{GKZ} there is also a gap between the exponent given there $\frac 2{qn+1}$\ and the exponent $\frac 2{qn}$, for which an example is shown. \item It is interesting to compare the stability resuts we have proven and the one in \cite{EGZ}. In particular, is the stability exponent in \cite{EGZ} sharp in general? \item It would be very interesting to generalize H\"older continuity to more singular measures. One possible application of such a result would be a criterion for H\"older continuity of the Siciak extremal function of certain compact sets in ${\mathbb C}^n$\ (see \cite{kojnotes} for a definition). Such a property is very important from pluripotential point of view. So one has to study the equilibrium measure of the compact. 
The problem is that such measures are singular with respect to the Lebesgue measure, while \cite{koj06}\ and \cite{EGZ} rely strongly on smoothness of $\omega^n$. However, as this note shows, some arguments can be adjusted to singular measures as well. \end{enumerate} We hope to address some of these questions in the future.
\begin{flushleft}Jagiellonian University\\ Institute of Mathematics\\ Reymonta 4, 30-059 Krak\'ow, POLAND.\\ E-mail {\tt slawomir.dinew@im.uj.edu.pl}\\ \end{flushleft}
\begin{flushleft}Michigan University\\ Department of Mathematics\\ 4835 East Hall, Ann Arbor, USA\\ E-mail {\tt zhangou@umich.edu}\\ \end{flushleft}
Key words and phrases: K\"ahler manifold, complex Monge-Amp\`ere operator.\\
2000 Mathematics Subject Classification: Primary 32U05, 53C55; secondary: 32U40.
\end{document} | arXiv | {
"id": "0711.3643.tex",
"language_detection_score": 0.7958666086196899,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Views on level $\ell$}
\begin{abstract} An analogue of the Mukai map $m_g: \mathcal P_g \to \mathcal M_g$ is studied for the moduli $\mathcal R_{g, \ell}$ of genus $g$ curves $C$ with a level $\ell$ structure. Let $\mathcal P^{\perp}_{g, \ell}$ be the moduli space of $4$-tuples $(S, \mathcal L, \mathcal E, C)$ so that $(S, \mathcal L)$ is a polarized K3 surface of genus $g$, $\mathcal E$ is orthogonal to $\mathcal L$ in $\Pic S$ and defines a standard degree $\ell$ K3 cyclic cover of $S$, $C \in \vert \mathcal L \vert$. We say that $(S, \mathcal L, \mathcal E)$ is a level $\ell$ K3 surface. These exist for $\ell \leq 8$ and their families are known. We define a level $\ell$ Mukai map $r_{g, \ell}: \mathcal P^{\perp}_{g, \ell} \to \mathcal R_{g, \ell}$, induced by the assignment of $(S, \mathcal L, \mathcal E, C)$ to $ (C, \mathcal E \otimes \mathcal O_C)$. We investigate a curious possible analogy between $m_g$ and $r_{g, \ell}$, that is, the failure of the maximal rank of $r_{g, \ell}$ for $g = g_{\ell} \pm 1$, where $g_{\ell}$ is
the value of $g$ such that $\dim \mathcal P^{\perp}_{g, \ell} = \dim \mathcal R_{g,\ell}$. This is proven here for $\ell = 3$. As a related open problem we discuss Fano threefolds whose hyperplane sections are level $\ell$ K3 surfaces and their classification.
\end{abstract}
\section{Introduction} Our aim is to convince the reader, showing a program and new results, of the interest represented by some complex projective varieties whose curvilinear sections are canonical curves $C$ of genus $g$, endowed with a distinguished nonzero $\ell$-torsion element $\eta \in \Pic C$.
Often one says that $(C, \eta)$ is a \it level $\ell$ curve of genus $g$\rm, cfr. \cite{CEFS}. Fixing $(g, \ell)$ the moduli space of these pairs is integral, quasi projective and denoted by $\mathcal R_{g, \ell}$.
\par To enter further in the matter let us mention two other names from the title: \it $K3$ surface \rm and \it Fano threefold. \rm The $K3$ surfaces $S$ we consider are very special: they admit a non split cyclic cover of degree $\ell$, still birational to a K3 surface. This is defined by a line bundle $\mathcal O_S(E):= \mathcal E$ such that $h^0(\mathcal O_S(\ell E)) = 1$ and $h^0(\mathcal O_S(mE)) = 0$ for $m < \ell$. The study of these surfaces stems from Nikulin's classification of K3 surfaces with an order $\ell$ symplectic automorphism and the classification implies $\ell \leq 8$, \cite{N1}. Since then several foundational results, in use here, did follow, cfr. \cite{ GP, GS1, G, GS2, vGS}. \par Now let $\mathcal L \in \Pic S$ be a genus $g$ polarization orthogonal to $\mathcal E$. Let $\eta := \mathcal O_C(E)$, where $C \in \vert \mathcal L \vert$ is smooth, then it turns out that $(C, \eta)$ is a level $\ell$ curve. We say that the triple $(S, \mathcal L, \mathcal E)$ is a \it level $\ell$ K3 surface of genus $g$, \rm see definition (\ref{defnat}) for some precision. Fixing $\ell$ the moduli of these triples are reducible for infinitely many values of $g$. However a distinguished irreducible component exists for every $g$, namely the moduli space of triples $(S, \mathcal L, \mathcal E)$ such that $\Pic S$ is the sum of $\mathbb Z\mathcal L$ and its orthogonal lattice. We denote it by
\begin{equation} \mathcal F^{\perp}_{g, \ell}. \end{equation}
Finally we come to the moduli space $\mathcal P^{\perp}_{g, \ell}$ of $4$-tuples $(S, \mathcal L, \mathcal E, C)$ such that $C \in \vert \mathcal L \vert$ and $(S, \mathcal L, \mathcal E)$ defines a point in ${\mathcal F}^{\perp}_{g, \ell}$. Such a space is strictly related with the first topic considered in our paper. To introduce it let us define the \it level $\ell$ Mukai map. \rm This is the rational map \begin{equation} \label{lmukai} r_{g, \ell}: \mathcal P^{\perp}_{g, \ell} \to \mathcal R_{g, \ell}, \end{equation}
assigning the moduli point of the $4$-tuple $(S, \mathcal L, \mathcal E, C)$ to the moduli point of the pair $(C, \eta)$, where $\eta$ is $\mathcal O_C(E)$. Let $\mathcal P_g$ be the moduli space of triples $(S, \mathcal L, C)$, where $(S, \mathcal L)$ is a polarized K3 surface of genus $g$ and $C \in \vert \mathcal L \vert$, then the previous name is motivated by the well known Mukai map \begin{equation} \label{mukai} m_g: \mathcal P_g \to \mathcal M_g, \end{equation}
assigning the moduli point of the triple $(S, \mathcal L, C)$ to the moduli point of the curve $C$. Some famous connections between canonical curves of genus $g$, K3 surfaces and Fano threefolds are well represented by $m_g$ and, in particular, by a curious variation of its rank. We recall that a rational map $f: X \to Y$ of integral varieties has \it maximal rank \rm if $\dim f(X) = \min \lbrace \dim X, \dim Y \rbrace$. \par Considering $m_g$ we recall that $\dim \mathcal P_g = 19 + g$ and $\dim \mathcal M_g = 3g-3$, therefore $\dim \mathcal P_g = \dim \mathcal M_g$ iff $g = 11$. Now $m_{11}$ is birational but, curiously, $m_g$ fails to be of maximal rank precisely before and after this transition value, that is, for $g = 11 \pm 1$. For the rest $m_g$ is dominant for $g \leq 9$ and generically injective for $g \geq 13$. As is well known this anomaly is due to the presence behind the scene of some Fano varieties, whose curvilinear sections are general canonical curves of genus $11 \pm 1$, cfr. \cite{CLM, Mu, Mu1, S}.\par A task of this paper is to point out the same possible anomalies for the level $\ell$ Mukai maps $r_{g, \ell}$. The case $\ell = 2$ has already been done and it is an experimental origin to this work. If $\ell = 2$ we have $\dim \mathcal P^{\perp}_{g,2} = \dim \mathcal R_{g,2}$ for $g = 7$. Then $r_{g,2}$ fails to be of maximal rank for $g = 7\pm 1$ and is birational for $g = 7$, \cite{FV, KLV1, Ve}. The ``Fano varieties behind the scene'' for $g = 8$ and $g = 6$ are addressed or revisited in section 7. \par In section 5 we summarize the question for each $\ell$. Let $g_{\ell}$ be the unique value of $g$ such that $\dim \mathcal P^{\perp}_{g, \ell} = \dim \mathcal R_{g, \ell}$, for $\ell = 2, \ 3, \ 4, \ 5, \ 6, \ 7, \ 8$ we respectively have: \begin{equation} g_{\ell} = 7 \ , \ 5 \ , \ 4 \ , \ 3 \ , \ 2 \ , \ 2 \ , \ 2. \end{equation} In this paper we present the following theorem, solving the question for $\ell = 3$. 
\begin{theorem} Let $r_{g, 3}: \mathcal P^{\perp}_{g, 3} \to \mathcal R_{g,3}$ be the level $3$ Mukai map. Then: \begin{enumerate} \item $r_{4,3}$ does not have maximal rank, \item $r_{5,3}$ is birational, \item $r_{6,3}$ does not have maximal rank. \end{enumerate} \end{theorem}
The image of $r_{4,3}$ is contained in a divisor of $\mathcal R_{4,3}$, parametrizing pairs $(C, \eta)$ such that the multiplication map $\mu: H^0(\omega_C \otimes \eta) \otimes H^0(\omega_C \otimes \eta^{-1}) \to H^0(\omega_C^{\otimes 2})$ is not an isomorphism. This case seems interestingly related to the $G_2$-variety, see \cite{Mu} and section 7. \par
The proof of (3) is sketched here and it will appear elsewhere. The image of $r_{6,3}$ parametrizes pairs $(C, \eta)$, where $C$ is a curvilinear section of a suitable Gushel--Mukai threefold singular along a rational normal sextic curve, see section 7. \par Let $(S, \mathcal L, \mathcal E)$ be a level $\ell$ K3 surface of genus $g$ and $\phi: S \to \mathbb P^g$ the morphism defined by $\mathcal L$, we assume for simplicity that $\phi$ is birational onto $\overline S := \phi(S)$. Then we close this introduction with a few lines addressing the \it classification of Fano threefolds \rm $$ \overline X \subset \mathbb P^{g+1}$$ whose general hyperplane sections are projective models $\overline S$ as above. The problem sounds similar to that of classifying threefolds $T \subset \mathbb P^g$ whose hyperplane sections are Enriques surfaces, that is, Enriques-Fano threefolds. It seems however quite neglected. \par Some examples of threefolds $\overline X$ appear in this paper, most are normal and $\Sing \overline X$ is a curve. Moreover $\overline X$ admits a cyclic cover $\pi: \tilde X \to \overline X$, branched exactly on $\Sing \overline X$. A basic notion of level $\ell$ polarized projective variety $(X, \mathcal L, \mathcal E)$ is introduced in the next section, since it is useful in the cases we want to consider. \par
We wish to thank the referee for the careful reading and the useful advice.
\section{Some preliminaries}
In what follows $X$ is a smooth, irreducible complex projective variety and $\mathcal L$ is a big and nef line bundle on $X$, we say that $(X, \mathcal L)$ is a polarized projective variety. On the other hand we are interested, along this paper, in some families of cyclic coverings
\begin{equation} \pi: \tilde X \to X. \end{equation} Then we fix our conventions about, \cite{EV}, \cite{L} I p.242. By definition $\pi$ is a finite morphism of degree $\ell \geq 2$ and it is the quotient map of the action of an automorphism of order $\ell$ of $\tilde X$. We assume that $\tilde X$ is normal, up to composing $\pi$ with the normalization map. Hence $\tilde X$ is reduced with irreducible connected components. Starting from $\pi$, we briefly review the recipe for its construction. Notice that $\pi_* \mathcal O_{\tilde X} \cong \mathcal A$, where \begin{equation}
\mathcal A = \label{mathcal A}\mathcal O_X \oplus \mathcal E^{-1} \oplus \dots \oplus \mathcal E^{-\ell+1}
\end{equation} and $\mathcal E \in \Pic X$. Assume $\tilde X$ is connected and hence irreducible. Then $\pi$ defines the field extension $\pi^*: k(X) \to k(\tilde X)$ and its trace map induces
the exact sequence
\begin{equation}
0 \to {\mathcal E}^{- \ell} \stackrel { s} \to \mathcal O_X \to \mathcal O_B \to 0,
\end{equation}
for some $s \in H^0(\mathcal E^{\ell})$. The multiplication by $s$ defines an $\mathcal O_X$-algebra structure on $\mathcal A$.
We have $\tilde X = \Spec \mathcal A$, moreover $\pi$ factors through the projection $ u: \mathbb P(\mathcal A) \to X$. The branch divisor of $\pi$ is ${\rm div}(s)$ and will be denoted by $B$. For $B$ we fix the notation
\begin{equation} B = m_1B_1 + \dots + m_rB_r, \end{equation} where $B_1, \dots, B_r$ are prime divisors. Conversely, a pair $(\mathcal E, B)$ such that $B \in \vert \mathcal E^{\ell} \vert$ defines on $\mathcal A$ an $\mathcal O_X$-algebra structure as above and a cyclic cover $\pi$. Notice that the condition $\gcd(\ell, m_1, \dots, m_r) = 1$ implies the irreducibility of $\tilde X$. \par Now let $C$ be a reduced curve and $\eta \in \Pic C$ a nontrivial $\ell$-torsion element. Then $(C, \eta)$ uniquely defines, using a nonzero vector $s \in H^0(\eta^{\ell})$, a nonramified cyclic cover $$ \pi: \tilde C \to C, $$ which is nontrivial. To give a pair $(C, \pi)$ is equivalent to give a singular level $\ell$ curve $(C, \eta)$. Now recall that a curve $C \subset X$ is \it mobile \rm if it moves in an irreducible algebraic family covering $X$, with \it integral \rm general member. In the N\'eron--Severi group $N_1(X) \otimes_{\mathbb Z} \mathbb R$ the \it mobile classes \rm of such curves generate an important convex cone, \cite{BDPP} 1.3 (vi), \cite{L} II p. 307. Finally we introduce the following definition.
\begin{definition} Let $\mathcal E \in \Pic X$, the pair $(X, \mathcal E)$ is a level $\ell$ structure on $X$ if: \par
$\circ$ $\vert \mathcal E^{\ell} \vert \neq \emptyset$ and a general $B \in \vert \mathcal E^{\ell} \vert$ defines an integral cyclic cover, \par
$\circ$ there exists a mobile curve $C$ in $X$ such that $C B = 0$.
\end{definition} Assume $\dim X = 1$; then $X$ is the smooth, integral curve $C$ and $\mathcal E$ is a line bundle of degree $0$ such that $\mathcal E^{\ell} \cong \mathcal O_C$. Moreover we are assuming that the cover $\pi: \tilde C \to C$ defined by $\mathcal E$ is integral. Hence $\mathcal E$ is a nontrivial $\ell$-torsion element. Then, for curves, the definition is the traditional one. In higher dimension the next property is clear. \begin{proposition} Let $(X, \mathcal E)$ be a level $\ell$ structure on $X$ and $C \subset X$ a mobile curve such that $CE = 0$, where $\mathcal O_X(E) \cong \mathcal E$. Then $\mathcal O_C(E)$ is an $\ell$-torsion element of $\Pic C$. \end{proposition} \begin{proof} Consider $D \in \vert \mathcal E^{\ell} \vert$. Since $C$ is mobile we can assume that $C$ is not a component of $D$. Then $C \cap D$ is empty because $CE = 0$. This implies that $\mathcal E^{\ell} \otimes \mathcal O_C \cong \mathcal O_C(D) \cong \mathcal O_C$. \end{proof} \begin{remark} \rm Nevertheless we may have a trivial $\mathcal O_C(E)$ even when $\mathcal E$ is not, and even generically when $C$ moves in its family. This is obvious if $C$ is smooth and rational. Furthermore consider a curve $F$ and the projection $p: F \times X \to X$. Then $(F \times X, p^* \mathcal E)$ is a level $\ell$-structure on $F \times X$ and $p^* \mathcal E$ is trivial on the mobile curve $p^*(x)$, $x \in X$. \end{remark} Then, to address the concrete topics of our paper, we turn to polarized pairs $(X, \mathcal L)$ and we denote by $d$ the dimension of $X$. We assume that $\vert \mathcal L^{m} \vert$ is globally generated for $m \gg 0$ and observe that a general complete intersection of $d-1$ elements of $\vert \mathcal L^{m} \vert$ is a smooth, integral mobile curve, which moves in an irreducible family $\mathcal C_m$ of transversal complete intersections in $X$. \begin{proposition} Let $X, \ \mathcal L, \ \mathcal E$ be as above. 
Assume $C E = 0$, where $C \in \mathcal C_m$ and $\mathcal O_X(E) \cong \mathcal E$. Then $\mathcal O_C(E)$ is a nontrivial $\ell$-torsion element of $\Pic C$, moreover $$ h^0(\mathcal O_X(kE)) = 0, \ \ k \not \equiv 0 \mod \ell. $$
\end{proposition}
\begin{proof} By induction on $d = \dim X$. Let $d = 1$; then $X = C$ and $\lbrace C \rbrace = \mathcal C_m$. Since $\mathcal E$ defines an integral cover, the statement follows. Let $d \geq 2$ and $C = D_1 \cdot\ldots \cdot D_{d-1}$, where $D_1, \ldots, D_{d-1} \in \vert \mathcal L^{m} \vert$, then a general $D$ in the linear system generated by $D_1, \dots, D_{d-1}$ is smooth. $\mathcal O_D(D)$ is nef, big and globally generated. Let $\pi: \tilde X \to X$ be the cyclic cover, branched on $B$. Since $C$ is mobile and $CB = 0$, we can assume $C \cap B = \emptyset$. Now let $f: X \to \mathbb P^n$ be the morphism defined by $\vert D \vert$, then $f$ is generically finite onto its image and the same is true for $f \circ \pi: \tilde X \to \mathbb P^n$. Then $\tilde C = \pi^{-1}(C)$ is connected by the connectedness theorem and $\mathcal O_C(E)$ is nontrivial of $\ell$-torsion in $\Pic C$. Moreover $(D, \mathcal O_D(E))$ is a level $\ell$ structure and the second statement follows by induction on $d$. \end{proof}
Keeping this notation we finally come to the following definition. \begin{definition}\label{DEFI} A level $\ell$ polarized variety is a triple $(X, \mathcal L, \mathcal E)$ such that $(X, \mathcal E)$ is a level $\ell$ structure on $X$ and $CE = 0$, where $C \in \mathcal C_m$. \end{definition} Actually the triples $(X, \mathcal L, \mathcal E)$ we will consider always satisfy the additional property:
\par \it $\vert \mathcal L \vert$ is base point free and defines a birational morphism onto its image \begin{equation} f: X \to \mathbb P^n. \end{equation} \rm Hence we assume $C = H_1 \cap \dots \cap H_{d-1} \in \mathcal C_1$, where $H_1 \dots H_{d-1} \in \vert f^* \mathcal O_{\mathbb P^n}(1) \vert$. So $C$ shows the distinguished line bundles $ \eta_C := \mathcal E \otimes \mathcal O_C$ and $\mathcal L_C := \mathcal L \otimes \mathcal O_C$ and these lead us to the varieties we are interested in. For these $\mathcal L_C$ is the canonical sheaf $\omega_C$. For the triples considered, we will also have that the restriction $ r: H^0(\mathcal L) \to H^0(\omega_C)$ is surjective and that $\overline X := f(X)$ is normal.
So we are going to deal with projective varieties $\overline X$ whose curvilinear sections are canonical curves $C$, endowed with the \'etale cover defined by $\eta_C$. This includes K3 surfaces and Fano threefolds with a prescribed level $\ell$ structure.
\section{ Level $\ell$ K3 surfaces} We begin discussing the families of level $\ell$ polarized $K3$ surfaces $(S, \mathcal L, \mathcal E)$ and the chances that $C \in \vert \mathcal L \vert$ be a curve with general moduli. We say that $C^2 = 2g - 2$ is the \it degree \rm of $(S, \mathcal L)$ and $g$ its \it genus\rm. As usual the moduli space of $(S, \mathcal L)$ is denoted by \begin{equation} \mathcal F_g, \end{equation} it is an integral quasi projective variety of dimension $19$. Let $[S, \mathcal L] \in \mathcal F_g$ be a general point, we recall that then $\Pic S \cong \mathbb Z \mathcal L$ and $\vert \mathcal L \vert$ defines an embedding
\begin{equation}
f: S \to \mathbb P^g
\end{equation} for $g \geq 3$. Coming to level $\ell$ structures $(S, \mathcal L, \mathcal E)$, these properties are no longer satisfied, as we are going to recall. We fix our notation as follows, the map
\begin{equation} \pi': \tilde S' \to S \end{equation} is the covering morphism defined by $\mathcal E$. As already established its branch divisor is $$ B = m_1B_1 + \dots + m_rB_r, $$ where $B_1, \dots, B_r$ are the irreducible components of $\Supp B$. Of course, since $\Pic S$ has no torsion, $B$ is not zero. We fix the following convention: \begin{itemize} \it \item[$\circ$] $r$ is the number of irreducible components of $\Supp B$, \item[$\circ$] $t$ is the number of its connected components. \end{itemize} Moreover we set \begin{equation} B_1 + \dots + B_r = B_{\mathsf {red}} = N_1 + \dots + N_t, \end{equation} where $N_1 \dots N_t$ denote the connected components of $\Supp B$. Notice that $CB_i = 0$ for $i = 1 \dots r$. Indeed $C$ is integral and $\dim \vert C \vert \geq 1$ so that $CB_i \geq 0$. Since $B \in \vert \ell E \vert$ then $CB = 0$ and this implies $CB_i = 0$. Then, applying the Hodge Index Theorem, $B_i$ is an integral curve on $S$ with $B^2_i < 0$. Hence $B^2_i = -2$ and $B_i$ is $\mathbb P^1$. The same argument applies to $N_j$ which is a reduced connected curve of arithmetic genus $0$. In particular each $N_j$ is contracted by $f$ to a quadratic singularity and $\Pic S$ is not isomorphic to $\mathbb Z$. \par It is not difficult to see that the Kodaira dimension of $\tilde S'$ is zero, moreover, with some elaboration, one has the following property, cfr. \cite{G}, \cite{N1}. \begin{proposition} Either $\tilde S'$ is birational to a K3 surface or to an abelian surface. \end{proposition}
\begin{definition} \label{defnat} Let $(S, \mathcal L, \mathcal E)$ be a level $\ell$ K3 surface, we say that:
\begin{enumerate}
\item $(S, \mathcal L, \mathcal E)$ is of K3 type if $\tilde S'$ is birational to a K3 surface,
\item $(S, \mathcal L, \mathcal E)$ is of abelian type if $\tilde S'$ is birational to an abelian surface.
\end{enumerate}
\end{definition}
Case (2) is scarcely interesting for our purposes. We aim indeed to use the curves $C \in \vert \mathcal L \vert$ in order to parametrize the moduli space $\mathcal R_{g, \ell}$ of level $\ell$ curves in low genus.
But in case (2) $C$ does not have enough moduli for $g \geq 3$. \par
\it We assume from now on that $(S, \mathcal L, \mathcal E)$ is a level $\ell$ K3 surface of K3 type. \rm Then, to simplify the exposition, we just say with some abuse that $(S, \mathcal L, \mathcal E)$ is
a level $\ell$ K3 surface. We say that two triples $(S_n, \mathcal L_n, \mathcal E_n)$, ($n = 1,2$), are isomorphic if there exists a biregular map $\beta:S_1 \to S_2$ such that $\beta^* \mathcal L_2 \cong \mathcal L_1$ and $\beta^* \mathcal E_2 \cong \mathcal E_1$, $i = 1,2$.\par As mentioned the classification of these triples is due to Nikulin and originates from his paper \cite{N1}. The part of interest here is the classification of pairs $(\tilde S, G)$, where $\tilde S$ is a K3 surface and $G$ is a finite group of symplectic automorphisms of $\tilde S$. There exist $14$ classes of pairs $(\tilde S, G)$ such that $G$ is commutative and $G$ is $\mathbb Z / \ell \mathbb Z$ exactly for $2 \leq \ell \leq 8$. After the classification, several papers addressed the description of the moduli and the projective models of these K3 surfaces. It is due to mention here \cite{GP, GS1, G, GS2, vGS}. \par
The triple $(S, \mathcal L, \mathcal E)$ determines an associated triple $(\tilde S,\tilde{ \mathcal L}, \gamma)$, where $\gamma \in \Aut \tilde S$ is a symplectic automorphism of order $\ell$ and $(\tilde S, \tilde{ \mathcal L})$ is a polarized K3 surface of degree $\ell(2g-2)$. We have indeed $B_{\mathsf{red}} = N_1 + \dots + N_t$, where the summands are the connected components and $(-2)$-curves. Let $\nu: S \to \overline S$ be their contraction morphism, then the Cartesian square
\begin{equation} \label{diagram} \begin{CD} {\tilde S'} @>{\pi'}>> {S} \\ @V{\nu'}VV @VV{ \nu}V \\ {\tilde S} @>{\pi} >> {\overline S} \\ \end{CD} \end{equation} is the Stein factorization of $\nu \circ \pi'$. In it $\nu'$ is a birational morphism. Let $G \subset \Aut \tilde S'$ be the group whose quotient map is $\pi'$. As we will see $ {\pi'} ^*H^0(\mathcal L(-E))$ sits in $H^0(\tilde {\mathcal L})$ as an eigenspace of the natural representation of $G$ and defines a generator $\gamma$ of $G$. Moreover $\pi$ is the quotient map of the induced action of $G$ on $\tilde S$. Conversely, starting from $\pi$ and the minimal desingularization $\nu$, $\pi'$ is reconstructed from the fibre product $\pi \times_{\overline S}\nu$. \par In order to describe the rational singularities occurring in $\Sing \overline S$ we use the notation \begin{equation} \mathsf T := n_1 \mathsf T_1 + \dots + n_s \mathsf T_s, \end{equation} where $\mathsf T_j$ is the singularity type and $n_j$ the number of points of type $\mathsf T_j$ in $\Sing \overline S$. \begin{theorem} Let $(S, \mathcal E, \mathcal L)$ be a level $\ell$ K3 surface of genus $g$, then one has $2 \leq \ell \leq 8$ and $(S, \mathcal E)$ satisfies one of the following conditions:
\begin{enumerate} \it
\item $\ell = 2$. One has $t = 8$, $r = 8$ \ and $\mathsf T = 8 \mathsf A_1$.
\par
\item $\ell = 3$. One has $t = 6$, $r = 12$ and $\mathsf T= 6\mathsf A_2$.
\par
\item $\ell = 4$. One has $t = 6$, $r = 14$ and $\mathsf T= 4\mathsf A_3 + 2\mathsf A_1$.
\par
\item $\ell = 5$. One has $t = 4$, $r = 16$ and $\mathsf T= 4\mathsf A_4$.
\par
\item $\ell = 6$. One has $t = 6$, $r = 16$ and $\mathsf T= 2 \mathsf A_5 + 2\mathsf A_2 + 2\mathsf A_1$.
\par
\item $\ell = 7$. One has $t = 3$, $r = 18$ and $\mathsf T= 3\mathsf A_6$.
\par
\item $\ell = 8$. One has $t = 4$, $r = 18$ and $\mathsf T = 2\mathsf A_7 + \mathsf A_3 + \mathsf A_1$.
\end{enumerate}
\end{theorem} See \cite{N1}. It is also useful to observe that always one has \begin{equation} E^2 = \frac {B^{2}} {{\ell}^2} = - 4. \end{equation} Now, in view of the concrete applications in this paper, we mention some relevant properties of the structure of $\Pic S$ and of the moduli of the above triples. \begin{definition} $\mathcal F_{g, \ell}$ is the moduli space of level $\ell$ K3 surfaces of genus $g$. \end{definition} As in the case of $(S, \mathcal L)$, the construction of $\mathcal F_{g, \ell}$ relies on the usual notion of lattice polarized variety, see \cite{B, D, H} and \cite{N1} for this K3 case. In particular, for every $g \geq 2$, $\mathcal F_{g,\ell}$ has a \it standard irreducible component \rm to be constructed as follows. We may have \begin{equation} \mathbb Z[\mathcal L] \oplus \mathbb M_S \subseteq \Pic S, \end{equation} where the sum is orthogonal. Moreover $\mathbb M_S$ has rank $r$ and it is generated by the classes $[B_1], \dots, [B_r], [E]$, with $\mathcal E \cong \mathcal O_S(E)$, so that the relation $\ell[E] - [B] = 0$ is satisfied in $\Pic S$. We can see the inclusion as the image of a primitive embedding of lattices
\begin{equation} \label{slattice} \upsilon: \mathbb Z c \oplus \mathbb M_{\ell} \to \Pic S, \end{equation} where $\upsilon(c) := [\mathcal L ]$ and $\upsilon(\mathbb M_{\ell}) = \mathbb M_S$. The lattice $\mathbb M_{\ell}$ is given with the set of generators $\lbrace e , b_1, \dots, b_r \rbrace$ so that $\upsilon(e) = [E]$, $\upsilon(b_1) = [B_1], \dots, \upsilon(b_r) = [B_r]$. Notice also that \begin{equation} \label{selfint} c^2 = 2g -2 \ , \ e^2 = -4 \ , \ b_1^2 = \dots = b_r^2 = -2, \end{equation} cfr. \cite{N1}. Fixing these data, the moduli space of triples $(S, \mathcal L, \mathcal E)$ endowed with an embedding $\upsilon$, can be constructed as a moduli space of lattice polarized K3 surfaces $(S, \upsilon)$. In our case $S$ is $M$-polarized with $M := \mathbb Zc \oplus \mathbb M_{\ell}$ and the induced embedding $M \subset L := H^2(S, \mathbb Z)$ is unique up to isometries, \cite{N1}. Then the moduli space is constructed as quotient of the period domain of these surfaces $S$. In particular its dimension is $19 - r$\rm, \cite{D} Section 4.1 and Theorem 1.4.8, \cite{B1} Section 2.4 and Proposition 2.6.
Moreover a unique irreducible component of it is the closure of the moduli points of pairs $(S, \upsilon)$ such that \begin{equation} \Pic S = \mathbb Z[\mathcal L] \oplus \mathbb M_S. \end{equation} In this case we will say that $(S, \mathcal L, \mathcal E)$ is a \it standard triple \rm of genus $g$ and level $\ell$. Let us fix our notation:
\begin{definition} $\mathcal F^{\perp}_{g, \ell}$ is the moduli space of standard triples of genus $g$ and level $\ell$. \end{definition} $\mathcal F^{\perp}_{g, \ell}$ exists for any $g \geq 2$ and $\ell = 2 \dots 8$. Fixing $\ell$, $\mathcal F^{\perp}_{g, \ell}$ is the unique irreducible component of $\mathcal F_{g, \ell}$ along a proper countable set of values $g \in \mathbb N$. \begin{remark} Let $(S, \mathcal L, \mathcal E)$ be a \it non standard \rm triple and $C \in \vert \mathcal L \vert$. Then, at least experimentally for $\ell = 2$, $C$ is never general in moduli for $g \geq 4$. This is true even when the parameter count makes that possible in low genus, see \cite{KLV2}. The situation is quite different for standard triples. This paper studies indeed the modular properties of $C$ in this case: standard behavior or peculiarities of $C$.
\end{remark}
\section{A standard projective model}
Given a standard triple $(S, \mathcal L, \mathcal E)$, let us construct a projective realization of $S$ useful to our purposes.
Consider $C \in \vert \mathcal L \vert$ such that $C \cap B = \emptyset$ and $\tilde C' = \pi'^*C$. Then the curve $\tilde C = \nu'_* \tilde C'$ is biregular to $\tilde C'$ via the contraction $\nu': \tilde S' \to \tilde S$ and the linear map \begin{equation} \nu'_*: H^0(\mathcal O_{\tilde S'}(\tilde C')) \to H^0(\mathcal O_{\tilde S}(\tilde C)) \end{equation} is an isomorphism, we identify the two spaces under it. Then, using $\tilde C$, it is easy to remind of the action of the group $\mathbb Z / \ell \mathbb Z$ on this space and of its eigenspaces. Let \begin{equation} 0 \to \mathcal O_{\tilde S'} \to \mathcal O_{\tilde S'}(\tilde C') \to \omega_{\tilde C} \to 0 \end{equation} be the standard exact sequence, then $\mathbb Z / \ell \mathbb Z$ acts on its associated long exact sequence $$ 0 \to H^0(\mathcal O_{\tilde S'}) \to H^0(\mathcal O_{\tilde S'}(\tilde C')) \to H^0 (\omega_{\tilde C}) \to 0. $$ As is well known the $\mathbb Z / \ell \mathbb Z$-decomposition of $H^0(\omega_{\tilde C })$ is as follows \begin{equation} H^0(\omega_{\tilde C }) = \bigoplus_{k = 1 \dots \ell -1} {\pi'}^* H^0(\omega_C \otimes \eta^{-k}) \bigoplus {\pi'}^* H^0(\omega_C). \end{equation} and this implies that $H^0(\mathcal O_{\tilde S}(\tilde C' ))$ decomposes as \begin{equation} H^0(\mathcal O_{\tilde S}(\tilde C' )) = \bigoplus_{k = 1 \dots \ell -1} {\pi'}^* H^0(\mathcal O_S(H_k)) \bigoplus {\pi'}^* H^0(\mathcal O_S(C)), \end{equation} where $\mathcal O_S(H_1) \dots \mathcal O_S(H_{\ell - 1}) \in \Pic S$ and $ \mathcal O_C(H_k) \cong \omega_C \otimes \eta^{\otimes -k}$, up to reindexing. Since $\tilde C$ has genus $\tilde g = g + (\ell - 1)(g-1)$ it follows $\dim H^0(\mathcal O_{\tilde S}( \tilde C)) = g+1 + (\ell - 1)(g-1)$. In particular the above decomposition immediately implies that \begin{equation} \dim H^0( \mathcal O_S(H_k)) = \dim H^0(\omega_C \otimes \eta^{- k}) = g - 1, \ \ \ k = 1 \dots \ell - 1. 
\end{equation} In what follows, it is also useful to recall the mentioned fact that $E^ 2 = - 4$.
\begin{lemma} It holds $h^i(\mathcal O_S(E)) = h^i(\mathcal O_S(-E)) = 0$, for $i \geq 0$. \end{lemma} \begin{proof} By assumption $E$ is not effective. The same is true for $-E$, since $\ell E \sim B$ and $B > 0$. This implies $h^0(\mathcal O_S(E)) = 0$ and $h^2(\mathcal O_S(E)) = h^0(\mathcal O_S(-E)) = 0$. Since $E^2 = -4$ we have $\chi(\mathcal O_S(E)) = 0$ and then $h^1(\mathcal O_S(E)) = 0$. The same argument applies to $-E$. \end{proof} Now we consider the line bundle $\mathcal O_S(C - E)$ and the standard exact sequence $$ 0 \to \mathcal O_S(-E) \to \mathcal O_S(C - E) \to \mathcal O_C(C - E) \to 0. $$ \begin{lemma} \label{one1} Let $g \geq 2$ then the associated long exact sequence is $$
0 \to H^0(\mathcal O_S(C - E)) \to H^0 (\omega_C \otimes \eta^{-1} )\to 0, $$ in particular it follows $\dim \vert C - E \vert = g - 2$ and $h^i(\mathcal O_S(C-E)) = 0, \ i \geq 1$.
\end{lemma} \begin{proof} By the previous lemma $h^i(\mathcal O_S(E)) = h^i(\mathcal O_S(-E)) = 0$, for $i \geq 0$. Moreover we have $h^0(\omega_C \otimes \eta^{- 1}) = g- 1$ and $h^1(\omega_C \otimes \eta^{- 1}) = 0$. Then the statement follows. \end{proof} Now we observe that the pull-back by $\pi'$ defines a linear embedding $$ {\pi'}^*: H^0(\mathcal O_S(C - E)) \to H^0(\mathcal O_{\tilde S'}(\tilde C')). $$ We have indeed $\mathcal O_{\tilde S'}(\tilde C') \otimes {\pi'}^* \mathcal O_S(E - C) \cong \mathcal O_{\tilde S'}({\pi'}^*E)$ and finally \begin{equation} \label{one2} h^0 ( \mathcal O_{\tilde S'}({\pi'}^*E)) = h^0({\pi'}_* \mathcal O_{\tilde S'}({\pi'}^*E)) = h^0(\mathcal A(E)) = 1, \end{equation} with $\mathcal A = \mathcal O_S \oplus \mathcal O_S(-E) \oplus \dots \oplus \mathcal O_S((1-\ell)E)$. The equality defines, up to a nonzero constant factor, the linear embedding ${\pi'}^*$. Then {\rm Im} ${\pi'}^*$ is the $\mathbb Z/ \ell \mathbb Z$-invariant space $$ {\pi'}^* H^0(\mathcal O_S(C - E)). $$ \begin{proposition} Let $g \geq 3$ and $\Pic S \cong \mathbb Zc \oplus \mathbb M_{\ell}$, then $\vert C - E \vert$ is base point free. \end{proposition} \begin{proof} Since $S$ is a K3 surface, it suffices to prove that $\vert C - E \vert$ has no fixed component. Let $F$ be an integral fixed component of $\vert C - E \vert$, set $f = F \cdot C$ for a general $C$. Then $f$ is a fixed divisor of $\vert \omega_C \otimes \eta^{-1} \vert$. Applying Riemann-Roch to $C$ it follows $\dim \vert \eta(f) \vert = \deg f - 1$. Since $g \geq 3$ then $deg f \leq 2$. Hence $F$ is a line, a conic or $FC = 0$. We have $F \sim xC + \sum y_jB_j +zE$ in $\Pic S$. Assume $\deg f > 0$ then $0 < CF = (2g-2)x \leq 2$ with $x \in \mathbb Z$: a contradiction for $g \geq 3$. Let $CF = 0$ then $F^2 = -2$ by the Hodge Index Theorem and $F$ is a $\mathbb P^1$ contracted by $f_{\vert C \vert}: S \to \mathbb P^g$.
By Lemma \ref{one1}, $h^0(C-E)=g-1=(C-E)^2/2+2$. Let $M$ be the moving part of the linear system $|C-E|$, then $\dim \vert M \vert \geq 1$ and $MF \geq 0$. Moreover we have
$C - E \sim M + kF + R$, where $R$ is a curve not containing $F$ and $k \geq 1$. Let $G \in \vert M + F \vert$ be general, then $G$ contains $F$: otherwise the curve $kF$ couldn't be a component of the element $G + (k-1)F + R \in \vert C - E \vert$. Hence $F$ is a fixed component of $\vert M + F \vert$. Now observe that $MF \geq 0$ and then consider the standard exact sequence $$ 0 \to \mathcal O_S(M) \to \mathcal O_S(M+F) \to \mathcal O_F(M) \to 0. $$ We claim that, passing to the associated long exact sequence, it follows $$ \chi(\mathcal O_S(M)) = \chi (\mathcal O_S(M+F)) $$ and $\chi(\mathcal O_F(M)) = 0$. Since $F = \mathbb P^1$ this implies $MF < 0$: a contradiction. To prove the claim consider a smooth $D \in \vert M \vert$. Then either $D$ is integral of genus $g - 2$ and $h^1(\mathcal O_S(M)) = 0$ or $M \sim (g-2)N$ and $N$ is a smooth integral elliptic curve. Via Serre duality we have $h^2(\mathcal O_S(M)) = h^2(\mathcal O_S(M+F)) = 0$. Moreover $MF \geq 0$ implies $h^1(\mathcal O_F(M)) = 0$. Then, in the former case, $h^1(\mathcal O_S(M)) = 0$ implies $h^1(\mathcal O_S(M+F)) = 0$ and the claim follows. In the latter case replace $M$ by $N$. Then the equality and the same contradiction follow by the same type of arguments.
\end{proof} Now we introduce a second linear system associated with $E$. At first let us set \begin{equation} \label{defBred} B_{\mathsf {red}} := B_1 + \dots + B_r, \end{equation} where the summands are the irreducible components of $\Supp B$. Then we recall that $$ E = \frac 1{\ell} (m_1 B_1 + \dots + m_r B_r), \ \ \text {with $m_1 \dots m_r \in [1 \dots \ell-1]$.} $$
\begin{definition} Set $ \mathring E = B_{\mathsf {red}} - E = \frac 1{\ell} ( \mathring m_1B_1 + \dots + \mathring m_rB_r)$, where $\mathring m_i := \ell - m_i$.\end{definition}
Let us denote by $n_i$ the coefficients of the curves $B_i$ in $-\ell E$. Then $n_i\equiv\mathring m_i\mod \ell$. More precisely, $E$ is a generator of $\mathbb{Z}/\ell\mathbb{Z}=\langle B_i, E\rangle/\langle B_i\rangle$ and $\mathring E$ is its opposite in $\mathbb{Z}/\ell\mathbb{Z}$; in particular it is a different generator of the same group. Hence $\mathring {\mathcal E}:= \mathcal O_S(\mathring E)$ is a level $\ell$ structure, with the same properties of $\mathcal E$ . We notice that $\mathring E$ defines a cover $\mathring \pi': \tilde S' \to S$ so that $\mathring \pi' = \pi' \circ a$ and $a^{\ell} = id_{\tilde S'}$. Then we define \begin{equation} \vert H \vert := \vert C - E \vert \ \ {\rm , } \ \ \mathring H := \vert C - \mathring E \vert. \end{equation} The rational maps associated with these linear systems respectively will be
\begin{equation}
p: S \to \mathbb P \ \ , \ \ \mathring p: S \to \mathring {\mathbb P}, \end{equation} where $\mathbb P := \vert H \vert^*$ and $\mathring {\mathbb P} := \vert \mathring H \vert^*$ are projective spaces $\mathbb P^{g-2}$. Let $\iota$ be the inclusion \begin{equation} \mathbb P \times \mathring { \mathbb P} \subset \mathbb P^{(g-1)^2-1} \end{equation} defined by the Segre embedding, we set $f:= \iota \circ (p \times \mathring p)$ and fix the notation \begin{equation} f: S \to \mathbb P \times \mathring {\mathbb P} \subset \mathbb P^{(g-1)^2-1}. \end{equation}
\begin{definition} \label{projmodel} The morphism $f$ is the main projective model of $(S, \mathcal L, \mathcal E)$. \end{definition}
The next two remarks are simple but relevant in order to discuss $f$ (the second one follows by a direct computation of $E\cdot \mathring E$, where the class $E$ is explicitly given in \cite{N1}):
\begin{enumerate} \item $f^* \mathcal O_{\mathbb P^{(g-1)^2 -1}}(1) \cong \mathcal O_S(H + \mathring H) \cong \mathcal O_S(2C - B_{\sf red})$,
\par \item $ H \mathring H = 2g + 2 - t$. \end{enumerate} \begin{proposition}\label{H-ringH not effective} The divisors $[H - \mathring H]$ and $[\mathring H - H]$ are not effective classes for $\ell \geq 3$ and \begin{equation} \label { vanish1 } h^1(\mathcal O_S(H - \mathring H)) = h^1(\mathcal O_S(\mathring H - H)) = 6-t.\end{equation} \end{proposition} \begin{proof} We have $H(H - \mathring H) = \mathring H (\mathring H - H) = t - 8$. Since the general elements of $\vert H \vert$ and $\vert \mathring H \vert$ are irreducible curves, the first statement follows for $\ell \geq 3$ because then $t \leq 6$. The second statement just follows from Riemann-Roch. \end{proof} Now let us consider, for a general $C \in \vert \mathcal L \vert$, the standard exact sequence \begin{equation} \label{exact} 0 \to \mathcal O_S(C - B_{\sf red}) \to \mathcal O_S(2C - B_{\sf red}) \to \mathcal O_C(2C - B_{\sf red}) \to 0. \end{equation} Since $C$ is smooth and disjoint from $B_{\sf red}$, then $\mathcal O_C(- B_{\sf red})$ is trivial and $\vert 2C - B_{\sf red} \vert$ cuts on $C$ a linear system of bicanonical divisors. Moreover we know that both $\vert H \vert$ and $\vert \mathring H \vert$ are base point free. Hence the same is true for $\vert H + \mathring H \vert = \vert 2C - B_{\mathsf{red}} \vert$. Notice that $$ (2C - B_{\mathsf{red}})^2 = 8(g-1) - 2t, $$ which is $\geq 0$ for $g \geq 3$ and any of the prescribed values of $t, \ell$. Actually the zero value is only reached in the known situation $g = 3$, $\ell = 2$. Hence we assume $g \geq 4$ for $\ell = 2$. Then a general $D \in \vert H + \mathring H \vert$ is a smooth integral curve such that $D^2 > 0$. As is well known, this implies $h^i(\mathcal O_S(H + \mathring H)) = 0$ for $i \geq 1 $ and the next property follows. 
\begin{proposition} Let $g$ be as above then $\dim \vert 2C - B_{\mathsf{red}} \vert = 4g - t - 3$ and the long exact sequence associated with the exact sequence (\ref{exact}) is as follows: $$ 0 \to H^0(\mathcal O_S(C-B_{\sf red})) \to H^0(\mathcal O_S (2C - B_{\sf red})) \to H^0(\omega_C^{\otimes 2}) \to H^1(\mathcal O_S(C-B_{\sf red})) \to 0. $$ \end{proposition} The linear system $\vert C - B_{\mathsf{red}} \vert$ also deserves some observations. Since we are dealing with a general standard triple $(S, \mathcal L, \mathcal E)$, we know that $\vert C \vert$ defines a morphism $$ f_{\vert C \vert}: S \to \mathbb P^g $$ which is the contraction $\nu: S \to \overline S$, composed with the embedding $\overline S \subset \mathbb P^g$ defined by $\vert \nu_* C \vert$. Since a general $C$ is disjoint from $B$, $\vert \nu_*C \vert$ is a linear system of Cartier divisors. Let $\mathcal I_{\Sing \overline S}$ be the ideal sheaf of $\Sing \overline S$, it is clear that the natural map $$ f^*_{\vert C \vert}: H^0(\mathcal I_{\Sing \overline S}(1)) \to H^0(\mathcal O_S(C - B_{\mathsf{red}})) $$ is an isomorphism. Then, considering the above exact sequence (\ref{exact}), we have \begin{equation} h^0(\mathcal O_S(C - B_{\mathsf{red}})) - h^1(\mathcal O_S(C - B_{\mathsf{red}})) = \chi(\mathcal O_S(2C - B_{\mathsf{red}})) - \chi(\omega^{\otimes 2}_C) = g + 1 - t. \end{equation} This implies the next property. \begin{proposition} It holds $h^1(\mathcal O_S(C - B_{\mathsf{red}})) = 0$ if and only if $h^0(\mathcal O_S(C - B_{\mathsf{red}})) = g + 1 - t$, that is, the points of $\Sing \overline S$ are linearly independent in $\mathbb P^g$. \end{proposition} On the other hand consider the commutative diagram \begin{equation} \begin{CD} \label{CD} @. 0 \\ @. @VVV \\ @. {H^0(\mathcal O_S(C - B_{\mathsf{red}})) } \\ @. 
@VVV \\ {H^0(\mathcal O_S( H)) \otimes H^0(\mathcal O_S(\mathring H))} @>{\mu_S}>> {H^0(\mathcal O_S( H+\mathring H))} \\ @V{\rho_H \otimes \rho_{\mathring H}}VV @V{\rho_C}VV \\ {H^0(\omega_C \otimes \eta^{-1}) \otimes H^0(\omega_C \otimes \eta)} @>{\mu_C}>> {H^0(\omega_C^{\otimes 2})} \\ @. @VVV \\ @. {H^1(\mathcal O_S(C - B_{\mathsf{red}}))} \\ @. @VVV \\ @. 0 \end{CD} \end{equation} where $\mu_S$ and $\mu_C$ are the multiplication maps and the vertical arrows are the restriction maps. It follows from Lemma (\ref{one1}) that $\rho_H \otimes \rho_{\mathring H}$ is an isomorphism. The next property is clear. \begin{proposition} \label{prop1} If $\mu_C$ is surjective then $h^1(\mathcal O_S(C - B_{\mathsf{red}})) = 0$ i.e. $\rho_C$ is surjective. \end{proposition} Since $\chi (\mathcal O_S (C - B_{\mathsf{red}})) = g + 1 - t$ let us point out that $\mu_C$ is \it not surjective \rm if \begin{equation} g < t - 1. \end{equation}
We do not further investigate the diagram, for our applications these results suffice.
\section{Views on the Mukai maps in level $\ell$}
In this section we only put in large the picture we have outlined in the introduction. This picture concerns the maps in (\ref{mukai}) and (\ref{lmukai}), that is, the Mukai map $$ m_g: \mathcal P_g \to \mathcal M_g $$ and the level $\ell$ Mukai maps $$ r_{g,\ell}: \mathcal P^{\perp}_{g,\ell} \to \mathcal R_{g, \ell}. $$ These maps, and the involved moduli spaces, have been previously considered. We recall that the points of $\mathcal P_g$ are the elements $[S, \mathcal L, C]$ such that $[S, \mathcal L] \in \mathcal F_g$ and $C \in \vert \mathcal L \vert$. The Mukai map $m_g$ is the natural forgetful map. We have
\begin{enumerate} \it \item $m_g$ is dominant for $g \leq 9$, \item $m_g$ is not dominant for $g = 10$, \item $m_g$ is birational for $g = 11$, \item $m_g$ has $1$-dimensional fibre for $g = 12$. \item $m_g$ is generically injective for $g \geq 13$. \end{enumerate} Thus $m_g$ has not maximal rank for $g = 10, 12$. It is indeed known that a general $[C] \in m_{10}(\mathcal P_{10})$ is a linear section $C$ of the $G_2$ variety $W \subset \mathbb P^{13}$, \cite{Mu}. Hence the family of $2$-dimensional linear sections of $W$ through $C$ is a $\mathbb P^3$. It turns out from this fact that the fibre of $m_{10}$ at $[C]$ is $3$-dimensional. Then $m_{10}(\mathcal P_{10})$ has codimension $1$. Genus $12$ Fano threefolds play a similar role, then a general fibre of $m_{12}$ is a rational curve. \par In this perspective, asking about the connections between the moduli space $\mathcal F^{\perp}_{g, \ell}$, of level $\ell$ K3 surfaces of genus $g$, and $\mathcal R_{g, \ell}$ is, as observed, natural. For a general point $[S, \mathcal L, \mathcal E] \in \mathcal F^{\perp}_{g, \ell}$ one can ask if $(C, \eta)$, with $C \in \vert \mathcal L \vert$ and $\eta = \mathcal E \otimes \mathcal O_C$, defines a general point of $\mathcal R_{g, \ell}$. More precisely recall that
$\mathcal P^{\perp}_{g, \ell}$ is the moduli space of $4$-tuples $(S, \mathcal L, \mathcal E, C)$ such that $[S, \mathcal L, \mathcal E] \in \mathcal F^{\perp}_{g,\ell}$ and $C \in \vert \mathcal L \vert$.
The level $\ell$ Mukai map $ r_{g,\ell}: \mathcal P^{\perp}_{g, \ell} \to \mathcal R_{g,\ell}$ is the morphism sending $[S, \mathcal L, \mathcal E, C] \in \mathcal P^{\perp}_{g, \ell}$ to the point $[C, \eta_C] \in \mathcal R_{g, \ell}$, where $\eta_C$ is $\mathcal E \otimes \mathcal O_C$. About the possible dominance of the map $r_{g,\ell}$ we have:
\begin{enumerate} \it
\item $3g-3 = \dim \mathcal R_{g,2} \leq \dim \mathcal P^{\perp}_{g, 2 } = 11 + g$ iff $g \leq 7$.
\item $3g-3 = \dim \mathcal R_{g,3} \leq \dim \mathcal P^{\perp}_{g, 3 } = \ 7 + g$ iff $g \leq 5$.
\item $3g-3 = \dim \mathcal R_{g,4} \leq \dim \mathcal P^{\perp}_{g, 4 } = \ 5 + g $ iff $g \leq 4$.
\item $3g-3 = \dim \mathcal R_{g,5} \leq \dim \mathcal P^{\perp}_{g, 5 } = \ 3 + g$ iff $g \leq 3$.
\item $3g-3 = \dim \mathcal R_{g,6} \leq \dim \mathcal P^{\perp}_{g, 6 } = $ \ $3 + g$ iff $g \leq 3$.
\item $3g-3 = \dim \mathcal R_{g,7} \leq \dim \mathcal P^{\perp}_{g, 7 } = \ 1 + g$ iff $g \leq 2$.
\item $3g-3 = \dim \mathcal R_{g,8} \leq \dim \mathcal P^{\perp}_{g, 8 } = \ 1 + g$ iff $g \leq 2$.
\end{enumerate} These issues have not been systematically considered but for $\ell = 2$. We close this expository section with a summary on what happens for $\ell = 2, 3$. \subsection { \sf The picture for $\ell = 2$}
We have $3g-3 = \dim \mathcal M_g \leq \dim \mathcal P^{\perp}_{g,2} = 11 + g$ iff $g \leq 7$. Again, $r_{g,2}$ behaves unexpectedly near the value of transition, which is now $g = 7$.
\begin{enumerate} \it \item $r_{g,2}$ is dominant for $g \leq 5$,
\item $r_{g,2}$ is not dominant for $g = 6$,
\item $r_{g,2}$ is birational for $g = 7$,
\item $r_{g,2}$ does not have finite fibres for $g = 8$.
\item $r_{g,2}$ is generically injective for $g \geq 9$. \end{enumerate} These surfaces are known as (standard) \it Nikulin surfaces. \rm Cases (1), (2), (3) are treated in \cite{FV, FV1}, the remaining ones, (standard and non standard), in \cite{ KLV1, KLV2}. Notice that \it $r_{g,2}$ is not of maximal rank for $g = 6,8$. \rm In genus $6$ the condition $C \subset S$ implies that the following multiplication map is not an isomorphism as expected: \begin{equation} \mu: {\rm Sym}^2 H^0(\omega_C \otimes \eta_C) \to H^0(\omega_C^{\otimes 2}). \end{equation} Then $(C, \eta_C)$ does not define a general point of $\mathcal R_{g,2}$, see \cite{B}. We point out that, studying the two cases where $r_{g,2}$ has not maximal rank, two families of singular Fano threefolds appear. Their hyperplane sections are singular models $\overline S$ of general Nikulin surfaces $S$. The existence of these threefolds implies the failure of the maximal rank.
\par \subsection{ \sf The picture for $\ell = 3$}
We will prove that $r_{g,3}$ behaves unexpectedly near $g = 5$:
\begin{enumerate} \it \item $r^s_{g,3}$ is dominant for $g \leq 3$,
\item $r^s_{g,3}$ does not have maximal rank for $g = 4$,
\item $r^s_{g,3}$ is birational for $g = 5$,
\item $r^s_{g,3}$ does not have maximal rank for $g = 6$.
\end{enumerate} \begin{remark} \rm The case $g \geq 7$ should be considered for further investigation, addressing the generic injectivity. The (uni)rationality of $\mathcal R_{g,3}$ is known, or elementary, for $g \leq 5$, cfr. \cite{BaC, BaV, Ve1}. We recall that $\mathcal R_{g,3}$ is of general type for $g \geq 12$ and of Kodaira dimension $\geq 19$ for $g = 11$, \cite{CEFS}. Bruns proved in \cite{Br} that $\mathcal R_{8,3}$ is of general type. The cases $g = 6, 7, 9, 10$ and partially $g = 11$ are open. \end{remark}
\section{The Mukai map in level $3$}
\par \subsection { \sf The case of genus $4$} Let $[S, \mathcal L, \mathcal E, C] \in \mathcal P^{\perp}_{g, \ell}$ be general and $\ell = 3$, as in section 2, (\ref{CD}) we consider the commutative diagram
\begin{equation} \begin{CD} {H^0(\mathcal O_S( H)) \otimes H^0(\mathcal O_S(\mathring H))} @>{\mu_S}>> {H^0(\mathcal O_S( H+\mathring H))} \\ @V{\rho_H \otimes \rho_{\mathring H}}VV @V{\rho_C}VV \\ {H^0(\omega_C \otimes \eta^{-1}) \otimes H^0(\omega_C \otimes \eta)} @>{\mu_C}>> {H^0(\omega_C^{\otimes 2})}. \\ \end{CD} \end{equation}
Since $\ell = 3$ we have $t = 6$ connected components of $\Supp B$. Then, by proposition (\ref{prop1}), $\mu_C$ is \it not surjective \rm if $g < t - 1 = 5$. This is obvious for $g \leq 3$. For $g = 4$ the dimension count suggests that in $\mathcal R_{4,3}$ the map $\mu_C$ is not surjective in codimension $1$. \begin{proposition} Let $[C, \eta] \in \mathcal R_{4,3}$ be a general point then $\mu_C$ is surjective, moreover the locus of points such that $\mu_C$ is not surjective is an effective Cartier divisor
in $\mathcal R_{4,3}$.
\end{proposition}
Indeed, for $g = 4$ and $\ell = 3$, this locus turns out to be the locus $\mathcal D_{g,\ell}$ defined in \cite{CEFS} p. 77. There, for low level $\ell \geq 3$ and for $g \leq16$, the so defined \it Torsion bundle conjecture B \rm is proven, which implies that $\mathcal D_{4,3}$ is an effective Cartier divisor in $\mathcal R_{4,3}$. Then the next theorem follows. Notice also that, for $g = 4$, theorem 1.7 of \cite{BaV} implies that $\mu_C$ is an isomorphism for a general $(C, \eta)$. \begin{theorem} The map $r_{4,3}: \mathcal P^{\perp}_{4,3} \to \mathcal R_{4, 3}$ fails to be dominant. \end{theorem} \begin{remark} \rm The case $g = 4$ turns out to be of special interest. See the last section for a natural, presently conjectural, geometric interpretation. \end{remark}
\subsection {\sf The case of genus 5} Differently from the case $g \leq 4$ the multiplication map $$ \mu_C: H^0(\omega_C \otimes \eta) \otimes H^0(\omega_C \otimes \eta^{-1}) \to H^0(\omega_C ^{\otimes 2}) $$ can be surjective for $g \geq 5$ and a general point $[C, \eta] \in \mathcal R_{g,3}$. This property occurs in genus $g = 5$ and makes possible the proof of the next \it birationality theorem. \rm \begin{theorem} \label{bir} The Mukai map $r_{5,3}: \mathcal P^{\perp}_{5,3} \to \mathcal R_{5,3}$ is birational. \end{theorem}
Before proving it we cannot avoid a long series of preliminaries. We will always assume that $ [S, \mathcal L, \mathcal E, C] \in \mathcal P^{\perp}_{5,3} $ is a \it general point\rm, in particular $\Pic S \cong \mathbb Z c \oplus \mathbb M_3$. Let \begin{equation} 0 \to \mathcal O_S(H + \mathring H - C) \to \mathcal O_S(H + \mathring H) \to \omega_C^{\otimes 2} \to 0. \end{equation} be the standard exact sequence, at first we point out the following fact. \begin{proposition} The associated long exact sequence is \begin{equation} \label{iso} 0 \to H^0(\mathcal O_S(H + \mathring H)) \stackrel {\rho_C} \to H^0(\omega_C^{\otimes 2}) \to 0. \end{equation} \end{proposition} Since $H + \mathring H - C \sim C - B_{\mathsf{red}}$, the next lemma implies the previous statement. \begin{lemma}\label{initial} It holds $h^i(\mathcal O_S(C - B_{\mathsf{red}})) = 0$ for $i \geq 0$. \end{lemma}
\begin{proof} Since $C(B_{\mathsf{red}} - C) < 0$, $h^0(\mathcal O_S(B_{\mathsf{red}} - C)) = 0$. Hence $h^2(\mathcal O_S(C - B_{\mathsf{red}}))$ is zero by Serre duality.
Since $ (C- B_{\mathsf{red}})^2 = - 4$ then $ \chi(\mathcal O_S(C - B_{\mathsf{red}})) = 0$ and the statement follows if $h^0(\mathcal O_S(C - B_{\mathsf{red}})) = 0$. Assume $A \in \vert C - B_{\mathsf{red}} \vert$ then $A$ is not connected. This follows from $ \chi(\mathcal O_S(A)) = h^0(\mathcal O_S(A)) - h^1(\mathcal O_S(A)) = 0$ and the standard exact sequence
$$
0 \to \mathcal O_S(-A) \to \mathcal O_S \to \mathcal O_A \to 0.
$$
This implies $A = A_1 + A_2$, where $A_1$ is a connected component and $A_2 = A - A_1$ is a curve. We have $C(A_1+ A_2) = C(C - B_{\mathsf{red}})= 8$ and we can choose $A_1$ so that $CA_1 > 0$. Assume $CA_2 = 0$ then the morphism $\phi: S \to \mathbb P^5$, defined by $\vert C \vert$, maps birationally $A_1 + A_2 + B_{\mathsf{red}}$ onto a degree $8$ hyperplane section of $\overline S = \phi(S)$. This is the curve $\phi_*A_1$, singular at the points of $\phi (B_{\mathsf{red}}) = \Sing \overline S$.
These points are the images by $\phi$ of the six connected components of $B_{\mathsf{red}}$ and are exactly six. Indeed each fibre of $\phi$ is connected and hence two connected components $V_1, V_2$ of $B_{\mathsf{red}}$, contracted to the same point, are connected by an effective divisor $W$ orthogonal to $C$. On the other hand, under our generality assumption, we have $\Pic S \cong \mathbb Z c \oplus \mathbb M_3$. Moreover a direct computation shows that, in the negative definite lattice $\mathbb M_3$, $\Supp W$ is a union of irreducible components of $B_{\mathsf{red}}$. Actually one computes that the only classes of irreducible $(-2)$-curves are the classes of $B_1 \dots B_{12}$. This implies $W = 0$ and $V_1 = V_2$. But then $\phi_*A_1$ is not integral, because it is a hyperplane section of $\phi(S)$ with six singular points. Then there exists an irreducible component $R$ of it such that $0 < CR < 8$. The same is obvious if $CA_2 > 0$. Since $\Pic S \cong \mathbb Z c \oplus \mathbb M_3$ we have $[R] = x[C] + \sum y_i [B_i] + z[E]$, with $x, y_i, z \in \mathbb Z$. But this implies $0 < CR = 8x < 8$ with $x \notin \mathbb Z$: a contradiction. \end{proof} \begin{proposition} The linear systems $\vert H \vert$ and $\vert \mathring H \vert$ are not hyperelliptic. \end{proposition} \begin{proof} Let $\vert H \vert$ be hyperelliptic, then $\vert H \vert$ defines a $2:1$ morphism $\psi: S \to \mathbb P^3$ onto a quadric surface $Q := \psi(S)$. As is well known the pull-back of a ruling of lines of $Q$ defines a pencil $\vert F_2 \vert$ of curves such that $F_2^2 = 0$ and $HF_2 = 2$. Moreover $\vert F_1 \vert := \vert H - F_2 \vert$ is a pencil of irreducible elliptic curves. The same is true for the moving part of $\vert F_2 \vert$. Since $H \sim F_1 + F_2$ and $C \sim H + E$ we have $C (F_1 + F_2) = 8$ and also $CF_i \geq 2$, $i = 1,2$. Let $\vert F \vert$ be the moving part of the pencil $\vert F_i \vert$ such that $CF_i$ is minimal, then it follows $2 \leq CF \leq 4$. 
On the other hand we have $F \sim xC + \sum y_j B_j + zE$ in $\Pic S$. This implies $2 \leq CF = 8x \leq 4$ and $x \notin \mathbb Z$: a contradiction. The same argument works for $\vert \mathring H \vert$. \end{proof} \begin{lemma} It holds $h^i(\mathcal O_S(2H - \mathring H)) = h^i(\mathcal O_S(2 \mathring H - H)) = 0$ for $i \geq 0$. \end{lemma} \begin{proof} From $H \sim C - E$ and $\mathring H \sim C - \mathring E$ we have $2H - \mathring H \sim C - 2E + \mathring E$, moreover $$\mathring H (\mathring H - 2H) = -8 \Rightarrow h^0 (\mathcal O_S(\mathring H -2H)) = 0 \Rightarrow h^2(\mathcal O_S(2H - \mathring H)) = 0. $$ Since $(2H - \mathring H)^2 = -4$ then $\chi(\mathcal O_S(2H - \mathring H)) = 0$. Hence the statement follows for $2H - \mathring H $ if we prove $h^0(\mathcal O_S(2H - \mathring H)) = 0$. For this we observe that the well known descriptions of $E$ and $\mathring E$ are as follows. For $i = 1 \dots 6$ consider $N_i = B_i + B'_{i}$, that is, the $i$-th connected component of $B_{\mathsf{red}} = \sum_{i = 1 \dots 6} B_i + B'_{i} $. Then in $\Pic S$ we have \begin{equation} [E] = \sum_{i = 1 \dots 6} \frac 13 [B_i + 2B'_i] \ , \ [\mathring E] = \sum_{i = 1 \dots 6} \frac 13 [2B_i + B'_i] \end{equation} up to exchanging $E$ with $\mathring E$. Since $2H - \mathring H \sim C - 2E + \mathring E$, it follows that \begin{equation} \label{noteff} 2H - \mathring H \sim C - \sum_{i = 1 \dots 6} B'_i. \end{equation} This implies that $[2H - \mathring H]$ is not an effective class. Indeed let $B' := B'_1 + \dots + B'_6$, observe that $(C - B')B_i = - 1$, $i = 1 \dots 6$. Assume $C - B' \sim F$ where $F$ is an effective divisor. Then $FB_i = -1$ implies $B_i \subset F$ and $F = F' + B_1 + \dots + B_6$ where $F'$ is effective. Hence $C - B_{\mathsf{red}}\sim F' > 0$: a contradiction to the above lemma (\ref{initial}).
\end{proof} We will make use of genus $3$ curves of the non hyperelliptic linear systems $\vert H \vert$ or $\vert \mathring H \vert$. \begin{lemma} It holds $\forall \ D \in \vert H \vert, \ h^0(\mathcal O_D(\mathring H - H)) = 0$ \ and \ $\forall \ \mathring D \in \vert \mathring H \vert, \ h^0(\mathcal O_{\mathring D}(H - \mathring H)) = 0$. \end{lemma} \begin{proof} Let $D \in \vert H \vert$; once more consider the standard exact sequence $$ 0 \to \mathcal O_S(\mathring H - 2H) \to \mathcal O_S (\mathring H - H) \to \mathcal O_D(\mathring H - H) \to 0 $$ and its long exact sequence. We have $h^1(\mathcal O_S(\mathring H - 2H)) = h^1(\mathcal O_S(2H - \mathring H)) = 0$ by the previous lemma and $h^0(\mathcal O_S(\mathring H - 2H)) = 0$ because $H(\mathring H - 2H) = -2$. Then it follows $h^0(\mathcal O_D(\mathring H - H)) = h^0(\mathcal O_S(\mathring H - H))$. Finally the latter is zero by Proposition (\ref{H-ringH not effective}). \end{proof} Let $D \in \vert H \vert$ be \it smooth \rm then $\mathcal O_D(\mathring H - H) \cong \mathcal O_D(b)$, where $\deg b = 2$. We fix the notation $b$ for such a divisor and the notation $\mu_D$ for the following multiplication map:
\begin{equation} \label{NOTEFF} \mu_D: H^0(\omega_D) \otimes H^0(\omega_D(b)) \to H^0(\omega^{\otimes 2}_D(b)). \end{equation}
Let us also point out that $h^0(\mathcal O_D(b)) = 0$ by the above lemma. Moreover we fix the notation
\begin{equation} \nu_D: H^0(\mathcal O_S(H)) \to H^0(\omega_D) \ , \ \mathring \nu_D: H^0(\mathcal O_S(\mathring H)) \to H^0(\omega_D(b)) \ , \ \rho_D: H^0(\mathcal O_S(H + \mathring H)) \to H^0(\omega_D^{\otimes 2}(b)) \end{equation} for the natural restriction maps. Then we consider the commutative diagram: \begin{equation} \begin{CD} \label{CD five} {H^0(\mathcal O_S( H)) \otimes H^0(\mathcal O_S(\mathring H))} @>{\mu_S}>> {H^0(\mathcal O_S( H+\mathring H))} \\ @V{\nu_D \otimes \mathring \nu_D}VV @V{\rho_D}VV \\ {H^0(\omega_D) \otimes H^0(\omega_D(b))} @>{\mu_D}>> {H^0(\omega_D^{\otimes 2}(b))}. \\ \end{CD} \end{equation} which is similar to our main diagram (\ref {CD}).
\begin{proposition} The vertical arrows and the horizontal arrow $\mu_D$ are surjective. \end{proposition} \begin{proof} Let $p: S \to \mathbb P^3$ be the map defined by $\vert H \vert$, then $p \vert D: D \to \mathbb P^2 = \vert \omega_D \vert^*$ is the canonical map and $\vert \omega_D(b) \vert$ is cut on $D$ by $\vert \mathcal I_{d \vert S}(3H) \vert$, where $d$ is any element of $\vert \omega_D^{\otimes 2}(-b)\vert$ and $\mathcal I_{d \vert S}$
is its ideal sheaf. Moreover the map $p^*: \vert \mathcal O_{\mathbb P^2}(3) \vert \to \vert \omega_D^{\otimes 3} \vert$ is an isomorphism and $\vert \mathcal I_{d \vert S}(3H) \vert$ $=$ $p^* \vert \mathcal I_{Z \vert \mathbb P^2}(3) \vert$, where $Z = p_*d$ and $\mathcal I_{Z \vert \mathbb P^2}$ is its ideal sheaf. Hence it follows $ h^0(\mathcal I_{Z \vert \mathbb P^2}(2)) = h^0(\omega_D^{\otimes 2}(- b)) = h^0(\mathcal O_D(b)) = 0$ and $h^1(\mathcal O_D(b)) = h^0(\mathcal O_D(b)) = 0$. This easily implies $h^i(\mathcal I_{Z \vert \mathbb P^2}(3-i)) = 0$ for $i > 0$, that is, $\mathcal I_{Z \vert \mathbb P^2}$ is $3$-regular. Hence, by Castelnuovo-Mumford regularity theorem, the multiplication map \begin{equation}
\mu: H^0(\mathcal O_{\mathbb P^2}(1)) \otimes H^0(\mathcal I_{Z \vert \mathbb P^2}(3)) \to H^0(\mathcal I_{Z \vert \mathbb P^2}(4)) \end{equation} is surjective. Now consider the standard exact sequence of ideal sheaves $$ 0 \to \mathcal I_{p(D) \vert \mathbb P^2}(4) \to \mathcal I_{Z \vert \mathbb P^2}(4) \stackrel{\rho} \to \mathcal I_{Z \vert p(D)}(4) \to 0 $$ and its associated long exact sequence. Since $\mathcal I_{p(D) \vert \mathbb P^2}(4) \cong \mathcal O_{\mathbb P^2}$ it follows that $$ h^0(\rho): H^0(\mathcal I_{Z \vert \mathbb P^2}(4)) \to H^0(\omega^{\otimes 2}_D(b)) $$ is surjective. On the other hand we have $\mu_D \circ \lambda = h^0(\rho) \circ \mu$, where $\lambda$ is the tensor product $$\lambda_1 \otimes \lambda_2: H^0(\mathcal O_{\mathbb P^2}(1)) \otimes H^0(\mathcal I_{Z \vert \mathbb P^2}(3)) \to H^0(\omega_D) \otimes H^0(\omega_D(b))$$ of the natural isomorphisms $\lambda_1: H^0(\mathcal O_{\mathbb P^2}(1)) \to H^0(\omega_D)$ and $\lambda_2: H^0(\mathcal I_{Z \vert \mathbb P^2}(3)) \to H^0(\omega_D(b))$. Since $\lambda$ is an isomorphism and $h^0(\rho)$ and $\mu$ are surjective, then $\mu_D$ is surjective. The surjectivity of $\rho_D$ follows from the vanishing of $h^1(\mathcal O_S(\mathring H))$ and the standard exact sequence $$ 0 \to \mathcal O_S(\mathring H) \to \mathcal O_S(H + \mathring H) \to \omega_D^{\otimes 2}(b) \to 0. $$ Since $\omega_D^{\otimes 2}(b)$ is $\mathcal O_D(H + \mathring H)$, the surjectivity of $\nu_D$ follows from the above exact sequence twisted by $-\mathring H$. Finally the exact sequence $$ 0 \to \mathcal O_S(\mathring H - H) \to \mathcal O_S(\mathring H) \to \omega_D(b) \to 0 $$ implies that $\mathring \nu_D$ is an isomorphism. Indeed we have $h^0(\mathcal O_S(\mathring H - H )) = h^1(\mathcal O_S(\mathring H - H)) = 0$ in its long exact sequence by (\ref{ vanish1 }). Hence $\nu_D \otimes \mathring \nu_D$ is surjective too. 
\end{proof} \begin{proposition} The map \label{proof} $\mu_S: H^0(\mathcal O_S( H)) \otimes H^0(\mathcal O_S(\mathring H)) \to H^0(\mathcal O_S( H+\mathring H))$ is surjective. \end{proposition} \begin{proof} Let us consider again the commutative diagram (\ref{CD five}), that is, $$ \begin{CD} {H^0(\mathcal O_S( H)) \otimes H^0(\mathcal O_S(\mathring H))} @>{\mu_S}>> {H^0(\mathcal O_S( H+\mathring H))} \\ @V{\nu_D \otimes \mathring \nu_D}VV @V{\rho_D}VV \\ {H^0(\omega_D) \otimes H^0(\omega_D(b))} @>{\mu_D}>> {H^0(\omega_D^{\otimes 2}(b))}. \\ \end{CD} $$ Counting dimensions we have $\dim \Ker \mu_S \geq 4$, hence it suffices to show that the equality holds. Now we know that $\mu_D$ and $\nu_D \otimes \mathring \nu_{D}$ are surjective. Let $\mathbb K$ be the Kernel of $\mu_D \circ (\nu_D \otimes \mathring \nu_D)$, then the dimension count gives $\dim \mathbb K = 8$ and, of course, we have $\Ker \mu_S \subseteq \mathbb K$. Therefore, to prove $\dim \Ker \mu_S = 4$, it suffices to produce a $4$-dimensional subspace $V \subset \mathbb K$ such that $V \cap \Ker \mu_S = (0)$. To this purpose consider the space of decomposable vectors $V := \langle s \rangle \otimes H^0(\mathcal O_S(\mathring H))$, where $s$ is nonzero and ${ \rm div } (s) = D$. Then we have $(\nu_D \otimes \mathring \nu_D) (V) =(0)$ and hence $V \subset \mathbb K$. On the other hand let $t \in H^0(\mathcal O_S(\mathring H))$, then $\mu_S(s \otimes t) = st$ and this is zero iff $t = 0$. Hence $V \cap \Ker \ \mu_S = (0)$. \end{proof} Now we go back, in genus $5$, to our usual diagram (\ref {CD}) in section 2. This is \begin{equation}
\label{NEW} \begin{CD} {H^0(\mathcal O_S( H)) \otimes H^0(\mathcal O_S(\mathring H))} @>{\mu_S}>> {H^0(\mathcal O_S( H+\mathring H))} \\ @V{\rho_H \otimes \rho_{\mathring H}}VV @V{\rho_C}VV \\ {H^0(\omega_C \otimes \eta) \otimes H^0(\omega_C \otimes \eta^{-1})} @>{\mu_C}>> {H^0(\omega_C^{\otimes 2}).} \\ \end{CD} \end{equation}
\begin{proposition} $\mu_C: H^0(\omega_C \otimes \eta) \otimes H^0(\omega_C \otimes \eta^{-1}) \to H^0(\omega_C^{\otimes 2})$ is surjective. \end{proposition} \begin{proof} We have already shown that $\mu_S$ and $\rho_H \otimes \rho_{\mathring H}$ are surjective. By (\ref{iso}) and its related lemma the same is true for $\rho_C$. Hence the surjectivity of $\mu_C$ follows. \end{proof} Let $\mathbb P^{15}: = \mathbb P( H^0(\mathcal O_S( H))^* \otimes H^0(\mathcal O_S(\mathring H))^*)$ and let $\mathbb P^3 \times \mathbb P^3 := \iota( \vert H \vert^* \times \vert \mathring H \vert^*)$ be the image in $\mathbb P^{15}$ of the Segre embedding $\iota$. Now we study the morphism defined in (\ref{projmodel}) $$ f: S \to \mathbb P^3 \times \mathbb P^3 \subset \mathbb P^{15}, $$ that is, $f = \iota \circ (p \times \mathring p)$. Since the map $\mu_S$ is surjective it follows that
\begin{equation}
(p \times \mathring p)^* H^0(\mathcal O_{\mathbb P^3 \times \mathbb P^3}(1,1)) = H^0(\mathcal O_S(H + \mathring H)). \end{equation} Let $\mathbb P^{11} \subset \mathbb P^{15}$ be the linear embedding of $\mathbb P( {\rm Im \ \mu_S^*})$ defined by $\mu^*_S$; then we have \begin{equation} f(S) \subseteq \mathbb P^{11} \cdot (\mathbb P^3 \times \mathbb P^3) \subset \mathbb P^{15}. \end{equation} In other words $f$ is just the morphism defined by the complete linear system $\vert H + \mathring H \vert$ composed with the linear embedding $\mathbb P^{11} \subset \mathbb P^{15}$.
\begin{proposition} The map $p \times \mathring p$ is an embedding for a general point $[S, \mathcal L, \mathcal E] \in \mathcal F^{\perp}_{5,3}$. \end{proposition} \begin{proof} The linear systems $\vert H \vert$ and $\vert \mathring H \vert$ are non hyperelliptic. Hence $p$, $\mathring p$ are generically injective and the same is true for $f$. In particular $f: S \to f(S)$ is biregular over $f(S) - \Sing f(S)$ and $\Sing f(S)$ is a finite set of rational double points. Let $R \subset S$ be an integral curve contracted by $f$; then $R$ is biregular to $\mathbb P^1$ but it is not a $B_i$. Indeed $R$ is contracted by $p$ and $\ring p$ while $B_i$ is not, as one can directly compute.
Notice also that $C \sim \frac 12(H + \ring H + B_{\mathsf{red}})$. Therefore, since $RC \geq 0$, it follows $$ RC = \frac 12 \sum_{i = 1 \dots 12} RB_i \geq 0$$ with $RB_i \geq 0$. Assume $RB_i = 0$ for each $i$, then $RC=0$. Since the Picard group of $S$ is $\mathbb Z[\mathcal{L}]\oplus \mathbb{M}_3$, $R$ is necessarily contained in $\mathbb{M}_3=\mathbb{Z}[\mathcal{L}]^{\perp}$. By \cite{G} the unique $(-2)$-curves contained in $\mathbb{M}_3$ are the $B_i$'s, which contradicts the fact that $R$ cannot be a $B_i$. Now assume that $RB_i \geq 2$ for some $B_i$ and consider, among the maps $p$ and $\mathring p$, the one not contracting $B_i$, say $p$. Then $p$ embeds $B_i$ as a line. On the other hand $p$ contracts $R \cdot B_i$, which is a divisor of degree $\geq 2$ in $B_i$: a contradiction. This implies $RB_i = 1$ for each $i$. Finally consider two distinct curves as above, say $B_1$ and $B_2$, which are contracted by $p$. We claim that $p(B_1)$ and $p(B_2)$ are distinct points for a general $(S, \mathcal L, \mathcal E)$; granting the claim, since $RB_1 = RB_2 = 1$, the image $p(R)$ is not a point: a contradiction. \par
We now prove that $p(B_1)\neq p(B_2)$ for a general $(S, \mathcal L, \mathcal E)$. If two curves are contracted by a map $p$ to the same point, there is a tree of $(-2)$-curves connecting these curves which is contracted by $p$. Since $p$ is defined by $|H|$, the $(-2)$-curves contracted by $p$ are orthogonal to $H$ in $\mathbb Z[\mathcal{L}]\oplus \mathbb{M}_3$, which is the Picard group of a general $S$. By a direct computation one observes that the negative definite lattice orthogonal to $H$ contains exactly 12 $(-2)$-classes, which are $\pm B_i$ for $i=1,\ldots,6$. Since $B_iB_j=0$ if $i,j\in\{1,\ldots,6\}$ and $i\neq j$, $p(B_1)\neq p(B_2)$.
\end{proof}
At this point the special geometry determined by $\mu_S$ appears, we have \begin{equation} \Ker \mu_S = H^0(\mathcal I(1,1)), \end{equation} where $\mathcal I$ is the ideal sheaf of $\mathbb P^{11} \cdot (\mathbb P^3 \times \mathbb P^3)$ in $\mathbb P^3 \times \mathbb P^3$ and $\dim \Ker \mu_S = 4$. Let \begin{equation} \Sigma := \mathbb P^{11} \cdot (\mathbb P^3 \times \mathbb P^3), \end{equation} then $f(S)$ sits in $\mathbb P^{11}$ as a K3 surface of degree $20$ and $f(S) \subseteq \Sigma$. Now assume that the intersection scheme $\Sigma$ is proper, then $\Sigma$ is a K3 surface of degree $20$ and hence
\begin{equation} f(S) = \Sigma. \end{equation} Postponing its proof, we therefore assume the following claim.
\par {\sc claim} \it For a general triple $(S, \mathcal L, \mathcal E)$ the intersection scheme $\Sigma$ is proper. \rm
\par Then we prove the \it birationality of the Mukai map $ r_{5,3}: \mathcal P^{\perp}_{5,3} \to \mathcal R_{5,3}$. \rm \begin{proof}[Proof of the birationality] Since $\mathcal P^{\perp}_{5,3}$ and $\mathcal R_{5,3}$ are irreducible of the same dimension, it suffices to show that $r_{5,3}$ is birational onto $\mathcal M := r_{5,3}(\mathcal P^{\perp}_{5,3})$. Let $x = [S, \mathcal L, \mathcal E, C]$ be general in $\mathcal P^{\perp}_{5,3}$ and $y = r_{5,3}(x)$, then $y = [C, \eta]$ with $\eta := \mathcal E \otimes \mathcal O_C$. Let $y \in \mathcal M$ be general, we prove that a unique $x = [S, \mathcal L, \mathcal E, C]$ exists so that $[C, \mathcal E \otimes \mathcal O_C]= y$. We already know, for a general $y = [C, \eta] \in \mathcal M$, the surjectivity of the multiplication map $$ \mu_C: H^0(\omega_C \otimes \eta) \otimes H^0(\omega_C \otimes \eta^{-1}) \to H^0(\omega_C^{\otimes 2}), $$ because this condition is open and non empty on $\mathcal M$. Then, applying to $\mu_C$ the same construction applied to $\mu_S$, one obtains \begin{equation} C \subseteq \Sigma := \mathbb P^{11} \cdot (\mathbb P^3 \times \mathbb P^3) \subset \mathbb P^{15}. \end{equation} Let $V = H^0(\omega_C \otimes \eta)^*$ and $\mathring V = H^0(\omega_C \otimes \eta^{-1})^*$, here $C$ is bicanonically embedded in $\mathbb P^{11} := \mathbb P({\rm Im} \ \mu_C)^*$ and the inclusion is the Segre embedding $\mathbb P(V) \times \mathbb P(\mathring V) \subset \mathbb P(V \otimes \mathring V)$. Now the properness of $\Sigma$ is an open condition on $\mathcal M$, not empty under our claim. Then $(\Sigma, \mathcal O_{\Sigma}(1))$ is a polarized K3 surface as above. Since $y = r_{5,3}(x)$ for some $x = [S, \mathcal L, \mathcal E, C]$, the commutative diagram (\ref{NEW}) implies that $[\Sigma, \mathcal O_{\Sigma}(1)]= [S, \mathcal L]$. Therefore $\mu_C$ defines a rational map, sending $y = [C, \eta] \in \mathcal M$ to $x \in \mathcal P^{\perp}_{5,3}$, which is inverse to $r_{5,3}$. 
\end{proof} \begin{proof}[Proof of the claim] Since each component of $\Sigma$ has dimension $\geq 2$, it suffices to construct one $\mathbb D \in \vert \mathcal O_{\mathbb P^3 \times \mathbb P^3}(1,1) \vert$ so that $\mathbb D \cdot \Sigma = \mathbb D \cdot S$. We choose the hyperplane section \begin{equation} \mathbb D = (P \times \mathbb P^3) + (\mathbb P^3 \times \ring P), \end{equation} where $P$ and $\ring P$ are general planes. Then we have $\mathbb D \cdot S = D + \ring D$, where $D \in \vert H \vert$ and $\ring D \in \vert \ring H \vert$ are smooth, non hyperelliptic curves of genus $3$. We show, only for $D$, that \begin{equation} D = \mathbb P^{11} \cdot (P \times \mathbb P^3) \ , \ \ring D = \mathbb P^{11} \cdot (\mathbb P^3 \times \ring P). \end{equation} The map $p: D \to P$ is the canonical map; we fix on $P$ coordinates $(x) = (x_1:x_2:x_3)$. The map $\ring p: D \to \mathbb P^3$ is defined by $\vert \omega_D(b) \vert$, where $\deg b = 2$ and $h^0(\mathcal O_D(b)) = 0$. This implies that $\omega_D(b)$ is very ample, we fix coordinates $(y) = (y_1: \dots: y_4)$ on $\mathbb P^3$. The resolution of $\mathcal O_{\ring p(D)}(1) \cong \omega_D(b)$ is definitely well known, \cite{Ho}. We have the exact sequence \begin{equation} 0 \to \mathcal O_{\mathbb P^3}(-1)^{\oplus 3} \stackrel{A} \to \mathcal O_{\mathbb P^3}^{\oplus 4} \to \omega_D(b) \to 0, \end{equation} $A = (a_{ij})$ being a $4 \times 3$ matrix of linear forms in $(y)$. Then $\ring p(D)$ is a determinantal curve defined by the cubic minors of $A$. In particular $A$ has rank $3$ on $\mathbb P^3 - \ring p(D)$ and, since $\ring p: D \to \ring p(D)$ is biregular and $\ring p(D)$ is smooth, it also follows that $\ring p(D)$ is the set of points $y \in \mathbb P^3$ such that $A$ has exactly rank $2$. This implies that the equations $ a_{i1}x_1 + a_{i2}x_2 + a_{i3}x_3 = 0, \ i = 1 \dots 4,$ define a complete intersection $\hat D \subset P \times \mathbb P^3$ such that $\Supp \hat D = D$. 
Finally one easily computes that $\hat D$ and $D$ have the same degree $10$ with respect to $\mathcal O_{\mathbb P^3 \times \mathbb P^3}(1,1)$. This implies $\hat D = D$ and the claim follows. \end{proof} \subsection{\sf {The case of genus $6$}} \begin{theorem} \label{announce} The Mukai map $r_{6,3}: \mathcal P^{\perp}_{6,3} \to \mathcal R_{6,3}$ has not maximal rank. \end{theorem}
In this paper we only sketch the proof of this theorem and its geometric motivation: see section 7 and also \cite{Ve1}. We postpone some details to further investigation on $\mathcal R_{6,3}$. We conclude that the mentioned analogies are confirmed for $\ell = 3$: \it the Mukai maps \begin{equation} m_{_{11 \pm 1}} \ , \ r_{_{7 \pm 1, 2}} \ , \ r_{_{5 \pm 1, 3}} \end{equation} have not maximal rank, while they are birational for $g = 11, \ 7, \ 5$. \rm These maps are not dominant for $g = 10, \ 6, \ 4$ and they have positive dimensional fibre for $g = 12, \ 8, \ 6$.
\section{Views on Fano threefolds with sections of level $2$ or $3$} We close this paper discussing some families of Fano threefolds $\overline X \subset \mathbb P^{g+1}$, whose general hyperplane sections are singular K3 surfaces $\overline S$ of the considered types. Then $\overline S$ is endowed with a degree $\ell$ cyclic cover $\pi: \tilde S \to \overline S$ with branch locus $\Sing \overline S$. Moreover its minimal desingularization $\nu: S \to \overline S$ fits in a standard level $\ell$ K3 surface $(S, \mathcal L, \mathcal E)$, so that $\mathcal L \cong \nu^* \mathcal O_{\overline S}(1)$ and $\mathcal E$ induces $\pi: \tilde S \to \overline S$. We have $\ell = 2, 3$. \par For some families a natural cyclic cover $\pi_{\overline X}: \tilde X \to \overline X$ is visible, with branch locus the curve $\Sing \overline X$. However we do not address it here.
The existence of these families implies that $r_{g, \ell}$ has not maximal rank. They correspond to the peculiar values \begin{equation} (g, \ell) = (6,3), (6,2), (8,2), (4,3). \end{equation} For $\ell = 2$ these families are known, \cite{FV, KLV1, Ve}. The case $(6,2)$ is revisited here with emphasis on a singular quadratic complex of the Grassmannian $G(2,5)$. This implies that $r_{6, 2}$ is not of maximal rank. For $(6,3)$ we introduce a family of Gushel - Mukai threefolds singular along a rational normal sextic curve. This is responsible for the failure of the maximal rank of $r_{6,3}$. The case $(8,2)$ is similar and not treated here, \cite{Ve}. Finally we point out the plausible relation of the case $(4,3)$ to the $G_2$-variety. \subsection{ \sf A singular Gushel - Mukai threefold: $\ell = 3$ and $g = 6$} We sketch the geometric construction implying theorem (\ref{announce}). Let $g = 6$ and $\ell = 3$, keeping our notation we consider $p \times \ring p: S \to \mathbb P^4 \times \mathbb P^4$. Then $p$ is defined by the linear system \begin{equation} \vert H \vert = \vert C - \frac 13 \sum_{i = 1 \dots 6} (B_i+ 2B_i') \vert, \end{equation} where $B_i+ B_i'$, are the connected components of $B_{\mathsf {red}}$. Let $x_0 := [S, \mathcal L, \mathcal E, C] \in \mathcal P^{\perp}_{6,3}$ be a general point, then a standard analysis shows that $p: S \to p(S)$ is the contraction of $\sum B_i$ to six points and that $p(B_i')$ is a line. Moreover we have \begin{equation} p(S) = F_0 \cap Q, \end{equation} where $F_0$ is a cubic and $Q$ a \it smooth \rm quadric. Notice that $p \vert C$ is the embedding defined by $\omega_C \otimes \eta^{-1}$, since $CB_i = 0$ then $p(C) \cap \Sing p(S) = \emptyset$. Let $C' := p(C)$ and let
\begin{equation} 0 \to \mathcal I_{p(S)}(3) \to \mathcal I_{p(C)}(3) \to \mathcal I_{C' \vert p(S)}(3) \to 0 \end{equation} be the standard exact sequence of ideal sheaves of $Q$, we notice the isomorphisms $\mathcal I_{p(S)}(3) \cong \mathcal O_Q$ and $p_*: H^0(\mathcal O_S(3H-C)) \to H^0(\mathcal I_{p(C) \vert p(S)}(3))$. This implies that \begin{equation} 0 \to H^0( \mathcal O_Q) \to H^0(\mathcal I_{C'}(3)) \to H^0(\mathcal O_S(3H - C)) \to 0 \end{equation} is its associated long exact sequence. It easily follows that $C'$ is projectively normal. A second standard step is the remark that $\mathcal O_S(3H-C)$ is a genus $3$ polarization of $S$. Now let $M \in \vert 3H - C \vert$, then $p_*(C+M) \in \vert \mathcal I_{p(C) \vert p(S)}(3) \vert$ and it is cut on $p(S)$ by a cubic hypersurface. Therefore we have in $Q$ the complete intersection scheme \begin{equation} p_*(C + M) = F_0 \cap F_{\infty} \cap Q, \end{equation} where $F_0, F_{\infty}$ are cubics. Let $S'_0 = F_0 \cdot Q$ and $S'_{\infty} = F_{\infty} \cdot Q$. We consider the pencil \begin{equation} P_M = \lbrace S'_t, \ t \in \mathbb P^1 \rbrace, \end{equation} of cubic sections of $Q$ generated by $S'_0$ and $S'_{\infty}$. We can assume $p(S) = S'_0$, notice that a general $S'_t$ is a possibly singular $K3$ surface, smooth along $C'$. Let $\sigma_t: S_t \to S'_t$ be its minimal desingularization and $C_t := \sigma^*_t C'$, then $S_t$ is endowed with the line bundles \begin{equation} \mathcal H_t := \sigma^*_t \mathcal O_Q(1), \ \mathcal L_t := \mathcal O_{S_t}(C_t), \ \mathcal E_t := \mathcal L_t \otimes \mathcal H^{-1}_t. \end{equation} For $t = 0$ the fourtuple $(S_t, \mathcal L_t, \mathcal E_t, C_t)$ defines the point $x_0 = [S, \mathcal L, \mathcal E, C]$ of $\mathcal P^{\perp}_{6,3}$. For $t \neq 0$ we have constantly $C_t = C$. 
Now consider the family of fourtuples \begin{equation} \lbrace (S_t, \mathcal L_t, \mathcal E_t, C_t), \ t \in \mathbb P^1 \rbrace, \end{equation} then the assignment $t \to [S_t, \mathcal L_t] \in \mathcal F_6$ defines a non constant rational map $m: \mathbb P^1 \to \mathcal F_6$. Assume $(S_t, \mathcal L_t, \mathcal E_t)$ is a K3 surface of level $3$ for a general $t$. Then $m$ lifts to a map $\tilde m: \mathbb P^1 \to \mathcal P^{\perp}_{6,3}$, sending $t$ to $[S_t, \mathcal L_t, \mathcal E_t, C_t]$, and the next statement immediately follows. \begin{proposition} If $(S_t, \mathcal L_t, \mathcal E_t)$ is a K3 surface of level $3$ for a general $t$, the curve $\tilde m(\mathbb P^1)$ is in the fibre over the point $[C, \eta]$ of the Mukai map $r_{6,3}$, which is therefore not of maximal rank. \end{proposition} The assumption mentioned in the statement depends on the choice of the element $M$ in $ \vert 3H - C \vert$ and in general it is not satisfied. However the assumption is satisfied choosing in $\vert M \vert$ the very special element \begin{equation} M_0 := 2A + \sum_{i = 1 \dots 6} B_i, \end{equation} where $A$ is the \it unique \rm element of $\vert C - \sum_{i = 1 \dots 6} (B_i+B_i') \vert$. The curve $A$ is biregular to $\mathbb P^1$ and $p \vert A$ embeds it as a rational normal quartic curve.
Let $A' = p(A)$, then the base scheme of $P_{M_0}$ is a non reduced, complete intersection curve and its $1$-cycle is
\begin{equation}
p_*(M_0 + C) = 2A' + C'.
\end{equation}
In other words the surfaces $S'_t$ intersect along a contact curve $A'$ of multiplicity two and along $C'$. It turns out that a general \it $\Sing S'_t$ consists of six nodes \rm moving in $A'$ and each node belongs to a line in $S'_t$. This can be shown using the special property that $\eta \cong \omega_{C'}(-1) \in \Pic C$ is of $3$-torsion. Omitting further details of this construction, let us just say that $M_0$ defines a pencil of level $3$ and genus $6$ K3 surfaces as required. \par
To close geometrically this sketch let $\mathsf A$ be the non reduced component, supported on $A'$, of the base curve of $P_{M_0}$ and $\mathcal I_{\mathsf A \vert Q}$ its ideal sheaf. Consider the rational map
\begin{equation}
\phi: Q \to \mathbb P^7
\end{equation}
defined by the linear system $\vert \mathcal I_{\mathsf A \vert Q}(3) \vert$. Let us notice the following property.
\begin{proposition} The map $\phi$ is birational onto its image $W$, which is a singular Gushel - Mukai threefold whose general hyperplane sections are singular $K3$ surfaces $\overline S$ as above. \end{proposition} Therefore $W$ is a complete intersection of type $(1,1,2)$ in the Grassmannian $G(2,5)$. We notice that $\Sing W$ is a rational normal sextic curve. This completes our sketch.
\subsection{{ \sf The tangential quadratic complex of $\mathbb P^4$: $\ell = 2$ and $g = 6$}} Let $\mathbb G_n$ be the Pl\"ucker embedding of the Grassmannian of lines of $\mathbb P^n$, a quadratic complex is just a quadratic section of $\mathbb G_n$. Let $Q \subset \mathbb P^n$ be a quadric, then the family $\mathbb T$ of tangent lines to $Q$ is a quadratic complex, named sometimes the \it tangential quadratic complex. \rm We assume $Q$ is smooth, then $\mathbb T$ is a Fano variety. Notice that $\Sing \mathbb T$ is the Hilbert scheme of lines of $Q$, of codimension and multiplicity $2$ in $\mathbb T$. \par Now we assume $n$ is even. Then $\mathbb T$ has a unique nontrivial quasi \'etale 2:1 cover \begin{equation} \pi: \tilde {\mathbb T} \to \mathbb T, \end{equation} whose branch locus is $\Sing \mathbb T$. Let us describe the known map $\pi$ in the case $n = 4$, since it is linked to the Mukai map $r_{6,2}: \mathcal P^{\perp}_{6,2} \to \mathcal R_6$ and its behavior. This is treated in \cite{FV}. For $n = 4$ the Hilbert scheme of lines of $Q$ is the $2$-Veronese embedding of $\mathbb P^3$, say \begin{equation} V \subset \mathbb G_4 \subset \mathbb P^9. \end{equation} Let $t \in \mathbb T$, consider the pencil $\lbrace H_p, p \in t \rbrace$, where $H_p$ is the polar hyperplane to $Q$ at $p$. Its base locus is a plane $P_t$ and $Q_t := P_t \cdot Q$ is a conic. Since $t$ is tangent to $Q$, a standard exercise shows that $\Sing Q_t = t \cap Q$. This defines a smooth, integral correspondence \begin{equation} \tilde {\mathbb T} := \lbrace (t,r) \in \mathbb T \times V \ \vert \ r \subset Q_t \rbrace. \end{equation} Notice that its projection onto $\mathbb T$ is a quasi \'etale $2:1$ cover branched on $V$, say \begin{equation} \pi: \tilde {\mathbb T} \to \mathbb T. \end{equation} Indeed the fibre $\zeta_t := \pi^*(t)$ is the Hilbert scheme of lines of $Q_t$ and is finite of length $2$. 
Then $\zeta_t$ is smooth iff $\rank Q_t = 2$ iff $t \notin V$ and $\zeta_t$ has multiplicity $2$ iff $\rank Q_t = 1$ iff $t \in V$. \par Now it is well known that a general $2$-dimensional linear section $ \overline S = \mathbb T \cap \mathbb P^6$ is the model defined by $\vert \mathcal L \vert$ of $S$, where $[S, \mathcal L, \mathcal E] \in \mathcal F^{\perp}_6$ is general. In particular $\Sing \overline S = V \cap \mathbb P^6 $ is an even set of $8$ nodes, defining $\pi \vert \tilde S$ with $\tilde S = \pi^{-1}(\overline S)$, cfr. \cite{FV, KLV1, KLV2}. For $\ell = 2$ and $[S, \mathcal L, \mathcal E] \in \mathcal F^{\perp}_g$, the surface $S$, or its model $\overline S$, is known as a standard Nikulin surface of genus $g$. Therefore we can say that a general $3$-dimensional linear section of $\mathbb T$ is \it a Fano threefold whose hyperplane sections are standard Nikulin surfaces of genus $6$. \rm Let us denote such a section by \begin{equation} X = \mathbb T \cap \mathbb P^7, \end{equation} notice that $\Sing X$ is a curvilinear section of $V$, hence an elliptic curve of degree $8$. \par Finally let $\mathcal C$ and $\overline {\mathcal S}$ respectively be the family of general curvilinear sections $C$ and that of general $2$-dimensional linear sections $\overline S$ of $\mathbb T$. Consider the family of pairs \begin{equation} \mathcal P := \lbrace(C, \overline S) \in \mathcal C \times \overline {\mathcal S} \ \vert \ C \subset \overline S \rbrace.
\end{equation} Let $(C, \overline S) \in \mathcal P$ then $C$ is a canonical curve and $ C \in \vert \mathcal O_{\overline S}(1) \vert$. Let $\nu: S \to \overline S$ be the desingularization then $\nu^*C \in \vert \mathcal L \vert $ and $\eta := \mathcal E \otimes \mathcal O_{\nu^*C}$ defines $\pi \vert \tilde C$, where $\tilde C = \pi^{-1}(C)$. Then the assignment of $(C, \overline S)$ to $[S, \mathcal L, \mathcal E, \nu^*C]$ defines a \it dominant \rm rational map
$$ m: \mathcal P \to \mathcal P^{\perp}.$$ We already know that the Mukai map $r_{6,2}$ fails to be of maximal rank. However we can now see this fact from a geometric perspective: the existence of the Fano variety $\mathbb T$ and its quasi finite $2:1$ cover $\pi$. Indeed this implies that $C \in \mathcal C$ is contained in a higher dimensional family of sections $\overline S$ of $\mathbb T$, so that $C$ cannot have general moduli. \par More precisely the parameter space $\mathcal C$ is open in the Grassmannian $G(5, 9)$, hence $\dim \mathcal C = 24$. Moreover $\Aut Q \subset \Aut \mathbb P^4$ has dimension $10$ and acts faithfully on $\mathcal C$. Then we have $\dim \mathcal C \dslash \Aut Q = 14 < \dim \mathcal R_6 = 15$. Hence $r_{6,2}$ cannot be dominant. \begin{remark} \rm Let $C \in \mathcal C$ then $\tilde C = \pi^{-1}(C)$ is a smooth, integral curve of genus $11$. We have $\tilde C \subset \tilde S \subset \tilde X \subset \mathbb P^{12}$, where $\tilde X = \pi^{-1}(X)$ is a non prime Fano threefold of genus $11$. We just mention that $\tilde C$ is the base locus of a pencil of hyperplane sections of $\tilde X$ and that the birational Mukai map $m_{11}: \mathcal P_{11} \to \mathcal M_{11}$ is not invertible at $[\tilde C]$. \end{remark}
\subsection{ { \sf The $\mathsf G_2$-variety: $\ell = 3$ and $g = 4$} }
A \it geometric interpretation \rm seems plausible and is deferred to possible future work.
}
\end{document} | arXiv | {
"id": "2108.12215.tex",
"language_detection_score": 0.7238230109214783,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\renewcommand{\thefootnote}{} \footnotetext{Research partially supported by Ministerio de Econom\'ia y Competitividad Grant No: MTM2016-80313-P and the `Maria de Maeztu'' Excellence Unit IMAG, reference CEX2020-001105-M, funded by MCIN/AEI/10.13039/501100011033}
\title{Calabi-Bernstein type results for critical points of a weighted area functional in $\mathbb{R}^3$ and $\mathbb{L}^3$} \author{$\text{A. Mart\'inez}^{1}\text{ and A.L. Mart\'inez-Trivi\~no}^{2}$}
\date{} \maketitle { \noindent $^1$Departamento de Geometr\'\i a y Topolog\'\i a, Universidad de Granada, E-18071 Granada, Spain\\ \\ e-mails: $\text{amartine@ugr.es}^{1}$ and $\text{aluismartinez@ugr.es}^{2}$} \begin{abstract} In this paper we prove some Calabi-Bernstein type and non-existence results concerning complete $[\varphi,\vec{e}_3]$-minimal surfaces in $\mathbb{R}^3$ whose Gauss maps lie on compacts subsets of open hemispheres of $\mathbb{S}^2$. We also give a general non-existence result for complete spacelike $[\varphi,\vec{e}_3]$-maximal surfaces in $\mathbb{L}^3$ and, in particular, we obtain a Calabi-Bernstein type result when $\dot{\varphi}$ is bounded. \end{abstract}
\noindent 2020 {\it Mathematics Subject Classification}: {53C42, 35J60 }
\noindent {\it Keywords: } Calabi-Bernstein, $[\varphi,\vec{e}_{3}]$-minimal surface, spacelike $[\varphi,\vec{e}_{3}]$-maximal surface, Bochner formula, minimal surfaces, maximum principles. \everymath={\displaystyle} \section{Introduction.} At the beginning of the 20th-century, Bernstein \cite{B} proved that the only entire minimal graph $\Sigma$ in $\mathbb{R}^{3}$ is the hyperplane. De Giordi \cite{G} extended this result to $\mathbb{R}^{4}$ and Almgren \cite{A} to $\mathbb{R}^{5}$. In fact, the Bernstein's Theorem is true until $\mathbb{R}^{7}$, see for instance \cite{Simons} and Bombieri, De Giorgi and Giusti \cite{BGG} gave a counterexample for $\mathbb{R}^{n}$ with $n\geq 8$. However, under the additional assumption that the norm of the gradient of our graph is uniformly bounded, Moser \cite{Moser} proved that any entire minimal graph must be a hyperplane for any dimension. \
In 1959, a generalized version of the Bernstein problem was given by Osserman \cite{Oss}, who proved the Nirenberg's conjecture, namely, he showed that any complete minimal surface in $\mathbb{R}^{3}$ whose Gauss map omits a neighborhood of some point at the sphere, must be a plane. This result was generalized to $\mathbb{R}^{n}$ by Chern \cite{Chern}. Few years later, Fujimoto \cite{Fuj} showed that the plane is the only complete orientable minimal in $\mathbb{R}^{3}$ whose Gauss map omits at least 5 points of the sphere. \
This kind of result has also been proved for other families of surfaces in different ambient spaces.
In 1970 and for Lorentzian ambient spaces, Calabi \cite{CA} proved that the only entire maximal graph in the 3-dimensional Lorentz-Minkowski space $\mathbb{L}^{3}$ is a plane. Aledo, Rubio and Romero \cite{ARR} obtained that the only complete maximal surface in $\mathbb{L}^{3}$ is a spacelike plane.
In 2003, Al\'ias and Mira \cite{AM} gave a generalization to higher dimension and in 2009, Albujer and Al\'ias \cite{AA} proved the Calabi-Bernstein result for maximal surfaces in Lorentzian product spaces.
\
The main goal of this work is to prove Calabi-Bernstein's type results for critical points of the weighted area functional \begin{equation} \label{area} \mathcal{A}^{\varphi}(\Sigma)=\int_{\Sigma}e^{\varphi}\, d \Sigma \end{equation} on isometric immersions of Riemannian surfaces $\Sigma$ in a domain $\mathfrak{D}^3$ of $ \mathbb{R}^3$ (or $\mathbb{L}^3$) when $\varphi$ is the restriction on $\Sigma$ of a smooth function depending only on the coordinate of $\mathfrak{D}^3$ in the direction of $\vec{e}_3=(0,0,1)$ and where $d\Sigma$ denotes the volume element induced on $\Sigma$ by the Euclidean (or Lorentzian) metric $\langle\cdot,\cdot\rangle:= dx^2+dy^2+dz^2$ ($\langle\cdot,\cdot\rangle_{\mathbb{L}^3}:= dx^2+dy^2-dz^2$).
The Euler-Lagrange equation of \eqref{area} is given in terms of the mean curvature vector $\textbf{H}$ of $\Sigma$ as follows \begin{equation} \label{meancurvature} \textbf{H}=(\overline{\nabla}\varphi)^{\perp} = \dot{\varphi} \ \vec{e}_3^{\,\perp}, \end{equation} here $\perp$ denotes the projection on the normal bundle, and $\overline{\nabla}$ stands the usual gradient operator in $\mathbb{R}^{3}$ (or $\mathbb{L}^3$). \\
Any critical point of \eqref{area} in $\mathbb{R}^{3}$ (or $\mathbb{L}^3$) will be called {\sl $[\varphi, \vec{e}_3]$-minimal } (or {\sl spacelike $[\varphi, \vec{e}_3]$-maximal}) surface. Interesting examples of these families of surfaces are: \begin{itemize} \item the classical minimal surfaces in $\mathbb{R}^{3}$ and the the spacelike maximal surfaces in $\mathbb{L}^{3}$ when $\varphi$ is a constant. \item the translating solitons: if $\varphi$ is just the height function, $\varphi(p)=\langle p,\vec{e}_3\rangle$, that is, surfaces such that $$ t \rightarrow \Sigma + t \vec{e}_3 $$ is a mean curvature flow, i.e. the normal component of the velocity at each point is equal to the mean curvature at that point. \item the singular $\alpha$-minimal (spacelike $\alpha$-maximal) surfaces: if $\varphi(p) =\alpha \log \langle p,\vec{e}_3\rangle$, $\alpha$=const. For surfaces in $\mathbb{R}^3$ and when $\alpha=1$, $\Sigma$ describes the shape of a ``hanging roof'', i.e. a heavy surface in a gravitational field that, according to the architect F. Otto \cite[p. 290]{Otto} are of importance for the construction of perfect domes. \end{itemize} This kind of surfaces has been widely studied specially from the viewpoint of calculus of variations. Classical results about the Euler equation and the existence and regularity for the solutions of the Plateau problem for \eqref{area} can be found in \cite{BHT, H1, H2, HK, T}. But the situation have changed in very recent years and nowdays very and interesting geometric properties of these surfaces are known. We mention, the convexity result of Spruck-Xiao \cite{SX} for complete mean-convex translating solitons which solved a Wang conjeture \cite{Wa}, they proved that the only entire vertical graph translating soliton is the rotationally symmetric Bowl soliton. Hoffman, Ilmanen, Mart\'in y White \cite{HIMW} have classified all complete vertical graphs translating soliton in $\mathbb{R}^{3}$.
B\"ome, Hildebrant and Tausch \cite{BHT} have characterized the 2-dimensional singular $\alpha$-catenary and Dierkes \cite{D} together with L\'opez \cite{RL} have classified the singular $\alpha$-catenaries and the rotationally symmetric singular $\alpha$-minimal examples. \
Concerning Bernstein's problem, Bao-Shi \cite{BS} proved that if $\Sigma$ is a convex complete translating soliton in $\mathbb{R}^3$ whose Gauss map lies in a closed ball of $\mathbb{S}^{2}$ of radius less than $\pi/2$, then $\Sigma$ must be a vertical plane; Kunikawa \cite{K} generalized this result to arbitrary codimension and Qiu \cite{HQ} has also given a Bernstein type result for vertical translating soliton graphs in $\mathbb{R}^{3}$.
Having good control over the geometry of a $[\varphi,\vec{e}_{3}]$-minimal surface in $\mathbb{R}^3$ makes it necessary to impose some analytical constraints on the function $\varphi$, see for instance \cite{MM,MM1,MM2,MMJ,MJ}.
Our main results in this work are the following Calabi-Bernstein type classification results: \setcounter{theorem}{0} \renewcommand{\arabic{theorem}}{\Alph{theorem}} \begin{theorem} \label{Main1} Consider $\varphi:\mathbb{R} \rightarrow \mathbb{R}$ satisfying \eqref{hy1} and let $\Sigma$ be a complete $[\varphi,\vec{e}_{3}]$-minimal surface in $\mathbb{R}^{3}$ with bounded mean curvature $H$ and Gauss curvature $K\geq 0$. If $\dot{\varphi}$ has the following asymptotic behaviour \begin{align} & \langle p, \vec{e}_3\rangle \, \vert\dot{\varphi} \vert\leq C \vert p\vert, \quad p\rightarrow \infty, \end{align} and the Gauss map $N:\Sigma\rightarrow\mathbb{S}^{2}$ of $\Sigma$ satisfies \begin{equation} \langle N,y_{0}\rangle\leq -2\varepsilon, \qquad \text{on $\Sigma$},\label{N1} \end{equation} for some constants $C>0$, $\varepsilon>0$, where $\vert \cdot \vert:\Sigma\rightarrow \mathbb{R}$ is the extrinsic distance with respect to the origin and $y_{0}\in\mathbb{S}^{2}$, $\langle y_0,\vec{e}_{3}\rangle =0$, then $\Sigma$ must be a plane. \end{theorem} \begin{theorem} \label{Main2} Consider $\varphi:\mathbb{R} \rightarrow \mathbb{R}$ satisfying \eqref{hy1} and let
$\Sigma$ be a properly embedded $[\varphi,\vec{e}_{3}]$-minimal surface in $\mathbb{R}^{3}$ with bounded mean curvature $H$ and Gauss curvature $K\geq 0$. If $\dot{\varphi}$ has the following asymptotic behaviour \begin{align} & \vert\dot{\varphi} \vert\leq C \vert p\vert \log\vert p\vert, \quad p\rightarrow \infty,\label{hy3} \end{align} and the Gauss map $N$ of $\Sigma$ satisfies \eqref{N1}, then $\Sigma$ must be a plane. \end{theorem} \begin{theorem}\label{Main3} Consider $\varphi:]a,b[ \rightarrow \mathbb{R}$ satisfying \eqref{hy11} and \begin{align} & \vert\dot{\varphi} \vert\leq C \vert p\vert, \quad p\rightarrow \infty.\label{hy4} \end{align} Then, there is no properly embedded $[\varphi,\vec{e}_{3}]$-minimal surfaces $\Sigma$ in $\mathbb{R}^{2}\times ]a,b[$ whose Gauss map $N$ verifies \begin{equation} \langle N,\vec{e}_3\rangle \geq \epsilon>0 \label{N2} \end{equation} and such that either $\inf_\Sigma \dot{\varphi}^2>0$ or $\inf_\Sigma \ddot{\varphi}>0$. \end{theorem} Let us make some comments about these theorems. Condition \eqref{N1} means that the image of the Gauss map lies onto a compact region in the open hemisphere bounded by the semicircle determined by a vertical plane through the origen. In particular, from the Ekeland's principle \cite[Proposition 2.2]{AMR}, the height function $\mu(p)= \langle p, \vec{e}_{3}\rangle$ satisfies that $$\mu_\star = \inf_\Sigma \mu = -\infty, \qquad \mu^\star = \sup_\Sigma \mu = +\infty,$$ otherwise, there exists a sequence of points $\{q_n\}$ in $\Sigma$ such that $N(q_n)\rightarrow \vec{e}_3$. And so, under the assumption \eqref{N1}, the function $\varphi$ must be globally defined on $\mathbb{R}$.
We should point out too, see \cite{HIMW,MM}, that tilted grim reapers are examples of flat $[\varphi,\vec{e}_{3}]$-minimal surfaces in $\mathbb{R}^{3}$ whose Gauss map takes values onto a non-compact semicircle on the open hemisphere bounded by a vertical plane through the origin. Moreover, about the condition \eqref{N2}, it is remarkable, see \cite{MM}, that the Gauss maps of $[\varphi,\vec{e}_{3}]$-catenary cylinders and $[\varphi,\vec{e}_{3}]$-bowls takes their values on an open hemisphere bounded by the horizontal plane through the origin.
The proofs of Theorems \ref{Main1}, \ref{Main2} and \ref{Main3} are inspired by ideas used in \cite{BS,K} for translating solitons and depend on a blend of ideas from surface theory, elliptic theory and maximum principles.
In the ambient $\mathbb{L}^{3}$ we prove: \begin{theorem} \label{Main4} Consider $\varphi:]a,b[ \rightarrow \mathbb{R}$, a concave function, that is, $\ddot{\varphi}\leq 0$ on $]a,b[$. Then, there is no complete spacelike $[\varphi,\vec{e}_{3}]$-maximal surfaces $\Sigma$ in $\mathbb{L}^{3}$ satisfying that \begin{equation}\label{loeq} \text{either}\quad \inf_\Sigma \dot{\varphi}^2 >0, \quad \text{or}\quad \sup_\Sigma \ddot{\varphi}<0. \end{equation} \end{theorem} \setcounter{theorem}{0} \renewcommand{\arabic{theorem}}{\arabic{theorem}} \numberwithin{theorem}{section}
The paper is organized as follows: In Section \ref{s2} we recall some facts about the weak maximum principle and stochastic completeness. Then, in Section \ref{s3} we consider the Euclidean case
A Riemannian manifold $(\Sigma,\langle\cdot,\cdot\rangle)$ is said to be \textit{stochastically complete} if the \textit{weak maximum principle} hold for the Laplacian operator $\Delta$, that is, if for any function $u\in C^{2}(\Sigma)$ with $u^{*}=\sup_{\Sigma}u<+\infty$, there exists a sequence of points $\{p_{n}\}_{n\in\mathbb{N}}\subset\Sigma$ satisfying $$u(p_{n})>u^{*}-\frac{1}{n}, \ \ \text{and} \ \ \Delta u(p_{n})<\frac{1}{n}, \text{ for any } n\in\mathbb{N}.$$ } \end{definition} Notice that, in the above definition the Riemannian manifold $\Sigma$ is not assumed to be complete (or geodesically complete). From \cite[Theorem 2.8]{AMR}, we have that the following statements are equivalent. \begin{itemize} \item $\Sigma$ is stochastically complete. \item For every $\lambda>0$, the only nonnegative bounded $C^{2}$ solution of $\Delta u\geq\lambda u$ on $\Sigma$ is the constant zero. \item For every $\lambda>0$, the only nonnegative bounded $C^{2}$ solution of $\Delta u=\lambda u$ on $\Sigma$ is the constant zero. \end{itemize} In this paper we shall apply the weak maximum principle for the drift laplacian operator $$\Delta^{f}(\cdot)=\Delta(\cdot)+\langle\nabla f,\nabla (\cdot)\rangle,$$ for some $f\in C^{2}(\Sigma)$, where $\nabla$ denote the gradient operator in $\Sigma$. In this sense, we may give the following definition, \begin{definition}{\rm Let $(\Sigma,\langle\cdot,\cdot\rangle)$ be a Riemannian manifold and consider $f\in C^{2}(\Sigma)$. We say that $\Sigma$ is $f$-\textit{stochastically complete} if the weak maximum principle holds for $\Delta^{f}$, that is, if for any function $u\in C^{2}(\Sigma)$ with $u^{*}=\sup_{\Sigma}u<+\infty$, there exists a sequence of points $\{p_{n}\}_{n\in\mathbb{N}}\subset\Sigma$ satisfying $$u(p_{n})>u^{*}-\frac{1}{n}, \ \ \text{and} \ \ \Delta^{f} u(p_{n})<\frac{1}{n}, \text{ for any } n\in\mathbb{N}.$$} \end{definition} As above, see \cite[Theorem 2.14]{AMR}, the following characterization holds. 
\begin{proposition} The following statements are equivalent \begin{itemize} \item $\Sigma$ is $f$-stochastically complete. \item For every $\lambda>0$, the only nonnegative bounded $C^{2}$ solution of $\Delta^{f} u\geq\lambda u$ on $\Sigma$ is the constant zero. \item For every $\lambda>0$, the only nonnegative bounded $C^{2}$ solution of $\Delta^{f} u=\lambda u$ on $\Sigma$ is the constant zero. \end{itemize} \end{proposition} Notice that, it is difficult to check when a Riemannian manifold verifies the $f$-weak maximum principle. To end this part we give a criteria to verifying this property when $u^{*}>0$. From Theorem \cite[Theorem 2.14]{AMR}, we can adapt the arguments of the \cite[Theorem 2.9]{AMR} to prove the following result, \begin{theorem} \label{criteria} Let $\Sigma$ be a Riemannian manifold and $f\in C^{2}(\Sigma)$. If there exists a function $\gamma\in C^{\infty}(\Sigma)$ such that $\gamma(p)\rightarrow +\infty$ as $p\rightarrow \infty$ and $\Delta^{f}\gamma\leq\, \lambda\gamma$ outside a compact set, for some $\lambda>0$, then the weak maximum principle holds in $\Sigma$ for any function $u\in C^{2}(\Sigma)$ with $0<u^{*}<+\infty$. \end{theorem} In particular, if the Omori-Yau maximum principle holds in $\Sigma$ for $\Delta^{f}$, see \cite[Theorem 3.2]{AMR}, then $\Sigma$ is $f$-stochastically complete. \section{The Euclidean case}\label{s3} Let $\Sigma$ be the $[\varphi,\vec{e}_{3}]$-minimal immersion in $\mathbb{R}^{3}$ whose Gauss $N:\Sigma\rightarrow\mathbb{S}^{2}$ verifies \eqref{N1}. We shall denote by $\mathcal{S}$ the scalar second fundamental form given by $\mathcal{S}(u,v)=-\langle \textbf{S} u,v\rangle$ for any vector fields $u,v\in T\Sigma$, where $\textbf{S}$ is shape operator. \
Consider the height function $\mu:\Sigma\rightarrow\mathbb{R}$, $\mu(p)=\langle p,\vec{e}_{3}\rangle$ and the angle function $\eta:\Sigma\rightarrow\mathbb{R}$, $\eta(p)=\langle N(p),\vec{e}_{3}\rangle$. Then, from the Simons-type formula, we have, see \cite[Theorem 3]{CMZ}), \begin{equation} \label{b3} \Delta^{\varphi}\vert\mathcal{S}\vert^{2}=2\vert\nabla\mathcal{S}\vert^{2}-2\vert\mathcal{S}\vert^{4}+2\ddot{\varphi}\eta^{2}\vert\mathcal{S}\vert^{2}-4\ddot{\varphi}\vert\textbf{S}\nabla\mu\vert^{2}-2\dddot{\varphi}\eta\langle\textbf{S}\nabla\mu,\nabla\mu\rangle. \end{equation} From \eqref{N1}, there exists a constant $b>1$ such that the function $\phi:\Sigma\rightarrow\mathbb{R}$ given by $\phi(p)=1-\langle N(p),y_{0}\rangle$ satisfies that $\phi-b\geq\varepsilon$. Moreover, by a straightforward computation, we have that $\nabla\phi=\textbf{S}y_{0}^{\top}$, where $(\ .\ )^{\top}$ denotes the projection on the tangent bundle, and \begin{align} \label{c1} \Delta^{\varphi}\phi=(\vert\mathcal{S}\vert^{2}-\ddot{\varphi}\eta^{2})(1-\phi). \end{align} Let $\psi:\Sigma\rightarrow\mathbb{R}$ be the function given by $$\psi=\frac{\vert\mathcal{S}\vert^{2}}{(b-\phi)^{2}}.$$ From equations \eqref{b3} and \eqref{c1}, we obtain that \begin{align} \label{c2} \Delta^{\varphi}\psi&=\frac{2\vert\nabla\mathcal{S}\vert^{2}}{(b-\phi)^{2}}-\frac{2\vert\mathcal{S}\vert^{4}}{(b-\phi)^{2}}+\frac{2\ddot{\varphi}\eta^2\vert\mathcal{S}\vert^{2}}{(b-\phi)^{2}}-\frac{4\ddot{\varphi}\vert\textbf{S}\nabla\mu\vert^{2}}{(b-\phi)^2}-\frac{2\dddot{\varphi}\eta\langle\textbf{S}\nabla\mu,\nabla\mu\rangle}{(b-\phi)^2} \\ &+\frac{2\vert\mathcal{S}\vert^{4}(1-\phi)}{(b-\phi)^{3}}-\frac{2\ddot{\varphi}\eta^2\vert\mathcal{S}\vert^2(1-\phi)}{(b-\phi)^3}+\frac{6\vert\mathcal{S}\vert^{2}\vert\nabla\phi\vert^{2}}{(b-\phi)^4}+\frac{4\langle\nabla\vert\mathcal{S}\vert^{2},\nabla\phi\rangle}{(b-\phi)^3}\nonumber. 
\end{align} But, \begin{align} &\frac{\langle\nabla\phi,\nabla\psi\rangle}{b-\phi}=\frac{\langle\nabla\phi,\nabla\vert\mathcal{S}\vert^{2}\rangle}{(b-\phi)^{3}}+\frac{2\vert\mathcal{S}\vert^{2}\vert\nabla\phi\vert^{2}}{(b-\phi)^{4}}, \label{c3}\\ &\frac{2\vert\nabla \mathcal{S}\vert^{2}}{(b-\phi)^{2}}+\frac{2\vert\nabla\phi\vert^{2}\vert\mathcal{S}\vert^{2}}{(b-\phi)^{4}}\geq\frac{4\vert\nabla\phi\vert \vert\mathcal{S}\vert \vert \nabla \mathcal{S}\vert}{(b-\phi)^{3}}. \label{c4} , \end{align} and so, from \eqref{c2},\eqref{c3} and \eqref{c4}, we have the following inequality \begin{align} \label{c5} \Delta^{\varphi}\psi\geq& \frac{2(1-\phi)\vert\mathcal{S}\vert^{4}}{(b-\phi)^{3}}-\frac{2\vert\mathcal{S}\vert^{4}}{(b-\phi)^{2}}+\frac{2\langle\nabla\phi,\nabla\psi\rangle}{b-\phi}+ \frac{2\ddot{\varphi}\eta^2\vert\mathcal{S}\vert^{2}}{(b-\phi)^2}\\ &-\frac{2\ddot{\varphi}\vert\mathcal{S}\vert^{2}\eta^{2}(1-\phi)}{(b-\phi)^{3}}-\frac{4\ddot{\varphi}\vert\textbf{S}\nabla\mu\vert^{2}}{(b-\phi)^{2}}-\frac{2\dddot{\varphi}\eta\langle\textbf{S}\nabla\mu,\nabla\mu\rangle}{(b-\phi)^{2}}.\nonumber \end{align} \textbf{Claim 1:} The following statements hold on $\Sigma$ \begin{enumerate} \item [(i)] $\ddot{\varphi}\eta^2\vert\mathcal{S}\vert^{2}(b-\phi)-\ddot{\varphi}\vert\mathcal{S}\vert^{2}\eta^{2}(1-\phi)-2\ddot{\varphi}\vert\mathcal{S}\nabla\mu\vert^{2}(b-\phi)\leq 0,$ \item [(ii)] $\dddot{\varphi}\eta\langle\textbf{S}\nabla\mu,\nabla\mu\rangle\leq 0$. \end{enumerate} \begin{proof}[Proof of Claim 1.]
Fix any point $p\in\Sigma$. The first item (i) trivially holds since $-\ddot{\varphi}\vert\textbf{S}\nabla\mu\vert^{2}\geq 0$ and $b-\phi\geq 1-\phi$. On the other hand, by taking an orthornormal frame of principal directions $\{v_{i}\}_{i=1,2}$ of $T_{p}\Sigma$, $$\langle\textbf{S}\nabla\mu,\nabla\mu\rangle=\sum_{i=1}^{2}\langle\nabla\mu,v_{i}\rangle^{2}k_{i},$$ where $k_{i}$ are the principal curvatures of $\Sigma$ in $p$.
If $\dot{\varphi}\geq 0$ everywhere (the case $\dot{\varphi}\leq 0$ is analogous), then as $K\geq 0$, we get that either $H\leq 0$ or $H\geq 0$ on $\Sigma$. \begin{itemize} \item If $H\leq 0$, then each $k_{i}\leq 0$ on $\Sigma$. In particular, from the equation \eqref{meancurvature}, the angle function $\eta\geq 0$. Consequently, from the condition $(\dagger)$, the following inequality holds \begin{equation} \label{ineqq} 0\geq \dddot{\varphi}\eta\langle\textbf{S}\nabla\mu,\nabla\mu\rangle=\sum_{i=1}^{2}\dddot{\varphi}\eta\, k_{i}\langle\nabla\mu,v_{i}\rangle^{2}. \end{equation} \item If $H\geq 0$, then each $k_{i}\geq 0$ on $\Sigma$. In this case, the angle function $\eta\leq 0$ and then, the inequality \eqref{ineqq} also holds. \end{itemize} \end{proof}
Consequently, from the inequality \eqref{c5} together with Claim 1, if we denote by ${\cal J}$ to the drift Laplacian operator $\Delta^{\varphi+2\text{log}(\phi-b)}$, then \begin{equation} \label{c6} {\cal J} \psi \geq 2(b- \phi)(1-b)\, \psi^{2}\geq 2\varepsilon (b-1)\psi^{2}. \end{equation}
Let $\rho(p)=|p|$ be the distance function in $\mathbb{R}^3$ from the point $p\in \Sigma$ to the origin and $\lambda = (R^2-\rho^2)^2 \psi$ where $R$ is any positive real number. It is easy to check that \begin{align}
\rho \nabla \rho &= p^\top,\label{grho}\\ {\cal J} \rho^2 &= 2(2 +\dot{\varphi} \mu - \frac{2}{b-\phi} {\cal S}(y_0^\top,p^\top),\label{lrho}\\ {\cal J} \lambda&=(R^2-\rho^2)^2 {\cal J}\psi - 2 \psi (R^2-\rho^2) {\cal J}\rho^2 \label{llambda} \\
&+ 8 |p^\top|^2 \psi - 8 (R^2-\rho^2) \langle \nabla \psi , p^\top\rangle.\nonumber \end{align}
\subsection{ Proof of Theorem \ref{Main1}}
By using that $H$ is bounded on $\Sigma$ and $K\geq 0$, we have that the length of the second fundamental form $|{\cal S}|^2 = H^2 - K \leq H^2 $ is uniformly bounded on $\Sigma$ and, from \eqref{c6}, \eqref{lrho} and \eqref{llambda}, there exist positive constants $C_1$ and $C_2$ such that \begin{align} {\cal J} \lambda &\geq 2(R^2-\rho^2)^2 \varepsilon \, (b-1)\psi^2 - C_1\psi (R^2-\rho^2) (C_2 + \rho)\label{dlambda}\\
&+ 8 |p^\top|^2 \psi - 8 (R^2-\rho^2) \langle \nabla \psi, p^\top\rangle.\nonumber \end{align} Let $B_R$ be the ball in $\mathbb{R}^3$ of radius $R$ with center at the origin and consider $R$ large enough so that $\Sigma_R= \Sigma\cap B_R\neq \emptyset$. Then, on $\Sigma_R$ the function $\lambda$ attains its maximum at a interior point $p_R$, where we have that $$ \nabla \lambda (p_R) =0, \qquad {\cal J} \lambda (p_R) \geq 0.$$ Thus, from the above expression, \eqref{grho}, \eqref{dlambda} and by a straightforward computation, we get that, \begin{equation}\sup_{\Sigma_R} \lambda\leq \widetilde{C}_1R^2 (\widetilde{C}_2 + R), \end{equation} for some positive constants $\widetilde{C}_1$ and $\widetilde{C}_2$. In particular on $\Sigma_{R/2}$ the function $\psi$ satisfies $$ \psi \leq \frac{16}{9}\widetilde{C}_1 \frac{\widetilde{C}_2 + R}{R^2},$$ that is, $\psi\rightarrow 0$ as $R\rightarrow \infty$ and then, $\Sigma$ must be a plane. \begin{proof}[] \end{proof} \subsection{Proof of Theorem \ref{Main2}} Let $\Sigma$ be a properly embedded $[\varphi,\vec{e}_{3}]$-minimal surface in $\mathbb{R}^{3}$. Up to horizontal translations, we can assume $0$ is not contained in $\Sigma$. Consider the following smooth function $\gamma:\Sigma\rightarrow\mathbb{R}$ given by $\gamma(p)=2\log(\vert p\vert)$. As $\Sigma$ is properly embedded, we have that $\vert\gamma\vert\rightarrow+\infty$ when $p\rightarrow+\infty$. It is not difficult to check that \begin{align*} &\Delta\gamma=-4\frac{\vert p^{T}\vert}{\vert p\vert^{2}}+\frac{2}{\vert p\vert^{2}}\left(2+\langle p,N\rangle H \right), \\ &\langle\nabla\gamma,\nabla(\varphi+2\log(\phi-b))\rangle=\frac{1}{\vert p\vert^{2}}\left(2\dot{\varphi}\langle p^{T},\vec{e}_{3}^{T}\rangle+4\frac{\mathcal{S}(p^{T},y_{0}^{T})}{\phi-b}\right). 
\end{align*} Now, from the above expressions, \eqref{hy3} and by using that $H$ is bounded and $K\geq 0$, we have that $\vert\mathcal{S}\vert^{2}$ and $\psi$ are uniformly bounded and there must be a constant $C>0$ such that $${\cal J} \gamma=\Delta^{\varphi+2\log(\phi-b)}\gamma\leq C\gamma,$$ outside a compact set. Thus, from the Proposition \ref{criteria}, $\Sigma$ is $(\varphi+2\log(\phi-b))$-stochastics complete and there exists a sequence of points $\{p_{n}\}_{n\in\mathbb{N}}\subset\Sigma$ satisfying $$\psi(p_{n})\geq\psi^{*}-\frac{1}{n} \ \ \text{and} \ \ {\cal J}\psi(p_{n})\leq \frac{1}{n} \ \ \text{ for any }n\in\mathbb{N}, $$ where $\psi^{*} = \sup_\Sigma \psi$. However, by taking limits in \eqref{c6}, we obtain that $$0\geq 2\varepsilon (b-1)(\psi^{*})^{2}\geq 0,$$ which proves that $\psi^{*}=0$ and $\Sigma$ must be a plane. \begin{proof}[]\end{proof} \subsection{Proof of Theorem \ref{Main3}} Let $\eta =\langle N,\vec{e}_3\rangle$ be the angle function of $\Sigma$, then by using \cite[Lemma 2.1]{MM}, we get \begin{equation} \Delta^\varphi \eta = - (\ddot{\varphi} \vert \nabla \mu \vert^2 + \vert A\vert^2)\eta \leq -(\ddot{\varphi} \vert \nabla \mu \vert^2 + \frac{\dot{\varphi}^2 \eta^2}{2})\eta.\label{thc} \end{equation} Consider the function $\gamma:\Sigma\longrightarrow \mathbb{R}$ given by $\gamma(p) = 2 \log \vert p\vert$, then as $\Sigma$ is properly embedded and $\varphi$ satisfies \eqref{hy4}, we can check that \begin{align} &\gamma(p) \rightarrow +\infty, \quad \text{ $p\rightarrow \infty$} \label{comp1}\\ &\vert \nabla \gamma(p)\vert = 2\frac{\vert p^\top\vert^2}{\vert p\vert^2} \leq 2,\quad p \rightarrow \infty\label{comp2}\\ & \Delta^\varphi \gamma(p) = -4\frac{\vert p^\top\vert^2}{\vert p\vert^4} + \frac{2 \mu(p) \dot{\varphi}(\mu(p))+4}{\vert p\vert^2} \leq C, \quad p\rightarrow \infty.\label{comp3} \end{align} Thus, from \cite[Theorem 3.2]{AMR} we can apply the generalized Omori-Yau maximum principle to $\Delta^\varphi$ and there exists a 
sequence of points $\{p_n\}$ in $\Sigma$ such that \begin{align*} &\eta(p_n) \rightarrow \eta_\star =\inf_\Sigma\eta >0\\ &\Delta^\varphi\eta(p_n) > \frac{1}{n}, \quad \nabla\eta (p_n) \rightarrow 0, \end{align*} and taking limits in \eqref{thc} along the points $p_n$, we get a contradiction. \begin{proof}[]\end{proof} \section{The Lorentzian case}\label{s4} Let $\mathbb{L}^3$ be the 3-dimensional Lorentz-Minkowski space, that is, the real vector space $\mathbb{R}^3$ endowed with the Lorentzian metric tensor $$\langle \cdot, \cdot\rangle_{\mathbb{L}^3} = dx^2+ dy^2- dz^2$$ where $(x,y,z)$ are the canonical coordinates of $\mathbb{R}^3$. A surface $\Sigma$ in $\mathbb{L}^3$ is said to be a spacelike surface if the induced metric is a Riemannian metric on $\Sigma$, which, as usual, is also denoted by $\langle \cdot, \cdot\rangle_{\mathbb{L}^3} $. It is well-known that such a surface is orientable, namely, we can choose a unit timelike normal vector field $N$ globally defined on $\Sigma$ in the same time-orientation of $\vec{e}_3$ that we will call the Gauss map of the immersion. \\ Denote by $\overline{D}$ and $D$, the Levi-Civita connections of $\mathbb{L}^{3}$ and $\Sigma$, respectively. Then the Gauss and Weingarten formulas for $\Sigma$ in $\mathbb{L}^3$ are given by \begin{align} & \overline{D}_XY = D_X Y + {\cal S}(X,Y)N =D_X Y - \langle A(X), Y \rangle N \\ &A(X) = - dN(X) = - \overline{D}_XN \end{align}
for any $X,Y\in T\Sigma$. We will consider $H=\text{trace}(\mathcal{S})$ and $K = -\det(A)$ the mean and Gaussian curvatures of $\Sigma$.
\
Let $\varphi:]a,b[\rightarrow\mathbb{R}$ be a smooth function. Recall that $\Sigma$ is $[\varphi,\vec{e}_{3}]$-\textit{maximal} in $\mathfrak{D}^3=\mathbb{R}^2\times]a,b[\subseteq\mathbb{L}^{3}$ if and only if the mean curvature of $\Sigma$ satisfies \begin{equation} \label{a1L3} H= \dot{\varphi} \ \langle N,\vec{e}_3\rangle_{\mathbb{L}^3}, \end{equation} which is equivalent to say that $\Sigma$ is a critical point of the weighted area functional \eqref{area}.
Consider the height function $\mu=-\langle\psi,\vec{e}_{3}\rangle_{\mathbb{L}^{3}}$ and the hyperbolic angle function $\eta=\langle N,\vec{e}_{3}\rangle_{\mathbb{L}^{3}}$ of a spacelike $[\varphi,\vec{e}_{3}]$-maximal surface in $\mathbb{L}^3$, then arguing as in \cite[Lemma 2.1]{MM} we have, \begin{lemma} \label{equations} The following statements hold \begin{enumerate} \item $\nabla\mu=-\vec{e}_{3}^{\top}$ , $\vert\nabla\mu\vert_{\mathbb{L}^3}^{2}=\eta^{2}-1$, \item $\langle\nabla\eta,\cdot\rangle_{\mathbb{L}^3}=-\mathcal{S}(\nabla\mu,\cdot)$, \item $\nabla^{2}\mu(\cdot,\cdot)=-\eta\mathcal{S}(\cdot,\cdot)$, \item $\nabla^{2}\eta(\cdot,\cdot)=-(D_{\nabla\mu}\mathcal{S})(\cdot,\cdot)+\eta\mathcal{S}^{[2]}(\cdot,\cdot),$ \item $\Delta^{\varphi}\eta=\eta\left(\vert\mathcal{S}\vert_{\mathbb{L}^3}^{2}-\ddot{\varphi}\vert\nabla\mu\vert_{\mathbb{L}^3}^{2} \right)$, \end{enumerate} where $^\top$, $\nabla$ and $\nabla^2$ denote the projection on the tangent bundle, the gradient and hessian operators, respectively, in $\Sigma$ and $\mathcal{S}^{[2]}$ is the symmetric $2$-tensor given by $$\mathcal{S}^{[2]}(X,Y)=\sum_{i=1}^{2}\mathcal{S}(X,v_{i})\mathcal{S}(Y,v_{i})$$ with $\{v_{i}\}_{i=1,2}$ an orthonormal frame of $T\Sigma$. \end{lemma} \begin{lemma}\label{ll2} Let $\Sigma$ be a spacelike $[\varphi,\vec{e}_{3}]$-maximal surface in $\mathbb{L}^{3}$ and consider the smooth function $\Upsilon:\Sigma\rightarrow\mathbb{R}$ given by $\Upsilon=-(1+\eta^{2})^{-1/2}$. Then, the following inequality hold \begin{equation}-\Upsilon\Delta^{\varphi}\Upsilon+3\vert\nabla\Upsilon\vert^{2}\geq (\vert\mathcal{S}\vert^{2}-\ddot{\varphi}\vert\nabla\mu\vert^{2})\frac{1-\Upsilon^{2}}{1+\eta^{2}}\geq 0.\label{oylorentz}\end{equation} \end{lemma} \begin{proof} By a straightforward computation, $$\Delta^{\varphi}\Upsilon=\frac{\Delta^{\varphi}\eta^{2}}{2(1+\eta^{2})^{3/2}}-\frac{3\vert\nabla\eta^{2}\vert^{2}}{4(1+\eta^{2})^{5/2}}.$$ Moreover, from item 5. 
of Lemma \ref{equations} and multiplying in both members by $(1+\eta^{2})^{-1/2}$, we obtain that $$\frac{\Delta^{\varphi}\Upsilon}{(1+\eta^{2})^{1/2}}\geq (\vert\mathcal{S}\vert^{2} -\ddot{\varphi}\vert\nabla\mu\vert^{2})\frac{\eta^{2}}{(1+\eta^{2})^{2}}-\frac{3\vert\nabla\eta^{2}\vert^{2}}{4(1+\eta^{2})^{3}}.$$ Finally, the proof follows taking into account that $$1-\Upsilon^{2}=\frac{\eta^{2}}{1+\eta^{2}} \ \ \text{and} \ \ \nabla\Upsilon=\frac{\nabla\eta^{2}}{2(1+\eta^{2})^{3/2}}$$ \end{proof} \begin{lemma} \label{ll3} Let $\Sigma$ be a complete spacelike $[\varphi,\vec{e}_{3}]$-maximal surface in $\mathbb{L}^{3}$. If $\ddot{\varphi} \leq0$ on $]a,b[$, then the generalized Omori-Yau maximum principle can be applied on $\Delta^\varphi$. \end{lemma} \begin{proof} By taking the Bakry-\'Emery Ricci tensor which is defined by $$ Ric_\varphi = Ric - \nabla^2 \varphi,$$ where $Ric$ denotes the standard Ricci tensor on $\Sigma$, we have from the Gauss equation, Lemma \ref{ll2} and by a straightforward computation that \begin{align*} & Ric_\varphi(X,X) = Ric(X,X) - \nabla^2 \varphi(X,X) = \vert AX\vert^2_{\mathbb{L}^3} - \ddot{\varphi}\langle X,\nabla \mu\rangle_{\mathbb{L}^3}^2 \geq 0. \end{align*} Then, from completeness and by applying the weighted Bochner's formula to the distance function $r(\, \cdot \, )= d_\Sigma (\cdot,p_0)$, see \cite[Theorem 1.1]{WW}, we have $$ \Delta^\varphi r (p) \leq \frac{C}{r(p)} \leq C, \quad p\rightarrow \infty.$$ This expression together with the fact that $\vert \nabla r\vert=1$ allows us to apply Theorem 3.2 in \cite{AMR} and to conclude the proof. \end{proof} \subsection{Proof of Theorem \ref{Main4}} We may assume that $\eta$ is non constant on $\Sigma$ otherwise $\Sigma$ must be an horizontal plane and $\dot{\varphi}\equiv 0$. 
In this case, from Lemma \ref{ll3}, there exists a sequence $\{p_n\} \subset \Sigma$ such that, \begin{align} & \Upsilon(p_n)\rightarrow \sup_\Sigma\Upsilon =\Upsilon^*, \quad -\frac{1}{\sqrt{2}}< \Upsilon^* \leq 0,\label{le1}\\ &\Delta^{\varphi} \Upsilon(p_{n})<\frac{1}{n}, \text{ for any } n\in\mathbb{N}, \quad \vert \nabla \Upsilon\vert(p_n)\rightarrow 0.\label{le2} \end{align} Now, having in mind that $2\vert {\cal S}\vert^2 \geq H^2 = \dot{\varphi}^2 \eta^2$ and $$ \frac{\vert \nabla \mu \vert^2}{\eta^2} (p_n) \rightarrow \frac{-1}{\eta^2_*} + 1, \qquad \eta^2_* =\sup_\Sigma \eta^2,$$ if we plug the sequence in the inequality \eqref{oylorentz} and take the limit, then \begin{equation} \inf_\Sigma \dot{\varphi}^2 = 0\ \ \text{and}\ \ \sup_\Sigma \ddot{\varphi} = 0, \label{condlorentz} \end{equation} which concludes the proof. \begin{proof}[]\end{proof} \begin{corollary} Let $\varphi:]a,b[\rightarrow \mathbb{R}$ be an analytic function with $\dot{\varphi}$ bounded and satisying \eqref{hy1}. Then any complete spacelike $[\varphi,\vec{e}_3]$-maximal surface in $\mathbb{L}^3$ must be a plane. \end{corollary} \begin{proof} From \eqref{hy1} we may assume that $\dot{\varphi}\geq 0$, $\ddot{\varphi}\leq 0$ and $\dddot{\varphi}\geq 0$ on $]a,b[$, otherwise we could argue in a similar way. Thus, from Theorem \ref{Main4}, $\dot{\varphi}$ will be a nonnegative decreasing convex function satisfying \eqref{condlorentz}.
We assert that either $\dot{\varphi}\equiv 0$ or $]a,b[ = \mathbb{R}$. In fact, let consider $\{p_n\}\subset \Sigma$ the sequence of points satisfying \eqref{le1} and \eqref{le2}, then \begin{align} &\inf_n\vert \nabla \mu\vert^2(p_n) >0, \quad \dot{\varphi}(p_n) \rightarrow 0,\label{le3}\\ & \mu_0= \sup_n\mu(p_n) \leq \mu^* = \sup_\Sigma \mu \leq b.\label{le4} \end{align} If $\mu_0<\mu^*$, then $\dot{\varphi}\equiv 0$ on $]\mu_0,\mu^*[$ and, by analyticity, $\Sigma$ must be a plane. If not, $\mu_0=\mu^*$ and then we can apply the Ekeland's principle \cite[Proposition 2.2]{AMR} and use \eqref{le3} and \eqref{le4} to prove that $\mu^*=\sup_\Sigma\mu=+\infty$.
Now, consider $\mu_* = \inf_\Sigma\mu$. If $\mu_*>-\infty$, from Lemma \ref{ll3}, there exists a sequence of points $\{q_n\}$ in $\Sigma$ satisfying $$ \mu(q_n)\rightarrow \mu_*, \quad \inf \vert\nabla \mu\vert(q_n) = 0, \quad \Delta^\varphi \mu (q_n)> \frac{1}{n}, $$ but, as $\dot{\varphi}$ is decreasing in $]\mu_*,\infty[$, we have that $\inf_n\dot{\varphi}(q_n)>0$ (otherwise $\dot{\varphi}\equiv 0$ on $]\mu_*,\infty[$ and $\Sigma$ would be a plane) and then, from Lemma \ref{equations}, the following inequalities hold $$ 0 \leq \inf_n\dot{\varphi}(q_n)= - \inf_n \eta^2(q_n) \dot{\varphi} (q_n) < 0, $$ which is a contradiction and our assertion is true.
The result follows because a decreasing, convex, and bounded smooth function $\dot{\varphi}:\mathbb{R}\rightarrow\mathbb{R}$ satisfying \eqref{condlorentz} must be identically zero and so $\Sigma$ is a plane.
\end{proof}
\end{document} | arXiv | {
"id": "2305.06649.tex",
"language_detection_score": 0.5760422945022583,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Non-exponential spontaneous emission dynamics for emitters in a time-dependent optical cavity}
\author{Henri Thyrrestrup}\email{Email: h.t.nielsen@utwente.nl, website: www.photonicbandgaps.com} \author{Alex Hartsuiker} \affiliation{Complex Photonic Systems (COPS), MESA+ Institute for Nanotechnology, University of Twente, 7500 AE Enschede, The Netherlands} \author{Jean-Michel G\'erard} \affiliation{CEA/INAC/SP2M, Nanophysics and Semiconductor Laboratory, 17 rue des Martyrs, 38054 Grenoble Cedex, France} \author{Willem L. Vos} \affiliation{Complex Photonic Systems (COPS), MESA+ Institute for Nanotechnology, University of Twente, 7500 AE Enschede, The Netherlands} \date{\today}
\begin{abstract} We have theoretically studied the effect of deterministic temporal control of spontaneous emission in a dynamic optical microcavity. We propose a new paradigm in light emission: we envision an ensemble of two-level emitters in an environment where the local density of optical states is modified on a time scale shorter than the decay time. A rate equation model is developed for the excited state population of two-level emitters in a time-dependent environment in the weak coupling regime in quantum electrodynamics. As a realistic experimental system, we consider emitters in a semiconductor microcavity that is switched by free-carrier excitation. We demonstrate that a short temporal increase of the radiative decay rate depletes the excited state and drastically increases the emission intensity during the switch time. The resulting time-dependent spontaneous emission shows a distribution of photon arrival times that strongly deviates from the usual exponential decay: A deterministic burst of photons is spontaneously emitted during the switch event. \end{abstract}
\maketitle \section{Introduction} Impressive progress has been achieved in controlling spontaneous emission in the frequency domain with nanophotonic structures \cite{haroche1989aa,vahala2003aa,gerard2003aa,Reithmaier2008,Buckley2012,kleppner1981aa}, like microcavities, photonic crystals \cite{Leistikow2011,Wang2011}, waveguides \cite{lundhansen2008,thyrrestrup2010,sapienza2010aa} and nano-antennas \cite{Novotny2011}. This is possible since the spontaneous emission rate is not an immutable property of the emitter \cite{kleppner1981aa,haroche1989aa} but strongly depends on its surroundings through the local density of optical states (LDOS) \cite{sprik1996}. The LDOS counts the number of modes into which a photon can be emitted, and can be interpreted as the density of vacuum fluctuations at the position of the emitter. A well-studied tool to enhance the average LDOS and thereby the spontaneous decay rate for an emitter is a cavity tuned to the source's emission frequency. Following the pioneering work in \cite{gerard1998aa}, many groups have demonstrated the Purcell effect with quantum dots embedded in solid-state microcavities \cite{bayer2001aa,Hennessy2007,sapienza2010aa}. In all cases, however, the modification of the LDOS is \emph{stationary in time}. Thus, the radiative decay rate is time independent and the distribution of photon emission times decays exponentially in time and is completely determined by this rate.
In this work, we theoretically propose a novel paradigm in light emission: we modify the environment of an ensemble of two-level emitters \emph{in time during their lifetime}, as mediated by a time-dependent LDOS. This results in non-exponential time evolution of the internal dynamics of the emitters and the emitted intensity. By utilizing fast optical modulation of a microcavity, we can tune the cavity resonance and drastically change the LDOS at the emission frequency within the emission lifetime. As a result, we anticipate bursts of dramatically enhanced emission, concentrated within short time intervals. The spontaneous emission process remains stochastic but results in a strongly non-exponential temporal distribution of detected photons that is completely controlled by the experimentalist. Our approach thus offers a tool to dynamically control the light-matter coupling \cite{Majumdar2012}. For modulation dynamics faster than the cavity storage time this allows to achieve non-Markovian dynamics in cavity quantum electrodynamics, and thus bring the system out of the weak-coupling limit \cite{lagendijk1993aa}. In the present study we limit ourselves to the Markovian regime where the modulation is slower than the storage time, which captures the essential features of the non-exponential emission dynamics.
We first derive the rate equation for the excited state population of an ensemble of two-level sources in a time-dependent environment modeled through a time-dependent LDOS. From the rate equation, we determine the time-dependence of the intensity emitted from an ensemble of two-level emitters, such as quantum dots \cite{gerard1998aa} or rare earth atoms \cite{Vredenberg1993}, under pulsed excitation in a cavity. Since micropillar cavities are known as a versatile class of microcavities we choose them as an example. The decay rate of the ensemble, determined by the LDOS, is switched by exciting free carriers, which is a well-known control mechanism in the time domain for nano-cavities \cite{jewell1989,rivera1994,fushman2007,McCutcheon2007,harding2007aa,hu2008aa}.
\section{Emission dynamics in a time-dependent environment}
\subsection{Rate equations} We consider a single two-level emitter in a medium with a strongly dispersive LDOS $\rho(\omega,\vec r)$ in a photonic microcavity and we investigate the effect of a time-dependent LDOS, that modifies the radiative decay rate in time. To derive the rate equation of a two-level source we start with the equation of motion of the probability amplitude of the excited two-level emitter $c_a(t)$ \cite{vats2002aa} with a LDOS $\rho(\omega,\mathbf{e}_d,\vec{r},t')$ that depends on time $t'$ \begin{equation} \frac{dc_{a}(t)}{dt} = -\frac{d^2}{2\hbar\epsilon_0}\int_0^t\int^\infty_0c_a(t')\omega\rho(\omega,\mathbf{e}_d,\vec{r},t')e^{i(\omega-\omega_d)(t'-t)}d\omega dt'. \label{eq:popdensityNikolaev} \end{equation} Here $d$ and $\mathbf{e}_d$ are the amplitude and orientation vector of the transition dipole moment, respectively, $\hbar$ the reduced Planck's constant, $\epsilon_0$ the dielectric constant of vacuum, $\mathbf{r}$ the emitter position, and $\omega_d$ the emission frequency. For convenience, we only write the time dependency of $c_a(t)$, but it should be kept in mind that the amplitude $c_a(t,\vec{r},\vec{e}_d,\omega_d)$ also depends on $\vec{r}$, $\vec{e}_d$ and $\omega_d$ \cite{vos2009}.
In the following we limit ourselves to the weak coupling regime in cavity quantum electrodynamics where the single emitter linewidth is narrow compared to the spectral variations in the factor ($\omega\rho(\omega,\vec{e}_d,\vec{r},t')$). This approximation is known as the \emph{Markov approximation} \cite{lagendijk1993aa} or the \emph{Wigner-Weisskopf approximation} \cite{loudon1983aa}. We thus neglect coherent interactions between the emitter and the environment where a full quantum mechanical description is necessary. In the Markov approximation we can take $\omega\rho(\omega,\vec{e}_d,\vec{r},t')$ out of the frequency integral and Eq.~\eqref{eq:popdensityNikolaev} can be simplified to
\begin{equation} \frac{dc_{a}(t)}{dt} = -\frac{d^2}{2\hbar\epsilon_0}\int_0^tc_a(t')\pi\delta(t-t')\omega_d\rho(\omega_d,\textbf{e}_d,\mathbf{r},t')dt'. \label{eq:N2WWapprox} \end{equation} The integral in Eq.~\eqref{eq:N2WWapprox} can be evaluated to yield \cite{nikolaev2006aa} \begin{equation} \frac{dc_{a}(t)}{dt} = -\frac{d^2}{2\hbar\epsilon_0}c_a(t)\pi\omega_d\rho(\omega_d,\mathbf{e}_d,\textbf{r},t), \label{eq:N2WWapproxsimplify} \end{equation} which can be written as \begin{equation} \frac{dc_{a}(t)}{dt} = -\frac{\Gamma_{\mathrm{rad}}}{2} c_a(t), \label{eq:cagamma} \end{equation} with $\Gamma_{\mathrm{rad}}(t)$ the radiative rate \begin{equation} \Gamma_{\mathrm{rad}}(t) = \frac{d^2\omega_d\pi}{\hbar\epsilon_0}\rho(\omega_d,\vec{e}_d,\vec{r},t). \label{eq:raddecayhom} \end{equation}
Equation~\eqref{eq:raddecayhom} is Fermi's golden rule \cite{fermi1932aa} augmented with a time-dependent LDOS. This shows that in the Markov limit the instantaneous radiative rate $\Gamma_{\mathrm{rad}}(t)$ directly follows the time dependence of the LDOS. In case of a time-independent LDOS the rate $\Gamma_{\mathrm{rad}}(t)=\Gamma_{\mathrm{rad}}$ is constant in time and Eq.~\eqref{eq:cagamma} shows the well-known feature that the amplitude $c_a(t)$ decreases exponentially with the rate $\frac{\Gamma_{\mathrm{rad}}}{2}$ \cite{loudon1983aa}. Similarly, the probability $|c_a(t)|^2$ of the two-level emitter to be excited decreases exponentially according to \begin{equation}
|c_a(t)|^2 = \abs{c_a(0)}^2e^{-\Gamma_{\mathrm{rad}} t}.\label{eq:ca2gamma} \end{equation} For a time-dependent LDOS the rate in Eq.~\eqref{eq:ca2gamma} is no longer constant and the excited state population decreases non-exponentially and thus deviates from the standard Markovian dynamics.
From Eq.~\eqref{eq:cagamma} we can write the equation of motion for the population density $N_2(t)$ for an ensemble of $N$ identical non-interacting two-level sources. To complete the model we include a time-dependent excitation term for the sources and a non-radiative decay rate $\Gamma_\mathrm{nrad}$. The equation of motion for the population density becomes \begin{equation} \frac{dN_{2}(t)}{dt}=\eta_\mathrm{abs}\frac{P_{\mathrm{exc}}(t)}{\hbar\omega_{exc}}-\left(\Gamma_{\mathrm{rad}}(t)+\Gamma_\mathrm{nrad} \right)N_{2}(t). \label{eq:rateeq} \end{equation} The first term describes the excitation and depends on the excitation power $P_{\mathrm{exc}}(t)$ per emitter, the excitation frequency $\omega_{exc}$, and the absorption efficiency of the excitation power that reaches the two-level source $\eta_\mathrm{abs}$. The second term describes the radiative decay and the third term the non-radiative decay. For convenience, we write $N_2(t)$ only as a function of time in Eq.~\eqref{eq:rateeq}, although for an inhomogeneous ensemble of $N$ two-level sources $N_2(t)$ also depends on $\vec{r}$, $\vec{e}_d$ and $\omega_d$. The general solution of Eq.~\eqref{eq:rateeq} is \begin{equation} N_2(t)=N_{2}(0)+\int_0^t\left(\eta_\mathrm{abs}\frac{P_{\mathrm{exc}}(t')}{\hbar\omega_{exc}} -\left(\Gamma_{\mathrm{rad}}(t')+\Gamma_\mathrm{nrad} \right)N_2(t')\right)dt'. \label{eq:gensolrateeq} \end{equation} The corresponding radiated emission intensity $I(t)$ is given by \cite{van_driel2007aa} \begin{equation} I(t) = \Gamma_{\mathrm{rad}}(t)N_2(t) \label{eq:emissionfromN2}, \end{equation} which means that the total emitted light intensity is proportional to the instantaneous radiative decay rate and the population density. For a low density sub-ensemble of non-interacting emitters with the same emission frequency $\omega_d$ we should average Eq.~\eqref{eq:emissionfromN2} over $\vec{r}$ and $\vec{e}_d$. 
Equations~\eqref{eq:gensolrateeq} and \eqref{eq:emissionfromN2} are generally valid for any set of two-level emitters in an environment with a time-dependent LDOS. Equations~\eqref{eq:gensolrateeq} and \eqref{eq:emissionfromN2} form the basis for our further discussion and they will be used to calculate the emission of an ensemble of emitters that experience a time-dependent LDOS.
\subsection{Time dependent radiative decay rate in a microcavity\label{sec:timedeprate}} The central goal of this work is to describe the effects of a time-dependent radiative decay rate $\Gamma_{\mathrm{rad}}(t)$ that is realized by dynamically changing the LDOS in time at the position and frequency of an emitter. In general, we can separate the time-dependent decay rate into a constant rate $\Gamma_0$ and a time-dependent change in the decay rate $\Delta\Gamma_\mathrm{rad}(t)$ \begin{equation}
\Gamma_\mathrm{rad}(t)=\Gamma_0+\Delta\Gamma_\mathrm{rad}(t), \end{equation} where $\Delta\Gamma_\mathrm{rad}(t)$ is proportional to the change in the LDOS $\Delta\rho(t)$ \begin{equation} \Delta\Gamma_{\mathrm{rad}}(t)=\frac{2\pi d^2\omega_d}{\hbar\epsilon_0}\Delta\rho(t). \end{equation} We assume that the time-dependent part is the result of a short switching event that quickly changes the LDOS within a characteristic switching time $\tau_\mathrm{sw}$.
In the following we choose as a realistic experimental situation a scheme where the emitter is embedded in a semiconductor microcavity. The LDOS is modified in time by controlling the refractive index by means of the free carrier density in the semiconductor, as excited by a short optical (or electrical) pump pulse at $t=t_\mathrm{pu}$. The induced change in the refractive index is proportional to the free carrier density \cite{euser2008aa} and the resulting change in the LDOS depends strongly on the dielectric structure of the microcavity \cite{vahala2003aa}. The excited free carriers recombine exponentially with a characteristic recombination time $\tau_\mathrm{sw}$, after which the refractive index is restored to its original value \cite{harding2007aa,euser2008aa}. Here we use $\tau_\mathrm{sw}=\unit{35}{\pico\second}$, characteristic for GaAs \cite{harding2007aa}.
It has previously been proposed to switch the LDOS by shifting the band gap frequency of a photonic crystal \cite{Johnson2002}. In this study, however, only the change in the LDOS was considered and not the effect on the spontaneous emission of embedded quantum emitters. Moreover, switching a cavity resonance is a more versatile and interesting tool to modify the LDOS due to the large LDOS change over a very narrow bandwidth.
\begin{figure}\label{fig:LDOSswitch}
\end{figure}
As an example, Fig.~\ref{fig:LDOSswitch} illustrates the effect of switching the resonance frequency $\omega_{\mathrm{cav,0}}$ of a microcavity with a Lorentzian LDOS with linewidth $\gamma_\mathrm{cav}$ in the spectral vicinity of an emitter with emission frequency $\omega_d$. The single emitter homogeneous linewidth is taken to be narrower than the cavity linewidth ($\gamma_\mathrm{em} < \gamma_\mathrm{cav}$), to fulfill the Markov approximation. This criterion can easily be obtained with semiconductor quantum dots at low temperatures. The large inhomogeneous spectral broadening of semiconductor quantum dots further ensures that only a small sub-ensemble interacts with the cavity and the dots can be treated as non-interacting single emitters. Non-exponential modifications of the emission decay curve arising from non-local effects are therefore negligible \cite{Svidzinsky2012}. At higher temperatures dephasing and spectral diffusion will spectrally broaden the homogeneous linewidth \cite{bayer2002}. These incoherent broadenings will effectively diminish the coupling to the cavity and the effect of the cavity frequency shift.
The decrease in the refractive index induced by the switching free carriers leads to a positive frequency shift of the cavity resonance frequency $\omega_\mathrm{cav}(t)$ as indicated in Fig.~\ref{fig:LDOSswitch} \cite{RefractiveIndexChange}. The emitter is initially detuned from the cavity resonance and experiences a low radiative rate $\Gamma_{\mathrm{0rad}}$. During the switch event the cavity peak is tuned into resonance with the emitter as shown as the dashed Lorentzian in Fig.~\ref{fig:LDOSswitch}. This change results in a rapid increase in the LDOS at the emitter frequency and greatly enhances the decay rate $\Gamma_{\mathrm{rad}}(t)$ from the initial value $\Gamma_{\mathrm{rad}}(0)=\Gamma_{0}$ to its maximum value of $\Gamma_{\mathrm{rad}}(\Delta t)=\Gamma_{0}+\Delta\Gamma_\mathrm{rad}$ and back to $\Gamma_0$ within a time $\Delta t$. The effective switching time in this scenario is therefore given by \begin{equation}
\tau_\mathrm{sw}=\frac{\Delta t}{\abs{\omega_\mathrm{cav}(\Delta t)-\omega_\mathrm{cav,0}}}\;\gamma_\mathrm{cav}. \end{equation} A shorter effective switching time can thus be realized by either a faster tuning of the cavity resonance in time $\Delta t$ or by increasing the spectral tuning range relative to the cavity linewidth $\gamma_\mathrm{cav}$ within the time $\Delta t$.
We note that this switching procedure is very flexible and we can effectively move along different trajectories on the cavity's LDOS by choosing the initial detuning and strength of the switching effect. An alternative configuration is where the emitter starts on resonance and experiences a radiative rate that is already Purcell enhanced. The switch then detunes the cavity resonance away from the emitter's frequency and thus inhibits the spontaneous decay rate. In general, the steep slope of the cavity LDOS gives a rapid change in the LDOS that can be used to either greatly enhance or inhibit the radiative decay rate, relative to the unswitched rate.
For an initial detuning between the cavity and emitter frequency smaller than the cavity linewidth ($\omega_\mathrm{d}-\omega_{\mathrm{cav,0}} < \gamma_\mathrm{cav}$) we can approximate the steep slope of the Lorentzian resonance as a linear trend shown as the red dashed line in Fig.~\ref{fig:LDOSswitch}. We can therefore effectively make a linear approximation between the excited free carrier density and the radiative decay rate. For a typical switching pulse with a Gaussian temporal width $\tau_\mathrm{pu} = \unit{120}{\femto\second}$, that is much shorter than the carrier recombination time (\unit{35}{\pico\second}, see \cite{fushman2007,McCutcheon2007,harding2007aa}), we can separate the excitation and relaxation time scales of the free carriers. Using the linear relation between the carrier density and the decay rate discussed above, we can decompose the time-dependent decay rate as \begin{equation} \Gamma_{\mathrm{rad}}(t) = \Gamma_{0}+\Delta\Gamma_{\mathrm{rad}}e^{\frac{-(t-t_{\mathrm{0pu}})}{\tau_{\mathrm{sw}}}}\Theta(t-t_{\mathrm{0pu}},\tau_\mathrm{pu}), \label{eq:raddecay} \end{equation} namely a constant decay rate $\Gamma_{0}$, and a change induced by the switch that is turned on at time $t_{\mathrm{0pu}}$. The change is initiated by a Heaviside step function and the magnitude of the switched term in Eq.~\eqref{eq:raddecay} then decays exponentially with an effective switching time comparable to the free carrier relaxation time.
\begin{figure}\label{fig:Gradvstime}
\end{figure}
Two realistic examples of Eq.~\eqref{eq:raddecay} are shown in Fig.~\ref{fig:Gradvstime}, plotting the normalized time-dependent decay rate for a situation where the decay rate is either enhanced or inhibited locally in time. The upper curve (long dashed) depicts the situation where the cavity resonance, initially off-resonance, is tuned into resonance with the emitter as illustrated in Fig.~\ref{fig:LDOSswitch}. As a result the radiative rate is greatly increased at $t=\unit{10}{\pico\second}$ before decreasing again at a rate set by the inverse switching time. Similarly, the lower curve (short dashed) illustrates the situation where the emitter is initially on resonance and the cavity is switched out of resonance. In the examples in Fig.~\ref{fig:Gradvstime} we use either an enhancement or inhibition by a factor of 5, which is a realistic change observed on ensemble of quantum dots in micropillar cavities \cite{gerard1998aa}. Note that a constant relative decay rate of $\Gamma_\mathrm{rad}(t)/\Gamma_0=1$ corresponds to the unswitched case, typical for all Purcell experiments performed to date \cite{gerard1998aa,bayer2001aa,Hennessy2007,sapienza2010aa}. Most striking is the fast dynamics in the decay rate: both the switch pulse duration $\tau_\mathrm{pu}$ and the exponential decrease with decay time $\tau_{\mathrm{sw}}$ are much faster than the intrinsic lifetime $1/\Gamma_{0}=\unit{1}{\nano\second}$ typical for quantum dot emitters.
\subsection{Figure of merit for pulsed excitation\label{sec:poppulsed}} In this section we study the dynamics for the excited state population of emitters after a pulsed excitation, when the environment is subsequently switched during their decay time. We assume that a short excitation pulse with amplitude $P_{\mathrm{0exc}}$ initializes the system at $t=t_{\mathrm{0exc}}$ such that we have an initial population density $N_2(t=t_{\mathrm{0exc}})=N_{02}$. After the excitation pulse the dynamics of the population density is governed only by the time-dependent decay rate and this gives a monotonous decrease in the population density. If we approximate the short excitation pulse by a Dirac delta pulse $P_{\mathrm{exc}}(t)=\delta(t-t_{\mathrm{0exc}})P_{\mathrm{0exc}}$ in the rate equation (Eq.~\eqref{eq:rateeq}) it can be solved analytically for times after excitation ($t>t_{\mathrm{0exc}}$). In this case Eq.~\eqref{eq:rateeq} simplifies to \begin{equation} \frac{dN_{2}(t-t_{\mathrm{0exc}})}{dt}=-\left(\Gamma_{\mathrm{rad}}(t-t_{\mathrm{0exc}})+\Gamma_\mathrm{nrad}\right)N_{2}(t-t_{\mathrm{0exc}}), \label{eq:simplerateequation} \end{equation} which can be integrated to yield \begin{equation} N_2(t-t_{\mathrm{0exc}})=N_{02}\exp\left(\int_0^{t-t_{\mathrm{0exc}}}-\left(\Gamma_{\mathrm{rad}}(t')+\Gamma_\mathrm{nrad}\right)dt'\right). \label{eq:simplerateequationrewritsolGen} \end{equation} Equation \ref{eq:simplerateequationrewritsolGen} describes the population density for any time-dependent decay rate $\Gamma_{\mathrm{rad}}(t)$ as a function of time $t$ after the excitation process is over. Despite the time-integral in Eq.~\eqref{eq:simplerateequationrewritsolGen} the equation does not describe non-Markovian dynamics, since the dynamics only depends on the present time (Eq.~\eqref{eq:simplerateequation}) and only accumulate changes from the modification in the LDOS and not the light-matter dynamics \cite{Chruscinski2010}. 
Inserting the switched decay rate Eq.~\eqref{eq:raddecay} into Eq.~\eqref{eq:simplerateequationrewritsolGen} and solving the integral over the constant part of the decay rate yields \begin{equation} N_2(t-t_{\mathrm{0exc}})=N_{02}e^{-\left(\Gamma_{0}+\Gamma_{0nrad}\right)(t-t_{\mathrm{0exc}})-\Delta\alpha_\mathrm{rad}(t)} \label{eq:partsolutionswitcheddecrate} \end{equation} where we have defined a dimensionless time-dependent switch parameter $\Delta\alpha_\mathrm{rad}(t)$ \begin{equation} \Delta\alpha_\mathrm{rad}(t)\equiv\int_0^{t}\Delta\Gamma_\mathrm{rad}e^{\frac{-(t'-t_{\mathrm{0pu}})}{\tau_{\mathrm{sw}}}}\Theta(t-t_{\mathrm{0pu}},\tau_\mathrm{pu})dt'. \label{eq:deltagammarad} \end{equation} This parameter is a figure of merit that describes the relative change in the population density due to the change in the decay rate. A negative $\Delta\alpha_\mathrm{rad}(t)$ results in a population density that decays slower compared to the unswitched situation, while a positive $\Delta\alpha_\mathrm{rad}(t)$ results in a faster decay. If we assume that the duration of the switch pulse $\tau_\mathrm{pu}$ is short compared to the effective switch time $\tau_{\mathrm{sw}}$, the integral in Eq.~\eqref{eq:deltagammarad} can be split into two parts - before and after the switch $t=\tau_\mathrm{pu}$ - and $\Delta\alpha_\mathrm{rad}$ simplifies to \begin{equation} \Delta\alpha_\mathrm{rad}(t) = \Delta\Gamma_\mathrm{rad}\tau_{\mathrm{sw}}\left(1-e^{\frac{-(t-t_{\mathrm{0pu}})}{\tau_{\mathrm{sw}}}}\right)\Theta(t-t_{\mathrm{0pu}},\tau_\mathrm{pu}).
\label{eq:deltagammaradapprox} \end{equation} Here $\Theta(t-t_{\mathrm{0pu}},\tau_\mathrm{pu})$ is a step function from 0 to 1 that accounts for the fact that there is no change in the decay rate before the switching pulse arrives at $t=t_{\mathrm{0pu}}$. In the limit of time $t$ going to infinity $\Delta\alpha_\mathrm{rad}(t)$ becomes \begin{equation} \Delta\alpha_\infty=\lim_{t\to\infty }\Delta\alpha_\mathrm{rad}(t)= \Delta\Gamma_\mathrm{rad}\tau_{\mathrm{sw}}. \label{eq:steadystatedgamApprox} \end{equation} Equation~\eqref{eq:steadystatedgamApprox} shows that $\Delta\alpha_\mathrm{rad}(t)$ is nonzero even in the long-time limit and is given by the product of the switch magnitude $\Delta\Gamma_\mathrm{rad}$ and the effective switch duration $\tau_{\mathrm{sw}}$. The switch therefore has an effect on the population dynamics even long after the switch event. The dimensionless switching parameter $\Delta\alpha_\infty$ is therefore a useful figure of merit for the total switching effect on the excited state population.
We can quantify the effect the switch has on the population at long times by using Eq.~\eqref{eq:partsolutionswitcheddecrate} and the limit in Eq.~\eqref{eq:steadystatedgamApprox} to calculate the ratio between the switched population density $N_{2}$ and the unswitched population density $N_{2us}$ in the limit as $t$ tends to infinity \begin{equation} \lim_{t\to\infty }\frac{N_{2}(t)}{N_{2us}(t)}=e^{-\Delta\Gamma_\mathrm{rad}\tau_{\mathrm{sw}}}=e^{-\Delta\alpha_\infty}. \label{eq:ratioSwUSwN2} \end{equation} Equation \eqref{eq:ratioSwUSwN2} quantifies the long term effect of the switching on the population density as a result of a momentary short change in the decay rate.
\begin{figure}\label{fig:popintensitydecay}
\end{figure}
\subsection{Population dynamics for pulsed excitation}\label{sec:poppulsed} Figure~\ref{fig:popintensitydecay}(a) displays the excited state population Eq.~\eqref{eq:partsolutionswitcheddecrate} for four cases: two without a switch pulse (solid lines) with two different decay rates ($\Gamma_{0}=\unit{1}{\reciprocal{\nano\second}}$ and $\Gamma_{0}=\unit{5}{\reciprocal{\nano\second}}$) and two with switching pulses (dashed lines) resulting in the time-dependent decay rates shown in Fig.~\ref{fig:Gradvstime}. In the two stationary cases, as expected, the populations decay exponentially with their initial rates $\Gamma_{0}$. The green long dashed curve shows the case where a switch tunes the cavity into resonance with the emitter and induces an enhanced decay rate by a factor of 5 ($\Delta\Gamma_\mathrm{rad}= 4\Gamma_{0}$) from $\Gamma_{0}=\unit{1}{\reciprocal{\nano\second}}$. The red short dashed curve represents the opposite case where a cavity is tuned out of resonance by the switch and induces an inhibition in the decay rate by a factor of 5 starting from a high initial rate $\Gamma_{0}=\unit{5}{\reciprocal{\nano\second}}$. For the two switched examples the population density clearly decays non-exponentially.
Before the switching pulse the population decays exponentially with the same rate as in the unswitched case. In the enhanced case as the switching pulse arrives at $t=t_\mathrm{pu}=\unit{150}{\pico\second}$ the population decreases faster and thus deviates from exponential decay. During the effective switching time of \unit{35}{\pico\second} the population density continues to deviate from an exponential decay. A few switching times later ($t>\unit{250}{\pico\second}$) the decay rate returns to its original value but the absolute value of the populations is reduced compared to the unswitched case. Using Eq.~\eqref{eq:partsolutionswitcheddecrate} and the figure of merit (Eq.~\eqref{eq:steadystatedgamApprox}) we see that the larger decay rate induced by the switch depletes the excited state population faster, thereby lowering the population density at long times. The situation is reversed for a switch that induces an inhibition of the spontaneous emission: the population also experiences a non-exponential decay after the switch; however, the population is now larger than its reference value (unswitched case) at long times.
\subsection{Emission dynamics for pulsed excitation} We now continue to the emission dynamics from emitters in a switched environment. According to Eq.~\eqref{eq:emissionfromN2} the emitted intensity $I(t)$ is the product of the excited state population and the radiative rate that is also time-dependent. Modifications to the decay rate are therefore directly reflected in the total emitted intensity. For large dynamic changes in the decay rate, we therefore expect correspondingly large changes in the emitted intensity. One striking consequence is that for a time-dependent decay rate the population density and the emission intensity are no longer directly proportional, contrary to the results in the steady-state case \cite{van_driel2007aa}.
Inserting the dynamic decay rate Eq.~\eqref{eq:raddecay} and the population density Eq.~\eqref{eq:partsolutionswitcheddecrate} into Eq.~\eqref{eq:emissionfromN2} yields the emitted intensity \begin{equation} I(t)=\left(\Gamma_{0}+\Delta\Gamma_{\mathrm{rad}}(t)\right) N_{02}e^{-\Gamma_{0tot}(t-t_{\mathrm{0exc}})-\Delta\alpha_\mathrm{rad}(t)}\Theta(t-t_{\mathrm{0exc}},\tau_{\mathrm{exc}}), \label{eq:emissionvstime} \end{equation} where $\Delta\Gamma_{\mathrm{rad}}(t)=\Delta\Gamma_\mathrm{rad}e^{\frac{-(t-t_{\mathrm{0pu}})}{\tau_{\mathrm{sw}}}}\Theta(t-t_{\mathrm{0pu}},\tau_\mathrm{pu})$, and $\Delta\alpha_\mathrm{rad}(t)$ is given by Eq.~\eqref{eq:deltagammaradapprox}. The main difference between the population density dynamics Eq.~\eqref{eq:partsolutionswitcheddecrate} and the emitted intensity is the presence of the decay rate prefactor $\left(\Gamma_{0}+\Delta\Gamma_{\mathrm{rad}}(t)\right)$. In addition, the intensity in Eq.~\eqref{eq:emissionvstime} is still proportional to the population density so that the influence of the switching process remains visible in the emission intensity even long after the switch event has passed as discussed in Section~\ref{sec:poppulsed}. The intensity relative to the unswitched intensity at long times is thus given by $\lim_{t\rightarrow\infty} I(t)/I_\mathrm{us}(t)=e^{-\Delta\Gamma_\mathrm{rad}\tau_{\mathrm{sw}}}$ as the exponent in Eq.~\eqref{eq:emissionvstime} is the same as in Eq.~\eqref{eq:ratioSwUSwN2} and the time-dependent decay rate $\Delta\Gamma_{\mathrm{rad}}(t)$ in the prefactor tends to zero at long times.
Figure~\ref{fig:popintensitydecay}(b) shows the normalized emission dynamics corresponding to the population density in Fig.~\ref{fig:popintensitydecay}(a): one where the radiative rate is quickly enhanced from an initial low rate of $\Gamma_{0}=\unit{1}{\reciprocal{\nano\second}}$ and another where the radiative rate is inhibited from a high value of $\Gamma_{0}=\unit{5}{\reciprocal{\nano\second}}$. The emitter is excited at $t_{\mathrm{0exc}}=\unit{0}{\pico\second}$, followed by an exponential decay of the emission intensities with the same rate as the population density as expected in the weak coupling limit. A switching pulse arrives at $t=t_\mathrm{pu}=\unit{150}{\pico\second}$ whose effect is to either quickly enhance (green long dashed) or inhibit (red short dashed) the radiative decay rate from the initial rate by a factor of 5.
In the case where the switching pulse enhances the decay rate we see in Fig.~\ref{fig:popintensitydecay}(b) a short and intense burst on top of the normally decaying signal; the intensity thus strongly deviates from an exponential decay. The relative magnitude of the enhancement is equal to the maximum Purcell enhancement and the temporal shape closely follows the modulation in the decay rate as expected from Eq.~\eqref{eq:emissionvstime}. In this example the temporal shape follows the exponential change in the decay rate and the width is limited by the effective switching time of $\unit{35}{\pico\second}$. This time is much shorter than the minimum Purcell enhanced decay time of $1/(5\Gamma_0)=\unit{200}{\pico\second}$. Let us note in passing that the effective switching time could be engineered to be as short as 2 to \unit{3}{\pico\second} by either decreasing the free carrier lifetime \cite{nemec2001} or increasing the frequency shift of the cavity resonance. For times much longer than the switch time, we see a lower intensity relative to the unswitched case due to the depletion of the population density of the emitter discussed in Sec.~\ref{sec:poppulsed}.
In addition to changing the real part of the refractive index the excited free carriers also introduce absorption of the light in the cavity. As discussed in Appendix~\ref{app:FCA} the qualitative features in Fig.~\ref{fig:popintensitydecay}(b) with an intense photon burst are robust against realistic levels of free carrier absorption, with only minor reduction in the peak height of 15\% for a quality factor of around 1000. For cavities in GaAs with higher Q where a lower free carrier concentration of $N\simeq\unit{\power{10}{18}}{\centi\meter\rpcubed}$ is sufficient to switch several linewidths, even lower reductions are expected.
In the second case in Fig.~\ref{fig:popintensitydecay}(b) the switching event inhibits the decay rate, which results in a short temporal drop in the emission intensity relative to the stationary case. The temporal duration of the drop is again limited by the switch time. The drop in intensity exemplifies a highly unusual shape for a decay curve where the radius of curvature, during the free switching time, is negative during the decay. In the traditional paradigm of steady state spontaneous emission such a negative radius of curvature would be unphysical. An exponential decay with a stationary decay rate or even a sum of exponentials with stationary rates will always reveal a positive radius of curvature in the resulting decay curve. This shows the flexibility of the switching mechanism to shape the temporal emission distribution of the emitted photons at will.
\section{Discussion} Spontaneous emission is a stochastic process for a photon to be emitted from an excited light source \cite{loudon1983aa}. It is thus impossible to predict the exact time when an excited source will emit a photon. On the other hand, the distribution of photon emission times, averaged over many excitation cycles, is usually well-known. In the weak coupling limit the distribution of photon arrival times is exponential and characterized by a single rate given by Einstein's A coefficient. Much interest has been devoted to controlling spontaneous emission by modifying this rate using the Purcell effect by embedding emitters in a nano-structured environment \cite{gerard1998aa,bayer2001aa,vahala2003aa,sapienza2010aa}. Several schemes have been implemented to tune the decay rate in time such as gas deposition, temperature, and electronic gating. However, the modification in the rate has in all cases remained constant in time during a single decay process. For this reason, the distribution of photon arrival times remains single exponential, and is simply described by one enhanced or inhibited single exponent.
We have here introduced a new paradigm with dynamic control of the local density of states in time using all optical switching. The spontaneous emission process remains stochastic in time but the dynamical change in the decay rate results in a strongly non-exponential temporal distribution of photon emission times. The active switching process allows us to deterministically control the photon distribution in time. We have shown (in Fig.~\ref{fig:popintensitydecay}) that photon arrival times can be bunched in short bursts where timing and duration of the burst can be fully controlled by the experimentalist. Naturally, within these short emission pulses, the individual photons still arrive at unpredictable moments in time. This approach is thus different from \cite{Scully2003} where an essential non-adiabatic process is necessary to create an enhanced emission intensity.
On a more fundamental level spontaneous emission arises from the interaction between a single quantum emitter and fluctuations in the vacuum field at the emitter position \cite{haroche1989aa,loudon1983aa}. By dynamically modifying the environment of the emitter our approach gives direct temporal control of the local strength of the vacuum field on timescales much shorter than the excited state lifetime. As shown in Fig.~\ref{fig:popintensitydecay} this allows us to manipulate the excited state probability for a quantum emitter in time and subsequently control the time dependence of the single photon wave function of the emitted photon. Such control opens great prospects in quantum information processing and allows us to shape the photon wave function emitted by single photon sources, for example for optimal mode matching of photons \cite{Rohde2005} and to enhance the absorption of single photons \cite{Johne2011,Dilley2012}. More generally, by dynamically tuning a cavity in the vicinity of a quantum emitter we can drastically modulate the light--matter coupling between the emitter and the cavity mode. This offers interesting prospects where a system is modulated between the weak and strong coupling regime while emitting a single photon \cite{fernee2007}. More complicated coherent quantum systems can be employed to offer more control of the emitted single photons \cite{Su2009}, although this goes beyond the present scope of spontaneous emission control.
For very fast switching events the decay rate of the emitter can no longer adiabatically follow changes in the environment and the decay rate is not proportional to the instantaneous LDOS but depends also upon the past history of changes in the LDOS \cite{lagendijk1993aa}. Our method thus offers a novel tool to realize non-Markovian dynamics in cavity quantum electrodynamics, namely by very fast modulation. This means that for example a coupled cavity-emitter system that would be weakly coupled in the steady state case can be brought out of the weak coupling limit by switching of the cavity resonance faster than the inverse storage time. To treat this regime a full coherent model describing the emitter-bath interaction is needed. The internal non-Markovian dynamics of the emitter can in this case be detected for example using a time- and frequency gating technique\cite{Dorfman2012}.
For a large ensemble of emitters our approach offers a tool to implement a bright ultra-fast light source based on spontaneous emission with a low temporal coherence. This source has a potentially much shorter pulse duration than electronically controlled LEDs. The photon statistics of such a source differs significantly from known laser action such as Q-switching or cavity dumping. An ultrafast low coherence source may find applications in speckle-free imaging, which requires low coherence \cite{mosk2012}.
\section{Conclusion} We have demonstrated that by dynamically controlling the local density of states, the radiative decay of emitters can be drastically modified during the characteristic decay time. For pulsed excitation the dynamic decay rate results in a strongly non-exponential distribution of photon arrival times. A figure of merit has been introduced that quantifies the total effect of the modulation on the spontaneous emission dynamics. The introduced model is geared toward experimental validation using free carrier switching of micropillar cavities with embedded quantum dots.
\appendix
\section{Effect of free carrier absorption on the dynamic emission intensity\label{app:FCA}} In this appendix we discuss the expected linewidth broadening of a switched cavity as a result of free carrier absorption and quantify the effect on the dynamic emission intensity for emitters embedded in a switched cavity. The free carriers modify both the real part $n$ and the imaginary part $n''$ of the complex refractive index $\tilde n$ whose components are linked through the Kramers-Kronig relations. Thus, the free carriers induce absorption of the light in the cavity. The absorption manifests itself as a broadening of the cavity linewidth during the switch event. For applications where the interest is in the photons emitted from the cavity, such losses are an unwanted effect. A side effect of the loss mechanism, and the associated linewidth broadening, is a decrease of the local density of states experienced by the emitter, which can be exploited as an additional switching mechanism.
\begin{figure}\label{fig:IntensityDecayLoss}
\end{figure}
To assess the effect of the free carrier absorption on the linewidth we obtain a relation between broadening of the cavity resonance and the switching magnitude both normalized to the unswitched linewidth. We first separate the total cavity linewidth $\gamma(t)$ into a sum of the intrinsic linewidth of the unswitched cavity $\gamma_i$ and a loss rate due to free carrier absorption $\gamma_a(t)$. \begin{equation}
\gamma(t) = \gamma_i + \gamma_a(t)\label{eq:gamma}. \end{equation} For GaAs, the Drude model is a good approximation for relatively low carrier concentration $N < \unit{\power{10}{19}}{\centi\meter\rpcubed}$ after thermalization of the free carriers at $t>\unit{6}{\pico\second}$. Within this approximation, the imaginary part of the refraction index and therefore the loss rate $\gamma_a(t)$ is proportional to the free carrier concentration $N$. Similarly, the change in the real part is proportional to $N$. To first order we can therefore assume a linear relation between the shift of the cavity resonance frequency $\Delta\omega(t)$ and the loss rate for small frequency shifts. Defining the relative shift as \begin{equation}
S(t) \equiv \frac{\Delta\omega(t)}{\gamma_i} \end{equation} the relative linewidth can be written as \begin{equation}
\frac{\gamma(t)}{\gamma_i} = 1 + a\, S(t)\label{eq:gamma_rel} \end{equation} where $a$ is a phenomenological constant. Equation~\eqref{eq:gamma_rel} directly relates the relative broadening of the cavity linewidth with the switching magnitude. There are few published data on the effective losses caused by free carrier absorption. Nevertheless, we can extract $a$ from previously published data on switched GaAs planar cavities \cite{harding2012}. The relative linewidth as a function of the switching magnitude is extracted from the same data in Fig.~3.8 in \cite{harding2008Thesis} and is consistent with \cite{harding2009ArXiv}. A value of $a\simeq0.083$ fits the data remarkably well for shifts smaller than 4 linewidths, which yields an increase in the linewidth by 25\% for a 3 linewidth switch.
With the considerations above we can study the effect of free carrier absorption on the time resolved emission curves in Fig.~\ref{fig:popintensitydecay}(b). The absorption contributes via two effects to the emitted intensity. First, the absorption decreases the effective quality factor, Q. In the weak coupling limit the radiative rate $\Gamma_\mathrm{rad}(t)$ is proportional to Q and the radiative rate $\Gamma_\mathrm{rad}(t)$ must be scaled by $\gamma_i/\gamma(t)$. Secondly, a fraction $[1-\gamma_i/\gamma(t)]$ of the photons emitted into the cavity is absorbed, and only the remaining fraction $\gamma_i/\gamma(t)$ leaves the cavity to be detected. The modified time-dependent intensity is therefore \begin{equation}
I(t) = \Gamma_{\mathrm{rad}}(t) \left(\frac{\gamma_i}{\gamma(t)}\right)^2\, N_{02}\exp\left(-\int_0^{t-t_{\mathrm{0exc}}}\frac{\gamma_i}{\gamma(t')}\Gamma_{\mathrm{rad}}(t')dt'\right).\label{eq:IntensityDecayLoss} \end{equation} where $\Gamma_{\mathrm{rad}}(t)$ is given by Eq.~\eqref{eq:raddecay} and $\gamma_i/\gamma(t)$ is the inverse of Eq.~\eqref{eq:gamma_rel}. Similarly to the description in Sec.~\ref{sec:timedeprate}, we assume a linear relation between the switching magnitude $S(t)$ and the free carrier concentration. Thus, $S(t)$ has the form \begin{equation} S(t) = S_0 e^{\frac{-(t-t_{\mathrm{0pu}})}{\tau_{\mathrm{sw}}}}\Theta(t-t_{\mathrm{0pu}},\tau_\mathrm{pu}) \end{equation} where $S_0$ is the maximum frequency shift of the cavity resonance relative to the initial cavity linewidth.
Equation~\eqref{eq:IntensityDecayLoss} cannot be solved analytically and must be integrated numerically. The result is shown in Fig.~\ref{fig:IntensityDecayLoss}, which compares the time resolved emission intensity with and without free carrier absorption. Without absorption the intensity curve is identical to the green dashed line in Fig.~\ref{fig:popintensitydecay}(b) that shows the characteristic strongly non-exponential decay dynamics with an intense photon burst at the switching time $t=\unit{150}{\pico\second}$. When including free carrier absorption (solid red line) with $a=0.083$ and a switching magnitude of one linewidth $S_0=1$, the shape of the peak is barely modified. We only observe a small reduction by 15\% of the original peak and a gentler slope back down to its unswitched dynamics compared to the direct change in the free carrier concentration. The intensity dynamics is therefore hardly affected by free carrier absorption.
\section*{Acknowledgments} \label{sec:Acknowledgement} We thank Bart Husken, Merel Leistikow and Ivan Nikolaev for contributions at an early stage and Allard Mosk and Ad Lagendijk for discussions. This work is part of the research programme of the ``Stichting voor Fundamenteel Onderzoek der Materie'' (FOM) and of the project ``Zap!: Ultrafast time control of spontaneous emission'' (STW), which is financially supported by the NWO. JMG acknowledges support from the CAFE project financed by the French National Research Agency (ANR).
\end{document} | arXiv | {
"id": "1301.7612.tex",
"language_detection_score": 0.8058780431747437,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\global\long\def\mathbb{C}{\mathbb{C}} \global\long\defS^{1}{S^{1}} \global\long\def\mathbb{R}{\mathbb{R}} \global\long\def\curvearrowright{\curvearrowright} \global\long\def\rightarrow{\rightarrow} \global\long\def\mathbf{z}{\mathbf{z}} \global\long\def\mathbb{Z}{\mathbb{Z}} \global\long\def\mathbb{N}{\mathbb{N}} \global\long\def\mathrm{sgn}\:{\mathrm{sgn}\:} \global\long\def\RR_{>0}{\mathbb{R}_{>0}} \global\long\def\mathrm{var}{\mathrm{var}} \global\long\def\int_{-\pi}^{\pi}{\int_{-\pi}^{\pi}} \global\long\def\mathcal{F}{\mathcal{F}} \global\long\def\pb#1{\langle#1\rangle} \global\long\def\mathrm{op}{\mathrm{op}} \global\long\def\mathrm{op}{\mathrm{op}} \global\long\def\mathrm{supp}{\mathrm{supp}} \global\long\def\ceil#1{\lceil#1\rceil} \global\long\def\mathrm{TV}{\mathrm{TV}} \global\long\def\floor#1{\lfloor#1\rfloor} \global\long\def\vartheta{\vartheta} \global\long\def\varphi{\varphi} \global\long\def\class#1{[#1]} \global\long\def(\cdot){(\cdot)} \global\long\def\mathbb{1}{\mathbb{1}} \global\long\def\mathrm{cov}{\mathrm{cov}} \global\long\def\mathbb{C}{\mathbb{C}} \global\long\defS^{1}{S^{1}} \global\long\def\mathbb{R}{\mathbb{R}} \global\long\def\curvearrowright{\curvearrowright} \global\long\def\rightarrow{\rightarrow} \global\long\def\mathbf{z}{\mathbf{z}} \global\long\def\mathbb{Z}{\mathbb{Z}} \global\long\defh{\mu} \global\long\def*_{\RR}{*_{\mathbb{R}}} \global\long\def\mathbf{x}{\mathbf{x}} \global\long\def\epsilon{\epsilon} \global\long\def\mathfrak{c}{\mathfrak{c}} \global\long\def\wh#1{\hat{#1}}
\global\long\def\norm#1{\left|\left|#1\right|\right|} \global\long\defT{T} \global\long\defm{m} \global\long\def\degC#1{#1^{\circ}\mathrm{C}} \global\long\def\mathcal{X}{\mathbf{X}} \global\long\def\boldsymbol{\beta}{\boldsymbol{\beta}} \global\long\def\mathbf{b}{\mathbf{b}} \global\long\def\mathbf{Y}{\mathbf{Y}} \global\long\def\mathbf{H}{\mathbf{H}} \global\long\def\bm{\epsilon}{\bm{\epsilon}} \global\long\def\mathbf{s}{\mathbf{s}} \global\long\def\mathbf{t}{\mathbf{t}} \global\long\def\mathbf{R}{\mathbf{R}} \global\long\def\partial A_c{\partial A_c} \global\long\def\mathrm{cl}{\mathrm{cl}} \global\long\def\text{H\"older}{\text{H\"older}} \global\long\def\bb#1{\mathbb{#1}} \global\long\def\bm{X}{\bm{X}} \global\long\def\bm{Y}{\bm{Y}} \global\long\def\mathfrak{D}{\mathfrak{D}} \global\long\defd_{\mathcal{GH}}{d_{\mathcal{GH}}} \global\long\defd_{\mathcal{W},p}{d_{\mathcal{W},p}} \global\long\def\underline{\mu}{\underline{\mu}} \global\long\def\mathbf{FLB}{\mathbf{FLB}} \global\long\def\mathbf{d_\bX}{\mathbf{d_\bm{X}}} \global\long\def\mathbf{d_\bY}{\mathbf{d_\bm{Y}}} \global\long\def{\mu_\bX}{{\mu_\bm{X}}} \global\long\def{\mu_\bY}{{\mu_\bm{Y}}} \global\long\def\cal#1{\mathbf{\mathcal{#1}}} \global\long\defU_{\bX n}{U_{\bm{X} n}} \global\long\def{\Delta_{\bX}}{{\Delta_{\bm{X}}}} \global\long\def{\Delta_{\bY}}{{\Delta_{\bm{Y}}}} \global\long\def\mathrm{d}{\:\mathrm{d}} \global\long\def\mathfrak{d}{\mathfrak{d}} \global\long\def\hat{p}{\hat{p}} \global\long\def\hat{\mu}{\hat{\mu}} \global\long\def\one{\mathbb{1}} \global\long\def\mathcal{T}{\mathcal{T}} \global\long\def\mathrm{root}{\mathrm{root}} \global\long\def\mathrm{parent}{\mathrm{parent}} \global\long\def\mathrm{children}{\mathrm{children}} \global\long\def\scp#1#2{\langle #1, #2 \rangle} \global\long\def\mathcal{X}{\mathcal{X}} \global\long\def\bm{r}{\bm{r}} \global\long\def\bm{s}{\bm{s}} \global\long\def\hat{\bm{r}}{\hat{\bm{r}}} \global\long\def\hat{\bm{s}}{\hat{\bm{s}}} \global\long\def\hat{r}{\hat{r}} 
\global\long\def\hat{s}{\hat{s}} \global\long\def{\bm{u}}{{\bm{u}}} \global\long\def\bm{v}{\bm{v}} \global\long\def\bm{h}{\bm{h}} \global\long\def\bm{w}{\bm{w}} \global\long\def{\bm{D}}{{\bm{D}}} \global\long\def\bm{x}{\bm{x}} \global\long\def\bm{y}{\bm{y}} \global\long\def\bm{Z}{\bm{Z}} \global\long\def\hat{W}^{(S)}{\hat{W}^{(S)}} \global\long\def{\sigma_{\mathrm{max}}}{{\sigma_{\mathrm{max}}}} \global\long\def\sigma_\T{\sigma_\mathcal{T}} \global\long\def\bm{\sigma}_\T{\bm{\sigma}_\mathcal{T}} \global\long\def\mathcal{E}{\mathcal{E}} \global\long\def\mathcal{V}{\mathcal{V}} \global\long\defc_{\mathrm{max}}{c_{\mathrm{max}}} \global\long\defh{h} \global\long\def\mathrm{Id}{\mathrm{Id}} \global\long\def\mathcal{N}{\mathcal{N}} \global\long\def\:\mathrm{diam}{\:\mathrm{diam}} \global\long\def{n^{*}}{{n^{*}}} \global\long\def{p^{*}}{{p^{*}}} \global\long\defW^{(t)}{W^{(t)}} \global\long\def\mathrm{BL}_1{\mathrm{BL}_1} \global\long\def\mathcal{I}{\mathcal{I}} \global\long\def\stackrel{D}{\sim}{\stackrel{D}{\sim}} \global\long\def\mathrm{d}{\mathrm{d}} \global\long\defd^p\!{d^p\!}
\title{Inference for empirical Wasserstein distances on finite spaces}
\author{Max Sommerfeld\footremember{fbms}{\scriptsize Felix Bernstein Institute
for
Mathematical Statistics in the Biosciences and Institute for Mathematical
Stochastics, University of G\"ottingen,
Goldschmidtstra{\ss}e 7, 37077 G\"ottingen}
\and
Axel Munk\footrecall{fbms} \footnote{\scriptsize Max Planck Institute for
Biophysical
Chemistry, Am Fa{\ss}berg 11, 37077 G\"ottingen}
}
\date{}
\maketitle
\begin{abstract}
The Wasserstein distance is an attractive
tool for data analysis
but statistical
inference is hindered by the lack of distributional limits. To overcome this
obstacle, for probability measures
supported on finitely many points, we derive the asymptotic distribution of
empirical Wasserstein distances as the optimal value of a
linear program with random objective function. This facilitates statistical
inference (e.g. confidence intervals for sample based Wasserstein distances)
in large
generality. Our proof is based on directional Hadamard
differentiability. Failure of the classical bootstrap and alternatives are
discussed.
The utility of the distributional results is illustrated on two data sets. \end{abstract}
\scriptsize \textbf{Keywords: }optimal transport, Wasserstein distance, central limit theorem, directional Hadamard derivative, bootstrap, hypothesis testing
\textbf{AMS 2010 Subject Classification:} Primary: 62G20, 62G10, 65C60 Secondary: 90C08, 90C31 \normalsize
\section{Introduction} \label{sec:intro}
The
\textit{Wasserstein distance} \citep{vasershtein_markov_1969}, also known as Mallows distance \citep{mallows_note_1972}, Monge-Kantorovich-Rubinstein distance in the physical sciences \citep{kantorovich_space_1958,rachev_monge-kantorovich_1985, jordan_variational_1998}, earth-mover's distance in computer science \citep{rubner_earth_2000} or optimal transport distance in optimization \citep{ambrosio_lecture_2003}, is one of the most fundamental metrics on the space of probability measures. Besides its prominence in probability (e.g. \cite{dobrushin_prescribing_1970, gray_probability_1988}) and finance (e.g. \cite{rachev_mass_1998}) it has deep connections to the asymptotic theory of PDEs of diffusion type (\cite{otto_geometry_2001}, \cite{villani_topics_2003, villani_optimal_2008} and references therein). In a statistical setting it has mainly been used as a tool to prove weak convergence in the context of limit laws (e.g. \cite{bickel_asymptotic_1981, shorack_empirical_1986, Johnson2005, dumbgen_approximation_2011, Dorea2012}) as it metrizes weak convergence together with convergence of moments. However, recently the empirical (i.e. estimated from data) Wasserstein distance has also been recognized as a central quantity itself in many applications, among them clinical trials \citep{Munk1998, freitag_nonparametric_2007}, metagenomics \citep{evans_phylogenetic_2012}, medical imaging \citep{Ruttenberg2013}, goodness-of-fit testing \citep{Freitag2005, Barrio1999}, biomedical engineering \citep{oudre_classification_2012}, computer vision \citep{gangbo_shape_2000, ni_local_2009}, cell biology \citep{orlova_earth_2016} and model validation \citep{halder_model_2011}. The barycenter with respect to the Wasserstein metric \citep{agueh_barycenters_2011} has been shown to elicit important structure from complex data and to be a promising tool, for example in deformable models
\citep{boissard_distributions_2015, agullo-antolin_parametric_2015}. It has also been used in large-scale Bayesian inference to combine posterior distributions from subsets of the data \citep{srivastava_scalable_2015}.
Generally speaking three characteristics of the Wasserstein distance make it particularly attractive for various applications. First, it incorporates a ground distance on the space in question. This often makes it more adequate than competing metrics such as total-variation or $\chi^2$-metrics which are oblivious to any metric or similarity structure on the ground space. As an example, the success of the Wasserstein distance in metagenomics applications can largely be attributed to this fact (see \cite{evans_phylogenetic_2012} and also our application in Section \ref{sub:appl_alt}).
Second, it has a clear and intuitive interpretation as the amount of `work' required to transform one probability distribution into another and the resulting transport can be visualized (see Section \ref{sub:FP}). This is also interesting in applications where probability distributions are used to represent actual physical mass and spatio-temporal changes have to be tracked.
Third, it is well-established \citep{rubner_earth_2000} that the Wasserstein distance performs exceptionally well at capturing human perception of similarity. This motivates its popularity in computer vision and related fields.
Despite these advantages, the use of the empirical Wasserstein distance in a statistically rigorous way is severely hampered by a lack of inferential tools. We argue that this issue stems from considering too large classes of candidate distributions (e.g. those which are absolutely continuous with respect to the Lebesgue measure if the ground space has dimension $\geq 2$). In this paper, we therefore discuss the Wasserstein distance on finite spaces, which allows us to solve this issue. We argue that the restriction to finite spaces is not merely an approximation to the truth, but rather that this setting is sufficient for many practical situations as measures often already come naturally discretized (e.g. two- or three-dimensional images - see also our applications in Section \ref{sec:appl}).
We remark that from our methodology further inferential procedures can be derived, e.g. a (M)ANOVA type of analysis and multiple comparisons of Wasserstein distances based on their $p$-values (see e.g. \cite{benjamini_controlling_1995}). Our techniques also extend immediately to dependent samples $(X_i,Y_i)$ with marginals $\bm{r}$ and $\bm{s}$.
\paragraph{Wasserstein distance} Let $(\mathcal{X},d)$ be a complete metric space with metric $d:\mathcal{X}\times\mathcal{X}\rightarrow\mathbb{R}_{\geq 0}$. The \textit{Wasserstein distance of order $p$} ($p\geq 1$) between two Borel probability measures $\mu_1$ and $\mu_2$ on $\mathcal{X}$ is defined as \[
W_p(\mu_1, \mu_2) = \left\{ \inf_{\nu\in\Pi(\mu_1, \mu_2)} \int_{\mathcal{X}\times \mathcal{X}} d^p\!(x,x') \nu(dx, dx') \right\}^{1/p}, \] where $\Pi(\mu_1,\mu_2)$ is the set of all Borel probability measures on $\mathcal{X}\times \mathcal{X}$ with marginals $\mu_1$ and $\mu_2$, respectively.
\paragraph{Wasserstein distance on finite spaces} If we restrict in the above definition $\mathcal{X} = \left\{ x_1,\dots,x_N \right\}$ to be a finite space, every probability measure on $\mathcal{X}$ is given by a vector $\bm{r}$ in
$\mathcal{P}_\mathcal{X} = \left\{ \bm{r} = (r_x)_{x\in\mathcal{X}} \in\mathbb{R}_{> 0}^\mathcal{X} :
\sum_{x\in\mathcal{X}} r_x =1 \right\}$, via $P_{\bm{r}}(\{x\}) = r_x$. We will not distinguish between the vector $\bm{r}$ and the measure it defines. The \textit{Wasserstein distance of order $p$} between two finitely supported probability measures $\bm{r},\bm{s} \in\mathcal{P}_\mathcal{X}$ then becomes \begin{equation}
W_p(\bm{r}, \bm{s}) =
\left\{
\min_{\bm{w}\in\Pi(\bm{r}, \bm{s})} \sum_{x,x'\in\mathcal{X}} d^p\!(x, x') w_{x,x'} \right\}^{1/p},
\label{eq:def_wass} \end{equation} where $\Pi(\bm{r}, \bm{s})$ is the set of all probability measures on $\mathcal{X}\times\mathcal{X}$ with marginal distributions $\bm{r}$ and $\bm{s}$, respectively. All our methods and results concern this Wasserstein distance on finite spaces.
\subsection{Overview of main results} \paragraph{Distributional limits} The basis for inferential procedures for the Wasserstein distance on finite spaces is a limit theorem for its empirical version $W_p(\hat{\bm{r}}_n, \hat{\bm{s}}_m)$. Here, the empirical measure generated by independent random variables $X_1,\dots, X_n\sim\bm{r}$ is given by $ \hat{\bm{r}}_n = (\hat{r}_{n,x})_{x\in\mathcal{X}}$, where $\hat{r}_{n,x}= \frac{1}{n} \#\left\{ k : X_k=x \right\}$. Let $\hat{\bm{s}}_m$ be generated from i.i.d. $Y_1,\dots,Y_m\sim \bm{s}$ in the same fashion. Under the null hypothesis $\bm{r} = \bm{s}$ we prove that \begin{equation}
\left(\frac{nm}{n+m}\right)^{\frac{1}{2p}} W_p(\hat{\bm{r}}_n,\hat{\bm{s}}_m) \Rightarrow
\left\{ \max_{{\bm{u}} \in \Phi^*_p} \scp{\bm{G}}{{\bm{u}}}
\right\}^{\frac{1}{p}}, \quad n,m\rightarrow \infty.
\label{eq:first_mention_distr_lim} \end{equation} Here, '$\Rightarrow$' means convergence in distribution, $\bm{G}$ is a mean zero Gaussian random vector with covariance depending on $\bm{r}=\bm{s}$ and $\Phi_p^*$ is the convex set of dual solutions to the Wasserstein problem depending on the metric $d$ only (see Theorem \ref{thm:full}). In Section \ref{sub:FP} we use this result to assess the statistical significance of the differences between real and synthetically generated fingerprints in the Fingerprint Verification Competition \citep{maio_fvc2002}.
We give analogous results under the alternative $\bm{r}\neq\bm{s}$. This extends the scope of our results beyond the classical two-sample (or goodness-of-fit) test as it allows for confidence statements on $W_p(\bm{r}, \bm{s})$ when the null hypothesis of equality is likely or even \textit{known to be false}. An example for this is given by our application to metagenomics (Section \ref{sub:appl_alt}) where samples from the same person taken at different times are typically statistically different, but our asymptotic results allow us to assert with statistical significance that inter-personal distances are larger than intra-personal ones.
\paragraph{Proof strategy} We prove these results by showing that the Wasserstein distance is \textit{directionally Hadamard differentiable} \citep{Shapiro1990} and the right hand side of \eqref{eq:first_mention_distr_lim} is its derivative evaluated at the Gaussian limit of the empirical multinomial process (see Theorem \ref{thm:derivative_Wasserstein}). This notion generalizes Hadamard differentiability by allowing \textit{non-linear} derivatives but still allows for a refined delta-method (\cite{Romisch2004} and Theorem \ref{thm:delta_method}). Notably, the Wasserstein distance is not Hadamard differentiable in the usual sense.
\paragraph{Explicit limiting distribution for tree metrics} When the space $\mathcal{X}$ is the set of vertices of a tree and the metric $d$ is given by path length, we give an explicit expression for the limiting distribution in \eqref{eq:first_mention_distr_lim} (see Theorem \ref{thm:trees}). In contrast to the general case, this explicit formula allows for fast and direct simulation of the limiting distribution. This extends a previous result of \cite{Samworth2004} who considered a finite number of point masses on the real line. The Wasserstein distance on trees has, to the best of our knowledge, only been considered in two papers: \cite{kloeckner_geometric_2013} studies the geometric properties of the Wasserstein space of measures on a tree and \cite{evans_phylogenetic_2012} use the Wasserstein distance on phylogenetic trees to compare microbial communities. \paragraph{The bootstrap} Directional Hadamard differentiability is not enough to guarantee the consistency of the naive ($n$ out of $n$) bootstrap \citep{dumbgen_nondifferentiable_1993, fang_inference_2014} -- in contrast to the usual notion of Hadamard differentiability. This implies that the bootstrap is \textit{not} consistent for the Wasserstein distance \eqref{eq:def_wass} (see Theorem \ref{thm:boot}). In contrast, the $m$-out-of-$n$ bootstrap for $m/n\rightarrow 0$ is known to be consistent in this setting \citep{dumbgen_nondifferentiable_1993} and can be applied to the Wasserstein distance. Under the null hypothesis $\bm{r}=\bm{s}$, however, there is a more direct way of obtaining an approximation of the limiting distribution. In the appendix, we discuss this alternative re-sampling scheme based on ideas of \cite{fang_inference_2014}, which essentially consists of plugging in a bootstrap version of the underlying empirical process in the derivative. 
We show that this scheme, which we will call \textit{directional bootstrap}, is consistent for the Wasserstein distance (see Theorem \ref{thm:boot}, Section \ref{sub:boot}).
\subsection{Related work} \paragraph{Empirical Wasserstein distances} In very general terms, we study a particular case (finite spaces) of the following question and its two-sample analog: Given the empirical measure $\mu_n$ based on $n$ i.i.d. random variables taking values in a metric space with law $\mu$. What can be inferred about $W_p(\mu_n,\mu_0)$ for a reference measure $\mu_0$ which may be equal to $\mu$?
It is a well-known and straightforward consequence of the strong law of large numbers that if the $p$-th moments are finite for $\mu$ and $\mu_0$
then $W_p(\mu_n, \mu_0)$ converges to $W_p(\mu, \mu_0)$, almost surely, as the sample size $n$ approaches infinity {\citep[Cor. 6.11]{villani_optimal_2008}}. Determining the exact rate of this convergence is the subject of an impressive body of literature developed over the last decades starting with the seminal work of \cite{ajtai_optimal_1984} considering for $\mu_0$ the uniform distribution on the unit square, followed by \cite{talagrand_matching_1992, talagrand_transportation_1994} for the uniform distribution in higher dimensions and \cite{Horowitz1994} giving bounds on mean rates of convergence. \cite{Boissard2014,fournier_rate_2014} gave general deviation inequalities for the empirical Wasserstein distance on metric spaces. For a discussion in the light of our distributional limit results see Section \ref{sec:dis}.
Distributional limits give a natural perspective for practicable inference, but despite considerable interest in the topic have remained elusive to a large extent. For measures on $\mathcal{X} = \mathbb{R}$ a rather complete theory is available (see \cite{Munk1998, freitag_nonparametric_2007, Freitag2005} for $\mu_0\neq \mu$ and e.g. \cite{Barrio1999, samworth_empirical_2005, Barrio2005} for $\mu_0 = \mu$ as well as \cite{mason_weighted_2016,bobkov_one-dimensional_2014} for recent surveys). However, for $\mathcal{X} = \mathbb{R}^d$, $d\geq 2$ the only distributional result known to us is due to \cite{rippl_limit_2015} for specific multivariate (elliptic) parametric classes of distributions, when the empirical measure is replaced by a parametric estimate. In the context of deformable models distributional results are proven \citep{del_barrio_statistical_2015} for specific multidimensional parametric models which factor into one-dimensional parts.
The simple reason why the Wasserstein distance is so much easier to handle in the one-dimensional case is that in this case the optimal coupling attaining the infimum in \eqref{eq:def_wass} is known explicitly. In fact, the Wasserstein distance of order $p$ between two measures on $\mathbb{R}$ then becomes the $L^p$ norm of the difference of their quantile functions (see \cite{mallows_note_1972} for an early reference) and the analysis of empirical Wasserstein distances can be based on quantile process theory. Beyond this case, explicit coupling results are only known for multivariate Gaussians and elliptic distributions \citep{Gelbrich1990}. A classical result of \cite{ajtai_optimal_1984} for the uniform distribution on $\mathcal{X} = [0,1]^2$ suggests that, even in this simple case, distributional limits will have a complicated form if they exist at all. We will elaborate on this thought in the discussion, in Section \ref{sec:dis}.
The Wasserstein distance on finite spaces has been considered recently by \cite{gozlan_displacement_2013} to derive entropy inequalities on graphs and by \cite{erbar_ricci_2012} to define Ricci curvature for Markov chains on discrete spaces. To the best of our knowledge, empirical Wasserstein distances on finite spaces have only been considered by \cite{Samworth2004} in the special case of measures supported on $\mathbb{R}$. We will show (Section \ref{sec:trees}) that our results extend theirs.
\paragraph{Directional Hadamard differentiability} We prove our distributional limit theorems using the theory of parametric programming \citep{bonnans_perturbation_2013} which investigates how the optimal value and the optimal solutions of an optimization problem change when the objective function and the constraints are changed. While differentiability properties of optimal values of linear programs are extremely well studied such results have, to the best of our knowledge, not yet been applied to the statistical analysis of Wasserstein distances.
It is well-known that under certain conditions the optimal value of a mathematical program is differentiable with respect to the constraints of the problem \citep{Rockafellar1984, Gal1997}. However, the derivative will typically be non-linear. The appropriate concept for this is directional Hadamard differentiability \citep{Shapiro1990}. The derivative of the optimal value of a mathematical program is typically again given as an extremal value.
Although the delta-method
for directional Hadamard derivatives has been known for a long time \citep{shapiro_asymptotic_1991, dumbgen_nondifferentiable_1993}, this notion scarcely appears in the statistical context (with some exceptions, such as \cite{Romisch2004}, see also \cite{donoho_pathologies_1988}). Recently, an interest in the topic has evolved in econometrics (see \cite{fang_inference_2014} and references therein).
\paragraph{Organization of the paper} In Section \ref{sec:asymp_distr} we give a comprehensive result on distributional limits for the Wasserstein distance for measures supported on finitely many points. In Section \ref{sec:appl} we apply our methods to two data sets to highlight different aspects. In Section \ref{sec:dis} we briefly address limitations and possible extensions of our work. In the supplementary Material we discuss the bootstrap for the Wasserstein distance and give some technical proofs.
\section{Distributional limits} \label{sec:asymp_distr} \subsection{Main result} In this section we give a comprehensive result on distributional limits for the Wasserstein distance when the underlying population measures are supported on finitely many points $\mathcal{X} = \left\{ x_1, \dots, x_N \right\}$. We denote the inner product on the vector space $\mathbb{R}^\mathcal{X}$ by
$\scp{{\bm{u}}}{{\bm{u}}'}=\sum_{x\in \mathcal{X}}u_x u'_{x}$ for ${\bm{u}},{\bm{u}}' \in \mathbb{R}^\mathcal{X}$. \begin{thm}
\label{thm:full}
Let $p\geq 1$, $\bm{r}, \bm{s}\in\mathcal{P}_\mathcal{X}$ and $\hat{\bm{r}}_n, \hat{\bm{s}}_m$ generated by
i.i.d.
samples $X_1,\dots, X_n\sim\bm{r}$ and
$Y_1,\dots, Y_m\sim\bm{s}$, respectively. We
define the convex sets
\begin{equation}
\begin{split}
\Phi^*_p &=
\left\{
{\bm{u}}\in\mathbb{R}^\mathcal{X}:
u_x - u_{x'} \leq d^p(x,x'), \quad x,x'\in\mathcal{X}
\right\} \\
\Phi^*_p(\bm{r}, \bm{s}) &=
\left\{
({\bm{u}},\bm{v})\in\mathbb{R}^\mathcal{X} \times \mathbb{R}^\mathcal{X} :
\begin{split}
&\scp{{\bm{u}}}{\bm{r}} + \scp{\bm{v}}{\bm{s}}= W_p^p(\bm{r}, \bm{s}), \\
&u_x + v_{x'} \leq d^p(x, x'), \: x,x'\in\mathcal{X}
\end{split}
\right\}
\end{split}
\label{eq:sets}
\end{equation} and the multinomial covariance matrix
\begin{equation}
\Sigma(\bm{r}) =
\begin{bmatrix}
r_{x_1} (1 - r_{x_1}) & -r_{x_1} r_{x_2} & \cdots & -r_{x_1} r_{x_N} \\
-r_{x_2} r_{x_1} & r_{x_2} (1 - r_{x_2}) & \cdots & -r_{x_2} r_{x_N} \\
\vdots & & \ddots &\vdots \\
- r_{x_N} r_{x_1}& -r_{x_N} r_{x_2} & \cdots & r_{x_N} (1 - r_{x_N})
\end{bmatrix}
\label{eq:def_Sigma}
\end{equation}
such that with independent Gaussian random variables
$\bm{G}\sim\mathcal{N}(0,\Sigma(\bm{r}))$ and $\bm{H}\sim\mathcal{N}(0,
\Sigma(\bm{s}))$ we have the following.
\begin{enumerate}[a)]
\item \textbf{(One sample - Null hypothesis)} With the sample size $n$
approaching infinity, we have the weak convergence
\begin{equation}
n^{\frac{1}{2p}} W_p(\hat{\bm{r}}_n, \bm{r}) \Rightarrow
\left\{ \max_{{\bm{u}} \in \Phi^*_p} \scp{\bm{G}}{{\bm{u}}} \right\}^{\frac{1}{p}}.
\label{eq:one_sample_null}
\end{equation}
\item \textbf{(One sample - Alternative)} With $n$ approaching infinity we have
\begin{equation}
n^{\frac{1}{2}} \left(W_p(\hat{\bm{r}}_n, \bm{s}) - W_p(\bm{r}, \bm{s})\right) \Rightarrow
\frac{1}{p} W_p^{1-p}(\bm{r}, \bm{s})
\left\{ \max_{({\bm{u}},\bm{v}) \in \Phi_p^*(\bm{r}, \bm{s})} \scp{\bm{G}}{{\bm{u}}}
\right\}.
\label{eq:one_sample_alt}
\end{equation}
\item \textbf{(Two samples - Null hypothesis)} Let $\rho_{n,m} = \left(
nm/(n+m) \right)^{1/2}$. If $\bm{r} = \bm{s}$ and $n$ and
$m$ are approaching infinity such that $n\wedge m\rightarrow\infty$ and
$m/(n + m)\rightarrow\lambda\in(0,1)$ we have
\begin{equation}
\rho_{n,m}^{1/p}W_p(\hat{\bm{r}}_n, \hat{\bm{s}}_m)
\Rightarrow
\left\{ \max_{{\bm{u}}\in\Phi_p^*} \scp{\bm{G}}{{\bm{u}}}
\right\}^{\frac{1}{p}}.
\label{eq:two_sample_null}
\end{equation}
\item \textbf{(Two samples - Alternative)} With $n$ and $m$
approaching infinity such that $n\wedge m\rightarrow\infty$ and
$m/(n + m)\rightarrow\lambda\in[0,1]$
\begin{equation}
\begin{split}
\rho_{n,m} & \left(
W_p(\hat{\bm{r}}_n, \hat{\bm{s}}_m) - W_p(\bm{r},\bm{s}) \right)
\Rightarrow \\
& \frac{1}{p} W_p^{1-p}(\bm{r}, \bm{s})
\left\{ \max_{({\bm{u}},\bm{v})\in\Phi_p^*(\bm{r}, \bm{s})} \sqrt{\lambda}\scp{\bm{G}}{{\bm{u}}} +
\sqrt{1 - \lambda}\scp{\bm{H}}{\bm{v}} \right\}.
\end{split}
\label{eq:two_sample_alt}
\end{equation}
\end{enumerate} \end{thm} The sets $\Phi_p^*$ and $\Phi_p^*(\bm{r}, \bm{s})$ are (derived from) the dual solutions to the Wasserstein linear program (see Theorem \ref{thm:derivative_Wasserstein} below). This result is valid for all probability measures with finite support, regardless of the (dimension of the) underlying space. In particular, it generalizes a result of \cite{Samworth2004}, who considered a finite collection of point masses on the real line and $p=2$. We will re-obtain their result as a special case in Section \ref{sec:trees} when we give explicit expressions for the limit distribution when the metric $d$, which enters the limit law via the dual solutions $\Phi_p^*$ or $\Phi_p^*(\bm{r},\bm{s})$, is given by a tree.
\begin{rem}
In our numerical experiments (see Section \ref{sec:appl}) we have found the
representation \eqref{eq:two_sample_alt} to be numerically unstable when used
to simulate from the limiting distribution under the alternative. We therefore
give an alternative representation \eqref{eq:altalt} in the supplementary
material as a one-dimensional optimization problem of a non-linear function
(in contrast to a high-dimensional linear program shown here).
Note that the limiting distribution under the null does not suffer from this
problem and can be simulated from directly using a linear program solver. \end{rem}
The scaling rate in Theorem \ref{thm:full} depends solely on $p$ and is completely independent of the underlying space $\mathcal{X}$. This contrasts known bounds on the rate of convergence in the continuous case. We will elaborate on the differences in the discussion. Typical choices are $p=1,2$. The faster scaling rate can be a reason to favor $p=1$. In our numerical experiments however, this advantage was frequently outweighed by larger quantiles of the limiting distribution.
\cite{dumbgen_nondifferentiable_1993} showed that the naive $n$-out-of-$n$ bootstrap is inconsistent for functionals with a non-linear Hadamard derivative, but resampling fewer than $n$ observations leads to a consistent bootstrap. Since we will show in the following that the Wasserstein distance belongs to this class of functionals, it is a direct consequence that the naive bootstrap fails for the Wasserstein distance (see Section \ref{sub:boot} in the supplementary material for details) and that the following holds. \begin{thm}
\label{thm:mofn}
Let $\hat{\bm{r}}_n^*$ and $\hat{\bm{s}}_m^*$ be bootstrap versions
of $\hat{\bm{r}}_n$ and $\hat{\bm{s}}_m$ that are obtained via re-sampling $k$ observations
with $k/n\rightarrow 0$ and $k/m\rightarrow 0$.
Then, the plug-in bootstrap with
$\hat{\bm{r}}_n^*$ and $\hat{\bm{s}}_m^*$ is consistent, that is
\begin{equation*}
\begin{split}
\sup_{f\in\mathrm{BL}_1(\mathbb{R})}E\left[ f(\phi_p(\sqrt{k}\left\{ (\hat{\bm{r}}_n^{*},
\hat{\bm{s}}_m^{*}) - (\hat{\bm{r}}_n, \hat{\bm{s}}_m)
\right\})) | X_1, \dots,X_n,Y_1, \dots, Y_m \right] \\
- E\left[ f\left(\rho_{n,m}\left\{ W_p^p(\hat{\bm{r}}_n, \hat{\bm{s}}_m) - W_p^p(\bm{r},
\bm{s}) \right\}\right) \right]
\end{split}
\end{equation*}
converges to zero in probability. \end{thm} In the following we will prove our main Theorem \ref{thm:full} by \begin{enumerate}[i)]
\item introducing Hadamard directional differentiability, which
does not require the derivative to be linear but still allows for a delta-method;
\item showing that the map $(\bm{r}, \bm{s})\mapsto W_p(\bm{r}, \bm{s})$ is
differentiable in this sense. \end{enumerate}
\subsection{Hadamard directional derivatives} In this section we follow \cite{Romisch2004}. A map $f$ defined on a subset $D_f\subset \mathbb{R}^d$ with values in $\mathbb{R}$ is called \textit{Hadamard directionally differentiable} at ${\bm{u}}\in\mathbb{R}^d$ if there exists a map $f'_{{\bm{u}}} : \mathbb{R}^d\rightarrow \mathbb{R}$ such that \begin{equation}
\lim_{n\rightarrow\infty} \frac{f({\bm{u}}+t_n\bm{h}_n) - f({\bm{u}})}{t_n} = f'_{\bm{u}}(\bm{h})
\label{eq:def_hadamard} \end{equation} for any $\bm{h}\in\mathbb{R}^d$ and for arbitrary sequences $t_n$ converging to zero from above and $\bm{h}_n$ converging to $\bm{h}$ such that ${\bm{u}}+t_n\bm{h}_n\in D_f$ for all $n\in\mathbb{N}$. Note that in contrast to the usual notion of Hadamard differentiability (e.g. \cite{van_der_vaart_weak_1996}) the derivative $\bm{h}\mapsto f'_{\bm{u}}(\bm{h})$ is \textit{not} required to be linear. A prototypical example is the absolute value $f:\mathbb{R}\rightarrow\mathbb{R}$,
$t \mapsto |t|$ which is not in the usual sense Hadamard differentiable at $t=0$ but directionally differentiable with the non-linear derivative $t\mapsto
|t|$. \begin{thm}[{\citealp[Theorem 1]{Romisch2004}}]
\label{thm:delta_method}
Let $f$ be a function defined on a subset $F$ of
$\mathbb{R}^d$ with values in $\mathbb{R}$, such that
\begin{enumerate}
\item $f$ is Hadamard directionally differentiable at ${\bm{u}}\in F$ with
derivative $f'_{\bm{u}}:F\rightarrow \mathbb{R}$ and
\item there is a sequence of $\mathbb{R}^d$-valued random variables $X_n$ and a
sequence of non-negative numbers $\rho_n\rightarrow\infty$ such that $\rho_n(X_n -
{\bm{u}})\Rightarrow X$ for some random variable $X$ taking values in
$F$.
\end{enumerate}
Then, $\rho_n(f(X_n) - f({\bm{u}}))\Rightarrow f_{\bm{u}}'(X)$. \end{thm}
\subsection{Directional derivative of the Wasserstein distance}
In this section we show that the functional $(\bm{r}, \bm{s})\mapsto W_p^p(\bm{r}, \bm{s})$ is Hadamard directionally differentiable and give a formula for the derivative.
The \textit{dual} program (cf. {\cite[Ch. 4]{Luenberger2008}}, also \cite{kantorovich_space_1958}) of the linear program defining the Wasserstein distance \eqref{eq:def_wass} is given by \begin{equation}
\label{eq:dual}
\begin{split}
&\max_{({\bm{u}}, \bm{v})\in\mathbb{R}^\mathcal{X}\times \mathbb{R}^\mathcal{X}}\quad \scp{{\bm{u}}}{\bm{r}} + \scp{\bm{s}}{\bm{v}} \\
\textbf{s.t.} \quad& u_x + v_{x'} \leq d^p(x, x') \quad \forall x,x'\in\mathcal{X}.
\end{split} \end{equation} As noted above, the optimal value of the primal problem is $W_p^p(\bm{r}, \bm{s})$ and by standard duality theory of linear programs (e.g. \cite{Luenberger2008}) this is also the optimal value of the dual problem. Therefore, the set of optimal solutions to the dual problem is given by $\Phi^*_p(\bm{r}, \bm{s})$ as defined in \eqref{eq:sets}. \begin{thm}
\label{thm:derivative_Wasserstein}
The functional $(\bm{r}, \bm{s})\mapsto W_p^p(\bm{r}, \bm{s})$ is directionally Hadamard
differentiable at all $(\bm{r}, \bm{s})\in\mathcal{P}_\mathcal{X}\times\mathcal{P}_\mathcal{X}$
with derivative
\begin{equation}
(\bm{h}_1,\bm{h}_2) \mapsto \max_{({\bm{u}}, \bm{v})\in\Phi^*_p(\bm{r}, \bm{s})}
- (\scp{{\bm{u}}}{\bm{h}_1} + \scp{\bm{v}}{\bm{h}_2}).
\label{eq:derivative}
\end{equation} \end{thm} We can give a more explicit expression for the set $\Phi^*_p(\bm{r}, \bm{s})$ in the case $\bm{r} = \bm{s}$, when the optimal value of the primal and the dual problem is $0$. Then, the condition $W_p^p(\bm{r},\bm{s}) = \scp{\bm{r}}{{\bm{u}}} + \scp{\bm{s}}{\bm{v}}$ becomes $\scp{\bm{r}}{{\bm{u}} + \bm{v}}=0$. Since $u_x + v_{x'} \leq d^p\!(x,x')$ for all $x,x'\in \mathcal{X}$ implies ${\bm{u}} + \bm{v} \leq 0$ this yields ${\bm{u}} = -\bm{v}$. This gives \[
\Phi^*_p(\bm{r}, \bm{r}) = \left\{ ({\bm{u}}, -{\bm{u}})\in\mathbb{R}^\mathcal{X}\times\mathbb{R}^\mathcal{X} : u_x - u_{x'} \leq
d^p\!(x, x'), \: x,x'\in\mathcal{X} \right\} \] and the following more compact representation of the dual solutions in the case $\bm{r}=\bm{s}$, independent of $\bm{r}$: \begin{equation} \Phi_p^*(\bm{r},\bm{r}) = \Phi^*_p \times \left( -\Phi_p^* \right). \label{eq:null_Phi*} \end{equation} \subsection{Proof of Theorem \ref{thm:full}} \begin{enumerate}[a)]
\item With the notation introduced in Theorem \ref{thm:full}, $n\hat{\bm{r}}_n$ is a
sample of size $n$ from a multinomial distribution with
probabilities $\bm{r}$. Therefore, $\sqrt{n}(\hat{\bm{r}}_n - \bm{r})\Rightarrow
\bm{G}$ as $n\rightarrow \infty$ {\citep[Thm. 14.6]{wasserman_all_2011}}. The
Hadamard derivative of the
map $(\bm{r}, \bm{s}) \mapsto W_p^p(\bm{r},\bm{s})$ as given in Theorem
\ref{thm:derivative_Wasserstein} can now be
used in the delta-method
from Theorem \ref{thm:delta_method}. Together with the representation \eqref{eq:null_Phi*} of
the set of dual solutions $\Phi^*_p(\bm{r}, \bm{s})$, this yields
\[
\sqrt{n}W_p^p(\hat{\bm{r}}_n, \bm{r}) \Rightarrow \max_{({\bm{u}}, \bm{v})\in
\Phi^*_p(\bm{r}, \bm{r})} -\scp{{\bm{u}}}{\bm{G}} \:\stackrel{D}{\sim}\: \max_{{\bm{u}}\in\Phi^*_p}
\scp{{\bm{u}}}{\bm{G}}.
\]
Here and in the following $Z_1\stackrel{D}{\sim} Z_2$ means the distributional equality of the
random variables $Z_1$ and $Z_2$.
Applying to this the Continuous Mapping Theorem with the map $t\mapsto
t^{1/p}$ gives the assertion.
\item Consider the map
$(\bm{r}, \bm{s})\mapsto W_p(\bm{r}, \bm{s}) = (W_p^p(\bm{r}, \bm{s}))^{1/p}$. By Theorem
\ref{thm:derivative_Wasserstein} and the chain rule for
Hadamard directional derivatives {\citep[Prop. 3.6]{Shapiro1990}}, the Hadamard derivative of
this map at $(\bm{r}, \bm{s})$ is
given by
\begin{equation}
(\bm{h}_1, \bm{h}_2) \mapsto p^{-1}W_p^{1-p}(\bm{r}, \bm{s})\left\{ \max_{({\bm{u}},
\bm{v})\in\Phi^*_p(\bm{r},\bm{s})}
-(\scp{{\bm{u}}}{\bm{h}_1} + \scp{\bm{v}}{\bm{h}_2}) \right\}.
\label{eq:der_alt}
\end{equation}
An application of the delta-method of Theorem \ref{thm:delta_method} concludes this part.
\item and d). Note that under the assumptions of the Theorem
\begin{equation}
\sqrt{\frac{nm}{n + m}}\left( (\hat{\bm{r}}_n, \hat{\bm{s}}_m) - (\bm{r}, \bm{s}) \right)
\Rightarrow (\sqrt{\lambda} \bm{G}, \sqrt{1 - \lambda}\bm{H}).
\label{eq:conv_multi_twosample}
\end{equation}
Part d) follows with the delta-method from \eqref{eq:der_alt} and
\eqref{eq:conv_multi_twosample}.
For part c) we use, as we did for a), the derivative given in Theorem
\ref{thm:derivative_Wasserstein} and the Continuous
Mapping Theorem.
The limit distribution is
\[
\left\{ \max_{({\bm{u}},\bm{v})\in\Phi^*_p(\bm{r}, \bm{s})} (\sqrt{\lambda}\scp{\bm{G}}{{\bm{u}}} +
\sqrt{1 - \lambda}\scp{\bm{H}}{\bm{v}}) \right\}^{1/p}.
\]
Note that if $\bm{r} = \bm{s}$
we have $({\bm{u}},\bm{v})\in\Phi^*_p(\bm{r}, \bm{s})$ if and only if ${\bm{u}}\in\Phi^*_p$ and
$\bm{v} = -{\bm{u}}$, by \eqref{eq:null_Phi*} and \eqref{eq:sets}. Hence, with
$\bm{G} \stackrel{D}{\sim} \bm{H}$ we conclude
\[
\begin{split}
\max_{({\bm{u}},\bm{v})\in\Phi^*_p(\bm{r}, \bm{s})} (\sqrt{\lambda}\scp{\bm{G}}{{\bm{u}}} +
\sqrt{1 - \lambda}\scp{\bm{H}}{\bm{v}})
& \stackrel{D}{\sim} \max_{{\bm{u}}\in\Phi^*_p} (\sqrt{\lambda}\scp{\bm{G}}{{\bm{u}}} -
\sqrt{1 - \lambda}\scp{\bm{H}}{{\bm{u}}}) \\
& \stackrel{D}{\sim}
\max_{{\bm{u}}\in\Phi^*_p} \sqrt{\lambda + (1 -
\lambda)}\scp{\bm{G}}{{\bm{u}}} \\
& = \max_{{\bm{u}}\in\Phi^*_p}
\scp{\bm{G}}{{\bm{u}}}.
\end{split}
\] \end{enumerate}
\subsection{Explicit limiting distribution for tree metrics} \label{sec:trees} Assume that the metric structure on $\mathcal{X}$ is given by a weighted tree, that is, an undirected connected graph $\mathcal{T} = (\mathcal{X}, E)$ with vertices $\mathcal{X}$ and edges $E \subset \mathcal{X}\times \mathcal{X}$ that contains no cycles. We assume the edges to be weighted by a function $ w:E \rightarrow \mathbb{R}_{>0}$. For $x, x'\in \mathcal{X}$ let $e_1,\dots,e_l\in E$ be the unique path in $\mathcal{T}$ joining $x$ and $x'$, then the length of this path, $d_\mathcal{T}(x,x') = \sum_{j=1}^l w(e_j)$ defines a metric $d_\mathcal{T}$ on $\mathcal{X}$. Without imposing any further restriction on $\mathcal{T}$, we assume it to be rooted at $\mathrm{root}(\mathcal{T})\in \mathcal{X}$, say. Then, for $x\in \mathcal{X}$ and $x\neq\mathrm{root}(\mathcal{T})$ we may define $\mathrm{parent}(x)\in \mathcal{X}$ as the immediate neighbor of $x$ in the unique path connecting $x$ and $\mathrm{root}(\mathcal{T})$. We set $\mathrm{parent}(\mathrm{root}(\mathcal{T}))=\mathrm{root}(\mathcal{T})$. We also define $\mathrm{children}(x)$ as the set of vertices $x'\in \mathcal{X}$ such that there exists a sequence $x' = x_1, \dots , x_l = x \in \mathcal{X}$ with $\mathrm{parent}(x_j) = x_{j+1}$ for $j=1,\dots,l-1$. Note that with this definition $x\in\mathrm{children}(x)$. Additionally, define the linear operator $S_\mathcal{T} : \mathbb{R}^\mathcal{X} \rightarrow \mathbb{R}^\mathcal{X}$ \[
(S_\mathcal{T} {\bm{u}})_x = \sum_{x'\in\mathrm{children}(x)} u_{x'}. \] \begin{thm}
\label{thm:trees}
Let $p\geq 1$, $\bm{r}\in \mathcal{P}_\mathcal{X}$, defining a probability distribution on
$\mathcal{X}$ and
let the empirical measures $\hat{\bm{r}}_n$ and
$\hat{\bm{s}}_m$
be generated by independent random variables $X_1,\dots, X_n$ and $Y_1,
\dots, Y_m$, respectively, all drawn from
$\bm{r} = \bm{s}$.
Then, with a Gaussian vector $\bm{G}\sim\mathcal{N}(0, \Sigma(\bm{r}))$ as
defined in
\eqref{eq:def_Sigma} we have the following.
\begin{enumerate}[a)]
\item \textbf{(One sample)} As $n\rightarrow\infty$,
\begin{equation}
n^{\frac{1}{2p}} W_p(\hat{\bm{r}}_n, \bm{r}) \Rightarrow \left\{\sum_{
x \in \mathcal{X}} |(S_\mathcal{T} \bm{G})_x| d_\mathcal{T}(x,\mathrm{parent}(x))^p\right\}^{\frac{1}{p}}
\label{eq:weak_conv_trees}
\end{equation}
\item \textbf{(Two samples)} If $n\wedge m\rightarrow\infty$ and
$n/(n + m)\rightarrow\lambda\in(0,1)$ we have
\begin{equation}
\left( \frac{nm}{n+m} \right)^{\frac{1}{2p}} W_p(\hat{\bm{r}}_n, \hat{\bm{s}}_m)
\Rightarrow \left\{
\sum_{ x \in \mathcal{X}} |(S_\mathcal{T} \bm{G})_x|
d_\mathcal{T}(x,\mathrm{parent}(x))^p\right\}^{\frac{1}{p}}.
\end{equation}
\end{enumerate} \end{thm} The proof of Theorem \ref{thm:trees} is given in the supplementary material. The theorem includes the special case of a discrete measure on the real line, that is $\mathcal{X}\subset \mathbb{R}$, since in this case, $\mathcal{X}$ can be regarded as a simple rooted tree consisting of only one branch. \begin{cor}[{\citealp[Theorem 2.6]{Samworth2004}}]
\label{cor:samworth}
Let $\mathcal{X} = \{x_1 < \dots < x_N\}\subset\mathbb{R}$, $\bm{r}\in\mathcal{P}_\mathcal{X}$ and $\hat{\bm{r}}_n$ the
empirical measure generated by i.i.d. random variables $X_1, \dots ,
X_n\sim\bm{r}$. With $\bar{r}_j = \sum_{i=1}^j r_{x_i} $, for $j=1,\dots,N$ and
$B$ a standard Brownian bridge, we have as $n\rightarrow\infty$,
\begin{equation}
n^{\frac{1}{4}} W_2(\hat{\bm{r}}_n, \bm{r}) \Rightarrow \left\{ \sum_{j=1}^{N-1}
|B(\bar{r}_j)|
(x_{j+1} - x_j)^2 \right\}^{\frac{1}{2}}.
\label{eq:samworth}
\end{equation} \end{cor}
\section{Simulations and applications} \label{sec:appl} The following numerical experiments were performed using R \citep{good_r_core_team_r:_2016}. All computations of Wasserstein distances and optimal transport plans as well as their visualizations were performed with the R-package \texttt{transport} \citep{Schuhmacher2014,Gottschlich2014}. The code used for the computation of the limiting distributions is available as an R-package \texttt{otinference} \citep{sommerfeld_otinference:_2017}. \subsection{Speed of convergence} We investigate the speed of convergence to the limiting distribution in Theorem \ref{thm:full} in the one-sample case under the null hypothesis. To this end, we consider as ground space $\mathcal{X}$ a regular two-dimensional $L\times L$ grid with the Euclidean distance as the metric $d$ and $L = 3, 5, 10$. We generate five random measures $\bm{r}$ on $\mathcal{X}$ as realizations of a Dirichlet random variable with concentration parameter $\bm{\alpha} = (\alpha, \dots, \alpha)\in\mathbb{R}^{L\times L}$ for $\alpha = 1, 5, 10$. Note that $\alpha = 1$ corresponds to a uniform distribution on the probability simplex. For each measure, we generate $20,000$ realizations of $n^{1/(2p)}W_p(\hat{\bm{r}}_n, \bm{r})$ with $n \hat{\bm{r}}_n\sim\mathrm{Multinom}(\bm{r})$ for $n = 10, 100, 1000$ and of the theoretical limiting distribution given in Theorem \ref{thm:full}. The Kolmogorov--Smirnov distance (that is, the maximum absolute difference between their cdfs) between these two samples (averaged over the five measures) is shown in Figure \ref{fig:conSpeed}. \begin{figure}
\caption{Comparison of the finite sample distribution and the theoretical
limiting distribution on a regular grid of length $L$ for different sample sizes. The two top rows show Q-Q-plots and kernel density estimates (bandwidth: Silverman's rule of thumb \citep{silverman_density_1986}, solid line: finite sample, dotted line: limiting distribution) for $L = 10$. Last row shows the KS statistic between the two distributions as a function of the sample size for different $L$ and for different concentration parameters $\alpha$.}
\label{fig:conSpeed}
\end{figure} The experiment shows that the limiting distribution is a good approximation of the finite sample version even for small sample sizes. For the considered parameters the size of the ground space $N=L^2$ seems to slow the convergence only marginally. Similarly, the underlying measure seems to have no sizeable effect on the convergence speed as the dependence on the concentration parameter $\alpha$ demonstrates. \subsection{Testing the null: real and synthetic fingerprints} \label{sub:FP} The generation and recognition of synthetic fingerprints is a topic of great interest in forensic science and current state-of-the-art methods \citep{cappelli_synthetic_2000} produce synthetic fingerprints that even human experts fail to recognize as such {\citep[p. 292ff]{maltoni_handbook_2009}}. Recently, \cite{gottschlich_separating_2014} presented a method using the Wasserstein distance that is able to distinguish synthetic from real fingerprints with high accuracy. Their method is probabilistic in nature, since it is based on a hypothesized unknown distribution of certain features of the fingerprint. We use our distributional limits to assess the statistical significance of the differences.
\paragraph{Minutiae histograms} The basis for the comparison of fingerprints are the so-called minutiae, which are key qualities in biometric identification based on fingerprints \citep{jain_technology:_2007}. They are certain characteristic features such as bifurcations of the line patterns of the fingerprint. Each minutia has a location in the fingerprint and a direction, so that it can be characterized by two real numbers and an angle. Figure \ref{fig:minutiae} shows a real and a synthetic fingerprint with their minutiae.
The recognition method of \cite{gottschlich_separating_2014} considers pairs of minutiae and records their distance and the difference between their angles. Based on these two values each minutiae pair is put in one of 100 bins arranged in a regular grid (10 directional by 10 distance bins) to obtain a so-called minutiae histogram (MH). Using the bin-wise mean of MHs of several fingerprints to construct a typical MH, they found that the proximity in Wasserstein distance to these references is a good classifier for distinguishing real and synthetic fingerprints.
In order to assess the statistical significance of the difference in minutiae pair distributions, we consider fingerprints from the databases 1 and 4 of the Fingerprint Verification Competition of 2002 \citep{maio_fvc2002}, containing $110$ real and synthetic fingerprints, respectively. From each database the minutiae were obtained by an automatic procedure using a commercial off-the-shelf program. For each fingerprint we chose disjoint minutiae pairs at random to avoid the issue of pairs being dependent, yielding a total of 1917 and 1437 minutiae pairs from real and synthetic fingerprints, respectively. \begin{figure}
\caption{The optimal transport plan between the MHs of real and fake
fingerprints. The grey values indicate the magnitude of the difference of the
two MHs. The arrows show the transport. The amount of mass transported is encoded in the color and thickness of the arrows.}
\label{fig:FPtransport}
\end{figure}
While two-sample tests for univariate data are abundant and well studied, there are no multivariate methods that could be considered standard in this setting. Therefore, we report on the findings of several tests from the literature for comparison with the Wasserstein based method from \eqref{eq:two_sample_null}. We tested the null hypothesis of the underlying distributions being equal for the un-centered, the centered and the centered and scaled (to variance $1$) data to assess effects beyond first moments using the following methods: 1) comparing the empirical Wasserstein distance $W_1$ after binning on a regular $10\times 10$ grid with the limiting distribution from Theorem \ref{thm:full}; 2) a permutation test; 3) the crossmatch test proposed by \cite{rosenbaum_exact_2005} and 4) the kernel based test \citep{anderson_two-sample_1994} implemented in the R package \texttt{ks}.
Table \ref{tab:FPResults} shows the resulting empirical distributions on a $10\times 10$ grid and the $p$-values for the different tests. \begin{table} \caption{\label{tab:FPResults} Results of different two-sample tests for difference in the distribution of MHs of real and fake fingerprints.}
\begin{tabular}{rrrrr}
& Wasserstein & Crossmatch & Permutation & KDE \\
\hline
Raw & 0.00E+00 & 2.99E-01 & 1.00E-03 & 1.12E-08 \\
Centered & 4.00E-04 & 4.48E-05 & 1.00E-03 & 2.60E-21 \\
Centered \& Scaled & 2.54E-02 & 1.01E-02 & 1.71E-01 & 1.79E-14 \\
\end{tabular} \end{table} The differences are highly significant according to all tests, except the permutation test for the centered and scaled data. In this particular example at least, the Wasserstein based test seems to be able to pick up differences in distributions (in the first moment and beyond) at least as well as current state-of-the-art methods.
In addition to testing, the Wasserstein method provides us with an optimal transport plan, transforming one measure into the other. For the minutiae histograms under consideration this is illustrated in Figure \ref{fig:FPtransport}. This transport plan gives information beyond a simple test for equality as it highlights structural changes in the distribution. In this specific application it reveals how in the minutiae histogram of synthetic fingerprints compared to the one of real fingerprints mass has been shifted from large and intermediate directional differences to smaller ones. In particular to small and large distances, and only to a lesser extent to intermediate distances. In conclusion one may say that synthetic fingerprints show smaller differences in the directions of minutiae and stronger clustering of minutiae distances around small and large values. Insight of this sort may lead to improved generation or detection of synthetic fingerprints. \begin{figure}
\caption{Top row: Minutiae of a real (left) and a synthetic (right)
fingerprint. Bottom row: Minutiae histograms of real and synthetic fingerprints.}
\label{fig:minutiae}
\end{figure}
\subsection{Asymptotic under the alternative: metagenomics} \label{sub:appl_alt} \begin{figure}
\caption{Relative abundances of the $30$ first OTUs in the $12$ samples
(left) and
Wasserstein distances of the microbial communities (right). Here, $ij$ is
the $j$-th sample of the $i$-th person.}
\label{fig:stoolResults}
\end{figure} Metagenomics studies microbial communities by analyzing genetic material in an environmental sample such as a stool sample of a human. High-throughput sequencing techniques no longer require cultivated cloned microbial cultures to perform sequencing. Instead, a sample with potentially many different species can be analyzed directly and the abundance of each species in the sample can be recovered. The applications of this technique are countless and constantly growing. In particular, the composition of microbial communities in the human gut has been associated with obesity, inflammatory bowel disease and others \citep{turnbaugh_human_2007}.
The analysis of a sample with high-throughput sequencing techniques yields several thousands to many hundreds of thousands of sequences. After elaborate pre-processing, these sequences are aligned to a reference database and clustered in \textit{operational taxonomic units} (OTUs). These OTUs can be thought of (albeit omitting some biological detail) as the different species present in the sample. For each OTU this analysis yields the number of sequences associated with it, that is how often this particular OTU was detected in the sample. Further, comparing the genetic sequences associated with an OTU yields a biologically meaningful measure of similarity between OTUs---and hence a distance. A metagenomic sample can therefore be regarded as a sample in a discrete metric space with OTUs being the points of the space. Comparing such samples representing microbial communities is of great interest \citep{kuczynski_microbial_2010}. The Wasserstein distance has been recognized to provide valuable insight and to facilitate tests for equality of two communities \citep{evans_phylogenetic_2012}. This previous application, however, relies on a phylogenetic tree that is built on the OTUs and the distance is then measured in the tree. This additional pre-processing step involves many parameter choices and is unnecessary with our method.
A further drawback of the method of \cite{evans_phylogenetic_2012} is that it only allows for testing the null hypothesis of two communities being equal. In practice, one frequently finds that natural variation is so high that even two samples from the same source taken at different times will be recognized as different. This raises the question whether variation within samples from the same source is smaller than the difference to samples of another source. Statistically speaking we are looking for confidence sets for differences which are assumed to be different from zero. This requires asymptotics under the alternative $\bm{r} \neq\bm{s}$, which is provided by Theorem \ref{thm:full}. \begin{figure}
\caption{Display of $95\%$ confidence
intervals of Wasserstein distances of
microbial communities. The horizontal axis shows which person pair the distances belong to (separated by gray vertical lines). The dotted vertical line separates intra- (left) from inter- (right) -personal distances.}
\label{fig:stoolIntraInter}
\end{figure}
\paragraph{Data analysis} We consider part of the data of \cite{costello_bacterial_2009}. Four stool samples were taken from each of three persons at different times. We used the preparation of this data by P. Schloss available at \url{https://www.mothur.org/w/images/d/d8/CostelloData.zip}. The reads were pre-processed with the program \texttt{mothur} \citep{schloss_introducing_2009} using the procedure outlined in \cite{schloss_reducing_2011} and \cite{454}. The relative abundances of the $30$ most frequent OTUs and the Wasserstein-2 distances of the microbial communities are shown in Figure \ref{fig:stoolResults}. In this and all other figures we use $i-j$ to denote sample $j$ of person $i$. Note that it is typical for this data that most of the mass is concentrated on a few OTUs.
The Wasserstein-2 distances for all $66$ pairs and their $99\%$ confidence intervals were computed using the asymptotic distribution in Theorem \ref{thm:full}. The results are shown in Figure \ref{fig:stoolIntraInter}. The entire analysis took less than a minute on a standard laptop. The confidence intervals show that intra-personal distances are in fact significantly smaller than inter-personal distances.
\section{Discussion} \label{sec:dis} We discuss limitations, possible extensions of the presented work and promising directions for future research.
\paragraph{Beyond finite spaces I: rates in the finite and the continuous setting ($d=1$)} The scaling rate in Theorem \ref{thm:full} depends solely on $p$ and is completely independent of the underlying space $\mathcal{X}$. This contrasts known bounds on the rate of convergence in the continuous case (see references in the Introduction), which exhibit a strong dependence on the dimension of the space and the moments of the distribution.
Under the null hypothesis (that is, the two underlying population measures are equal) and when $\mathcal{X}=\mathbb{R}$ and $p=2$, the scaling rate for a continuous distribution is known to be $n^{1/2}$, at least under additional tail conditions (see e.g. \cite{Barrio2005}). This means that in this case the scaling rate for a discrete distribution is slower (namely $n^{1/4}$). Under the alternative (different population measures) the scaling rate is $n^{1/2}$ and coincides in the discrete and the continuous case (see \cite{Munk1998}).
\paragraph{Beyond finite spaces II: higher dimensions ($d\geq 2$)} For a continuous measure $\mu$ the Wasserstein distance is the solution of an infinite-dimensional optimization problem. Although differentiability results also exist for such problems (e.g. \cite{shapiro_perturbation_1992}), there are strong indications that the argument presented here cannot carry over to this case for $d\geq 2$. This is most easily seen from the classical results of \cite{ajtai_optimal_1984}. We consider the uniform distribution on the unit square. For two samples of size $n$ independently drawn from this distribution, \cite{ajtai_optimal_1984} showed that there exist constants $C_1,C_2$ such that the 1-Wasserstein distance $\hat{W}_1^{(n)}$ between them satisfies \[
C_1 n^{-1/2}(\log n)^{1/2} \leq \hat{W}_1^{(n)} \leq C_2 n^{-1/2}(\log n)^{1/2} \] with probability $1 - o(1)$. Hence, for $c_n\hat{W}_1^{(n)}$ to have a non-degenerate limit, we need $c_n = \sqrt{n / \log n}$. However, a common property of all delta-methods is that they preserve the rate of convergence, which is not satisfied here. \paragraph{Transport distances on trees} Complementing our Theorem \ref{thm:trees} a further result on transport distances on trees was proven by \cite{evans_phylogenetic_2012} in the context of phylogenetic trees for the comparison of metagenomic samples (see also our application in Section \ref{sec:appl}). They point out that the Wasserstein-1 distance on trees is equal to the so-called \textit{weighted uni-frac distance} which is very popular in genetics. Inspired by this distance they give a formal generalization mimicking a cost exponent $p>1$ and consider its asymptotic behavior. However, as they remark, these generalized expressions are no longer related (beyond a formal resemblance) to Wasserstein distances with cost exponent $p>1$. Comparing the performance of their ad-hoc metric and the true Wasserstein distance on trees that is under consideration here is an interesting topic for further research.
\paragraph{Bootstrap} We showed that while the naive $n$-out-of-$n$ bootstrap fails for the Wasserstein distance (Section \ref{sub:boot}), the $m$-out-of-$n$ bootstrap is consistent. An interesting and challenging question is how $m$ should be chosen.
\paragraph{Wasserstein barycenters} Barycenters in the Wasserstein space \citep{agueh_barycenters_2011} have recently received much attention \citep{cuturi_fast_2014, del_barrio_statistical_2015}. We expect that the techniques developed here can be of use in providing a rigorous statistical theory (e.g. distributional limits). The same applies to geodesic principal component analysis in the Wasserstein space \citep{bigot_geodesic_2013,seguy_algorithmic_2015}.
\paragraph{Alternative cost matrices and transport distances} Theorem \ref{thm:full} holds in very large generality for arbitrary cost matrices, including in particular the case of a cost matrix derived from a metric but using a cost exponent $p <1$.
Beyond this obvious modification it seems worthwhile to extend the methodology of directional differentiability in conjunction with a delta-method to other functionals related to optimal transport, e.g. entropically regularized \citep{Cuturi2013a} or sliced Wasserstein distances \citep{bonneel_sliced_2015}. This would require a careful investigation of the analytical properties of these quantities similar to classical results for the Wasserstein distance. \section*{Acknowledgment} The authors gratefully acknowledge support by the DFG Research Training Group 2088 Project A1. They would like to thank L. D\"umbgen, A. Hein, S. Huckemann, C. Gottschlich, D. Schuhmacher and R. Schultz for helpful discussions and C. Tameling for careful reading of the manuscript.
\scriptsize
\normalsize
\appendix \section{An alternative representation of the limiting distribution} \label{sec:altalt} We give a second representation of the limiting distribution under the alternative $\bm{r}\neq \bm{s}$. The random part of the limiting distribution \eqref{eq:two_sample_alt} is the linear program \[
\max_{({\bm{u}}, \bm{v})\in\Phi^*_p(\bm{r}, \bm{s})} \sqrt{\lambda} \scp{\bm{G}}{{\bm{u}}} + \sqrt{ 1-
\lambda} \scp{\bm{H}}{\bm{v}}. \] With the representation \eqref{eq:sets} of $\Phi^*_p(\bm{r}, \bm{s})$ we obtain the dual linear program \begin{align*}
\min\quad &zW_p^p(\bm{r}, \bm{s}) + \sum_{x,x'\in \mathcal{X}} w_{x,x'} d^p(x,x') \\
\text{s.t.}\quad &
\bm{w}\geq 0, z\in \mathbb{R} \\
& \sum_{x'\in\mathcal{X}} w_{x,x'} +z r_x = \sqrt{\lambda}\, G_x, \quad x\in\mathcal{X} \\
& \sum_{x\in\mathcal{X}} w_{x,x'} +z s_{x'} = \sqrt{1-\lambda}\, H_{x'}, \quad x'\in\mathcal{X} \end{align*} Note that the constraints can only be satisfied if both $\sqrt{\lambda}\bm{G} - z\bm{r}$ and $\sqrt{1-\lambda}\bm{H} - z\bm{s}$ have only non-negative entries and $z\leq 0$. In this case the second term in the objective function is clearly minimized by $-z\bm{w}^*$, with $\bm{w}^*$ an optimal transport plan between these two measures $\bm{r} - \sqrt{\lambda}\bm{G} / z$ and $\bm{s} -\sqrt{1-\lambda}\bm{H} / z$ and the second term of the objective function is equal to $-z W_p^p(\bm{r} - \sqrt{\lambda}\bm{G} / z, \bm{s} - \sqrt{1-\lambda}\bm{H} / z)$.
To write this more compactly let us slightly extend our notation. For $\bm{r}, \bm{s} \in\mathbb{R}^\mathcal{X}$ with $\sum_x r_x = \sum_x s_x = 1$ let \[
\tilde{W}_p^p(\bm{r}, \bm{s}) =
\begin{cases}
W_p^p(\bm{r}, \bm{s}) & \text{if } \bm{r}, \bm{s} \geq 0; \\
\infty & \text{else.}
\end{cases} \] With this we can thus write the random variable in the limiting distribution \eqref{eq:two_sample_alt} as the one-dimensional non-linear optimization problem \begin{equation}
\frac{1}{p} W_p^{1-p}(\bm{r}, \bm{s})\min_{z\geq 0} z \left\{\tilde{W}_p^p(\bm{r} +
\sqrt{\lambda}\bm{G} / z, \bm{s} + \sqrt{1-\lambda}\bm{H} / z) - W_p^p(\bm{r}, \bm{s})\right\}.
\label{eq:altalt} \end{equation} \section{Bootstrap} \label{sub:boot} In this section we discuss the bootstrap for the Wasserstein distance. In addressing the usual measurability issues that arise in the formulation of consistency for the bootstrap, we follow \cite{van_der_vaart_weak_1996}. We denote by $\hat{\bm{r}}_n^*$ and $\hat{\bm{s}}_m^*$ some bootstrapped versions of $\hat{\bm{r}}_n$ and $\hat{\bm{s}}_m$. More precisely, let $\hat{\bm{r}}_n^*$ be a measurable function of $X_1 ,\dots,X_n$ and random weights $W_1, \dots , W_n$, independent of the data, and analogously for $\hat{\bm{s}}_m^*$. This setting is general enough to include many common bootstrapping schemes. We say that, with the assumptions and notation of Theorem \ref{thm:full}, the bootstrap is consistent if
the limiting distribution in \[
\rho_{n,m} \left\{ (\hat{\bm{r}}_n, \hat{\bm{s}}_m) - (\bm{r}, \bm{s})
\right\} \Rightarrow (\sqrt{\lambda}\bm{G}, \sqrt{1-\lambda}\bm{H}) \] is consistently estimated by the law of \[
\rho_{n,m} \left\{ (\hat{\bm{r}}_n^*, \hat{\bm{s}}_m^*) - (\hat{\bm{r}}_n,
\hat{\bm{s}}_m) \right\}. \] To make this precise, we define for $A\subset\mathbb{R}^d$, with $d\in\mathbb{N}$, the set of bounded Lipschitz-1 functions \[
\mathrm{BL}_1 (A) = \left\{ f:A\rightarrow\mathbb{R}: \sup_{x\in A}|f(x)|\leq 1, \quad |f(x_1) -
f(x_2)| \leq ||x_1 - x_2|| \right\}, \]
where $||\cdot||$ is the Euclidean norm. We say that the bootstrap versions $(\hat{\bm{r}}_n^*, \hat{\bm{s}}_m^*)$ are consistent if \begin{equation}
\begin{split}
\sup_{f\in\mathrm{BL}_1(\mathbb{R}^\mathcal{X} \times \mathbb{R}^\mathcal{X})} |E\left[ f(\rho_{n,m}\left\{ (\hat{\bm{r}}_n^*,
\hat{\bm{s}}_m^*) - (\hat{\bm{r}}_n, \hat{\bm{s}}_m) \right\}) | X_1, \dots, X_n, Y_1, \dots,Y_m \right] \\
- E\left[ f( (\sqrt{\lambda}\bm{G}, \sqrt{1-\lambda}\bm{H})) \right]|
\end{split}
\label{eq:boot_cons} \end{equation} converges to zero in probability. \paragraph{Bootstrap for directionally differentiable functions} The most straightforward way to bootstrap $W_p^p(\hat{\bm{r}}_n, \hat{\bm{s}}_m)$ is to simply plug-in $\hat{\bm{r}}_n^*$ and $\hat{\bm{s}}_m^*$. That is, trying to approximate the limiting distribution of $\rho_{n,m}\left\{W_p^p(\hat{\bm{r}}_n, \hat{\bm{s}}_m) - W_p^p(\bm{r}, \bm{s})\right\}$ by the law of \begin{equation}
\rho_{n,m}\left\{ W_p^p(\hat{\bm{r}}_n^*, \hat{\bm{s}}_m^*) - W_p^p(\hat{\bm{r}}_n, \hat{\bm{s}}_m) \right\}
\label{eq:plug_in_boot} \end{equation} conditional on the data. While for functions that are Hadamard differentiable this approach yields a consistent bootstrap (e.g. \cite{gill_non-_1989, van_der_vaart_weak_1996}), it has been pointed out by \cite{dumbgen_nondifferentiable_1993} and more recently by \cite{fang_inference_2014} that this is in general not true for functions that are only directionally Hadamard differentiable. In particular the plug-in approach fails for the Wasserstein distance.
For the Wasserstein distance there are two alternatives. First, \cite{dumbgen_nondifferentiable_1993} already pointed out that re-sampling fewer than $n$ (or $m$, respectively) observations yields a consistent bootstrap. Second, \cite{fang_inference_2014} propose to plug $\rho_{n,m}\left\{ (\hat{\bm{r}}_n^*, \hat{\bm{s}}_m^*) - (\hat{\bm{r}}_n, \hat{\bm{s}}_m) \right\}$ into the derivative of the function.
Recall from Section \ref{sec:asymp_distr} that \begin{equation}
\phi_p:\mathbb{R}^N\times\mathbb{R}^N \rightarrow \mathbb{R}, \quad \phi_p(\bm{h}_1 ,\bm{h}_2)
= \max_{{\bm{u}}\in\Phi^*_p}
\scp{{\bm{u}}}{\bm{h}_2 - \bm{h}_1}
\label{eq:der_phi} \end{equation} is the directional Hadamard derivative of $(\bm{r},\bm{s})\mapsto W_p^p(\bm{r},\bm{s})$ at $\bm{r}=\bm{s}$. With this notation, the following Theorem summarizes the implications of the results of \cite{dumbgen_nondifferentiable_1993} and \cite{fang_inference_2014} for the Wasserstein distance. \begin{thm}[Prop. 2 of \cite{dumbgen_nondifferentiable_1993} and Thms. 3.2 and 3.3 of \cite{fang_inference_2014}]
\label{thm:boot}
Under the assumptions of Theorem \ref{thm:full} let $\hat{\bm{r}}_n^*$ and $\hat{\bm{s}}_m^*$
be consistent bootstrap versions of $\hat{\bm{r}}_n$ and $\hat{\bm{s}}_m$, that is,
\eqref{eq:boot_cons} converges to zero in probability. Then,
\begin{enumerate}
\item the plug-in bootstrap \eqref{eq:plug_in_boot} is \textit{not}
consistent, that is,
\begin{equation*}
\begin{split}
\sup_{f\in\mathrm{BL}_1(\mathbb{R})} E\left[ f(\rho_{n,m} \left\{W_p^p(\hat{\bm{r}}_n^*, \hat{\bm{s}}_m^*) -
W_p^p(\hat{\bm{r}}_n, \hat{\bm{s}}_m) \right\}) | X_1, \dots, X_n,Y_1,\dots,Y_m \right] \\
- E[f(\rho_{n,m}\left\{ W_p^p(\hat{\bm{r}}_n, \hat{\bm{s}}_m) - W_p^p(\bm{r}, \bm{s}) \right\})]
\end{split}
\end{equation*}
does \textit{not} converge to zero in probability.
\item Under the null hypothesis $\bm{r} = \bm{s}$, the derivative plug-in
\begin{equation}
\phi_p(\rho_{n,m}\left\{ (\hat{\bm{r}}_n^*,\hat{\bm{s}}_m^*) - (\hat{\bm{r}}_n, \hat{\bm{s}}_m) \right\})
\label{eq:der_boot}
\end{equation}
is consistent, that is
\begin{equation*}
\begin{split}
\sup_{f\in\mathrm{BL}_1(\mathbb{R})}E\left[ f(\phi_p(\rho_{n,m}\left\{ (\hat{\bm{r}}_n^*,
\hat{\bm{s}}_m^*) - (\hat{\bm{r}}_n, \hat{\bm{s}}_m)
\right\})) | X_1, \dots,X_n,Y_1, \dots, Y_m \right] \\
- E\left[ f\left(\rho_{n,m}\left\{ W_p^p(\hat{\bm{r}}_n, \hat{\bm{s}}_m) - W_p^p(\bm{r},
\bm{s}) \right\}\right) \right]
\end{split}
\end{equation*}
converges to zero in probability.
\end{enumerate} \end{thm} \section{Proofs} \subsection{Proof of Theorem \ref{thm:derivative_Wasserstein}}
By {\cite[Ch. 3, Thm. 3.1]{Gal1997}} the function $(\bm{r},\bm{s})\mapsto
W_p^p(\bm{r},\bm{s})$ is directionally differentiable with derivative
\eqref{eq:derivative} in the sense of G\^ateaux, that is, the limit
\eqref{eq:def_hadamard} exists for a fixed $\bm{h}$ and not a sequence
$\bm{h}_n\rightarrow\bm{h}$ (see e.g. \cite{Shapiro1990}). To see that this is also a
directional
derivative in the Hadamard sense \eqref{eq:def_hadamard} it suffices
{\cite[Prop. 3.5]{Shapiro1990}} to show that $(\bm{r},\bm{s})\mapsto
W_p^p(\bm{r},\bm{s})$ is locally Lipschitz. That is, we need to show that for
$\bm{r},\bm{r}',\bm{s},\bm{s}'\in\mathcal{P}_\mathcal{X}$
\begin{equation*}
|W_p^p(\bm{r},\bm{s}) - W_p^p(\bm{r}',\bm{s}')| \leq C ||(\bm{r},\bm{s}) - (\bm{r}',\bm{s}')||,
\end{equation*}
for some constant $C>0$ and some (and hence all) norm $||\cdot||$ on
$\mathbb{R}^N\times\mathbb{R}^N$. Exploiting symmetry, it suffices to show that
\begin{equation*}
W_p^p(\bm{r}, \bm{s}) - W_p^p(\bm{r}, \bm{s}') \leq C ||\bm{s} - \bm{s}'||
\end{equation*}
for some constant $C>0$ and some norm $||\cdot||$. To this end, we employ an
argument similar to that used to prove the triangle inequality for the
Wasserstein distance (see e.g. {\cite[p. 94]{villani_optimal_2008}}). Indeed,
by the gluing Lemma {\cite[Ch. 1]{villani_optimal_2008}} there exist random
variables $X_1,X_2,X_3$ with marginal distributions $\bm{r},\bm{s}$ and $\bm{s}'$,
respectively, such that $E[d^p(X_1, X_3)] = W_p^p(\bm{r}, \bm{s}')$ and
$E[d(X_2,X_3)] = W_1(\bm{s}, \bm{s}')$. Then, since $(X_1, X_2)$ has marginals
$\bm{r}$ and $\bm{s}$, we have
\begin{align*}
W_p^p(\bm{r}, \bm{s}) - W_p^p(\bm{r}, \bm{s}') &\leq E\left[ d^p(X_1, X_2) -
d^p(X_1, X_3) \right] \\
& \leq p\:\mathrm{diam}(\mathcal{X})^{p-1} E\left[ |d(X_1, X_2) - d(X_1, X_3)| \right]\\
& \leq p\:\mathrm{diam}(\mathcal{X})^{p-1} E\left[ d(X_2, X_3) \right] = p\:\mathrm{diam}(\mathcal{X})^{p-1}
W_1(\bm{s}, \bm{s}') \\
& \leq p\:\mathrm{diam}(\mathcal{X})^{p} ||\bm{s} - \bm{s}'||_1,
\end{align*}
where the last inequality follows from {\cite[Thm.
6.15]{villani_optimal_2008}}. This completes the proof.
\subsection{Proof of Theorem \ref{thm:trees}} \paragraph{Simplify the set of dual solutions $\Phi^*_p$} As a first step, we rewrite the set of dual solutions $\Phi^*_p$ given in \eqref{eq:sets} in our tree notation as \begin{equation}
\Phi^*_p = \left\{ {\bm{u}}\in\mathbb{R}^\mathcal{X}: u_x - u_{x'} \leq d_\mathcal{T}(x,x')^p, \quad x,x'\in \mathcal{X} \right\}. \label{eq:Phi*_tree}
\end{equation}
The key observation is that in the condition $u_x - u_{x'}\leq d_\mathcal{T}(x,x')^p$ we
do not need to consider all pairs of vertices $x,x'\in \mathcal{X}$, but only those which
are joined by an edge. To see this, assume that only the latter condition holds.
Let $x,x'\in \mathcal{X}$ be arbitrary and $x = x_1,
\dots , x_l = x'$ the sequence of vertices defining the unique path joining
$x$ and $x'$, such that $(x_j,x_{j+1})\in E$ for $j=1,\dots,l-1$. Then
\[
u_x - u_{x'} = \sum_{j=1}^{l-1} (u_{x_j} - u_{x_{j+1}}) \leq \sum_{j=1}^{l-1}
d_\mathcal{T}(x_j, x_{j+1})^p \leq d_\mathcal{T}(x,x')^p,
\]
such that the condition is satisfied for all $x,x'\in \mathcal{X}$. Noting that if two
vertices are joined by an edge then one has to be the parent of the other, we
can write the set of dual solutions as
\begin{equation}
\Phi^*_p = \left\{ {\bm{u}}\in \mathbb{R}^\mathcal{X} : |u_x -
u_{\mathrm{parent}(x)}| \leq d_\mathcal{T}(x,\mathrm{parent}(x))^p ,\quad x\in \mathcal{X} \right\}.
\label{eq:Phi*_trees_simple}
\end{equation}
\paragraph{Rewrite the target function}
We define linear operators $S_\mathcal{T}, D_\mathcal{T} : \mathbb{R}^\mathcal{X} \rightarrow \mathbb{R}^\mathcal{X}$ by
\[
(D_\mathcal{T} v)_x =
\begin{cases}
v_x - v_{\mathrm{parent}(x)} & x\neq\mathrm{root}(\mathcal{T}) \\
v_{\mathrm{root}(\mathcal{T})} & x=\mathrm{root}(\mathcal{T}).
\end{cases}, \quad
(S_\mathcal{T} u)_x = \sum_{x'\in\mathrm{children}(x)} u_{x'}.
\]
\begin{lem}
\label{lem:S_D}
For ${\bm{u}},\bm{v} \in\mathbb{R}^\mathcal{X}$ we have $\scp{{\bm{u}}}{\bm{v}} = \scp{S_\mathcal{T} {\bm{u}}}{D_\mathcal{T} \bm{v}}$.
\end{lem}
\begin{proof}
We compute
\begin{align*}
\scp{S_\mathcal{T} {\bm{u}}}{D_\mathcal{T} \bm{v}} &= \sum_{x\in \mathcal{X}} (S_\mathcal{T} {\bm{u}})_x (D_\mathcal{T} \bm{v})_x \\
& = \sum_{x\in \mathcal{X}\setminus \left\{ \mathrm{root}(\mathcal{T}) \right\}}
\sum_{x'\in\mathrm{children}(x)} (v_x - v_{\mathrm{parent}(x)}) u_{x'} \\
&\quad + \sum_{x'\in\mathrm{children}(\mathrm{root}(\mathcal{T}))} v_{\mathrm{root}(\mathcal{T})}u_{x'} \\
& = \sum_{x\in \mathcal{X}} \sum_{x'\in\mathrm{children}(x)} v_x u_{x'} \\
&\quad - \sum_{x\in \mathcal{X}\setminus \left\{ \mathrm{root}(\mathcal{T})
\right\}}\sum_{x'\in\mathrm{children}(x)} v_{\mathrm{parent}(x)} u_{x'}\\
& = \sum_{x\in \mathcal{X}} u_x v_x,
\end{align*}
which proves the Lemma. To see how the last line follows let
$\mathrm{children}^1(x)$ be the set of immediate predecessors of $x$, that is
children of $x$ that are connected to $x$ by an edge. Then we can write
the second term in the second to last line above as
\begin{align*}
\sum_{x\in \mathcal{X}\setminus \{ \mathrm{root}(\mathcal{T}) \}}
\sum_{x'\in\mathrm{children}(x)} v_{\mathrm{parent}(x)} u_{x'}
& = \sum_{y\in\mathcal{X}} \sum_{x\in\mathrm{children}^1(y)}\sum_{x'\in\mathrm{children}(x)} v_y
u_{x'} \\
&= \sum_{y\in\mathcal{X}}\sum_{x'\in\mathrm{children}(y)\setminus \{y\}} v_y u_{x'}
\end{align*}
and the claim follows.
\end{proof}
If ${\bm{u}}\in\Phi^*_p$, as given in \eqref{eq:Phi*_trees_simple}, we
have for $x\neq\mathrm{root}(\mathcal{T})$ that
\[
|(D_\mathcal{T} {\bm{u}})_x| = |u_x - u_{\mathrm{parent}(x)}| \leq d_\mathcal{T}(x,\mathrm{parent}(x))^p.
\]
With these two observations and Lemma \ref{lem:S_D}, we get for
$\bm{G}\sim\mathcal{N}(0,\Sigma(\bm{r}))$ and
${\bm{u}}\in\Phi^*_p$ that
\begin{equation}
\scp{\bm{G}}{{\bm{u}}} = \scp{S_\mathcal{T} \bm{G}}{D_\mathcal{T} {\bm{u}}}
\leq \sum_{\mathrm{root}(\mathcal{T})\neq x \in \mathcal{X}} |(S_\mathcal{T} \bm{G})_x| d_\mathcal{T}(x,\mathrm{parent}(x))^p.
\label{eq:tree_bound_on_max}
\end{equation}
Therefore, $\max_{{\bm{u}}\in\Phi^*_p} \scp{\bm{G}}{{\bm{u}}}$ is bounded by
$\sum_{\mathrm{root}(\mathcal{T})\neq x \in \mathcal{X}} |(S_\mathcal{T} \bm{G})_x|
d_\mathcal{T}(x,\mathrm{parent}(x))^p$. Since $D_\mathcal{T}$ is an isomorphism, we can define a vector
$\bm{v}\in\mathbb{R}^\mathcal{X}$ by
\[
(D_\mathcal{T}\bm{v})_x = \mathrm{sgn}\:( (S_\mathcal{T} \bm{G})_x) d_\mathcal{T}(x,\mathrm{parent}(x))^p.
\]
From \eqref{eq:Phi*_trees_simple} we see that $\bm{v}\in\Phi^*_p$ and Lemma
\ref{lem:S_D}
shows that $\scp{\bm{G}}{\bm{v}}$ attains the upper bound in
\eqref{eq:tree_bound_on_max}.
This concludes the proof.
\subsection{Proof of Corollary \ref{cor:samworth}}
In order to use Theorem \ref{thm:trees} we define the tree $\mathcal{T}$ with vertices $\left\{
x_1,\dots, x_N \right\}$, edges $E=\left\{ (x_j, x_{j+1}),j =1,\dots,N-1
\right\}$ and $\mathrm{root}(\mathcal{T})=x_N$. Then, if $\bm{G}\sim\mathcal{N}(0,\Sigma(\bm{r}))$,
we have that $\left\{ (S_\mathcal{T}\bm{G})_j \right\}_{j=1,\dots, N}$ is a Gaussian
vector
such that for $i\leq j$
\begin{align*}
& \mathrm{cov}( (S_\mathcal{T}\bm{G})_i, (S_\mathcal{T}\bm{G})_j) = \sum_{\substack{k\leq i \\ l\leq
j}} E\left[ G_k G_l \right]
= \sum_{k\leq i} r_k(1-r_k) - \sum_{\substack{k\leq i\\l\leq j\\
k\neq l}} r_kr_l \\
&= \bar{r}_i - \sum_{\substack{k\leq i \\ l\leq i}}r_kr_l -
\sum_{\substack{k\leq i \\ i<l\leq j}} r_k r_l
= \bar{r}_i - \bar{r}_i^2 - \bar{r}_i(\bar{r}_j - \bar{r}_i)
= \bar{r}_i - \bar{r}_i \bar{r}_j.
\end{align*}
Therefore, we have that for a standard Brownian bridge $B$
\[
S_\mathcal{T}\bm{G} \sim (B(\bar{r}_1), \dots , B(\bar{r}_N)).
\]
Together with $d(x_j, \mathrm{parent}(x_j))=(x_{j+1} - x_j)^2$, and
\eqref{eq:weak_conv_trees} this proves the Corollary.
\end{document} | arXiv | {
"id": "1610.03287.tex",
"language_detection_score": 0.6997278928756714,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[A class of nonlocal hypoelliptic operators, etc.]{A class of nonlocal hypoelliptic operators\\ and their extensions}
\subjclass[2010]{35H10, 35R11, 47D06} \keywords{Kolmogorov equations, hypoelliptic operators of H\"ormander type, nonlocal equations, extension problems}
\date{}
\begin{abstract} In this paper we study nonlocal equations driven by the fractional powers of hypoelliptic operators in the form \[ \mathscr K u = \mathscr A u - \de_t u \overset{def}{=} \operatorname{tr}(Q \nabla^2 u) + <BX,\nabla u> - \de_t u, \] introduced by H\"ormander in his 1967 hypoellipticity paper. We show that the nonlocal operators $(-\mathscr K)^s$ and $(-\mathscr A)^s$ can be realized as the Dirichlet-to-Neumann map of doubly-degenerate extension problems. We solve such problems in $L^\infty$, and in $L^p$ for $1\le p<\infty$ when $\operatorname{tr}(B)\ge 0$. In forthcoming works we use such calculus to establish some new Sobolev and isoperimetric inequalities. \end{abstract}
\author{Nicola Garofalo}
\address{Dipartimento d'Ingegneria Civile e Ambientale (DICEA)\\ Universit\`a di Padova\\ Via Marzolo, 9 - 35131 Padova, Italy} \vskip 0.2in \email{nicola.garofalo@unipd.it}
\thanks{The first author was supported in part by a Progetto SID (Investimento Strategico di Dipartimento) ``Non-local operators in geometry and in free boundary problems, and their connection with the applied sciences", University of Padova, 2017.}
\author{Giulio Tralli} \address{Dipartimento d'Ingegneria Civile e Ambientale (DICEA)\\ Universit\`a di Padova\\ Via Marzolo, 9 - 35131 Padova, Italy} \vskip 0.2in \email{giulio.tralli@unipd.it}
\maketitle
\tableofcontents
\section{Introduction}\label{intro}
In 1967 H\"ormander proved his celebrated theorem stating that if for smooth vector fields $Y_0, Y_1,...,Y_m$ in $\R^{N+1}$ the Lie algebra generated by them has maximum rank, then the second order partial differential operator $\mathscr L = \sum_{i=1}^m Y_i^2 + Y_0$ is hypoelliptic, see \cite{Ho}. As a motivation to his study, in the opening of his paper the author considered the following class of equations \begin{equation}\label{K0} \mathscr K u = \mathscr A u - \de_t u \overset{def}{=} \operatorname{tr}(Q \nabla^2 u) + <BX,\nabla u> - \de_t u = 0, \end{equation} and showed that $\K$ is hypoelliptic if and only if the covariance matrix \begin{equation}\label{Kt} K(t) = \frac 1t \int_0^t e^{sB} Q e^{s B^\star} ds \end{equation} is invertible, i.e., $\operatorname{det} K(t) >0$ for every $t>0$. We note that the strict positivity of $K(t)$ is equivalent to the finite rank condition on the Lie algebra. In \eqref{K0} $Q$ and $B$ are $N\times N$ matrices with real, constant coefficients, with $Q\ge 0$, $Q = Q^\star$. We have denoted by $X$ the variable in $\R^N$, and thus $(X,t)\in \R^{N+1}$, and by $A^\star$ the transpose of a matrix $A$.
The class of operators \eqref{K0} includes several examples of interest in analysis, physics and the applied sciences. The simplest one is of course the ubiquitous heat equation, corresponding to the nondegenerate case when $\mathscr A = \Delta$ ($Q = I_N$, $B = O_N$). When $\mathscr A = \Delta - <X,\nabla>$ ($Q=I_N$, $B = - I_N$) one has the Ornstein-Uhlenbeck operator, of great interest in probability, see e.g. \cite{DZ}. Our primary motivating example, however, is the degenerate Kolmogorov operator, which arose in the seminal paper \cite{Kol} on Brownian motion and the theory of gases. Denote by $(X,t)=(v,x,t)$ the generic point in $\R^{N+1}$ with $N=2n$. With the choices $Q = \begin{pmatrix} I_n & 0_n\\ 0_n& 0_n\end{pmatrix}$, and $B = \begin{pmatrix} 0_n & 0_n\\ I_n & 0_n\end{pmatrix}$, the operator $\K$ in \eqref{K0} becomes \begin{equation}\label{kolmo0} \K u = \Delta_v u+ <v,\nabla_x u > - \de_t u. \end{equation} Clearly, \eqref{kolmo0} fails to be parabolic since it is missing the diffusive term $\Delta_x u$, but it is easily seen to satisfy H\"ormander's finite rank condition, and thus $\K$ is hypoelliptic. We note that, remarkably, Kolmogorov had already proved this fact thirty years prior to \cite{Ho} by exhibiting the following explicit fundamental solution for \eqref{kolmo0} \begin{equation}\label{kolmofs0}
p(X,Y,t) = \frac{c_n}{t^{2n}} \exp\left\{- \frac 1t \left(|v-w|^2 + \frac 3t <v-w,y-x-tv> + \frac{3}{t^2} |x- y +tv|^2\right)\right\}. \end{equation} Since \eqref{kolmofs0} is $C^\infty$ off the diagonal, it follows that \eqref{kolmo0} is hypoelliptic.
The class of partial differential operators \eqref{K0} has been intensively studied over the past thirty years, and thanks to the work of many people a lot is known about it. Nonetheless, some fundamental aspects presently remain elusive, such as Sobolev or isoperimetric inequalities, a Calder\'on-Zygmund theory (but for some interesting progress in this direction, see \cite{BCLP}), and one of local and nonlocal minimal surfaces. The difficulties with these hypoelliptic operators stem from the fact that the drift term in \eqref{K0} mixes the variables inextricably and this complicates the geometry considerably. This is already evident at the level of the model equation \eqref{kolmo0} and its probability transition kernel \eqref{kolmofs0}. Unlike what happens for H\"ormander operators of the form $\sum_{i=1}^m Y_i^2 - \partial_t$ (see, e.g., \cite{VSC}, \cite{Gjems} and the references therein), where there is only one intrinsic distance $d(x,y)$ that controls the geometry for all times, for \eqref{kolmofs0} there is a one-parameter family of non-symmetric pseudo-distances $d_t(X,Y)$ that drive the evolution. Such intertwined geometries are reflected in the large time behaviour of H\"ormander's fundamental solution of \eqref{K0}. In many respects such behaviour parallels the diverse situations that one encounters in the Riemannian setting when passing from positive to negative curvature. In general, the relevant volume function is not power-like in $t$ and need not be doubling. A detailed description of the different behaviours is contained in \cite{GT}.
Having said this, we turn to the focus of the present note. Our primary objective is to establish a sufficiently robust nonlocal calculus for a subclass of the hypoelliptic operators \eqref{K0} that includes \eqref{kolmo0} as a special case. In the forthcoming works \cite{GT}, \cite{GTiso}, starting from such calculus, we will establish some new Sobolev and isoperimetric inequalities. To be specific, our focus is on operators which, besides H\"ormander's hypoellipticity condition $K(t)>0$ for all $t>0$, also satisfy the assumption on the drift \begin{equation}\label{trace} \operatorname{tr} B \ge 0. \end{equation} Let us notice explicitly that such hypothesis includes, as special cases, the heat equation or \eqref{kolmo0}, for both of which we have $\operatorname{tr} B = 0$. But it leaves out examples such as the Ornstein-Uhlenbeck operator mentioned above, or the equation \begin{equation*} \K u = \de_{xx} u - 2(x+y) \de_x u + x \de_y u - \de_t u = 0, \end{equation*} which arises in the Smoluchowski-Kramers' approximation of Brownian motion with friction, see \cite{Bri} and \cite{Fre}. For the former we have $B = - I_N$, while for the latter one has $B = \begin{pmatrix} -2 & -2\\ 1 & 0\end{pmatrix}$.
To understand the role of \eqref{trace} in the present work we recall that the Cauchy problem $\K u = 0$ in $\R^{N+1}_+$, $u(X,0) = f$, admits a unique solution for $f\in \So$, see Theorem \ref{T:hor}. This generates a strongly continuous semigroup $\{P_t\}_{t>0}$ on $L^p$ defined by \[ P_t f(X) = \int_{\RN} p(X,Y,t) f(Y) dY, \] where $p(X,Y,t)$ is the transition distribution constructed by H\"ormander in \cite{Ho}, see also \eqref{PtKt}. However, the spectral properties of this semigroup dramatically change depending on the sign of $\tr\ B$. The assumption \eqref{trace} guarantees that $\{P_t\}_{t>0}$ is contractive on $L^p$, and this aspect plays a pervasive role in the present paper. We will return to the analysis of \eqref{K0} in the case $\operatorname{tr} B<0$ in a forthcoming study. In this work, regardless of the sign of $\operatorname{tr} B$, we solve the extension problem in $L^\infty$ (see Theorem \ref{T:epKinfty} and Theorem \ref{T:extLinfty}). For the $L^p$-case, instead, we shall assume \eqref{trace}.
\vskip 0.4cm
To put our results in the proper perspective we mention that the study of nonlocal equations is very classical, stretching back to the seminal works of M. Riesz \cite{R1, R2} on the fractional powers of the Laplacian $(-\Delta)^s$ and the wave operator $(\de_{tt} - \Delta)^s$. A semigroup based fractional calculus for closed linear operators was first introduced by Bochner in his visionary note \cite{Bo}, see also Feller's work \cite{Fe}. Phillips showed in \cite{Phi} that one could embed these approaches into a more general one based on the Kolmogorov-Levy representation theorem for infinitely divisible distributions. In \cite{B} Balakrishnan introduced a new fractional calculus that extended
the previous contributions to situations in which the relevant operator does not necessarily generate a semigroup. For a given closed operator $A$ on a Banach space $X$, under the assumption that $||\lambda R(\lambda,A)||\le M$ for $\lambda>0$ (there exist operators $A$ which satisfy such hypothesis but do not generate a semigroup), he constructed the fractional powers of $A$ by the beautiful formula \begin{equation}\label{bala0} A^\alpha x = - \frac{\sin(\pi \alpha)}{\pi} \int_{0}^{\infty} \lambda^{\alpha-1} R(\lambda,A) A x d\lambda,\ \ \ \ \ \ \ \ 0<\Re \alpha < 1, \end{equation} see \cite[(2.1)]{B}. When $A$ does generate a strongly continuous semigroup $\{T(t)\}_{t>0}$ on $X$, then it is well-known that \eqref{bala0} can also be expressed as follows \begin{equation}\label{bala02} A^\alpha x = - \frac{\alpha}{\G(1-\alpha)} \int_{0}^{\infty} t^{-\alpha-1} [T(t) x - x] dt,\ \ \ \ \ \ \ \ 0<\Re \alpha < 1. \end{equation} Similarly to the existing literature in the classical setting $\K = \Delta - \partial_t$, see \cite[(5.84) on p. 120]{SKM}, Balakrishnan's formula \eqref{bala02} is the starting point of our analysis. The gist of our work is to develop those mathematical tools that allow to successfully push the ideas in \cite{CS} to the class of degenerate hypoelliptic equations \eqref{K0}.
With $\mathscr A$ as in \eqref{K0}, we use \eqref{bala02} and the semigroup $\{P_t\}_{t>0}$ to define the fractional powers on functions $f\in \So(\RN)$ by the pointwise formula \begin{equation}\label{As0} (-\mathscr A)^s f(X) = - \frac{s}{\G(1-s)} \int_{0}^{\infty} t^{-s-1} [P_t f(X) - f(X)] dt,\ \ \ \ \ \ \ \ 0<s < 1. \end{equation} Since we also want to have a nonlocal calculus for the time-dependent operator $\K$, we introduce on a function $u\in \mathscr S(\RNu)$ what we call the \emph{H\"ormander evolutive semigroup} $$P^\K_\tau u(X,t) \overset{def}{=} \int_{\R^N} p(X,Y,\tau) u(Y,t-\tau) dY,\qquad (X,t)\in\RNu,\,\,\tau>0.$$ The notion of \emph{evolution semigroup} is well-known in dynamical systems, and the reader should see \cite{CL} in this respect. Using $\{P^\K_\tau\}_{\tau>0}$ we define on a function $u\in \mathscr S(\RNu)$, \begin{equation}\label{As02} \left(-\K\right)^s u(X,t) = - \frac{s}{\G(1-s)} \int_{0}^{\infty} \tau^{-s-1} [P^\K_\tau u(X,t) - u(X,t)] d\tau,\ \ \ \ \ \ \ \ 0<s < 1. \end{equation}
Having in mind the development of the program mentioned above, with definitions \eqref{As0} and \eqref{As02} in hand we turn the attention to the basic question of characterizing these nonlocal operators as traces of suitable Bessel processes. In probability this was first introduced by Molchanov and Ostrovskii in \cite{MO} for symmetric stable processes. But it was not until the celebrated 2007 extension paper of Caffarelli and Silvestre \cite{CS} that such idea became a powerful tool in analysis and geometry. Their work has allowed to convert problems involving the nonlocal operator $(-\Delta)^s$ in $\mathbb R^n$, into problems in $\R^{n}\times(0,\infty)$ involving the (local) partial differential equation of degenerate type $$\begin{cases} \operatorname{div}_{(x,z)}\left(z^a \nabla_{(x,z)} U\right) = 0, \\ U(x,0) = u(x). \end{cases}$$ One remarkable aspect of this procedure is represented by the limiting relation $$- \frac{2^{-a} \Gamma\left(\frac{1-a}2\right)}{\Gamma\left(\frac{1+a}2\right)} \underset{z\to 0^+}{\lim} z^a \de_z U(x,z) = (-\Delta)^s u(x),$$ where the parameters $0<s<1$ and $a\in (-1,1)$ are connected by the equation $a = 1-2s$ (hereafter, for $\ell>0$ we indicate with $\G(\ell) = \int_0^\infty \tau^\ell e^{-\tau} \frac{d\tau}\tau$ Euler's gamma function evaluated at $\ell$).
In the present paper we establish results analogous (at least on the formal level) to Caffarelli and Silvestre's (see also \cite{ST10}) for the nonlocal operators $(-\mathscr K)^s$ and $(-\mathscr A)^s$. Precisely, we first solve the extension problem for \eqref{As02}, and then we combine it with Bochner's subordination to obtain a corresponding solution for \eqref{As0}. The construction of the relevant Poisson kernels is based on fairly explicit formulas which involve H\"ormander's fundamental solution \eqref{PtKt}, and provide a flexible and robust tool for the theory developed in \cite{GT}, \cite{GTiso}.
As a final comment we mention that the novelty of our work is in the treatment of the genuinely degenerate hypoelliptic operators in \eqref{K0} when $Q\ge 0$ and $B\not = O_N$. In fact, in the nondegenerate case when $\mathscr A = \Delta$, and thus no drift is present, in a remarkable 1968 paper Frank Jones first solved the extension problem for the fractional heat equation $(\partial_t -\Delta)^{1/2}$ and constructed an explicit Poisson kernel for the extension operator, see \cite[(2.1) in Sec.2]{Jo} and the subsequent formulas. Such Poisson kernel was recently generalised by Nystr\"om and Sande \cite{NS} and Stinga and Torrea \cite{ST} to the case of fractional powers with $s\not= 1/2$. Our results can be seen as a far-reaching extension of these results to the much larger class \eqref{K0}, under the hypothesis \eqref{trace}. In connection with extension problems for sub-Laplacians in Carnot groups and for sum of squares of H\"ormander vector fields we mention \cite{FF, G18b}, and \cite{FGMT} for a related but more geometric result in the CR setting.
\vskip 0.4cm
The organization of the paper is as follows. In Section \ref{S:ks} we collect some well-known properties of the H\"ormander semigroup which are used throughout the rest of the paper. We also introduce the evolutive H\"ormander semigroup $\{P^\K_\tau\}_{\tau>0}$ and extend to the latter the results for $\{P_t\}_{t>0}$. This allows us to define in Section \ref{S:fracK} the fractional powers $(-\K)^s$ and the related extension problem, see Definition \ref{D:epKa}. In Proposition \ref{P:Ga} we introduce the Neumann fundamental solution, and in Definition \ref{D:PaK} the Poisson kernel for the extension problem. Section \ref{S:epKs} is devoted to the proof of Theorems \ref{T:epKinfty} and \ref{T:epKL2}. With these two results we prove the validity of the Dirichlet-to-Neumann condition respectively in $L^\infty$ and, under the additional assumption \eqref{trace}, in $L^p$. In Section \ref{S:fracA} we study the nonlocal operator $(- \mathscr A)^s$, where $\mathscr A$ is the diffusive part in \eqref{K0}. The main result of this section is Theorem \ref{T:extLinfty}, where we solve the relevant extension problem.
\subsection{Notation}
All the function spaces in this paper are based either on $\RN$ or on $\RNu$. For spaces on $\RN$ we will routinely avoid reference to the ambient space, for those on $\RNu$ we will explicitly mention the ambient space. For instance, the Schwartz space of rapidly decreasing functions in $\RN$ will be denoted by $\So$, whereas $\S$ denotes the Schwartz space in $\RNu$. The same convention applies to the $L^p$-spaces for $1\le p \le \infty$. The norm in $L^p$ will be denoted by $||\cdot||_p$ whenever there is no confusion with the ambient space. We will indicate with $L^\infty_0$ the Banach space of the $f\in C(\RN)$ such that $\underset{|X|\to \infty}{\lim}\ |f(X)| = 0$ with the norm $||\cdot||_\infty$. If $T:L^p\to L^q$ is a bounded linear map, we will indicate with $||T||_{p\to q}$ its operator norm. If $ q =p$, the spectrum of $T$ on $L^p$ will be denoted by $\sigma_p(T)$, the resolvent set by $\rho_p(T)$, the resolvent operator by $R(\lambda,T) = (\lambda I - T)^{-1}$. We adopt the convention that $a/\infty = 0$ for any $a\in \R$.
\section{The H\"ormander semigroup $\{P_t\}_{t>0}$}\label{S:ks}
In this section we collect some well-known properties of the semigroup associated with \eqref{K0} which will be used throughout the rest of the paper. The reader should see the works \cite{Ku}, \cite{K82}, \cite{GL}, \cite{LP}, \cite{L}, \cite{Pritorino}, \cite{Me}, \cite{LB}, \cite{Pristudia} and \cite{AB}. As we have mentioned in the introduction, the starting point is the following result from \cite{Ho}.
\begin{theorem}[H\"ormander]\label{T:hor} Given $Q$ and $B$ as in \eqref{K0}, for every $t> 0$ consider the \emph{covariance matrix} \eqref{Kt}. Then, the operator $\K$ is hypoelliptic if and only if $\det K(t)>0$ for every $t>0$. In such case, given $f \in \So$, the unique solution to the Cauchy problem $\K u = 0$ in $\R^{N+1}_+$, $u(X,0) = f$, is given by $u(X,t) = \int_{\R^N} p(X,Y,t) f(Y) dY,$ where \begin{equation}\label{PtKt} p(X,Y,t) = (4\pi)^{- \frac N2} \left(\operatorname{det}(t K(t))\right)^{-1/2} \exp\left( - \frac{<K(t)^{-1}(Y-e^{tB} X ),Y-e^{tB} X >}{4t}\right). \end{equation} \end{theorem}
Throughout the paper we always assume that $K(t)>0$ for every $t>0$. One should keep in mind that the hypoellipticity of \eqref{K0} can be expressed in a number of different ways, see \cite{LP}. It was noted in the same paper that the operator $\K$ is invariant with respect to the following non-Abelian group law $(X,s)\circ (Y,t) = (Y+ e^{-tB}X,s+t)$. Endowed with the latter the space $\R^{N+1}$ becomes a non-Abelian Lie group. In what follows it will be convenient to also have the following alternative expression for the kernel $p(X,Y,t)$ in \eqref{PtKt} (see, e.g., \cite{Ku, LP}): \begin{equation}\label{pcomeG} p(X,Y,t)= (4\pi)^{- \frac N2} e^{- t \operatorname{tr}(B)} \left(\operatorname{det}(C(t))\right)^{-1/2} \exp\big(- \frac{<C(t)^{-1}(X-e^{-tB} Y),X-e^{-tB} Y>}{4}\big), \end{equation} where $C(t) = \int_0^t e^{-sB} Q e^{-sB^\star} ds$. Notice that $C(t)^\star = C(t)$ and since \begin{equation}\label{KC} t K(t) = e^{tB} C(t) e^{tB^\star}, \end{equation} it is clear that $K(t)>0$ if and only if $C(t)>0$. Now, given a function $f\in \So$ we define \begin{align}\label{pt} P_t f(X) \overset{def}{=} \int_{\R^N} p(X,Y,t) f(Y) dY. \end{align} In the next two lemmas we collect the main properties of $\{P_t\}_{t>0}$. These results are well-known to the experts, but we include them for completeness.
\begin{lemma}\label{L:invS} For any $t>0$ we have: \begin{itemize} \item[(a)] $\mathscr A(\So)\subset \So$ and $P_t(\So) \subset \So$; \item[(b)] For any $f\in \So$ and $X\in \RN$ one has $\frac{\de}{\de t} P_t f(X) = \mathscr A P_t f(X)$; \item[(c)] For every $f\in \So$ and $X\in \RN$ the commutation property is true \begin{equation}\label{eqPtA} \mathscr A P_t f(X) = P_t \mathscr A f(X). \end{equation} \end{itemize} \end{lemma}
\begin{proof}
(a) The first part is obvious. For the second part it suffices to show that $\widehat{P_t f}\in \So$, and this follows from the following formula \begin{equation}\label{FTPt} \widehat{P_t f}(\xi) = e^{-t \operatorname{tr} B} e^{- 4 \pi^2 <C(t)\xi,\xi>} \hat{f}(e^{-tB^\star} \xi). \end{equation} (b) Easily follows from differentiating \eqref{FTPt} with respect to $t$, and using the following formula, $$ \widehat{\mathscr A f}(\xi) = - \left[<B^\star \xi, \nabla_\xi \hat f(\xi)> + \left(4 \pi^2 <Q\xi,\xi> + \operatorname{tr} B\right) \hat f(\xi)\right]$$ in combination with \eqref{FTPt}. (c) By (a), \eqref{eqPtA} is equivalent to showing that $\widehat{\mathscr A P_t f} = \widehat{P_t \mathscr A f}$ for $f\in \So$. After a routine computation, this is shown equivalent to the identity between the two symmetric quadratic forms \[
<e^{-tB} Q e^{-tB^\star} \xi,\xi>\ =\ <Q\xi,\xi> - <B C(t) \xi,\xi> - <C(t) B^\star \xi,\xi>,\ \ \ \ \ \ \ \xi\in \RN,\ t>0. \] This is true as a consequence of the matrix identity $$e^{-tB}Q e^{-tB^*}= Q - B C(t) - C(t) B^*,\ \ \ \ \ \ \ \ t>0,$$ that can be verified by noting that both sides vanish at $t = 0$ and they have the same derivative in $t$ (see also \cite[equation (4.6)]{AT}).
\end{proof}
We observe the following simple fact.
\begin{lemma}\label{L:Linfty} One has: (1) $P_t : L^\infty_0 \to L^\infty_0$ for every $t>0$; (2) $\So$ is dense in $L^\infty_0$. \end{lemma}
We next collect some known results concerning the action of $\{P_t\}_{t>0}$ on the spaces $L^p$, see \cite{Me} and \cite{LB}.
\begin{lemma}\label{L:Pt} The following properties hold: \begin{itemize} \item[(i)] For every $X\in \RN$ and $t>0$ we have $P_t 1(X) = \int_{\RN} p(X,Y,t) dY = 1$;
\item[(ii)] $P_t:L^\infty \to L^\infty$ with $||P_t||_{L^\infty\to L^\infty} \le 1$; \item[(iii)] For every $Y\in \RN$ and $t>0$ one has $ \int_{\RN} p(X,Y,t) dX = e^{- t \operatorname{tr} B}. $
\item[(iv)] Let $1\le p<\infty$, then $P_t:L^p \to L^p$ with $||P_t||_{L^p\to L^p} \le e^{-\frac{t \operatorname{tr} B}p}$. If $\operatorname{tr} B\ge 0$, $P_t$ is a contraction on $L^p$ for every $t>0$; \item[(v)] [Chapman-Kolmogorov equation] for every $X, Y\in \R^N$ and $t>0$ one has $$ p(X,Y,s+t) = \int_{\R^N} p(X,Z,s) p(Z,Y,t) dZ. $$ Equivalently, one has $P_{t+s} = P_t \circ P_s$ for every $s, t>0$. \end{itemize} \end{lemma}
We note that it was shown in \cite{L} that $\{P_t\}_{t>0}$ is not a strongly continuous semigroup in the space of uniformly continuous bounded functions in $\RN$, but this fact will have no bearing on our results since we are primarily concerned with the action of the H\"ormander semigroup on $L^p$, when $1\le p<\infty$, and on the replacement space $L^\infty_0$ when $p = \infty$. In this respect, we begin with a simple but quite useful lemma.
\begin{lemma}\label{L:Lprate} Let $1\le p \le \infty$. Given any $f\in \So$ for any $t\in [0,1]$ we have \[
||P_t f - f||_{p} \le ||\mathscr A f||_{p}\ \omega(t), \] where $\omega(t)\le \max\{1,e^{-\frac{\operatorname{tr} B}p}\}\ t$. \end{lemma}
\begin{proof} By Lemma \ref{L:invS}, part (b) and the commutation identity \eqref{eqPtA}, we have for any $f\in \So$, \begin{align*} P_t f(X) - f(X) & = \int_0^t \frac{d}{d\tau} P_\tau f(X) d\tau = \int_0^t \mathscr A P_\tau f(X) d\tau = \int_0^t P_\tau \mathscr A f(X) d\tau. \end{align*} This gives for any $0\le t \le 1$, \[
||P_t f - f||_{p} \le \int_0^t ||P_\tau \mathscr A f||_p d\tau \le ||\mathscr A f||_{p} \int_0^t e^{- \tau \frac{\operatorname{tr} B}p} d\tau = ||\mathscr A f||_{p}\ \omega(t), \] where in the second inequality we have used (ii) and (iv) of Lemma \ref{L:Pt}.
\end{proof}
\begin{corollary}\label{C:Ptpzero} Let $1\le p< \infty$. For every $f\in L^p$, we have
$||P_tf-f||_{p}\rightarrow 0$ as $t \to 0^+.$ Consequently, $\{P_t\}_{t>0}$ is a strongly continuous semigroup on $L^p$. The same is true when $p = \infty$, if we replace $L^\infty$ by the space $L^\infty_0$. \end{corollary}
\begin{proof} The first part of the statement follows immediately from the density of $\So$ in $L^p$ and from Lemmas \ref{L:Lprate} and \ref{L:Linfty}. The second part is a standard consequence of the former, see e.g. \cite[Proposition 1.3]{EN}.
\end{proof}
\begin{remark}\label{R:infty} The reader should keep in mind that from this point on when we consider $\{P_t\}_{t>0}$ as a strongly continuous semigroup in $L^p$, when $p = \infty$ we always mean that $L^\infty_0$ must be used instead of $L^\infty$. \end{remark}
Denote by $(\mathscr A_p,D_p)$ the infinitesimal generator of the semigroup $\{P_t\}_{t>0}$ on $L^p$ with domain \[ D_p = \left\{f\in L^p\mid \mathscr A_p f \overset{def}{=} \underset{t\to 0^+}{\lim}\ \frac{P_t f - f}{t}\ \text{exists in }\ L^p\right\}. \] One knows that $(\mathscr A_p,D_p)$ is closed and densely defined (see \cite[Theorem 1.4]{EN}).
\begin{corollary}\label{C:lp} We have $\So\subset D_p$. Furthermore, $\mathscr A_p f = \mathscr A f$ for any $f\in \So$, and $\So$ is a core for $(\mathscr A_p,D_p)$. \end{corollary}
\begin{proof}
For any $f\in \So$ we obtain from \eqref{eqPtA}: $\frac{P_t f - f}{t} - \mathscr A f = \frac 1t \int_0^t \left[P_s \mathscr A f - \mathscr A f\right] ds$. An application of Minkowski's integral inequality and Lemma \ref{L:Lprate} (keeping in mind that $\mathscr A f\in \So$ as well) give \[
\left\|\frac{P_t f - f}{t} - \mathscr A f\right\|_{p} \le \frac 1t \int_0^t ||P_s \mathscr A f - \mathscr A f||_{p} ds \le C ||\mathscr A^2 f||_{p} \ t. \] This shows that $\So \subset D_p$, and moreover the two linear operators $\mathscr A_p$ and $\mathscr A$ coincide on the dense subspace $\So$. Finally, the fact that $\So$ is a core for $(\mathscr A_p,D_p)$ follows from the second part of (a) in Lemma \ref{L:invS} and the fact that $\So$ is dense in $L^p$, see \cite[Proposition 1.7]{EN}.
\end{proof}
\begin{remark}\label{R:id} From now on for a given $p\in [1,\infty]$ with a slight abuse of notation we write $\mathscr A : D_p\to L^p$ instead of $\mathscr A_p$. In so doing, we must keep in mind that $\mathscr A$ actually indicates the closed operator $\mathscr A_p$ that, thanks to Corollary \ref{C:lp}, coincides with the differential operator $\mathscr A$ on $\So$. Using this identification we will henceforth say that $(\mathscr A,D_p)$ is the infinitesimal generator of the semigroup $\{P_t\}_{t>0}$ on $L^p$. \end{remark} Up to now we have not made use of the assumption \eqref{trace}. In the next lemma we change course.
\begin{lemma}\label{L:specter} Assume that \eqref{trace} is in force, and let $1\le p \le \infty$. Then: \begin{itemize} \item[(1)] For any $\lambda\in \mathbb C$ such that $\Re \lambda >0$, we have $\lambda\in \rho_p(\sA)$; \item[(2)] If $\lambda\in \mathbb C$ is such that $\Re \lambda >0$, then $R(\lambda,\mathscr A)$ exists and for any $f\in L^p$ it is given by the formula $R(\lambda,\mathscr A) f = \int_0^\infty e^{-\lambda t} P_t f\ dt$; \item[(3)] For any $\Re \lambda > 0$ we have
$||R(\lambda,\mathscr A)||_{p\to p} \le \frac{1}{\Re \lambda}$. \end{itemize} \end{lemma}
We omit the proof of Lemma \ref{L:specter} since it is a direct consequence of (ii), (iv) in Lemma \ref{L:Pt}, and of \cite[Theorem 1.10]{EN}.
In semigroup theory a procedure for forming a new semigroup from a given one is that of evolution semigroup, see \cite{CL}. In what follows we exploit this idea to introduce a new semigroup that will be used as a building block for: (1) defining the fractional powers of the operator $\K$ in \eqref{K0} above; (2) solve the extension problem for such nonlocal operators. Henceforth, we use the notation $\RNu$ to indicate the space $\RN \times \R$ with respect the variables $(X,t)$.
\begin{definition}\label{D:eks} With $p(X,Y,\tau)$ as in \eqref{PtKt}, we define the \emph{evolutive H\"ormander semigroup} on a function $u\in \mathscr S(\RNu)$ as \begin{equation}\label{eks} P^\K_\tau u(X,t) \overset{def}{=} \int_{\R^N} p(X,Y,\tau) u(Y,t-\tau) dY,\qquad (X,t)\in\RNu,\,\,\tau>0. \end{equation} \end{definition} We observe that if we let $\Lambda_h u(X,t) = u(X,t+h)$, then \eqref{eks} can be also written as $P^\K_\tau u = P_\tau(\Lambda_{-\tau} u)$.
\begin{lemma}\label{L:eks} If for $u\in \S$ we define $v(X,t;\tau) = P^\K_\tau u(X,t)$, then $v\in C^\infty(\RNu\times (0,\infty))$ and it solves the Cauchy problem $$\begin{cases} \de_\tau v = \K v \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{in}\ \RNu\times (0,\infty), \\ v(X,t;0) = u(X,t)\ \ \ \ \ \ \ (X,t)\in \RNu. \end{cases}$$ \end{lemma} \begin{proof} First of all, from the properties of $P_\tau$, it is easy to verify that $v(X,t;\tau)$ tends to $u(X,t)$ as $\tau\rightarrow 0^+$. Moreover, the assumption that $u\in \S$ implies that it has bounded time-derivatives of any order. This fact, together with the Gaussian behavior of the kernel $p(X,Y,\tau)$ (and of its derivatives), allows to differentiate under the integral sign for $\tau>0$: for more details, the reader can find in \eqref{Xderp} an explicit computation of the first derivatives of $p$. In particular, $v$ is $C^\infty(\RNu\times (0,\infty))$. Finally $\de_\tau v = \K v$ since, for positive $\tau$, we have $$\de_\tau v + \de_t v =\int_{\R^N} \de_\tau p(X,Y,\tau) u(Y,t-\tau) dY = \int_{\R^N} \mathscr A p(X,Y,\tau) u(Y,t-\tau) dY=\mathscr A v.$$ \end{proof}
We will need the counterpart of Lemmas \ref{L:invS} and \ref{L:Pt} for the semigroup $\{P^\K_\tau\}_{\tau>0}$.
\begin{lemma}\label{L:invSK} For any $\tau>0$ we have: \begin{itemize} \item[(a)] $\K(\S)\subset \S$ and $P^\K_\tau(\S) \subset \S$; \item[(b)] For any $u\in \S$ and $(X,t)\in \RNu$ one has $\frac{\partial}{\partial \tau} P^\K_\tau u(X,t) = \K P^\K_\tau u(X,t)$; \item[(c)] For every $u\in \S$ and $(X,t)\in \RNu$ the commutation property is true $$ \K P^\K_\tau u(X,t) = P^\K_\tau \K u(X,t). $$ \end{itemize} \end{lemma} \begin{proof}
(a) The first part is obvious. For the second part it suffices to show that $\widehat{P^\K_\tau \psi}\in \S$ if $\psi\in\S$, and this follows from the following formula
\[ \widehat{P^\K_\tau \psi}(\xi,\sigma) = e^{-\tau \operatorname{tr} B} e^{- 4 \pi^2 <C(\tau)\xi,\xi>} e^{-2\pi i \tau \sigma} \hat{\psi}(e^{-\tau B^\star} \xi, \sigma). \] (b) Is a consequence of Lemma \ref{L:eks}. (c) Follows from the commutation property $\mathscr A P_t=P_t\mathscr A$ proved in Lemma \ref{L:invS}, and from the relations $P^\K_\tau u = P_\tau(\Lambda_{-\tau} u)$, $\K \Lambda_{-\tau}= \Lambda_{-\tau} \K$.
\end{proof}
\begin{lemma}\label{L:PtK} The following properties hold: \begin{itemize} \item[(i)] For every $(X,t)\in \RNu$ and $\tau>0$ we have $P^\K_\tau 1(X,t)=1$; \item[(ii)] We have $P^\K_{\tau+s} = P^\K_\tau \circ P^\K_s$ for every $s, \tau>0$.
\item[(iii)] $P^\K_\tau:L^\infty(\RNu) \to L^\infty(\RNu)$ with $||P^\K_\tau||_{L^\infty\to L^\infty} \le 1$;
\item[(iv)] Let $1\le p<\infty$, then $P^\K_\tau:L^p(\RNu) \to L^p(\RNu)$ with $||P^\K_\tau||_{L^p\to L^p} \le e^{-\frac{\tau \operatorname{tr} B}p}$.
\item[(v)] If \eqref{trace} holds, $\{P^\K_\tau\}_{\tau>0}$ is a strongly continuous semigroup of contractions on $L^p(\RNu)$. \end{itemize} \end{lemma} \begin{proof} The proof of the desired statements easily follows from Definition \ref{D:eks}, the identity $P^\K_\tau u = P_\tau(\Lambda_{-\tau} u)$ and Lemma \ref{L:Pt}. We only provide the details of (iv). Using the above mentioned ingredients and Tonelli's theorem we have for any $u\in\S$ \begin{align*}
& ||P^\K_\tau u||_{L^p(\RNu)} = \left(\int_\R ||P_\tau(\Lambda_{-\tau} u(\cdot,t))||^p_{L^p(\RN)} dt\right)^{1/p} \le e^{-\tau \frac{\operatorname{tr} B}p} \left(\int_\R ||\Lambda_{-\tau} u(\cdot,t)||^p_{L^p(\RN)} dt\right)^{1/p} \\
&= e^{-\tau \frac{\operatorname{tr} B}p} \left(\int_\R ||u(\cdot,t)||^p_{L^p(\RN)} dt\right)^{1/p} = e^{-\tau \frac{\operatorname{tr} B}p} ||u||_{L^p(\RNu)}. \end{align*} \end{proof}
We conclude the section with the analogue of Lemma \ref{L:Lprate} for the semigroup $\{P^\K_\tau\}_{\tau>0}$. Its proof proceeds along the same lines exploiting Lemma \ref{L:invSK} and Lemma \ref{L:PtK}.
\begin{lemma}\label{P:Kdiff} Let $1\le p \le \infty$. Given any $f\in \S$ for any $\tau\in [0,1]$ we have \[
||P^\K_\tau f - f||_{p} \le ||\K f||_{p}\ \omega(\tau), \] where $\omega(\tau)\le \max\left\{1,e^{-\frac{\operatorname{tr} B}p}\right\}\ \tau$. \end{lemma}
\section{The nonlocal operators $(-\mathscr A)^s$, $(-\K)^s$ and their extension problems}\label{S:fracK}
Fix $0<s<1$. With the results of the previous section in hand we are now ready to introduce the definition of the nonlocal operators $(-\mathscr A)^s$ and $(-\K)^s$.
\begin{definition}\label{D:Ks} For any $\vf\in \So$ we define the nonlocal operator by the following pointwise formula \begin{align}\label{As} (-\mathscr A)^s \vf(X) & = - \frac{s}{\G(1-s)} \int_0^\infty t^{-s-1} \left[P_t \vf(X) - \vf(X)\right] dt,\qquad X\in\RN. \end{align} Similarly, for $u \in\S$ and $(X,t)\in \RNu$, we define \begin{equation}\label{Ks} \left(-\K\right)^s u(X,t) = - \frac{s}{\G(1-s)} \int_0^\infty \tau^{-1-s} \left[P^\K_\tau u(X,t)- u(X,t)\right] d\tau. \end{equation} \end{definition}
\begin{remark}\label{R:KsAs} We note explicitly that when $u(X,t) = u(X)$, then we obtain from \eqref{eks} \[ P^\K_\tau u(X,t) \overset{def}{=} \int_{\R^N} p(X,Y,\tau) u(Y) dY = P_\tau u(X),\qquad (X,t)\in\RNu,\,\,\tau>0. \] In such case, formulas \eqref{Ks} and \eqref{As} give $$ \left(-\K\right)^s u(X,t) = - \frac{s}{\G(1-s)} \int_0^\infty \tau^{-1-s} \left[P_\tau u(X)- u(X)\right] d\tau = (-\mathscr A)^s u(X). $$ \end{remark}
As a first observation we note that the integrals in the right-hand side of \eqref{As}, \eqref{Ks} are convergent. To check this, for instance, for \eqref{Ks}, write \begin{align*} & \int_0^\infty \tau^{-1-s} \left[P^\K_\tau u(X,t)- u(X,t)\right] d\tau = \int_0^1 \tau^{-1-s} \left[P^\K_\tau u(X,t)- u(X,t)\right] d\tau \\ & + \int_1^\infty \tau^{-1-s} \left[P^\K_\tau u(X,t)- u(X,t)\right] d\tau. \end{align*} In the second integral we use (iii) in Lemma \ref{L:PtK} which gives \begin{align*}
& \tau^{-1-s} \left|P^\K_\tau u(X,t)- u(X,t)\right| \le \tau^{-1-s} \left(||P^\K_\tau u||_{L^\infty(\RNu)} + ||u||_{L^\infty(\RNu)}\right) \\
& \le 2 ||u||_{L^\infty(\RNu)} \tau^{-1-s}\in L^1(1,\infty). \end{align*} For the first integral we use the crucial Lemma \ref{P:Kdiff}, that implies \begin{align*}
& \tau^{-1-s} \left|P^\K_\tau u(X,t)- u(X,t)\right| \le \tau^{-1-s} ||P^\K_\tau u - u||_{L^\infty(\RNu)} \le C \tau^{-s} \in L^1(0,1). \end{align*}
\begin{remark}\label{R:L2K} We emphasise that, because of the large-time behaviour of the semigroups $P_t$ and $P^\K_\tau$, when $1\leq p<\infty$ it may not be true in general that the function defined by the right-hand side of \eqref{As}, \eqref{Ks} be in $L^p$! We note however that, when \eqref{trace} holds, we can appeal to (iv) in Lemma \ref{L:Pt}, or (v) of Lemma \ref{L:PtK}, to show, by arguments similar to those above, that the equations \eqref{As}, \eqref{Ks} do define $L^p$ functions. \end{remark}
With Definition \ref{D:Ks} in hand we next introduce the extension problem for the nonlocal operator $(-\K)^s$. Following \cite{CS}, this is going to be a Dirichlet problem in one dimension up. Precisely, on the half-line $\R^+ = (0,\infty)$ with variable $z$ we consider the Bessel operator $\mathscr B_z^{(a)} = \frac{\partial^2}{\partial z^2} + \frac az \frac{\partial}{\partial z}$ with $a> -1$. We define the \emph{extension operator} as the following second-order partial differential operator in $\RNu\times (0,\infty)$ \begin{equation}\label{Ka} \K_a = z^a(\K + \mathscr B_z^{(a)}) = z^a(\mathscr A + \mathscr B_z^{(a)} - \partial_t). \end{equation}
\begin{definition}\label{D:epKa} The \emph{extension problem} consists in finding, for a given $u\in \S$, a function $U\in C^\infty(\RNu \times (0,\infty))$ such that \begin{equation}\label{epKa} \begin{cases} \K_a U = 0\ \ \text{in}\ \RNu\times (0,\infty), \\ U(X,t,0) = u(X,t). \end{cases} \end{equation} \end{definition}
In order to solve the problem \eqref{epKa} we are going to construct an appropriate Poisson kernel for it. Since the Bessel process plays a pivotal role in what follows, we recall some well-known properties of the latter. On the half-line $(0,\infty)$ we consider the Cauchy problem for $\mathscr B_z^{(a)}$ with the Neumann boundary condition (this corresponds to reflected Brownian motion, as opposed to killed Brownian motion, when a Dirichlet condition is imposed): \[ \begin{cases} \partial_t u - \mathscr B_z^{(a)} u = 0,\ \ \ \ \ \ \ \text{in} \ (0,\infty)\times (0,\infty), \\ u(z,0) = \vf(z),\ \ \ \ \ \ \ \ z\in (0,\infty), \\ \underset{z\to 0^+}{\lim} z^a \de_z u(z,t) = 0. \end{cases} \] The fundamental solution for this problem is given by \begin{equation}\label{pa} p^{(a)}(z,\zeta,t) =(2t)^{-\frac{a+1}{2}}\left(\frac{z\zeta}{2t}\right)^{\frac{1-a}{2}}I_{\frac{a-1}{2}}\left(\frac{z\zeta}{2t}\right)e^{-\frac{z^2+\zeta^2}{4t}}, \end{equation} where we have denoted by $I_\nu$ the modified Bessel function of the first kind. Formula \eqref{pa} is well-known in probability. For an explicit derivation based on purely analytical tools we refer the reader to \cite[Section 22]{G18} or also \cite[Section 6]{EM}. We note that for every $z>0$ and $t>0$ one has \begin{equation}\label{Patone} \int_0^\infty p^{(a)}(z,\zeta,t) \zeta^a d\zeta = 1, \end{equation} see \cite[Proposition 2.3]{G18c}. Also, from \cite[Proposition 2.4]{G18c} we have the Chapman--Kolmogorov identity, valid for every $z, \zeta>0$ and every $0<s, t<\infty$, \begin{equation}\label{ckpa} p^{(a)}(z,\zeta,t+s) = \int_0^\infty p^{(a)}(z,\eta,t) p^{(a)}(\eta,\zeta,s) \eta^a d\eta. \end{equation}
Using \eqref{pa} we now obtain the following result, whose verification is classical.
\begin{proposition}\label{P:Ga} The \emph{Neumann fundamental solution} for the operator $\K_a$ in \eqref{Ka} with singularity at a point $(Y,\tau,\zeta)\in \RNu \times (0,\infty)$, is given by \[ \mathscr G^{(a)}(X,t,z;Y,\tau,\zeta) = p(X,Y,t-\tau) p^{(a)}(z,\zeta,t-\tau), \] where $p(X,Y,t)$ is H\"ormander's fundamental solution of $\K$ in \eqref{PtKt} above. \end{proposition}
By \cite[Remark 22.27]{G18} we see that if the pole of $\mathscr G^{(a)}$ is on the thin manifold $\RNu\times \{0\}$, and in particular at $(Y,0,0)$, then we have $$\mathscr G^{(a)}(X,t,z;Y,0,0) = \frac{1}{2^{a} \G(\frac{a+1}2)} t^{-\frac{a+1}2} e^{-\frac{z^2}{4t}} p(X,Y,t).$$
We note the following two basic properties of $\mathscr G^{(a)}$.
\begin{proposition}\label{P:2p} For every $X\in \RN$, $z>0$ and $t>0$ one has \[ \int_{\mathbb R^{N+1}_+} \mathscr G^{(a)}(X,t,z;Y,0,\zeta) \zeta^a dY d\zeta = 1. \] Furthermore, for $X, Y\in \RN$, $z, \zeta\ge 0$ and $t, s >0$, one has \[ \mathscr G^{(a)}(X,t+s,z;Y,0,\zeta) = \int_{\mathbb R^{N+1}_+} \mathscr G^{(a)}(X,t,z;Z,0,\eta) \mathscr G^{(a)}(Z,s,\eta;Y,0,\zeta) \eta^a dZ d\eta. \] \end{proposition}
\begin{proof} The proof of the first claim immediately follows from Tonelli's theorem, (i) in Lemma \ref{L:Pt} and from \eqref{Patone} above. To establish the second claim, we argue as follows. Tonelli's theorem again gives \begin{align*} & \int_{\mathbb R^{N+1}_+} \mathscr G^{(a)}(X,t,z;Z,0,\eta) \mathscr G^{(a)}(Z,s,\eta;Y,0,\zeta) \eta^a dZ d\eta \\ & = \int_{\mathbb R^{N+1}_+} p(X,Z,t) p^{(a)}(z,\eta,t) p(Z,Y,s) p^{(a)}(\eta,\zeta,s) \eta^a dZ d\eta \\ & = \int_{\RN} p(X,Z,t) p(Z,Y,s) dZ \int_0^\infty p^{(a)}(z,\eta,t) p^{(a)}(\eta,\zeta,s) \eta^a d\eta \\ & = p(X,Y,t+s) p^{(a)}(z,\zeta,t+s) = \mathscr G^{(a)}(X,t+s,z;Y,0,\zeta), \end{align*} where in the second to the last equality we have used Lemma \ref{L:Pt} $(v)$ and \eqref{ckpa} above. \end{proof} We refer the interested reader to the recent results in \cite[Section 5]{GTfi} for sharp pointwise estimates for $\mathscr G^{(a)}$ and the associated extension semigroup.
\begin{definition}\label{D:PaK} We define the \emph{Poisson kernel} for the operator $\K_a$ as the function in $C^\infty(\RNu\times (0,\infty))$ given by \begin{align*} P^{(a)}_z(X,Y,t) & \overset{def}{=} - z^{-a} \partial_z \mathscr G^{(-a)}(X,t,z;Y,0,0) \\ & = \frac{1}{2^{1-a} \G(\frac{1-a}{2})} \frac{z^{1-a}}{t^{\frac{3-a}{2}}} e^{-\frac{z^2}{4t}} p(X,Y,t). \end{align*} \end{definition} We emphasize that, since $a\in (-1,1)$, we have $\frac{3-a}{2}>1$. The next result expresses a first basic property of the kernel $P^{(a)}_z(X,Y,t)$.
\begin{proposition}\label{P:Pa1} For every $(X,z)\in \RN \times \R^+$ one has \[ \int_0^\infty \int_{\RN} P^{(a)}_z(X,Y,t) dY dt = 1. \] \end{proposition}
\begin{proof} By Definition \ref{D:PaK} and Tonelli's theorem we have \[ \int_0^\infty \int_{\RN} P^{(a)}_z(X,Y,t) dY dt = \frac{1}{2^{1-a} \G(\frac{1-a}{2})} \int_0^\infty \frac{z^{1-a}}{t^{\frac{3-a}{2}}} e^{-\frac{z^2}{4t}} dt \int_{\RN} p(X,Y,t) dY. \] The desired conclusion now follows from (i) in Lemma \ref{L:Pt} and from the observation that for every $z>0$ one has \begin{equation}\label{ga1} \frac{1}{2^{1-a} \G(\frac{1-a}{2})} \int_0^\infty \frac{z^{1-a}}{t^{\frac{3-a}{2}}} e^{-\frac{z^2}{4t}} dt = 1. \end{equation}
\end{proof}
Another crucial property of $P^{(a)}_z(X,Y,t)$ is that it satisfies the partial differential equation $\K_a P^{(a)}_z(X,Y,t) = 0$, where $\K_a$ is the extension operator in \eqref{Ka}.
\begin{proposition}\label{P:pdePa} Fix $(Y,0,0)\in \RNu\times \{0\}$. Then, in every $(X,t,z)\in \RNu\times (0,\infty)$ with $t>0$, one has \[ z^{-a} \K_a P^{(a)}_z(X,Y,t) = \K P^{(a)}_z(X,Y,t) + \mathscr B_z^{(a)} P^{(a)}_z(X,Y,t) = 0. \] \end{proposition}
\begin{proof} For ease of computation let us denote \begin{equation}\label{ga} g^{(a)}(z,t) = \frac{1}{2^{1-a} \G(\frac{1-a}{2})} \frac{z^{1-a}}{t^{\frac{3-a}{2}}} e^{-\frac{z^2}{4t}}, \end{equation} so that \begin{equation}\label{prodPa} P^{(a)}_z(X,Y,t) = g^{(a)}(z,t) p(X,Y,t). \end{equation} Keeping in mind that $\K = \mathscr A - \partial_t$, we have \begin{align*} & \K P^{(a)}_z(X,Y,t) + \mathscr B_z^{(a)} P^{(a)}_z(X,Y,t) = g^{(a)}(z,t) \K p(X,Y,t) - p(X,Y,t) \partial_t g^{(a)}(z,t) \\ & + p(X,Y,t) \mathscr B_z^{(a)} g^{(a)}(z,t). \notag \end{align*} Since $\K p(X,Y,t) = 0$, we infer \begin{align*} & \K P^{(a)}_z(X,Y,t) + \mathscr B_z^{(a)} P^{(a)}_z(X,Y,t) = p(X,Y,t) \left(\mathscr B_z^{(a)} g^{(a)}(z,t) - \partial_t g^{(a)}(z,t)\right). \end{align*} A computation now gives \begin{equation}\label{eqga} \mathscr B_z^{(a)} g^{(a)}(z,t) = \left(\frac{z^2}{4t^2} - \frac{3-a}{2t}\right) g^{(a)}(z,t) = \partial_t g^{(a)}(z,t). \end{equation} We infer that $\mathscr B_z^{(a)} g^{(a)}(z,t) - \partial_t g^{(a)}(z,t) = 0$, thus reaching the desired conclusion.
\end{proof}
We finally establish a lemma that will prove critical in the proof of Proposition \ref{P:PKextsolution} below.
\begin{lemma}\label{L:limits} For every $X, Y\in \RN$, and any $z>0$, we have \[ P^{(a)}_z(X,Y,\infty) \overset{def}{=} \underset{t\to \infty}{\lim} P^{(a)}_z(X,Y,t) = 0, \ \ \text{and} \ \ \ P^{(a)}_z(X,Y,0) \overset{def}{=} \underset{t\to 0^+}{\lim} P^{(a)}_z(X,Y,t) = 0. \] \end{lemma}
\begin{proof} We begin by observing that, by the definition of $K(t)$ in \eqref{Kt}, we have the monotonicity of $t\mapsto tK(t)$ (in the sense of matrices). This implies that, if we fix arbitrarily a number $t_0>0$, then by \eqref{PtKt} we have for every $t\geq t_0$ and for all $X,Y\in\RN$ \[ 0< p(X,Y,t)\leq \frac{\left( 4\pi\right) ^{-N/2}}{\sqrt{\det \left(t_0K\left( t_0\right)\right) }}. \] Since on the other hand it is obvious from \eqref{ga} that for every $z>0$ we have $\underset{t\to \infty}{\lim} g^{(a)}(z,t) = 0$, then the conclusion regarding $P^{(a)}_z(X,Y,\infty)$ follows immediately by \eqref{prodPa}.\\ Concerning the behavior near $t=0$, we start noticing that, for every $X,Y$, and $t$, by the expression in \eqref{pcomeG} we easily have \[ 0\leq p(X,Y,t)\leq \frac{\left( 4\pi\right) ^{-N/2}}{\sqrt{\det C\left( t\right) }} e^{ - t\, \mathrm{tr} B}. \] Furthermore, it can be seen from its definition that the matrix $C(t)$ (and thus $\det C\left( t\right)$) behaves polynomially at $t=0$. We can in fact write, as $t\rightarrow 0^+$, $C(t)=tQ-\frac{1}{2}t^2\left(BQ+QB^\star\right)+o(t^2)$. More precisely, it is proved in \cite[equation (3.14) and Proposition 2.3]{LP} that $\det C\left( t\right)$ is asymptotic to $t^{D_0}$ as $t\rightarrow 0^+$, where $D_0$ is the homogeneous dimension of a suitable homogeneous operator associated with $\K$. Hence, since $g^{(a)}(z,t)$ tends to $0$ exponentially for every $z>0$, we can conclude the proof by using again \eqref{prodPa}.
\end{proof}
\section{Solving the extension problem for $(-\K)^s$}\label{S:epKs}
In this section we solve the extension problem \eqref{epKa}. Using the Poisson kernel $P^{(a)}_z(X,Y,t) $ we define an explicit solution formula, and prove that the latter does actually solve the problem \eqref{epKa}. The following theorem contains one of the main results of the present paper.
\begin{theorem}\label{T:epKinfty} Given $0<s<1$, let $a = 1-2s$. Let $\K$ be given as in \eqref{K0}, with the assumption $K(t)>0$ for $t>0$ in force. Let $u\in \S$ and consider the function defined by the equation \begin{align}\label{UKa} U(X,t,z) & = \int_{-\infty}^t \int_{\RN} P^{(a)}_z(X,Y,t-\tau) u(Y,\tau) dY d\tau \\ & = \int_0^\infty \int_{\RN} P^{(a)}_z(X,Y,\tau) u(Y,t-\tau) dY d\tau . \notag \end{align} Then, $U\in C^\infty(\RNu \times (0,\infty))$, and $U$ solves the extension problem in $L^\infty(\RNu)$, in the sense that we have $\K_a U = 0$ in $\RNu\times (0,\infty)$, and moreover \begin{equation}\label{convKainfty}
\underset{z\to 0^+}{\lim} ||U(\cdot,\cdot,z) - u||_{L^\infty(\RNu)} = 0. \end{equation} Furthermore, we also have in $L^\infty(\RNu)$ \begin{equation}\label{nconvKainfty} - \frac{2^{-a} \Gamma\left(\frac{1-a}2\right)}{\Gamma\left(\frac{1+a}2\right)} \underset{z\to 0^+}{\lim} z^a \de_z U(\cdot,\cdot,z) = (-\K)^s u. \end{equation} \end{theorem}
\begin{proof} We first prove that $U\in C^\infty(\RNu \times (0,\infty))$. With $(X,t,z)\in \RNu \times (0,\infty)$ fixed, we want to differentiate under the integral sign around $(X,t,z)$ by using the second equality in \eqref{UKa}. From \eqref{prodPa} and the Gaussian character of $g^{(a)}$ in \eqref{ga}, there is no problem in differentiating with respect to the $z$-variable. Moreover, since $u\in\S$ and it has bounded $t$-derivatives, also $\de_t U$ can be performed easily. The problems might arise when we differentiate with respect to $X$, and in particular concerning the behavior in $\tau$ (for both $\tau\rightarrow 0^+$ and $\tau\rightarrow \infty$) of $$\nabla_X\left( P^{(a)}_z(X,Y,\tau) u(Y,t-\tau)\right)=g^{(a)}(z,\tau)u(Y,t-\tau)\nabla_X p(X,Y,\tau).$$ A direct computation shows that \begin{equation}\label{Xderp} \nabla_X p(X,Y,\tau)=-\frac{1}{2}C^{-1}(\tau)\left(X-e^{-\tau B}Y\right)p(X,Y,\tau). \end{equation} On one side, for small $\tau$, we can bound
$$|\nabla_X p(X,Y,\tau)|\leq c(N,B) \left\|C^{-1}(\tau)\right\|\left(|X|+|Y|\right)p(X,Y,\tau),$$
and we can use the fact that, as we have already mentioned, $C(\tau)$ behaves like a polynomial for small $\tau$ (see, e.g., \cite[Lemma 3.3]{LP} for a precise behavior). Hence, thanks to the Gaussian behavior of $g^{(a)}(z,\tau)p(X,Y,\tau)$ (we recall that $z>0$ and $u\in\S$), we can find a uniform bound for $\left|\nabla_X\left( P^{(a)}_z(X,Y,\tau) u(Y,t-\tau)\right)\right|$ which is in $L^1(\RN\times (0,1))$. We now have to consider the behavior for large values of $\tau$. We notice that we can write \begin{eqnarray*}
&&|C^{-1}(\tau)\left(X-e^{-\tau B}Y\right)|^2\leq \left\|C^{-\frac{1}{2}}(\tau)\right\|^2\left\langle C^{-1}(\tau)\left(X-e^{-\tau B}Y\right),\left(X-e^{-\tau B}Y\right) \right\rangle\\
&\leq& \left\|C^{-\frac{1}{2}}(\tau)\right\|^2\left(\left\langle C^{-1}(\tau)X,X \right\rangle + \left\langle e^{-\tau B^\star}C^{-1}(\tau)e^{-\tau B}Y,Y \right\rangle+\right.\\ &+&\left.2\left\langle C^{-1}(\tau)X,X \right\rangle^{\frac{1}{2}}\left\langle e^{-\tau B^\star}C^{-1}(\tau)e^{-\tau B}Y,Y \right\rangle^{\frac{1}{2}}\right). \end{eqnarray*} Furthermore, from $C(t) = \int_0^t e^{-sB} Q e^{-sB^\star} ds$ it is obvious that $C(\tau)\geq C(\tau_0)$ for all $\tau\geq \tau_0>0$, and \eqref{KC} gives $e^{\tau B}C(\tau)e^{\tau B^\star}=\tau K(\tau)\geq \tau_0 K(\tau_0)= e^{\tau_0 B}C(\tau_0)e^{\tau_0 B^\star}$. Fixing $\tau_0=1$, we then infer that for all $\tau\geq 1$, \begin{eqnarray*}
&&|C^{-1}(\tau)\left(X-e^{-\tau B}Y\right)|^2\leq \left\|C^{-\frac{1}{2}}(1)\right\|^2\left(\left\langle C^{-1}(1)X,X \right\rangle + \left\langle e^{-B^\star}C^{-1}(1)e^{-B}Y,Y \right\rangle+\right.\\ &+&\left.2\left\langle C^{-1}(1)X,X \right\rangle^{\frac{1}{2}}\left\langle e^{-B^\star}C^{-1}(1)e^{- B}Y,Y \right\rangle^{\frac{1}{2}}\right). \end{eqnarray*}
This estimate, together with \eqref{Xderp} and the behaviour of $g^{(a)}(z,\tau)$ for large values of $\tau$, allows us to find a uniform bound for $\left|\nabla_X\left( P^{(a)}_z(X,Y,\tau) u(Y,t-\tau)\right)\right|$ which is in $L^1(\RN\times (1,+\infty))$. This proves that we can differentiate (at least once) $U$ under the integral sign around any $(X,t,z)$. We can argue in the same way for derivatives of arbitrary order. Therefore, $U\in C^\infty(\RNu \times (0,\infty))$ and, by Proposition \ref{P:pdePa}, we can say that $$ \K_aU(X,t,z)=\int_0^\infty \int_{\RN} \K_a P^{(a)}_z(X,Y,\tau) u(Y,t-\tau) dY d\tau =0$$ for all $(X,t,z)\in\RNu\times (0,+\infty)$. As a second step we show that \eqref{convKainfty} holds. To reach this goal we make the observation that $U$ can be written in the following form using the semigroup $P^\K_\tau$ \begin{equation}\label{rem} U(X,t,z) = \frac{1}{2^{1-a} \G(\frac{1-a}{2})} z^{1-a} \int_0^\infty \frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} P^\K_\tau u(X,t) d\tau. \end{equation} To recognize the validity of \eqref{rem} we use the second equality in \eqref{UKa} and \eqref{prodPa} to find \begin{align*} U(X,t,z) & = \int_0^\infty \int_{\RN} P^{(a)}_z(X,Y,\tau) u(Y,t-\tau) dY d\tau \\ &= \int_0^\infty g^{(a)}(z,\tau) \left(\int_{\RN} p(X,Y,\tau) u(Y,t-\tau) dY\right) d\tau \\ & = \int_0^\infty g^{(a)}(z,\tau) P^\K_\tau u(X,t) d\tau, \end{align*} where in the last equality we have used \eqref{eks} above. Keeping \eqref{ga} in mind, we have proved \eqref{rem}.
In view of \eqref{ga1} we now obtain from \eqref{rem} that we can also write \begin{align}\label{beauty00} & U(X,t,z) - u(X,t) \\ & = \frac{1}{2^{1-a} \G(\frac{1-a}{2})} z^{1-a} \int_0^\infty \frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} \left[P^\K_\tau u(X,t) - u(X,t)\right] d\tau. \notag \end{align} Using the representation \eqref{beauty00} we can now write \begin{align*}
& ||U(\cdot,\cdot,z) - u||_{L^\infty(\RNu)} \\
& \le \frac{1}{2^{1-a} \G(\frac{1-a}{2})} z^{1-a} \int_0^1 \frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} \left\|P^\K_\tau u - u\right\|_{L^\infty(\RNu)} d\tau \\
& + \frac{1}{2^{1-a} \G(\frac{1-a}{2})} z^{1-a} \int_1^\infty \frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} \left\|P^\K_\tau u - u\right\|_{L^\infty(\RNu)} d\tau. \end{align*} In the second integral we use the contractivity of $P^\K_\tau$ on $L^\infty(\RNu)$ (Lemma \ref{L:PtK}) to bound \[
\frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} \left\|P^\K_\tau u - u\right\|_{L^\infty(\RNu)} \le 2 \left\|u\right\|_{L^\infty(\RNu)} \frac{1}{\tau^{\frac{3-a}{2}}}\in L^1(1,\infty), \] since $\frac{3-a}{2}>1$. In the first integral, instead, we need to crucially use the rate in Lemma \ref{P:Kdiff} \[
\left\|P_\tau^\K u - u\right\|_{L^\infty(\RNu)} = O(\tau), \] to estimate \begin{align*}
& \int_0^1 \frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} \left\|P^\K_\tau u - u\right\|_{L^\infty(\RNu)} d\tau
\le C \int_0^1 \frac{1}{\tau^{\frac{1-a}{2}}} d\tau < \infty, \end{align*} since $0< \frac{1-a}{2}<1$. In conclusion, the right-hand side in \eqref{beauty00} goes to $0$ in $L^\infty(\RNu)$ norm with $z^{1-a}$, and since $1-a>0$, we have demonstrated \eqref{convKainfty}.
In order to complete the proof we are left with establishing \eqref{nconvKainfty}. The proof of this hinges again on the representation formula \eqref{beauty00}. Differentiating it, we find \begin{align}\label{wow} & - \frac{2^{-a} \Gamma\left(\frac{1-a}2\right)}{\Gamma\left(\frac{1+a}2\right)} z^a \de_z U(X,t,z) \\ & = - \frac{1-a}{2\Gamma\left(\frac{1+a}2\right)} \int_0^\infty \frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} \left[P^\K_\tau u(X,t) - u(X,t)\right] d\tau \notag \\ & + \frac{1}{4\Gamma\left(\frac{1+a}2\right)} z^2 \int_0^\infty \frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} \left[P^\K_\tau u(X,t) - u(X,t)\right] \frac{d\tau}\tau. \notag \end{align} On the other hand, keeping in mind that $a = 1-2s$, we can rewrite the definition \eqref{Ks} as follows \begin{equation}\label{newclothes} (-\K)^s u(X,t) = - \frac{1-a}{2\Gamma\left(\frac{1+a}2\right)} \int_0^\infty \frac{1}{\tau^{\frac{3-a}{2}}} \left[P^\K_\tau u(X,t) - u(X,t)\right] d\tau. \end{equation} Subtracting \eqref{newclothes} from \eqref{wow} we thus find \begin{align*}
& \left\|- \frac{2^{-a} \Gamma\left(\frac{1-a}2\right)}{\Gamma\left(\frac{1+a}2\right)} z^a \de_z U(\cdot,\cdot,z) - (-\K)^s u\right\|_{L^\infty(\RNu)} \\
& \le \frac{1-a}{2\Gamma\left(\frac{1+a}2\right)} \int_0^\infty \frac{1}{\tau^{\frac{3-a}{2}}} \left|e^{-\frac{z^2}{4\tau}} - 1\right| \left\|P^\K_\tau u - u\right\|_{L^\infty(\RNu)} d\tau \\
& + \frac{z^2}{4\Gamma\left(\frac{1+a}2\right)} \int_0^\infty \frac{1}{\tau^{\frac{3-a}{2}}} e^{-\frac{z^2}{4\tau}} \left\|P^\K_\tau u - u\right\|_{L^\infty(\RNu)} \frac{d\tau}\tau \\ & = I(z) + II(z). \end{align*} To complete the proof of the theorem it suffices to show that both $I(z), II(z) \longrightarrow 0$ as $z\to 0^+$. We handle $II(z)$ as follows \begin{align*} & II(z) \cong z^2 \int_0^1 \frac{1}{\tau^{\frac{1-a}{2}}} e^{-\frac{z^2}{4\tau}} \frac{d\tau}\tau
+ z^2 \int_1^\infty \frac{1}{\tau^{\frac{3-a}{2}}} \frac{d\tau}\tau
\\
& = O(z^{1+a}) \ \longrightarrow\ 0\ \ \ \ \ \text{since}\ a\in (-1,1). \end{align*} For $I(z)$ we consider the integrand \[
0\le g_z(\tau) \overset{def}{=} \frac{1}{\tau^{\frac{3-a}{2}}} \left|e^{-\frac{z^2}{4\tau}} - 1\right| \left\|P^\K_\tau u - u\right\|_{L^\infty(\RNu)}, \ \ \ \ \ \ 0<\tau <\infty. \] We clearly have $g_z(\tau) \to 0$ as $z\to 0^+$ for every $\tau>0$. On the other hand, there exists an absolute constant $C>0$ and a function $g\in L^1(0,\infty)$ such that $0\le g_z(\tau) \le C g(\tau)$ for every $\tau >0$. In fact, using Lemmas \ref{L:PtK} and \ref{P:Kdiff} it is not difficult to convince oneself that we can take \[ g(\tau) = \begin{cases} \frac{1}{\tau^{\frac{1-a}{2}}} \ \ \ \ \ \ 0<\tau \le 1, \\ \frac{1}{\tau^{\frac{3-a}{2}}}\ \ \ \ \ \ \ 1<\tau<\infty. \end{cases} \] By Lebesgue dominated convergence we conclude that $I(z)\to 0$ as $z\to 0^+$. \end{proof}
We can now state the second main result in this paper.
\begin{theorem}\label{T:epKL2} Suppose that \eqref{trace} holds.
Let $u\in \S$ and consider the function $U$ defined by \eqref{UKa} above. Then, $U\in C^\infty(\RNu \times (0,\infty))$, and $U$ solves the extension problem in $L^p(\RNu)$ for any $1\leq p<\infty$, in the sense that we have $\K_a U = 0$ in $\RNu\times (0,\infty)$, and moreover \begin{equation}\label{convKaL2}
\underset{z\to 0^+}{\lim} ||U(\cdot,\cdot,z) - u||_{L^p(\RNu)} = 0. \end{equation} Furthermore, we also have in $L^p(\RNu)$ \begin{equation}\label{nconvKaL2} - \frac{2^{-a} \Gamma\left(\frac{1-a}2\right)}{\Gamma\left(\frac{1+a}2\right)} \underset{z\to 0^+}{\lim} z^a \de_z U(\cdot,\cdot,z) = (-\K)^s u. \end{equation} \end{theorem}
\begin{proof} We begin by observing that, in view of Remark \ref{R:L2K}, the assumption \eqref{trace} guarantees that $(-\K)^s u\in L^p(\RNu)$. Next, since the first part of the theorem has already been established in the proof of Theorem \ref{T:epKinfty} we only need to show that \eqref{convKaL2} and \eqref{nconvKaL2} hold. Now, the proof of these facts proceeds exactly as in the proof of \eqref{convKainfty} and \eqref{nconvKainfty}, except that we must replace $L^\infty$ norms with $L^p$ ones, which we can do since by (v) in Lemma \ref{L:PtK} we know that the semigroup $P^\K_\tau$ is contractive in $L^p(\RNu)$. For the integrals near zero, say on the interval $(0,1)$, we use the crucial convergence rate in Lemma \ref{P:Kdiff}, and everything proceeds as in the proof of Theorem \ref{T:epKinfty}.
\end{proof}
\section{The extension problem for the nonlocal operator $(-\mathscr A)^s$}\label{S:fracA}
In this last section we use the results of Section \ref{S:epKs} and Bochner's subordination to solve the extension problem for the fractional powers \eqref{As} of the hypoelliptic operators $\mathscr A$ which constitute the ``diffusive" part of the H\"ormander operators $\K$ in \eqref{K0}. Since once the properties of the relevant Poisson kernel are established the details are completely analogous to those in Theorems \ref{T:epKinfty} and \ref{T:epKL2}, we will skip them altogether.
We consider the space $\mathbb R^{N+1}_+ = \R^N \times (0,\infty)$, and use the letters $(X,z), (Y,\zeta)$, etc. to indicate generic points in such space. For any number $a\in (-1,1)$ we now consider the following partial differential operator in $\mathbb R^{N+1}_+$ \begin{equation}\label{extA} \mathscr A_a \overset{def}{=} z^a \left(\mathscr A + \mathscr B_z^{(a)}\right). \end{equation} Again in analogy with \cite{CS}, when $a = 1-2s$ we call the operator $\mathscr A_a$ in \eqref{extA} the \emph{extension operator} for $(-\mathscr A)^s$ in \eqref{As}. We now introduce the following.
\begin{definition}\label{D:extpoisson} Given any $a\in (-1,1)$, we define the \emph{Poisson kernel} for the operator $\mathscr A_a$ in \eqref{extA} above as \begin{equation}\label{slPK} \mathscr P^{(a)}(X,Y,z) = \int_0^\infty P^{(a)}_z(X,Y,t) dt,\qquad X,Y\in\RN,\,\, z>0, \end{equation} where the function $P^{(a)}_z(X,Y,t) $ is as in Definition \ref{D:PaK}. \end{definition}
A first basic property of the kernel $\mathscr P^{(a)}(X,Y,z) $ is expressed by the next result.
\begin{proposition}\label{P:Ka1} For every $X\in \R^N$ and $z>0$, one has \[ \int_{\R^N} \mathscr P^{(a)}(X,Y,z) dY = 1. \] \end{proposition}
\begin{proof} Using \eqref{slPK} and Tonelli's theorem we find for every $X\in \R^N$ \begin{align*} & \int_{\R^N} \mathscr P^{(a)}(X,Y,z) dY = \int_{\R^N} \int_0^\infty P^{(a)}_z(X,Y,t) dt dY = \int_0^\infty \int_{\R^N} P^{(a)}_z(X,Y,t) dY dt = 1, \end{align*} where in the last equality we have used Proposition \ref{P:Pa1}.
\end{proof}
We now show that the kernel $\mathscr P^{(a)}(X,Y,z)$ is a solution of the extension operator $\mathscr A_a$ in \eqref{extA} above.
\begin{proposition}\label{P:PKextsolution} Fix $Y\in \R^N$. The function $(X,z)\to \mathscr P^{(a)}(X,Y,z)$ belongs to $C^\infty(\RN\times (0,\infty))$. Furthermore, for every $X\not= Y$ and $z>0$ one has \[ \mathscr A_{a} \mathscr P^{(a)}(X,Y,z) = 0. \] \end{proposition}
\begin{proof} We show that we can differentiate under the integral sign in the definition \eqref{slPK} and prove that $(X,z)\to \mathscr P^{(a)}(X,Y,z)$ belongs to $C^\infty(\RN \times (0,\infty))$. To do this, we have on one side that the required bound on the $z$-derivatives is straightforward (since $z>0$). On the other side, we need to be careful when we differentiate with respect to the $X$-variables. However, this can be done by arguing as in the proof of Theorem \ref{T:epKinfty}, where we establish the right bounds of $\nabla_X P_z^{(a)}(X,Y,t)$ respectively for small values and large values of $t$. In this way we accomplish the first part of the statement. Furthermore, using \eqref{extA} we find for any $z>0$ and $X\not = Y$ \[ z^{-a} \mathscr A_{a} \mathscr P^{(a)}(\cdot,Y,z) = \mathscr A \mathscr P^{(a)}(\cdot,Y,z) + \mathscr B_z^{(a)} \mathscr P^{(a)}(\cdot,Y,z). \] To compute the quantities in the right-hand side, we differentiate under the integral sign obtaining \begin{align}\label{KaLL} & z^{-a} \mathscr A_{a} \mathscr P^{(a)}(X,Y,z) = \int_0^\infty g^{(a)}(z,t) \mathscr A p(X,Y,t) dt + \int_0^\infty p(X,Y,t) \mathscr B_z^{(a)} g^{(a)}(z,t) dt. \end{align} To compute the first integral in the right-hand side of \eqref{KaLL} we now use the equation satisfied by $p(X,Y,t)$, $\mathscr A p(X,Y,t) = \partial_t p(X,Y,t)$. This gives for every $X\not=Y$ and $t>0$, \begin{align}\label{KaLL2} & \int_0^\infty g^{(a)}(z,t) \mathscr A p(X,Y,t) dt = \int_0^\infty g^{(a)}(z,t) \partial_t p(X,Y,t) dt \\ & \ \ \ \ \ \ \ \text{(integrating by parts)} \notag\\ & = P^{(a)}_z(X,Y,\infty) - P^{(a)}_z(X,Y,0) - \int_0^\infty \partial_t g^{(a)}(z,t)\, p(X,Y,t) dt \notag\\ & = - \int_0^\infty p(X,Y,t) \mathscr B_z^{(a)} g^{(a)}(z,t) dt, \notag \end{align} where in the last equality we have used the crucial Lemma \ref{L:limits} and \eqref{eqga}. Substituting \eqref{KaLL2} into \eqref{KaLL} we reach the desired conclusion.
\end{proof}
In closing, we solve the extension problem for the operator $(-\mathscr A)^s$.
\begin{definition}\label{D:ep} For $0<s<1$, let $a= 1-2s$. The extension problem in $\mathbb R^{N+1}_+$ for the nonlocal operator $(-\mathscr A)^s$, is, for a given $\vf\in\So$, the following: \begin{equation}\label{ep} \begin{cases} \mathscr A_a U = 0,\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \text{in}\ \mathbb R^{N+1}_+, \\ U(X,0) = \vf(X)\ \ \ \ \ \ \ X\in \R^N. \end{cases} \end{equation} \end{definition}
Our final result is the counterpart of Theorem \ref{T:epKinfty}. Since the details are completely analogous we omit them altogether.
\begin{theorem}\label{T:extLinfty} Given $\vf\in \So$ consider the function $U$ defined by \begin{equation}\label{pos} U(X,z) = \int_{\R^{N}} \mathscr P^{(a)}(X,Y,z) \vf(Y) dY. \end{equation} One has $U\in C^\infty(\RN\times (0,\infty))$ and $U$ solves the extension problem \eqref{ep}. By this we mean that $\mathscr A_a U = 0$ in $\mathbb R^{N+1}_+$, and we have $\underset{z\to 0^+}{\lim} U(\cdot,z) = \vf$ in $L^\infty$. Moreover, we also have in $L^\infty$ \begin{equation}\label{nconvAinfty} - \frac{2^{-a} \Gamma\left(\frac{1-a}2\right)}{\Gamma\left(\frac{1+a}2\right)} \underset{z\to 0^+}{\lim} z^a \de_z U(\cdot,z) = (-\mathscr A)^s \vf. \end{equation} If furthermore the hypothesis \eqref{trace} is satisfied, then the convergence is also in $L^p$ for any $1\leq p<\infty$. \end{theorem}
In closing, we mention that when $\mathscr A$ is a nondegenerate Ornstein--Uhlenbeck operator $\Delta + \langle BX,\nabla\rangle$, then some properties of $(-\mathscr{A})^{1/2}$ were obtained by Priola in \cite{Pri} in his study of the Dirichlet problem in half-spaces.
\end{document} | arXiv | {
"id": "1811.02968.tex",
"language_detection_score": 0.6728969812393188,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{A Nutrient-Prey-Predator Model: Stability and Bifurcations} \author{Mary Ballyk\thanks{Keywords: Chemostat, Hopf bifurcation, coexistence equilibrium}
\and Ibrahim Jawarneh\thanks{Mathematics Subject Classification: Primary 37G10; Secondary 34C23 92D25 34A34 } \\ \\ Department of Mathematical Sciences \\ New Mexico State University \\ Las Cruces, NM 88003, USA \and Ross Staffeldt} \maketitle \begin{abstract} In this paper we consider a model of a nutrient-prey-predator system in a chemostat with general functional responses, using the input concentration of nutrient as the bifurcation parameter. We study the changes in the existence of isolated equilibria and in their stability, as well as the global dynamics, as the nutrient concentration varies. The bifurcations of the system are analytically verified and we identify conditions under which an equilibrium undergoes a Hopf bifurcation and a limit cycle appears. Numerical simulations for specific functional responses illustrate the general results. \end{abstract}
\section{Introduction} \label{Introduction}
We consider a mathematical model of two-species predator-prey interaction in the chemostat under nutrient limitation.
With the exception of one nutrient, all nutrients that the prey species
requires are supplied to the growth vessel from the feed vessel in ample supply. The predator species grows exclusively on the prey.
With $N$ the concentration of the limiting nutrient, $P$ the concentration of prey (say, phytoplankton), and $Z$ the concentration of predator (say, zooplankton), we consider the following model: \begin{align}
dN/dt &= (\mu - N) D - P f_1(N) \notag \\
dP/dt &= \gamma_1 P f_1(N) - D_1 P - Z f_2(P) \label{NPZsys} \\
dZ/dt &= \gamma_2 Z f_2(P) - D_2 Z \notag \end{align} for initial conditions $N(0) = N_0 >0$, $P(0) = P_0 \geq 0$, and $Z(0) = Z_0 \geq 0$.
The concentration of the growth-limiting nutrient in the feed vessel is denoted $\mu$, and will be the bifurcation parameter in our analysis. $D$ is the input rate from the feed vessel to the growth vessel as well as the washout rate from the growth vessel to the receptacle, so that constant volume is maintained. The parameters $D_{1}=D+\epsilon_1$ and $D_{2}=D+\epsilon_2$ are the removal rates of $P$ and $Z$, respectively, from the growth vessel, incorporating the washout rate $D$ and the intrinsic death rates $\epsilon_i$ of $P$ and $Z$. Our analysis does not necessarily require that $\epsilon_1$ and $\epsilon_2$ are positive; however, $D_1$ and $D_2$ should be positive. The yield coefficient $\gamma_1$ gives the amount of prey biomass produced per unit of nutrient consumed, while $\gamma_2$ gives the amount of predator biomass produced per unit of prey biomass consumed.
The function $f_1(N)$ represents the per capita consumption rate of nutrient by the prey populations
as a function of the concentration of available nutrient; similarly, the function $f_2(P)$ represents the per capita consumption rate of the prey by the predator
as a function of available prey. These functions are assumed to satisfy $f_i(0) = 0$, $i=1$, 2, $f_1'(N)>0$ for all $N\geq 0$, and $f_2'(P)>0$ for all $P\geq 0$. We further assume that $f_1(N)$ and $f_2(P)$ are bounded. To avoid the case of washout due to an inadequate resource, we assume that $\lim_{N\to\infty} f_{1}(N) > D_1/\gamma_1$, and to avoid the case of an inadequate prey, we assume that $\lim_{P\to\infty} f_{2}(P) > D_2/\gamma_2$. Define $\lambda_P(D_1)$ and $\lambda_Z(D_2)$ to be the unique numbers satisfying \begin{equation} \label{breakeven}
f_1\bigl(\lambda_P(D_1)\bigr)=D_1/\gamma_1 \mbox{ and }
f_2\bigl(\lambda_Z(D_2)\bigr)=D_2/\gamma_2. \end{equation} The number $\lambda_P(D_1)$ is the break-even concentration of nutrient, at which the growth and removal of phytoplankton balance; $\lambda_Z(D_2)$ is similarly interpreted. The number $D$ plays a central role in our investigation, so we assume that $\lambda_P(D)$ and $\lambda_Z(D)$ are also defined. From the perspective of $D$, due to the boundedness assumptions on $f_1$ and $f_2$, $D_1$ and $D_2$ are perturbations of $D$. \begin{lemma} \label{lambda_diff}
From a functional point of view, $\lambda_P$ and $\lambda_Z$ are right
inverses of $\gamma_1f_1$ and $\gamma_2f_2$, respectively. Accordingly, on their respective domains $\lambda_P$ and $\lambda_Z$ are as differentiable as $f_1$ and $f_2$. We note for later use \begin{equation} \label{lambdaderivs}
\lambda_P'(D_1) = \bigl( \gamma_1\cdot f_1'(\lambda_P(D_1))\bigr)^{-1}
\quad \text{and} \quad
\lambda_Z'(D_2) = \bigl( \gamma_2\cdot f_2'(\lambda_Z(D_2))\bigr)^{-1}. \qed \end{equation} \end{lemma} Kuang and Li \cite{Kuang1999} studied this system with general functional responses and distinct values of $D$, $D_1$, and $D_2$. However, they fixed the input nutrient concentration, whereas we have this as a parameter. With the hypothesis that $D{=}D_1{=}D_2$, they provide numerical criteria for the stability of a coexistence equilibrium, and prove that a cycle exists when the equilibrium is unstable \cite[Theorem~3.2]{Kuang1999}. When the hypothesis $D{=}D_1{=}D_2$ does not hold, they provide numerical evidence that stability of the coexistence equilibrium breaks down and a cycle appears. A similar model was studied in \cite{Zhang2012} with functional response $f_1(N)$ of Holling type I and $f_2(P)$ of Holling type II, demonstrating the existence of a Hopf bifurcation in response to varying nutrient concentration. These results inspire our work. Our goal is to prove analytically that the system undergoes a Hopf bifurcation without restricting the forms of the uptake functions or the values of the removal rates $D_1$ and $D_2$.
This paper is organized as follows. In section \ref{Equilibria}, conditions for existence and local stability of predator-free equilibria are obtained for general functional responses $f_1(N)$ and $f_2(P)$. In section \ref{Coexistence_equilibrium_stability}, we study stability of a coexistence equilibrium which appears as the parameter $\mu$ increases. We quote a version of the Hopf bifurcation theorem, stating the result in a limited form most useful for us. Accordingly, application of this theorem requires us to develop specific information about the behavior of eigenvalues of the linearizations as the parameter $\mu$ increases. In section \ref{Cycles}, we use a smooth change of variables to reach a situation where the Hopf bifurcation theorem in three dimensions applies, enabling us to conclude the existence of cycles. In section \ref{Examples}, our results are illustrated using simulations arising by choosing rate functions of Holling type II and Holling type III forms. In section \ref{Appendix}, we explain in detail the approximations and estimates that support our work in the latter part of section \ref{Coexistence_equilibrium_stability}.
\section{Steady States and Their Stability} \label{Equilibria} To begin, we establish that the solutions of \eqref{NPZsys} are nonnegative and bounded. These are minimum requirements for a reasonable model of the chemostat. We then develop conditions for the existence and stability of equilibria. We conclude the section by proving uniform persistence in the sense of \cite{Butler1986} when $\mu$ is sufficiently large and the initial values $P_0$ and $Z_0$ are positive. \begin{lemma} \label{positivity_boundedness} All solutions $N(t)$, $P(t)$, and $Z(t)$ of \eqref{NPZsys} are nonnegative and bounded. \end{lemma}
\begin{proof} The plane $Z = 0$ is invariant for system \eqref{NPZsys}. Therefore, by existence and uniqueness, if $Z_0 > 0$ then $Z(t) > 0$ for all $t \geq 0$. Similarly, since $f_2(0)=0$, the plane $P = 0$ is invariant, so $P_0 > 0$ implies $P(t) > 0$ for all $t \geq 0$.
Suppose $N_0 > 0$. If there exists a $t > 0$ with $N(t) = 0$, then there is a least such number, say $t_0$. Then $N'(t_0) = \mu D > 0$ since $f_1(0)=0$. Consequently, there is $t<t_0$ such that $N(t) < 0$, which is a contradiction to the choice of $t_0$.
For the boundedness of solutions, set $U(t) = N(t) + \gamma_1^{-1}P(t) +(\gamma_1 \gamma_2)^{-1}Z(t)$. From system \eqref{NPZsys}, it follows that
\begin{equation*} U'(t) = D \mu - D N(t) -\gamma_1^{-1} D_1 P(t)
-(\gamma_1 \gamma_2)^{-1}D_2 Z(t) \leq D \mu - \widehat{D} U(t), \end{equation*}
where $\widehat{D} = \min\{D,D_1,D_2\}$. Then \begin{align*}
U(t) &\leq (D\mu)/{\widehat{D}} + \bigl(U(0) - (D\mu)/\widehat{D} \bigr) e^{-\widehat{D}t}
\\
&\leq
\begin{cases} U(0), & \text{if $U(0) > (D\mu)/\widehat{D}$,} \notag
\\
(D\mu)/\widehat{D}, & \text{if $U(0) \leq (D\mu)/\widehat{D}$.}
\end{cases} \end{align*} Since $N(t)$, $P(t)$, and $Z(t)$ are nonnegative, the boundedness of $U(t)$ implies the boundedness of $N(t)$, $P(t)$, and $Z(t)$. \end{proof}
There are at most three biologically relevant equilibria of system \eqref{NPZsys} depending on the value of $\mu$. The equilibria and the conditions of their existence are summarized in the following theorem.
\begin{theorem} \label{existence equilibria NPZ} Let $\lambda_P(D_1)$ and $\lambda_Z(D_2)$ be as in \eqref{breakeven}.
The equilibria of the system \eqref{NPZsys} satisfy the following conditions: \begin{enumerate}
\item The washout equilibrium $E_{0} = (\mu,0,0)$ exists for all $\mu > 0$.
\item The single-species equilibrium $E_1(\mu, D_1) = ( \lambda_P(D_1), P(\mu, D_1), 0)$ exists for all $\mu > \lambda_P(D_1)$,
where
\begin{equation}
\label{Poneformula}
P(\mu, D_1) = \bigl(\mu - \lambda_P(D_1)\bigr) \frac{D \gamma_1}{D_1}.
\end{equation}
\item The coexistence equilibrium
\begin{equation*} E_2(\mu, D_1, D_2) = \bigl(N(\mu, D_1, D_2), \lambda_Z(D_2), Z(\mu, D_1, D_2)\bigr)
\end{equation*} exists for all $\displaystyle{\mu > \mu_{ c_1}(D_1,D_2) } $, where
\begin{equation}\label{mu_{c1}}
\mu_{c_1}(D_1,D_2)= \lambda_P(D_1) + \frac{D_1 \lambda_Z(D_2)}{D\gamma_1}
\end{equation} and $N(\mu, D_1, D_2)$, $Z(\mu, D_1, D_2)$ satisfy the simultaneous equations \begin{gather}
\bigl(\mu - N(\mu, D_1, D_2)\bigr) D - \lambda_Z(D_2) f_1\bigl(N(\mu, D_1, D_2)\bigr) = 0, \label{1steqnwithZ_2notzero} \\
\gamma_1 \lambda_Z(D_2) f_1\bigl(N(\mu, D_1, D_2)\bigr) - D_1 \lambda_Z(D_2)
- Z(\mu, D_1, D_2) f_2\bigl(\lambda_Z(D_2)\bigr) = 0. \label{2ndeqnwithZ_2notzero}
\end{gather} \end{enumerate} \end{theorem} Thus, for $\mu \leq \lambda_P(D_1)$ only the equilibrium $E_0$ exists; for $\lambda_P(D_1) < \mu \leq \mu_{c_1}(D_1,D_2)$, there are two equilibria $E_0, E_1$; and, when $ \mu_{c_1}(D_1,D_2) < \mu$, there are three equilibria $E_0$, $E_1$, and $E_2$. \begin{proof} From the $Z$-equation, either $Z = 0$ or $\gamma_2 f_2(P) - D_2 = 0$ (so that $P=\lambda_Z(D_2)$). If $Z = 0$, the $P$-equation yields \begin{equation*}
0 = \gamma_1 P f_1(N) - D_1 P, \end{equation*} which implies either $P = 0$ or $\gamma_1 f_1(N) - D_1 =0$ (so that $N=\lambda_P(D_1)$).
If $Z=0$ and $P=0$, then the $N$-equation gives $0= (\mu - N) D $, so that $N = \mu$. Thus, the washout equilibrium is given by $E_0 = (\mu, 0, 0)$ and exists for $\mu > 0$. Note that as $\mu$ increases, $E_0$ moves along the $N$-axis in ${\mathbf R}_{\geq 0}^{3}$. This is the proof of the first part of the theorem.
If $Z = 0$ and $P \neq 0$, then $N = \lambda_P(D_1)$ in the $N$-equation gives \begin{equation} \label{P_1 equation}
0 = \bigl(\mu - \lambda_P(D_1)\bigr) D - P \frac{D_1}{\gamma_1}, \quad \text{with solution} \quad P = \bigl(\mu - \lambda_P(D_1)\bigr) \frac{D \gamma_1}{D_1}\mathrel{\mathop :}= P(\mu, D_1). \end{equation} Note that $P(\mu, D_1)>0$ for all $\mu>\lambda_P(D_1)$, and so the single-species equilibrium $E_{1}(\mu, D_1) = (\lambda_P(D_1), P(\mu, D_1),0)$ exists in the positive cone for all $\mu > \lambda_P(D_1)$. This is the proof of the second part of the theorem.
If $Z \neq 0$, then $P = \lambda_Z(D_2)$ in the $N$- and $P$-equations, and equations \eqref{1steqnwithZ_2notzero} and \eqref{2ndeqnwithZ_2notzero}
define $N$ and $Z$ as implicit functions of $\mu$, $D_1$, and $D_2$.
We now determine the critical value $\mu_{c_1}(D_1,D_2)$ of $\mu$ at which $E_2(\mu, D_1, D_2)$ first appears in the positive cone.
Equation \eqref{2ndeqnwithZ_2notzero} implies \begin{equation} \label{valueZ_2} Z(\mu, D_1, D_2) = \frac{\gamma_2}{D_2}\lambda_Z(D_2)\bigl(\gamma_1f_1\bigl(N(\mu, D_1, D_2)\bigr) - D_1 \bigr). \end{equation} Since $f_1$ is strictly increasing, $Z(\mu, D_1, D_2) = 0$ if and only if $N(\mu, D_1, D_2) = \lambda_P(D_1)$, and $Z(\mu, D_1, D_2) > 0$ for $N(\mu, D_1, D_2) > \lambda_P(D_1)$. Substituting $N(\mu, D_1, D_2) = \lambda_P(D_1)$ in \eqref{1steqnwithZ_2notzero}, we obtain the equation \begin{equation*}
0 = \bigl( \mu - \lambda_P(D_1)\bigr) D - \lambda_Z(D_2) \frac{D_1}{\gamma_1} \; \text{with solution} \; \mu = \lambda_P(D_1) + \frac{D_1 \lambda_Z(D_2)}{D\gamma_1} \mathrel{\mathop :}= \mu_{c_1}(D_1,D_2). \end{equation*} Thus, $Z(\mu, D_1, D_2)$ is positive when $\mu > \mu_{c_1}(D_1, D_2)$, and the coexistence equilibrium $E_2 (\mu, D_1, D_2)= \bigl(N(\mu, D_1, D_2), \lambda_Z(D_2), Z(\mu, D_1, D_2)\bigr)$ exists in the positive cone for all $\mu > \mu_{c_1}(D_1, D_2)$. This proves part three of the theorem. \end{proof} In Theorems~\ref{NPZ stability E_0}, \ref{NPZ stability of E_1}, and \ref{coexistence_equilibrium_1} we investigate the stability of the equilibria of system \eqref{NPZsys} by finding the eigenvalues of the associated Jacobian matrices. The Jacobian matrix of the system \eqref{NPZsys} takes the form
\begin{equation} \label{Jacobian} J =
\begin{bmatrix}
-D & -f_{1}(N) & 0\\ \gamma_{1}P f_1'(N) & \gamma_{1}f_{1}(N)-D_{1}-Z f_2'(P) & -f_{2}(P) \\ 0 & \gamma_{2}Z f_2'(P) & \gamma_{2}f_{2}(P) -D_{2} \end{bmatrix} . \end{equation}
We first summarize the stability of $E_0$ in the following theorem. Here, the breakeven concentration of nutrient given in \eqref{breakeven} plays a critical role. \begin{theorem} \label{NPZ stability E_0}
The equilibrium point $E_{0}$ is locally asymptotically stable if $\mu < \lambda_P(D_1)$ and unstable if $\mu > \lambda_P(D_1)$. When $\mu > \lambda_P(D_1)$, $E_0$ is globally asymptotically stable with respect to solutions initiating in $\{(N,P,Z) \in {\mathbf R}^3_+ \mid P = 0 \}$. That is, the plane $P=0$ is $m^+(E_0)$, the stable manifold of $E_0$. \end{theorem} \begin{proof} The Jacobian at $E_{0}=(\mu, 0 ,0)$ is \begin{equation*} J(E_{0}) =
\begin{bmatrix}
-D & -f_{1}(\mu) & 0\\ 0 & \gamma_{1}f_{1}(\mu)-D_{1}& 0 \\ 0 & 0 & -D_{2} \end{bmatrix}, \end{equation*} so that the eigenvalues of $J(E_0)$ are $x_1 = -D, x_2 = \gamma_1 f_1(\mu) - D_1$, and $\displaystyle{x_3 = -D_2 }$. The stability of $E_0$ now follows from \eqref{breakeven} and the fact that $f_1$ is strictly increasing: $x_2<0$ when $\mu<\lambda_P(D_1)$ and $x_2>0$ when $\mu>\lambda_P(D_1)$. To see that $m^+(E_0)= \{(N,P,Z) \in {\mathbf R}^3_+ \mid P = 0 \}$ when $\mu>\lambda_P(D_1)$, consider the Lyapunov function \begin{equation*}
L(N,Z) = \frac{(\mu -N)^2}{2} + \frac{Z^2}{2}. \end{equation*} Clearly, $L(\mu,0) = 0$ and $L(N,Z) > 0$ if $(N,Z) \neq (\mu,0) $.
The time derivative of $L(N,Z)$ at a point $(N,0,Z)$ on a trajectory of system \eqref{NPZsys} is
\begin{equation*}
L'(N,Z) = -D(\mu-N)^2-D_2Z^2 < 0
\end{equation*} for $(N,Z) \ne (\mu,0)$. Thus $E_0$ is globally asymptotically stable in the plane $P = 0$. \end{proof}
For $\mu=\lambda_P(D_1)$, $P(\lambda_P(D_1), D_1)=0$, so that $E_0$ and $E_1$ coalesce (see equation \eqref{Poneformula}). When $\mu > \lambda_P(D_1) $, $E_1(\mu,D_1) = (\lambda_{P}(D_1),P(\mu, D_1),0)$ enters the positive cone. We summarize the stability of $E_{1}(\mu, D_1)$ in the following theorem. Note that the critical value of $\mu$ given in \eqref{mu_{c1}} now plays a central role. \begin{theorem} \label{NPZ stability of E_1} The equilibrium point $E_1(\mu, D_1)$ is locally stable if $\lambda_P(D_1)< \mu < \mu_{c_1}(D_1,D_2)$ and unstable if $\mu > \mu_{c_1}(D_1,D_2)$. When $\mu > \mu_{c_1}(D_1,D_2)$, $E_1(\mu, D_1)$ is globally asymptotically stable with respect to solutions initiating in $\{(N,P,Z) \in {\mathbf R}^3_+ \mid Z = 0 \}$. That is, the plane $Z=0$ is $m^+\bigl(E_1(\mu, D_1)\bigr)$, the stable manifold of $E_1(\mu, D_1)$. \end{theorem} \begin{proof} The Jacobian matrix at $E_1(\mu, D_1)$ is \begin{equation*} J\bigl(E_{1}(\mu, D_1)\bigr) =
\begin{bmatrix}
-D-P(\mu, D_1) f_1'\bigl(\lambda_{P}(D_1)\bigr) & -f_{1}\bigl(\lambda_{P}(D_1)\bigr) & 0\\ \gamma_{1}P(\mu, D_1) f_1'\bigl(\lambda_{P}(D_1)\bigr) & 0& -f_{2}\bigl(P(\mu, D_1)\bigr) \\ 0 & 0 & \gamma_{2}f_{2}\bigl(P(\mu, D_1)\bigr)-D_{2} \end{bmatrix}. \end{equation*} The determinant of the upper left-hand $2$-by-$2$ submatrix is positive and its trace is negative, so its eigenvalues have negative real parts. The third eigenvalue is $x_3 = \gamma_{2}f_{2}\bigl(P(\mu, D_1)\bigr)-D_{2}$. If $\mu < \mu_{c_1}(D_1,D_2)$, so that $P(\mu, D_1) < \lambda_{Z}(D_2)$, then $x_3<0$, and $E_1(\mu, D_1)$ is locally stable. Similarly, if $\mu >\mu_{c_1}(D_1,D_2)$, so that $P(\mu, D_1) > \lambda_Z(D_2)$, then $x_3>0$ and $E_1(\mu, D_1)$ is unstable with one dimension of instability.
To see that $m^+(E_1)=\{(N,P,Z) \in {\mathbf R}^3_+ \mid Z = 0 \}$ when $\mu > \mu_{c_1}$, consider the Lyapunov function introduced by Hsu in \cite{Hsu1978} \begin{multline*}
L(N,P) = \\ \int_{\lambda_P(D_1)}^N \frac{ f_1(n) -f_1\bigl(\lambda_P(D_1)\bigr)}{f_1(n)} \, dn + \frac{1}{\gamma_1}\Bigl( P - P(\mu, D_1) - P(\mu, D_1) \ln \bigl(P/P(\mu, D_1)\bigr) \Bigr). \end{multline*} Notice that $L\bigl(\lambda_P(D_1), P(\mu, D_1)\bigr) = 0$, \begin{equation*}
\frac{\partial L}{\partial N} = \frac{f_1(N) -f_1\bigl(\lambda_P(D_1)\bigr)}{f_1(N)} = 0 \quad \text{and} \quad
\frac{\partial L}{\partial P} = \frac{1}{\gamma_1}\bigl( 1 - P(\mu, D_1)/P \bigr) = 0 \end{equation*} precisely when $(N,P) = \bigl(\lambda_P(D_1),P(\mu, D_1)\bigr)$. Moreover, \begin{align*}
\frac{\partial^2 L}{\partial N^2}\bigl(\lambda_P(D_1), P(\mu, D_1)\bigr) &=
\frac{f'_1\bigl(\lambda_P(D_1)\bigr)}{f_1\bigl(\lambda_P(D_1)\bigr)}>0 \intertext{and}
\frac{\partial^2 L}{\partial P^2}\bigl(\lambda_P(D_1), P(\mu, D_1)\bigr) &= \frac{1}{\gamma_1 P(\mu,D_1)}>0. \end{align*} Therefore, $\bigl(\lambda_P(D_1), P(\mu, D_1)\bigr)$ is the only critical point of $L(N,P)$ and it is a local minimum, so that $L(N,P) > 0$ for all $(N,P) \neq \bigl(\lambda_P(D_1), P(\mu, D_1)\bigr)$.
Now we compute the time derivative of $L(N,P)$ at a point $(N,P,0)$ along a trajectory of system \eqref{NPZsys}. Noting from \eqref{breakeven} and \eqref{Poneformula} that \begin{equation*}
f_1\bigl(\lambda_P(D_1)\bigr)=\frac{D_1}{\gamma_1} \quad \text{and} \quad P(\mu,D_1) = \frac{\bigl(\mu-\lambda_P(D_1)\bigr)D}{f_1\bigl(\lambda_P(D_1)\bigr)}, \end{equation*} we have \begin{equation*}
\begin{split}
L'(N,P) &=\frac{ f_1(N) -f_1\bigl(\lambda_P(D_1)\bigr) }{f_1(N)}\bigl((\mu-N)D - P f_1(N) \bigr) \\
& \quad \quad \quad \quad + \frac{1}{\gamma_1}\biggl( 1 - \frac{P(\mu, D_1)}{P} \biggr) (\gamma_1 f_1(N) - D_1) P \\ &= \bigl(f_1(N)-f_1\bigl(\lambda_P(D_1)\bigr)\bigr)
\biggl( \frac{(\mu-N)D}{f_1(N)} - \frac{\bigl(\mu-\lambda_P(D_1)\bigr)D}{f_1\bigl(\lambda_P(D_1)\bigr)}\biggr) \\ &= \bigl(f_1(N)-f_1\bigl(\lambda_P(D_1)\bigr)\bigr) \biggl( \frac{(\mu-N)}{\bigl(\mu-\lambda_P(D_1)\bigr)}
- \frac{f_1(N)}{f_1\bigl(\lambda_P(D_1)\bigr)}\biggr)
\frac{\bigl(\mu - \lambda_P(D_1)\bigr) D}{f_1(N)}. \end{split} \end{equation*} If $N < \lambda_P(D_1)$, then $f_1(N)-f_1\bigl(\lambda_P(D_1)\bigr) < 0$, so that $f_1(N)/f_1\bigl(\lambda_P(D_1)\bigr) < 1$. Also, $\mu{-}N > \mu{-}\lambda_P(D_1)$, so that \begin{equation*} \frac{\mu {-} N}{\mu {-} \lambda_P(D_1)} > 1 > \frac{f_1(N)}{f_1\bigl(\lambda_P(D_1)\bigr)},\;\text{and}\; \frac{\mu {-} N}{\mu {-} \lambda_P(D_1)} - \frac{f_1(N)}{f_1\bigl(\lambda_P(D_1)\bigr)} > 0. \end{equation*} Thus, $L^{\prime}(N,P) < 0$ when $N < \lambda_P(D_1)$.
If $N>\lambda_P(D_1)$, then $f_1(N)-f_1\bigl(\lambda_P(D_1)\bigr)>0$, so that $f_1(N)/f_1\bigl(\lambda_P(D_1)\bigr)>1$. When $\mu > N > \lambda_P(D_1)$, we have $\mu {-} N < \mu {-} \lambda_P(D_1)$, so that \begin{equation*} \frac{\mu - N}{\mu - \lambda_P(D_1)} < 1 < \frac{f_1(N)}{f_1\bigl(\lambda_P(D_1)\bigr)}, \end{equation*} while for $N \ge \mu$ we have $\displaystyle{\frac{\mu - N}{\mu - \lambda_P(D_1)} < 0}$. In either case, $\displaystyle{\frac{\mu - N}{\mu - \lambda_P(D_1)} - \frac{f_1(N)}{f_1\bigl(\lambda_P(D_1)\bigr)} < 0}$, and so $L^{\prime}(N,P) < 0$ when $N>\lambda_P(D_1)$.
Finally, $L^{\prime}(N,P) = 0$ if and only if $N = \lambda_P(D_1)$. By LaSalle's extension theorem \cite{LaSalleLefschetz1961}, any trajectory of system \eqref{NPZsys} in the plane $Z=0$ for which $P_0>0$ approaches the largest invariant set in the line $N= \lambda_P(D_1)$, and this is simply $\{E_1(\mu, D_1)\}$. Therefore, $E_1(\mu, D_1)$ is globally asymptotically stable in the plane $Z=0$. \end{proof} When $\mu = \mu_{c_1}(D_1, D_2)$, $E_1(\mu, D_1)$ and $E_2(\mu, D_1, D_2)$ coalesce. With $\mu = \mu_{c_1}(D_1, D_2)$, we have $P\bigl(\mu_{c_1}(D_1, D_2), D_1\bigr) = \lambda_Z(D_2)$ (see equations \eqref{mu_{c1}} and \eqref{Poneformula}). Also $N(\mu, D_1, D_2)=\lambda_P(D_1)$, so that $Z(\mu, D_1, D_2)=0$ (see the discussion around \eqref{valueZ_2}). Thus, \begin{equation*} E_2(\mu_{c_1}(D_1, D_2), D_1, D_2) = (\lambda_P(D_1), \lambda_Z(D_2), 0) = E_1(\mu_{c_1}(D_1, D_2), D_1). \end{equation*} Said another way, as $\mu$ increases through $\mu_{c_1}(D_1,D_2)$, $E_2(\mu, D_1, D_2)$ enters the positive cone by passing through $E_1(\mu, D_1)$.
With $f_2\bigl(\lambda_Z(D_2)\bigr) = D_2/\gamma_2$, the Jacobian matrix at $E_2(\mu, D_1, D_2)$ takes the form \begin{multline} \label{JacobianE2} J\bigl(E_{2}(\mu, D_1, D_2)\bigr) = \\
\begin{bmatrix}
-D{-}\lambda_Z(D_2)f_1'(N(\mu,\! D_1,\! D_2)) & \mspace{-15mu} -f_{1}(N(\mu, \! D_1,\! D_2)) & \mspace{-1mu} 0 \\ \gamma_{1}\lambda_{Z}(D_2)f_1'(N(\mu,\! D_1,\! D_2))
&\mspace{-15mu}
\begin{matrix}
\Bigl( \gamma_{1}f_{1}(N(\mu,\! D_1, \!D_2)){-}D_{1} \mspace{15mu} \\
\mspace{45mu} {-}Z(\mu,\! D_1,\! D_2)
f_2'(\lambda_{Z}(D_2)) \Bigr)
\end{matrix}
&\mspace{-1mu} -D_2/\gamma_2 \\ 0 & \mspace{-15mu} \gamma_{2}Z(\mu,\! D_1,\! D_2) f_2'(\lambda_{Z}(D_2)) & \mspace{-1mu} 0 \end{bmatrix}. \end{multline} The eigenvalues of $J\bigl(E_{2}(\mu, D_1, D_2)\bigr)$ satisfy \begin{equation*} x^3 +a_{1}x^2 + a_{2}x +a_{3} = 0, \end{equation*} where \begin{align}
a_{1}(\mu, D_1, D_2) &= Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr)+\lambda_{Z}(D_2)f_1'\bigl(N(\mu, D_1, D_2)\bigr) \notag \\ &\quad \; -\gamma_{1}f_{1}(N(\mu, D_1, D_2))+D_{1}+D, \label{JE2coefficienta1} \\
a_{2}(\mu, D_1, D_2) &= \lambda_Z(D_2) Z(\mu, D_1, D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr) f_2'(\lambda_{Z}(D_2)) \notag \\ &\quad \;+ D_2Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr)+ D Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) \notag \\ &\quad \quad + D_{1}\lambda_{Z}(D_2)f_1'\bigl(N(\mu, D_1, D_2)\bigr) -
D \gamma_{1}f_{1}\bigl(N(\mu, D_1, D_2)\bigr) + D D_{1}, \label{JE2coefficienta2} \intertext{and} a_{3}(\mu, D_1, D_2)
&=D_{2}Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr)\Bigl(D+\lambda_{Z}(D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr) \Bigr).\label{JE2coefficienta3} \end{align} \begin{theorem}\label{coexistence_equilibrium_1} The coexistence equilibrium point \begin{equation*}
E_2(\mu, D_1, D_2) = E_2\bigl(N(\mu, D_1, D_2),\lambda_{Z}(D_2),Z(\mu, D_1, D_2)\bigr) \end{equation*} is asymptotically stable if and only if $a_1 > 0$ and $ a_1 a_2 > a_3$. \end{theorem} \begin{proof} Since $a_{3}$ is positive, this follows from the Routh-Hurwitz criterion. \end{proof}
We conclude this section with a significant strengthening of Lemma \ref{positivity_boundedness}. We use the concept of uniform persistence introduced in \cite{Butler1986}. \begin{theorem}
\label{uniform persistence}
Assume that $\mu > \mu_{c_1}(D_1, D_2)$. Then system \eqref{NPZsys} is uniformly persistent with respect to all solutions satisfying $P_0 > 0$ and $Z_0 > 0$. \end{theorem} \begin{proof} Recall from Lemma \ref{positivity_boundedness} that all solutions of system \eqref{NPZsys} for which $P_0 > 0$ and $Z_0 > 0$ are positive and bounded.
We first show that $\liminf_{t \rightarrow \infty}{N(t) > 0}$. If $\liminf_{t \rightarrow \infty} N(t) = 0$ and $\limsup_{t \rightarrow \infty}N(t) = 0$, then $\lim_{t \rightarrow \infty} N(t) = 0$. But this is impossible,
for then it follows from the $N$-equation that $N'(t) \rightarrow \mu D > 0$ as $t \rightarrow \infty$ and this, in turn, contradicts the fact that $N(t)$ is bounded.
Now, suppose $\liminf_{t \rightarrow \infty}{N(t) = 0}$ while $\limsup_{t \rightarrow \infty}{N(t) > 0}$. Then there exists a sequence $\{ \tau_n \}_{n=1}^\infty$ of local minima of $N(t)$ satisfying $\tau_n \rightarrow \infty$ as $n \rightarrow \infty$. Thus, \begin{enumerate}
\item $N'(\tau_n) = 0$, since $\tau_n$ is a local minimum, and
\item $N(\tau_n) \rightarrow 0$ as $n \rightarrow \infty$, since $\liminf_{t \rightarrow \infty}{N(t) = 0}$. \end{enumerate} From the $N$-equation we have \begin{equation*}
N'(\tau_n) = \mu D - \Bigl( N(\tau_n)D + P(\tau_n)f_1\bigl(N(\tau_n)\bigr) \Bigr), \end{equation*} so that
\begin{equation*}
0 = \bigabs{N'(\tau_n)} \geq \bigabs{\mu D} - \Bigabs{ N(\tau_n)D + P(\tau_n)f_1\bigl(N(\tau_n)\bigr) }. \end{equation*} Rearranging and using the facts that $N(\tau_n)\rightarrow0$ as $n\rightarrow\infty$, $f_1$ is continuous and $f_1(0)=0$, we get \begin{equation*}
0 = \lim_{n \rightarrow \infty} \Bigabs{ N(\tau_n)D + P(\tau_n)f_1\bigl(N(\tau_n)\bigr) } \geq \mu D > 0, \end{equation*} a contradiction. Hence, $\liminf_{t \rightarrow \infty}{N(t) > 0}$.
Choose $X(0) = \bigl(N_0, P_0,Z_0\bigr) \in {\mathbf R}^3_+$. Then $\omega \bigl(X(0) \bigr)$ is a nonempty, compact invariant set with respect to system \eqref{NPZsys}. We claim $E_0 = (\mu,0,0)$ and $E_1(\mu, D_1) = (\lambda_P(D_1),P(\mu, D_1),0)$ are not in $\omega(X(0))$. Suppose $E_0 =(\mu, 0,0) \in \omega \bigl(X(0) \bigr)$. Since $\mu > \mu_{c_1}(D_1,D_2)$, $E_0$ is an unstable hyperbolic equilibrium point. By theorem \ref{NPZ stability E_0}, $E_0$ is globally asymptotically stable with respect to solutions initiating in the plane $P=0$. Since $X(0) \not \in m^+(E_0)$, $\{ E_0\} \neq \omega \bigl(X(0) \bigr)$. By the Butler-McGehee lemma (see lemma A1, \cite{ButlerMcGeheelemma}), there exists $Q \in \bigl( m^+(E_0) \setminus \{ E_0\}\bigr) \cap \omega \bigl(X(0) \bigr)$, so that $\cl \mathcal{O}(Q) \subset \omega(X(0))$. For such an initial condition, the governing system is $\displaystyle{N'(t) = D\bigl(\mu -N(t)\bigr)}$, and $\displaystyle{Z'(t) = -D_2 Z(t)}$. But then $\mathcal{O}(Q)$ becomes unbounded as $t \rightarrow - \infty$. This is a contradiction to the compactness of $\omega \bigl(X(0) \bigr)$, and so $E_0 \not \in \omega \bigl(X(0) \bigr)$.
Suppose $E_1(\mu, D_1) = (\lambda_P(D_1), P(\mu, D_1),0)$ is in $\omega(X(0))$. Since $\mu > \mu_{c_1}(D_1,D_2)$, $E_1(\mu, D_1)$ is an unstable hyperbolic equilibrium point. By theorem \ref{NPZ stability of E_1} $E_1(\mu, D_1)$ is globally asymptotically stable with respect to solutions initiating in the plane $Z = 0$. Since $X(0) \not \in m^+\bigl(E_1(\mu, D_1)\bigr)$, $\{ E_1(\mu, D_1)\} \neq \omega \bigl(X(0) \bigr)$. By the Butler-McGehee lemma, there exists $\widehat{Q} \in \bigl( m^+\bigl(E_1(\mu, D_1)\bigr) \setminus \{ E_1(\mu, D_1)\}\bigr) \cap \omega \bigl(X(0) \bigr)$, so that $\cl \mathcal{O}(\widehat{Q}) \subset \omega(X(0))$. If $\widehat{Q} \in m^-(E_0)$, then $E_0 \in \cl{\mathcal{O}(\widehat{Q})} \subset \omega(X(0))$, a contradiction. Thus $\widehat{Q} \not \in m^-(E_0)$, and this implies $\mathcal {O}(\widehat{Q})$ is unbounded as $t \rightarrow -\infty$, contradicting the compactness of $\omega(X(0))$. Therefore, $E_1(\mu, D_1) \not \in \omega(X(0))$.
Suppose the system \eqref{NPZsys} is not persistent. Then there exists $\widetilde{Q} \in \omega(X(0))$ such that $\widetilde{Q} \in m^+(E_0)$ or $\widetilde{Q} \in m^+\bigl(E_1(\mu, D_1)\bigr)$. Then $\cl {\mathcal{O}(\widetilde{Q})} \subset \omega(X(0))$,
which implies either $E_0 \in \omega(X(0))$ or $E_1(\mu, D_1) \in \omega(X(0))$, neither of which can be true. Thus, $\liminf_{t \rightarrow \infty}P(t) > 0$ and $\liminf_{t \rightarrow \infty}Z(t) > 0$, and it follows from the main result of \cite{Butler1986} that system \eqref{NPZsys} is uniformly persistent. \end{proof}
\section{Stability of the coexistence equilibrium} \label{Coexistence_equilibrium_stability} In this section we study the coexistence equilibrium point \begin{equation*}
E_2(\mu, D_1, D_2) = \bigl(N(\mu, D_1, D_2), \lambda_Z(D_2), Z(\mu, D_1, D_2)\bigr) \end{equation*} first when $D{=}D_1{=}D_2$ and $\mu$ varies and then after relaxing the assumption $D{=}D_1{=}D_2$. In particular, we study the eigenvalues of the Jacobians at the coexistence equilibria as $\mu$ increases. To prepare for the study of the evolution of the equilibrium $E_2(\mu, D_1, D_2)$, we observe the following consequence of the implicit function theorem \cite[p.122]{LangAnalysisII}.
\begin{lemma}\label{NZsmoothness} For $D_1> 0$, $D_2>0$, and $\mu>\mu_{c_1}(D_1,D_2)$ there is a local parameterization of the locus of coexistence equilibria \begin{equation*}
E_2(\mu, D_1, D_2) = \bigl(N(\mu, D_1, D_2), \lambda_Z(D_2), Z(\mu, D_1, D_2)\bigr) \end{equation*} defined on an interval containing $\mu$ and a disc around $(D_1, D_2)$ that is smooth to the smaller of the degree of smoothness of $f_1(N)$ and the degree of smoothness of $f_2(P)$. \end{lemma}
\begin{proof} Set $P=\lambda_Z(D_2)$ in the $N$- and $P$- equations of system \eqref{NPZsys}. With $f_2(\lambda_Z(D_2)) = D_2/\gamma_2$ from \eqref{breakeven}, let \begin{equation*}
G_1(N, Z, \mu, D_1, D_2) = D(\mu - N ) - f_1(N)\lambda_Z(D_2) \end{equation*} and \begin{equation*}
G_2(N, Z, \mu, D_1, D_2) = \gamma_1f_1(N)\lambda_Z(D_2) - D_1\lambda_Z(D_2) - (D_2/\gamma_2) Z, \end{equation*} and define $G \colon {\mathbf R}^5 \rightarrow {\mathbf R}^2 $ by $G(N, Z, \mu, D_1, D_2) = \bigl(G_1(N,Z,\mu, D_1, D_2), G_2(N,Z,\mu, D_1, D_2)\bigr)$. Then we want to parametrize the set $G^{-1}(0,0)$.
The derivative of $G$ is represented by the matrix \begin{equation*}
DG =
\begin{bmatrix}
\partial G_1/\partial N & \partial G_1/\partial Z & \partial G_1 /\partial \mu
& \partial G_1/\partial D_1 & \partial G_1/\partial D_2 \\
\partial G_2/\partial N & \partial G_2/\partial Z & \partial G_2 / \partial \mu
& \partial G_2/\partial D_1 & \partial G_2/\partial D_2
\end{bmatrix}. \end{equation*} Observe that, for fixed $\mu_0>\mu_{c_1}(D_1,D_2)$, the first two columns of $DG$ at the point \begin{equation*} (N(\mu_0, D_1, D_2), Z(\mu_0,D_1, D_2), \mu_0, D_1, D_2) \end{equation*} evaluate to
\begin{equation*}
\begin{bmatrix}
-D - f'_1\bigl(N(\mu_0, D_1, D_2)\bigr)\lambda_Z(D_2) & 0 \\
\gamma_1 f_1'\bigl(N(\mu_0, D_1, D_2)\bigr) \lambda_Z(D_2) & -D_2/\gamma_2
\end{bmatrix}. \end{equation*} Since $f_1'(N) > 0$, the first two columns are linearly independent and so the implicit function theorem applies. There exists a ball $B_1$ around $(\mu_0, D_1, D_2)$ and a ball $B_2$ around $\bigl(N(\mu_0, D_1, D_2), Z(\mu_0, D_1, D_2)\bigr)$ such that for each $(\mu, D_1, D_2)$ in $B_1$ there is a unique point $\bigl(N(\mu, D_1, D_2), Z(\mu, D_1, D_2)\bigr)$ in $B_2$ such that $G\bigl( N(\mu, D_1, D_2) , Z(\mu, D_1, D_2), \mu, D_1, D_2 \bigr) =0$. Moreover, the functions $N(\mu,D_1, D_2)$ and $Z(\mu,D_1, D_2)$ have the same degree of differentiability as does $G$, which is the minimum of the degrees of differentiability of $f_1(N)$ and $f_2(P)$. To explain how the differentiability of $f_2$ enters, note that the computation of $\partial N/\partial D_2$ and $\partial Z/\partial D_2$ via $\partial G/\partial D_2$ involves $\lambda_Z'(D_2) = \bigl( \gamma_2\cdot f_2'(\lambda_Z(D_2))\bigr)^{-1}$ by \eqref{lambdaderivs}. This all gives us a smooth parameterization of the equilibrium locus, as desired. \end{proof}
\begin{remark}
From the point of view of calculus, each of the independent variables $\mu$, $D_1$, $D_2$ is on an equal footing with the others. However, viewed through the lens of the structure of system \eqref{NPZsys}, we can describe the variable $\mu$ as a control parameter and the variables $D_1$ and $D_2$ as experimental parameters fixed at some earlier time. This distinction informs our analysis where we first consider the model when $D{=}D_1{=}D_2$ as $\mu$ varies, and subsequently vary $D_1$ and $D_2$ in a neighborhood of $D$. \end{remark}
\begin{theorem} \label{boundedness of Z_2} Fix $D_1$ and $D_2$ and suppose $\mu > \mu_{c_1}(D_1,D_2)$, so that the coexistence equilibrium point $ E_2(\mu, D_1, D_2) = \bigl(N(\mu, D_1, D_2), \lambda_Z(D_2), Z(\mu, D_1, D_2)\bigr) $ exists. Then
\begin{enumerate}
\item the $\mu$-derivatives $N'(\mu, D_1, D_2)$ and $Z'(\mu, D_1, D_2)$ are positive.
\item $\lim_{\mu \rightarrow \infty} N(\mu, D_1, D_2) = \infty$ and $Z(\mu, D_1, D_2)$ is bounded.
\end{enumerate} \end{theorem} \begin{proof} Since $D_1$ and $D_2$ are fixed throughout this proof, we drop
these symbols and write simply $N(\mu)$ and $Z(\mu)$. To show $N'(\mu) > 0$, recall equation \eqref{1steqnwithZ_2notzero} from theorem \ref{existence equilibria NPZ}: \begin{equation*}
0 = \bigl(\mu - N(\mu) \bigr)D - \lambda_Z(D_2) \cdot f_1\bigl(N(\mu) \bigr). \end{equation*}
Differentiating \eqref{1steqnwithZ_2notzero} with respect to $\mu$ and rearranging, we get \begin{equation}
N'(\mu) \cdot \Bigl( D + \lambda_Z(D_2) f_1'\bigl(N(\mu)\bigr) \Bigr) = D. \end{equation} Since $f_1'(N)$ is positive, it follows that $N'(\mu)$ is positive.
To show $Z'(\mu) > 0$,
set $f_2\bigl(\lambda_Z(D_2)\bigr) = D_2/\gamma_2$ in equation \eqref{2ndeqnwithZ_2notzero}, obtaining \begin{equation*}
0 = \gamma_1 \lambda_Z(D_2) f_1\bigl(N(\mu) \bigr) -D_1\lambda_Z(D_2) - Z(\mu) \cdot (D_2/\gamma_2). \end{equation*} Differentiating with respect to $\mu$ and rearranging, we get \begin{equation*}
Z'(\mu) = (\gamma_1 \gamma_2/D_2) \cdot \lambda_Z(D_2) \cdot f_1'\bigl(N(\mu)\bigr)\cdot N'(\mu). \end{equation*} Since $N'(\mu)$ and $f_1'(N)$ are positive, it follows that $Z'(\mu)$ is positive. This proves part one.
To prove part two, note that equation \eqref{1steqnwithZ_2notzero} implies \begin{equation*}
N(\mu) = \mu - (\lambda_Z(D_2)/D) \cdot f_1\bigl(N(\mu) \bigr). \end{equation*} Since $f_1(N)$ is bounded, $\lim_{\mu \rightarrow \infty} N(\mu) = \infty$. From \eqref{2ndeqnwithZ_2notzero} we have \begin{equation*}
Z(\mu) =(\gamma_2/D_2)\cdot \bigl( \gamma_1 \lambda_Z(D_2) f_1\bigl(N(\mu) \bigr) -D_1\lambda_Z(D_2) \bigr) . \end{equation*} Since $f_1(N)$ is bounded, $Z(\mu)$ is bounded. \end{proof}
Before we turn to a study of the stability properties of the coexistence equilibrium,
we include a partial paraphrase of the $C^L$ Hopf bifurcation theorem as stated in \cite[p.16]{Hassard81}. Since our goal is to make an application of this result to the coexistence equilibrium $E_2$, and because the verification of the hypotheses is lengthy, we refer to our paraphrase to keep track of progress. \begin{theorem} \label{HassardHopf}
Consider a system $dX/dt = F(X,\mu)$ with $X \in {\mathbf R}^n$ and $\mu$ a real parameter. If,
\begin{enumerate}
\item for $\mu$ in an open interval containing $\mu_c$ (characterized in 3 below), $F(0,\mu) =0$ and $0 \in {\mathbf R}^n$ is an isolated equilibrium point of $dX/dt{=}F(X,\mu)$; \item all partial derivatives of the components $F^{\ell}$ of the vector $F$ of orders $\leq L{+}2$, ($L \geq 2$) exist and are continuous in $X$ and $\mu$ in a neighborhood of $(0,\mu_c)$ in ${\mathbf R}^n{\times}{\mathbf R}$; \item the Jacobian $J(0, \mu)= D_XF(0, \mu)$ has a pair of complex eigenvalues $\alpha(\mu){\pm} i\, \omega(\mu)$, where $\alpha(\mu_c) = 0$ and $\alpha'(\mu_c) \neq 0$; \item the remaining $n{-}2$ eigenvalues of $J(0,\mu_c)$ have strictly negative real parts,
\end{enumerate} then the system $dX/dt = F(X, \mu)$ has a family of periodic solutions. \end{theorem} \begin{remark}
For the purposes of the proof in \cite{Hassard81} the authors assume the critical value of the bifurcation parameter is $\mu_c = 0$, which is a trivial alteration. We are only interested in the existence of cycles, so we do not quote several additional conclusions offered in \cite{Hassard81}. In our situation the equilibrium $E_2$ depends on the parameter $\mu$, an issue which we circumvent in section \ref{Cycles} using the inverse function theorem. Hypothesis 2 is fulfilled by imposing more differentiability conditions on the functions $f_1(N)$ and $f_2(P)$ at an appropriate point in the exposition. Verification of hypotheses 3 and 4 in the statement of theorem \ref{HassardHopf} is the most involved part of our process and occupies the rest of this section. \end{remark} To begin the stability analysis, we conjugate the Jacobian $J(E_2)$ in \eqref{JacobianE2} by \begin{equation*}
W = \begin{bmatrix}
1 & \gamma_1^{-1} & (\gamma_1 \gamma_2)^{-1}
\\
0 & 1 & 0
\\
0 & 0 & 1
\end{bmatrix}. \end{equation*} Using $f_2\bigl(\lambda_Z(D_2)\bigr) = D_2/\gamma_2$ and writing $D_1 = D{+}\epsilon_1$, $D_2 = D{+}\epsilon_2$ yields the matrix \begin{equation*}
WJ(E_2)W^{-1} =
\begin{bmatrix}
-D
& -\gamma_1^{-1}\epsilon_1
& -(\gamma_1 \gamma_2)^{-1}\epsilon_2
\\
\gamma_1 \lambda_Z(D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr)
& A
& B
\\
0 & C & 0
\end{bmatrix}, \end{equation*} where \begin{align*}
A &= \gamma_1 f_1 \bigl(N(\mu, D_1, D_2)\bigr)
- \lambda_Z(D_2) f_1' \bigl(N(\mu, D_1, D_2)\bigr)
- D_1
- Z(\mu, D_1, D_2) f_2'\bigl(\lambda_Z(D_2)\bigr), \\ \begin{split} B &=-( \lambda_Z(D_2) /\gamma_2 ) f_1'\bigl(N(\mu, D_1, D_2)\bigr) - f_2\bigl(\lambda_Z(D_2)\bigr) \\
&= - \bigl( \lambda_Z(D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr) + D_2 \bigr)/\gamma_2<0, \end{split} \\
C &=\gamma_2 Z(\mu, D_1, D_2)f'_2\bigl(\lambda_Z(D_2)\bigr)>0.
\end{align*}
First we make the assumption that $D{=}D_1{=}D_2$, so that $\epsilon_1{=}\epsilon_2{=}0$; that is, we assume the death rates of $P$ and $Z$ are negligible with respect to washout rate $D$. The results of \cite{Kuang1999} suggest this is a useful initial assumption. Since we regard $D$ as fixed for the discussion, we will abbreviate $N(\mu, D, D)$ by $N(\mu)$ and $Z(\mu, D, D)$ by $Z(\mu)$. Similarly, we will abbreviate $E_2(\mu, D, D)$ by $E_2(\mu)$.
We can explicitly compute the eigenvalues of $J\bigl(E_2(\mu)\bigr)$, since conjugation does not change them. The characteristic polynomial of $J\bigl(E_2(\mu)\bigr)$ is \begin{equation} \label{cpJE2DD}
p(x) = (-D - x) ( -BC -Ax + x^2) \end{equation} where \begin{align}
A &= \gamma_1 f_1 \bigl(N(\mu)\bigr) - \lambda_Z(D) f_1' \bigl(N(\mu)\bigr)- D - Z(\mu) f_2'\bigl(\lambda_Z(D)\bigr), \label{Aspecial} \\
B &= - \bigl( \lambda_Z(D) f_1'\bigl(N(\mu)\bigr) + D \bigr)/\gamma_2, \label{Bspecial} \intertext{and}
C &= \gamma_2 Z(\mu)f'_2\bigl(\lambda_Z(D)\bigr). \label{Cspecial} \end{align} The next result amplifies theorem \ref{coexistence_equilibrium_1} in the case $D{=}D_1{=}D_2$.
\begin{theorem} \label{E^*_2} Assume $D{=}D_{1}{=}D_{2}$ and that $\mu > \mu_{c_1}(D,D)$, so the coexistence equilibrium $E_2(\mu) = (N(\mu), \lambda_Z(D), Z(\mu))$ exists. Then \begin{enumerate} \item $E_2(\mu)$ is locally asymptotically stable if \begin{equation*}
Z(\mu) \Bigl(\frac{D}{\gamma_{2}\lambda_{Z}(D)}-f_{2}'\bigl(\lambda_{Z}(D)\bigr)\Bigr) < \lambda_{Z}(D) f_{1}'\bigl(N(\mu)\bigr). \end{equation*} \item $E_2(\mu)$ is unstable if \begin{equation*}
Z(\mu) \Bigl(\frac{D}{\gamma_{2}\lambda_{Z}(D)}-f_{2}'\bigl(\lambda_{Z}(D)\bigr) \Bigr) > \lambda_{Z}(D) f'_1\bigl(N(\mu)\bigr). \end{equation*} \end{enumerate} \end{theorem}
\begin{proof} With $f_2\bigl(\lambda_Z(D)\bigr) = D/\gamma_2$ in \eqref{2ndeqnwithZ_2notzero} we have $\gamma_1f_1\bigl(N(\mu)\bigr)-D = \bigl(DZ(\mu)\bigr)/\bigl(\gamma_2\lambda_Z(D)\bigr)$. Then \begin{align}
A&= \gamma_{1} f_{1} (N(\mu)) - D -Z(\mu)f_{2}'\bigl(\lambda_Z(D)\bigr) - \lambda_Z(D) f_{1}'(N(\mu)) \notag \\ &= Z(\mu) \Bigl(\frac{D}{\gamma_{2}\lambda_Z(D)}-f_{2}'\bigl(\lambda_Z(D)\bigr) \Bigr) -\lambda_Z(D) f_{1}'(N(\mu)).\label{A} \end{align} The result now follows from the Routh-Hurwitz criterion, since $-BC>0$ is easily verified from formulas \eqref{Bspecial} and \eqref{Cspecial}. \end{proof}
From the factorization of the characteristic polynomial of $J\bigl(E_2(\mu)\bigr)$ given in \eqref{cpJE2DD} it is immediate that the Jacobian at $E_2(\mu)$ has one negative eigenvalue. Thus, hypothesis 4 of theorem \ref{HassardHopf} for $E_2(\mu)$ is satisfied in the case $D{=}D_1{=}D_2$. Now we verify hypothesis 3 for this situation. \begin{theorem} \label{stability_special} Assume $D{=}D_{1}{=}D_{2}$ and let $\mu > \mu_{c_1}(D,D)$, so that the coexistence equilibrium $E_2(\mu)= \bigl(N(\mu), \lambda_Z(D), Z(\mu)\bigr)$ exists.
\begin{enumerate}
\item If $f'_1(N)$ is continuous and $D/\bigl(\gamma_2 \lambda_Z(D)\bigr)>f_2'\bigl(\lambda_Z(D)\bigr)$, then there exists a value $\mu_{c_2} > \mu_{c_1}(D,D) $ for which $A(\mu_{c_2}) = 0$. Consequently, when $\mu = \mu_{c_2}$, the Jacobian has a conjugate pair of imaginary eigenvalues.
\item If, in addition, $f_1$ is twice differentiable with respect to $N$ and $f_1^{(2)} \bigl(N(\mu_{c_2})\bigr)< 0$, then $A'(\mu_{c_2}) > 0$. Combining this with part 1, we have that hypothesis 3 of theorem \ref{HassardHopf} is satisfied for $E_2(\mu)$. \item If $f_1^{(2)}(N) < 0$ for all $N$, then $\mu_{c_2}$ is unique. \end{enumerate} \end{theorem} \begin{remark}
If we assume $f^{(2)}_2(P) < 0$, i.e., that $f_2$ is concave down, then the slope of the secant that passes through the points $(0,0)$ and $\bigl(\lambda_Z(D), (D/\gamma_{2})\bigr)$ is greater than the slope of the tangent line to the graph of $f_2$ at $\lambda_{Z}(D)$; that is, $D/\bigl(\gamma_{2}\lambda_Z(D)\bigr) > f_{2}'\bigl(\lambda_{Z}(D)\bigr)$. This will be the case, for example, when $f_2(P)$ is Holling Type II. However, the one-point condition $D/\bigl(\gamma_{2}\lambda_Z(D)\bigr)-f_{2}'\bigl(\lambda_Z(D)\bigr) > 0$ may hold even if $f_2$ is not concave down. We will give such an example in Section \ref{Examples}. \end{remark}
\begin{proof} Consider the expression \begin{equation*}
A(\mu) = Z(\mu) \Bigl(\frac{D}{\gamma_{2}\lambda_Z(D)}-f_{2}'\bigl(\lambda_Z(D)\bigr) \Bigr) -\lambda_Z(D) f_{1}'\bigl(N(\mu)\bigr) \end{equation*} given in \eqref{A}. Since $f_1'(N)$ is a continuous function, $A$ is a continuous function of $\mu$ by Lemma~\ref{NZsmoothness}. To prove that $A(\mu)$ has a zero value for some $\mu>\mu_{c_1}(D,D)$, it is enough to prove that $A(\mu)$ passes from negative to positive. For $\mu =\mu_{c_1}(D,D)$, $Z\bigl(\mu_{c_1}(D,D)\bigr) =0$, so \begin{equation*} A\bigl(\mu_{c_1}(D,D)\bigr) = -\lambda_Z(D) f_{1}'\bigl( N\bigl(\mu_{c_1}(D, D)\bigr) \bigr) < 0. \end{equation*}
To find a value of $\mu>\mu_{c_1}(D, D)$ at which $A(\mu)$ is positive, we use Theorem \ref{boundedness of Z_2}. We have that $Z(\mu)$ is increasing and bounded for $\mu > \mu_{c_1}(D, D)$. Let ${Z_{\infty} = \sup_{\mu \geq \mu_{c_1}(D, D)}Z(\mu)}$. Then there exists an $M_1$ such that for $\mu > M_1$, $Z(\mu) > Z_{\infty}/2$. Since $f_1$ is bounded and increasing, $\lim_{N \to +\infty} f'_1(N) = 0$. Then, for any $\epsilon>0$, there exists an $N_{\epsilon}>0$ such that $0<f_1'(N) < \epsilon$ for all $N>N_{\epsilon}$. With $\epsilon = \bigl(Z_{\infty}/2\lambda_Z(D)\bigr)\cdot\Bigl( D/\bigl(\gamma_2 \lambda_Z(D)\bigr) - f'_2\bigl(\lambda_Z(D)\bigr)\Bigr)$, this implies that there exists an $N^*$ such that, if $N > N^*$, then \begin{equation*}
0 < f'_1(N) < \bigl(Z_{\infty}/2\lambda_Z(D)\bigr) \cdot\Bigl( D/\bigl(\gamma_2 \lambda_Z(D)\bigr) - f'_2\bigl(\lambda_Z(D)\bigr)\Bigr). \end{equation*} In addition, $N(\mu)$ is increasing without bound by theorem \ref{boundedness of Z_2}, so there is an $M_2 > \mu_{c_1}(D, D)$ such that, if $\mu > M_2$, then $N(\mu) > N^*$. Choose $\mu^* > \max\{ M_1,M_2\}$. Then \begin{multline*} A(\mu^*) = Z(\mu^*)\cdot \Bigl(D/\bigl(\gamma_{2}\lambda_Z(D)\bigr)-f_{2}'\bigl(\lambda_Z(D)\bigr) \Bigr) - \lambda_Z(D) f_{1}'\bigl( N(\mu^*) \bigr) \\ > (Z_{\infty}/2)\cdot \Bigl( D/\bigl(\gamma_2 \lambda_Z(D)\bigr) - f'_2\bigl(\lambda_Z(D)\bigr)\Bigr) \\
- \lambda_Z(D) \cdot \bigl(Z_{\infty}/2\lambda_Z(D)\bigr) \cdot \Bigl( D/\bigl(\gamma_2 \lambda_Z(D)\bigr) - f'_2\bigl(\lambda_Z(D)\bigr)\Bigr)
= 0. \end{multline*} Since $A\bigl(\mu_{c_1}(D, D)\bigr) < 0$ and $A(\mu^*) > 0$, there is a number $\mu_{c_2} > \mu_{c_1}(D, D)$ such that $A(\mu_{c_2}) = 0$. Note that when $\mu = \mu_{c_2}$ the discriminant of the quadratic factor of the characteristic polynomial in \eqref{cpJE2DD} is \begin{equation} \label{discriminantspecial}
A(\mu_{c_2})^2 + 4\, B(\mu_{c_2})\cdot C(\mu_{c_2})
= -4\,\bigl(\lambda_Z(D) f_1'\bigl(N(\mu_{c_2})\bigr)+D \bigr)\cdot \bigl(f_2'\bigl(\lambda_Z(D)\bigr) Z(\mu_{c_2})\bigr) < 0, \end{equation} so its roots are indeed purely imaginary. This proves part one.
For part two, by continuity of the discriminant as a function of $\mu$, there is a neighborhood of $\mu_{c_2}$ on which the discriminant is negative. Continuing, differentiate $A(\mu)$ with respect to $\mu$ to obtain
\begin{equation} \label{A'}
A'(\mu) = Z'(\mu) \Bigl( D/\bigl(\gamma_2 \lambda_Z(D)\bigr) - f_2'\bigl(\lambda_Z(D)\bigr) \Bigr)
-\lambda_Z(D) f_1^{(2)}\bigl( N(\mu)\bigr)\cdot N'(\mu). \end{equation} By theorem \ref{boundedness of Z_2}, $N'(\mu)$ and $Z'(\mu)$ are positive. By the hypotheses of the present theorem, $D/\bigl(\gamma_2 \lambda_Z(D)\bigr) - f_2'\bigl(\lambda_Z(D)\bigr) > 0$ and $f_1^{(2)}(N(\mu_{c_2})) < 0$. Thus, $A'(\mu_{c_2}) > 0$. Combining parts one and two means that hypothesis 3 of Theorem \ref{HassardHopf} holds at $\mu_{c_2}$ for the case $D{=}D_1{=}D_2$.
For part three, if $f_1^{(2)}(N) < 0$ for all $N$, then $A'(\mu) > 0$ for $\mu > \mu_{c_1}(D, D)$. \end{proof}
Let us now discuss weakening the condition $D{=}D_1{=}D_2$. Intuitively, for $(D_1, D_2)$ sufficiently close to $(D,D)$ the eigenvalues of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ should exhibit behavior similar to those of $J\bigl(E_2(\mu, D, D)\bigr)$. In particular, the equilibrium $E_2(\mu, D_1, D_2)$ should exhibit a similar loss of stability. To make these considerations precise, we first require lemma \ref{product_decomposition}. \begin{lemma}
\label{product_decomposition} Let $P_1(x)^- = \{ (\alpha - x) \mid \alpha < 0\}$ be the space of polynomials of degree 1 in $x$, with leading coefficient $-1$ and negative constant term, let $P_2(x)^- = \{ \beta - \gamma x + x^2 \mid \gamma^2 - 4\beta < 0\}$ be the space of monic quadratic polynomials in $x$ with real coefficients and having a complex conjugate pair of roots, and let $P_3(x)^- = \{ p_0 + p_1x + p_2x^2 - x^3 \}$ be the space of cubic polynomials in $x$ with leading coefficient~$-1$ and real coefficients.
Then the multiplication map $M \colon P_1^- \times P_2^- \rightarrow P_3^-$ is locally a diffeomorphism. \end{lemma} \begin{proof}
Identify $P_3^-$ with Euclidean space using $p_0$, $p_1$, and $p_2$ as coordinates, identify $P_1^-$ with an open subset of ${\mathbf R}$ using $\alpha$ as the coordinate, and identify $P_2^-$ with an open subset of the plane ${\mathbf R}^2$ using $\beta$ and $\gamma$ as coordinates. Then the map $M$ has the expression \begin{equation*}
M(\alpha, \beta, \gamma) = \bigl(p_0(\alpha, \beta, \gamma), p_1(\alpha, \beta, \gamma), p_2(\alpha, \beta, \gamma)\bigr)
= \bigl(\alpha \beta, (-\alpha \gamma -\beta),( \alpha + \gamma)\bigr). \end{equation*} The derivative, or Jacobian, of $M$ at $(\alpha, \beta, \gamma)$ is \begin{equation*}
DM =
\begin{bmatrix}
\beta & \alpha & 0 \\
-\gamma & -1 & -\alpha \\
1 & 0 & 1
\end{bmatrix}, \end{equation*} which fails to be invertible if and only if \begin{equation*}
\det(DM) =-( \beta - \alpha \gamma + \alpha^2) = 0. \end{equation*} Should this occur, then $\alpha = \bigl(\gamma \pm \sqrt{\gamma^2 - 4\beta}\bigr)/2$. But we assume $\alpha < 0$ is real and $\gamma^2 - 4\beta < 0$, so $\det(DM) = 0$ is impossible. The map $M$ is smooth, so, by the inverse function theorem \cite[p.125]{LangAnalysisII}, it is locally a diffeomorphism. \end{proof} To explain how lemma \ref{product_decomposition} comes into play, consider the map $\chi \colon M_{3,3}({\mathbf R}) \rightarrow P_3^-$ which takes as input a real-valued $3$ by $3$ matrix $R$ and produces its characteristic polynomial $\chi(R) = \det(R - xI)$. The map $\chi$ has a coordinate expression by taking the coefficients in degrees $0$, $1$, and~$2$. These coefficients are polynomials in the matrix entries, so $\chi$ is smooth. Now look at \begin{equation*}
M_{3,3}({\mathbf R}) \stackrel{\chi}{\longrightarrow} P_3^- \stackrel{M}{\longleftarrow} P_1^- \times P_2^-. \end{equation*} Suppose we are in the situation of theorem \ref{stability_special}, where it is easy to factor the characteristic polynomial of $J\bigl(E_2(\mu, D, D)\bigr)$, and we have seen the Jacobian
has a negative real eigenvalue and a complex conjugate pair of eigenvalues as $\mu$ varies near a potential bifurcation value $\mu_{c_2}$. The factorization explicitly inverts the polynomial multiplication $M$ at particular points, and lemma \ref{product_decomposition} enables us to extend the factorization, in principle, to the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$.
In particular, we can understand how the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ behaves as $\mu$ varies when $(D_1,D_2)$ is close to $(D,D)$ (in the Euclidean norm, for definiteness). We remind the reader that we think of $\mu$ as a control parameter, adjustable by the experimenter, and $D_1$ and $D_2$ as experimental parameters, set at the beginning of an experiment. To bring out this distinction, we will write the components of the formal factorization of the characteristic polynomial of $J\bigl( E_2(\mu, D_1, D_2)\bigr)$ as $\alpha(D_1, D_2)(\mu)$, $\beta(D_1, D_2)(\mu)$, and $\gamma(D_1, D_2)(\mu)$.
\begin{lemma}
\label{uniformapproximation} Assume $f_1$ is three times continuously differentiable and $f_2$ is two times continuously differentiable. Then there exists a $\mu$-interval $[\mu_{c_2}{-}\hat{\delta}, \mu_{c_2}{+}\hat{\delta}]$ on which the $\mu$-derivative $\gamma'(D_1,D_2)(\mu)$ is uniformly approximated by $\gamma'(D,D)(\mu) = A'(\mu)$. In fact, there exists a constant $C$ such that \begin{equation*}
\abs{ \gamma'(D_1, D_2)(\mu) - \gamma'(D,D)(\mu)} \leq C \cdot \Dist{(D_1, D_2)}{(D, D)} \end{equation*} for any $\mu \in [\mu_{c_2}{-}\hat{\delta}, \mu_{c_2}{+}\hat{\delta}]$. \end{lemma} The details of the proof of lemma \ref{uniformapproximation} are relegated to section \ref{Appendix} so as not to disturb the flow of the exposition. \begin{theorem} \label{stability_general} Let $\mu > \mu_{c_1}(D_1, D_2)$, so that the equilibrium point $E_2(\mu, D_1, D_2)$ exists in the interior of the positive octant. Assume $f_1$ is three times continuously differentiable and $f_2$ is two times continuously differentiable and that $D/\bigl(\gamma_2 \lambda_Z(D)\bigr) > f_2'\bigl(\lambda_Z(D)\bigr)$ so that the hypotheses of theorem \ref{stability_special} part 1 are satisfied for $E_2(\mu, D, D)$, and let $\mu_{c_2}$ be as in theorem \ref{stability_special}.
For $(D_{1}, D_{2})$ sufficiently close to $(D,D)$, \begin{enumerate} \item hypothesis 4 of theorem \ref{HassardHopf} holds for $E_2(\mu, D_1, D_2)$; \item and if, in addition, $f_1^{(2)}\bigl(N(\mu_{c_2}, D, D)\bigr) < 0$ so that the hypotheses of theorem \ref{stability_special} part 2 are satisfied, then hypothesis 3 of theorem \ref{HassardHopf} holds for $E_2(\mu, D_1, D_2)$. \end{enumerate} \end{theorem}
\begin{remark} Theorem \ref{stability_special} gives a condition, namely, $f_1^{(2)} < 0$, on system \eqref{NPZsys} under which there is a unique number $\mu_{c_2}$ at which $J\bigl(E_2(\mu, D, D)\bigr)$ has a purely imaginary pair of eigenvalues meeting the transversality condition. However, it is not {\em a priori} clear that, in general, there is precisely one number at which these properties of the eigenvalues hold. Therefore, for the theorem and its proof, choose one such number $\mu_{c_2}$ and fix it throughout the discussion. \end{remark}
\begin{proof}[Proof of theorem \ref{stability_general}.]
We have proved in theorem \ref{stability_special} that there is an interval of parameters $\mu$ in which the characteristic polynomials
\begin{equation*}
p(x) = (-D -x) (-BC -Ax + x^2)
\end{equation*}
of $J\bigl(E_2(\mu, D, D)\bigr)$ have a complex conjugate pair of roots in addition to the eigenvalue $-D{<}0$, so they are in the image of the multiplication map $M$. For $(D_1, D_2)$ close to $(D,D)$ the entries in $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ are close to the entries in $J\bigl(E_2(\mu, D, D)\bigr)$, so the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ is close to the characteristic polynomial of $J\bigl(E_2(\mu, D, D)\bigr)$. To see this explicitly, refer to the formulas \eqref{JE2coefficienta1}, \eqref{JE2coefficienta2}, and \eqref{JE2coefficienta3}; to account for the normalization of the characteristic polynomial to leading coefficient $-1$ multiply each expression by~$-1$. Therefore, in view of lemma \ref{product_decomposition}, the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ has a decomposition of the same form as that of the characteristic polynomial of $J\bigl(E_2(\mu, D, D)\bigr)$. Written formally, the decomposition is \begin{equation*}
M^{-1}\bigl(\chi \bigl( J\bigl(E_2(\mu, D_1, D_2)\bigr) \bigr) \bigr) = \bigl( \alpha(D_1, D_2)(\mu) - x ,
\beta(D_1, D_2)(\mu) - \bigl(\gamma(D_1, D_2)(\mu)\bigr)\, x + x^2 \bigr). \end{equation*} The map $M\colon P_1^-(x){\times}P_2^-(x) \rightarrow P_3^-(x)$ is infinitely differentiable, so the local inverse $M^{-1}$ is also. In particular, for a fixed value $\mu$, the coefficients of the decomposition are smooth functions of $(D_1,D_2)$ defined in a neighborhood of $(D,D)$.
The first consequence is that, by definition of $P_1^-$, the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ has a linear factor $\alpha(D_1, D_2)(\mu) - x$ with $\alpha(D_1, D_2)(\mu) < 0$. This shows that hypothesis 4 of theorem \ref{HassardHopf} can be satisfied.
Now we start the verification of hypothesis 3 of theorem \ref{HassardHopf} for $E_2(\mu, D_1, D_2)$. The discriminant of the quadratic factor of the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ is \begin{equation*}
\bigl(\gamma(D_1, D_2)(\mu)\bigr)^2 - 4\, \beta(D_1, D_2)(\mu). \end{equation*} At $\mu_{c_2}$ and for $(D_1, D_2)$ sufficiently close to $(D, D)$, this is close to the expression \begin{equation*}
\bigl(\gamma(D, D)(\mu_{c_2})\bigr)^2 - 4\,\beta(D, D)(\mu_{c_2}) = A(\mu_{c_2})^2 + 4\, B(\mu_{c_2})C(\mu_{c_2}) \end{equation*} for the discriminant of the quadratic factor of the characteristic polynomial of $J\bigl(E_2(\mu, D,D)\bigr)$ given in \eqref{discriminantspecial}, which is negative. Therefore, at $\mu_{c_2}$, the discriminant $\bigl(\gamma(D_1, D_2)(\mu)\bigr)^2 - 4\,\beta(D_1, D_2)(\mu)$ is also negative. By continuity of the discriminant as a function of $\mu$, there is an interval $[\mu_{c_2}-\delta_0, \mu_{c_2}+\delta_0]$ on which it is negative. Therefore, the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ has a complex conjugate pair of roots on this interval.
Under the assumptions that $f_1^{(2)}$ is continuous and $f_1^{(2)}\bigl(N(\mu_{c_2}, D, D)\bigr)< 0$, $A'(\mu)$ as calculated in \eqref{A'} is continuous and $A'(\mu_{c_2}) > 0$. So there is a $\delta_1 > 0$ such that $A'(\mu) > A'(\mu_{c_2})/2$ on the interval $[\mu_{c_2}-\delta_1, \mu_{c_2}+\delta_1]$. Choose $\delta= \min\{ \delta_0, \delta_1\}$. Put $\eta_1 = -A(\mu_{c_2}{-}\delta)/2>0$. By continuity of $\gamma$ as a function of $(D_1, D_2)$, there is a $\rho_1 > 0$ such that $\Dist{(D_1,D_2)}{(D, D)} < \rho_1$ implies \begin{align*} \abs{ \gamma(D_1, D_2)(\mu_{c_2}{-}\delta) - \gamma(D, D)(\mu_{c_2}{-}\delta) } &< \eta_1. \intertext{Remembering that $\gamma(D, D)(\mu) = A(\mu)$, we have}
\abs{ \gamma(D_1, D_2)(\mu_{c_2}{-}\delta) - A(\mu_{c_2}{-}\delta) } &< \eta_1, \\
\gamma(D_1,D_2)(\mu_{c_2}{-}\delta) - A(\mu_{c_2}{-}\delta) &< -A(\mu_{c_2}{-}\delta)/2, \\
\gamma(D_1, D_2)(\mu_{c_2}{-}\delta) &< A(\mu_{c_2}{-}\delta)/2 < 0. \end{align*} Similarly, put $\eta_2 = A(\mu_{c_2}{+}\delta)/2>0$. There is a $\rho_2 > 0$ such that $\Dist{(D_1,D_2)}{(D, D)} < \rho_2$ implies \begin{align*} \abs{ \gamma(D_1, D_2)(\mu_{c_2}{+}\delta) - \gamma(D, D)(\mu_{c_2}{+}\delta) } &< \eta_2, \\
\abs{ \gamma(D_1, D_2)(\mu_{c_2}{+}\delta) - A(\mu_{c_2}{+}\delta) } &< \eta_2, \\
-A(\mu_{c_2}{+}\delta)/2 &< \gamma(D_1, D_2)(\mu_{c_2}{+}\delta) - A(\mu_{c_2}{+}\delta) \\
0 < A(\mu_{c_2}{+}\delta)/2 &< \gamma(D_1, D_2)(\mu_{c_2}{+}\delta). \end{align*} Now using the continuity of $\gamma(D_1, D_2)(\mu)$ as function of $\mu$ to combine these results, we find $\gamma(D_1, D_2)(\mu)$ has a zero in the interval $[\mu_{c_2}-\delta, \mu_{c_2}+\delta]$, provided that $\Dist{(D_1,D_2)}{(D, D)} < \min\{ \rho_1, \rho_2 \}$.
Combining the results of the previous two paragraphs, on the interval $[\mu_{c_2} - \delta, \mu_{c_2} + \delta]$ and for $(D_1,D_2)$ sufficiently close to $(D,D)$, the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ has a complex conjugate pair of roots and at least one pair of purely imaginary roots. Moreover, by choice of $\delta$, $\gamma'(D, D)(\mu) = A'(\mu) > A'(\mu_{c_2})/2>0$.
To see that the transversality condition holds, let $\eta_3 = A'(\mu_{c_2})/2 > 0$. By lemma \ref{uniformapproximation}, if $(D_1,D_2)$ is sufficiently close to $(D,D)$, on the interval $[\mu_{c_2}- \delta, \mu_{c_2}+ \delta]$ we have \begin{align*}
\abs{ \gamma'(D_1, D_2)(\mu) - \gamma'(D, D)(\mu)} &\leq \eta_3, \\
- A'(\mu_{c_2})/2 &< \gamma'(D_1, D_2)(\mu) - \gamma'(D, D)(\mu), \\
\gamma'(D, D)(\mu) - A'(\mu_{c_2})/2 &< \gamma'(D_1, D_2)(\mu), \\
0 &< \gamma'(D_1, D_2)(\mu), \end{align*} since $\gamma'(D, D)(\mu) = A'(\mu) > A'(\mu_{c_2})/2$ on the interval $[\mu_{c_2}- \delta, \mu_{c_2}+ \delta]$, in particular, at the point where $\gamma(D_1, D_2)(\mu)$ has a zero. This completes the proof that hypothesis 3 of theorem \ref{HassardHopf} holds for $E_2(\mu, D_1, D_2)$. \end{proof}
\section{Bifurcation to cycles} \label{Cycles} In \cite{Kuang1999} it is shown that cycles exist for certain values of parameters and under the assumption that $D{=}D_1{=}D_2$. In a nutshell, the assumption implies that the $\omega$-limit set of a solution starting near the unstable equilibrium $E_2(\mu, D, D)$ is contained in a plane in $NPZ$-space. Of course, this plane also contains $E_2$.
The authors observe that there is a two-dimensional attracting set for this system. The Poincar\'{e}-Bendixson theorem applies to the two-dimensional limit system, delivering the existence of a cycle for the $(N,P,Z)$-system.
In this section we fix $(D_1, D_2)$ sufficiently close to $(D, D)$ so that theorem \ref{stability_general} applies, and we want to apply the Hopf bifurcation theorem \cite{Hassard81, MarsdenMcCracken76}, restated in theorem \ref{HassardHopf}, to deduce that the system~\eqref{NPZsys} undergoes a Hopf bifurcation as $\mu$ passes a critical value. In section \ref{Coexistence_equilibrium_stability} we studied the characteristic polynomial of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ relative to the characteristic polynomial of $J\bigl(E_2(\mu, D, D)\bigr)$. Features of these polynomials continue to play a role.
The results of section~\ref{Coexistence_equilibrium_stability} show that for parameter values $(D_1, D_2)$ near $(D,D)$, hypotheses 3 and 4 of theorem \ref{HassardHopf} concerning the behavior of the eigenvalues of the Jacobian $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ are satisfied. However, in system \eqref{NPZsys}, the coordinates of the equilibrium $E_2\bigl( N(\mu, D_1, D_2), \lambda_Z(D_2), Z(\mu, D_1, D_2)\bigr)$ are changing with the parameter $\mu$, so hypothesis 1 of theorem \ref{HassardHopf} is not satisfied. The immediate goal of this section is to overcome this difficulty by using the inverse function theorem. \begin{figure}
\caption{A curve of coexistence equilibria}
\label{coexisteqcurveh3}
\end{figure} Augment system \eqref{NPZsys} by introducing the bifurcation parameter as an extra variable, giving \begin{equation} \label{aug_system} \begin{split}
dN/dt &= D(\mu - N ) - f_1(N)P \\
dP/dt &= \gamma_1f_1(N)P - D_1P - f_2(P) Z \\
dZ/dt &= \gamma_2f_2(P)Z - D_2Z \\
d\mu/dt &= 0. \end{split} \end{equation} We are interested in the equilibria of system \eqref{aug_system} as $\mu$ varies in a small interval around a number $\mu_{c_2}$ (depending on $D_1$ and $D_2$, but not necessarily uniquely determined)
characterized in the proof of theorem \ref{stability_general} as a parameter value at which the Jacobian $J\bigl(E(\mu_{c_2}, D_1, D_2)\bigr)$ has a pair of purely imaginary eigenvalues, crossing the imaginary axis in the complex plane transversally. By lemma \ref{NZsmoothness} the functions $N(\mu, D_1, D_2)$ and $Z(\mu, D_1, D_2)$ are smooth to the same degree of smoothness possessed by $f_1(N)$ and $f_2(P)$.
With $\delta$ as in the proof of theorem \ref{stability_general}, let $I$ be an interval containing $\mu_{c_2}$, contained in $[\mu_{c_2} - \delta, \mu_{c_2} + \delta]$, and supporting a curve \begin{equation*} h \colon I \rightarrow {\mathbf R}^4, \quad h(\mu) = \bigl(N(\mu, D_1, D_2), \lambda_Z(D_2), Z(\mu, D_1, D_2), \mu \bigr) \end{equation*} parameterizing the equilibrium locus of the augmented system \eqref{aug_system} near the critical point $h(\mu_{c_2})$. We note that \begin{equation*} h'(\mu) = \bigl( N'(\mu, D_1, D_2), 0, Z'(\mu, D_1, D_2), 1\bigr) \neq (0,0,0,0), \end{equation*} so $h$ is an immersion of an interval into ${\mathbf R}^4$.
Consider next the map $H \colon {\mathbf R}^3 \times I \rightarrow {\mathbf R}^4$ defined by \begin{equation*}
H(x,y,z, \mu) = (x,y,z, 0) + h(\mu)= (x+N(\mu, D_1, D_2), y+\lambda_Z(D_2), z + Z(\mu, D_1, D_2), \mu). \end{equation*} Observe that $H(0,0,0, \mu) = \bigl(N(\mu,D_1,D_2),\lambda_Z(D_2), Z(\mu,D_1,D_2), \mu \bigr)$ and that \begin{equation*}
DH(0,0,0, \mu) =
\begin{bmatrix}
1 & 0 & 0 & N'(\mu, D_1, D_2) \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & Z'(\mu, D_1, D_2) \\
0 & 0 & 0 & 1
\end{bmatrix}, \end{equation*} which is an invertible matrix for any $\mu$. In particular, on a neighborhood $U$ of $(0,0,0,\mu)$, $H$ is a diffeomorphism of $U$ onto its image, smooth to the degree of smoothness of $N$ and $Z$, by the inverse function theorem \cite[p.122]{LangAnalysisII}. Consequently, an interval of the form $\{(0,0,0)\}{\times}I'$ is mapped smoothly and bijectively onto the equilibrium locus of the system \eqref{aug_system}.
For $Y = (N, P, Z, \mu)$ in $H(U)$ we can write $Y = H(X)$, where $X = (x, y, z, \mu)$. To simplify the notation, write $dY/dt = F(Y)$ for system \eqref{aug_system}. Then
\begin{align}
\frac{dY}{dt}& = DH(X)\cdot \frac{dX}{dt} = F(Y) = (F\circ H)(X), \notag \intertext{so} \frac{dX}{dt} &= DH(X)^{-1}\cdot (F\circ H)(X) \label{aug_system_abbr}
\end{align} is a formal expression for the system with respect to the new coordinates. If $Y_0$ is an equilibrium solution of system \eqref{aug_system}, and $H(X_0) = Y_0$, then $X_0$ is an equilibrium solution of the system \eqref{aug_system_abbr}.
Let us now examine the relation between $DF(Y_0)$, the Jacobian of system \eqref{aug_system} at $Y_0$,
and the Jacobian of the system \eqref{aug_system_abbr} at $X_0$. We compute
\begin{multline*}
D\bigl( DH(X)^{-1} \cdot F \circ H(X) \bigr)\bigr\rvert_{X = X_0} \\=
D\bigl(X \mapsto DH(X)^{-1}\bigr)\bigr\rvert_{X = X_0} \cdot F \circ H (X_0)
+ DH(X_0)^{-1}\cdot D\bigl(X \mapsto F \circ H (X)\bigr) \bigr\rvert_{X = X_0}, \\ \shoveleft{ \text{applying the Leibniz rule,}} \\ = D\bigl(X \mapsto DH(X)^{-1}\bigr)\bigr\rvert_{X = X_0} \cdot 0
+ DH(X_0)^{-1}\cdot DF\bigl( H (X_0)\bigr) \cdot DH(X) \bigr\rvert_{X = X_0}, \\ \shoveleft{\text{by the chain rule,}}
\\= DH(X_0)^{-1}\cdot DF\bigl( H (X_0)\bigr) \cdot DH(X_0) = DH(X_0)^{-1}\cdot DF(Y_0 ) \cdot DH(X_0) .
\end{multline*} Thus, the Jacobian of system \eqref{aug_system_abbr} at the equilibrium $X_0$ is simply a conjugate of the Jacobian of system \eqref{aug_system} at the equilibrium $Y_0 = H(X_0)$. In particular, the eigenvalues of the Jacobians are the same. We can now prove our main theorem. \begin{theorem}
\label{Hopfbifurcation} Assume the hypotheses of theorem \ref{stability_general} hold and that $f_1$ and $f_2$ are four times continuously differentiable. For $D_1$ and $D_2$ both sufficiently close to $D$ there is a value $\mu_{c_2}$ of the growth parameter at which the system \eqref{NPZsys} undergoes a Hopf bifurcation, resulting in the appearance of a cycle. \end{theorem} \begin{proof} Let us now make the assumption that $\Dist{(D_1, D_2)}{(D,D)}$ is so small that the conclusions of theorem \ref{stability_general} hold, giving us a number $\mu_{c_2}$ (depending on $D_1$ and $D_2$, but not necessarily uniquely determined) at which the Jacobian $J\bigl(E(\mu_{c_2}, D_1, D_2)\bigr)$ has a purely imaginary pair of eigenvalues, crossing the imaginary axis in the complex plane transversally. The assumption that $f_1$ and $f_2$ are four times continuously differentiable fulfills part two of theorem \ref{HassardHopf}. Now review elements of the proof pertaining to the eigenvalues of the Jacobian of system \eqref{aug_system} at $(N(\mu, D_1, D_2), \lambda_Z(D_2), Z(\mu, D_1, D_2), \mu)$ as $\mu$ ranges over a small interval and increases through $\mu_{c_2}$. Throughout, the Jacobian has a negative real eigenvalue, by theorem \ref{stability_general}, part one. By part two of theorem \ref{stability_general}, for $\mu < \mu_{c_2}$ and sufficiently near $\mu_{c_2}$, there is a complex-conjugate pair of eigenvalues with negative real part. At $\mu = \mu_{c_2}$, the real part vanishes, and, for $\mu > \mu_{c_2}$ and sufficiently near $\mu_{c_2}$, there is a complex-conjugate pair of eigenvalues with positive real part. Moreover, the derivative of the function selecting the real part of the complex-conjugate pair is positive at $\mu_{c_2}$. That is, the locus of the complex-conjugate pair of eigenvalues crosses the imaginary axis transversally.
By our observations on the relationship of the system \eqref{aug_system} to the system \eqref{aug_system_abbr}, as the parameter $\mu$ varies, the evolution of the eigenvalues of equilibria of system \eqref{aug_system_abbr} has the same characteristics. Thus, hypotheses 4 and 3 of theorem \ref{HassardHopf} are satisfied for system \eqref{aug_system_abbr}. Having assumed that $f_1$ and $f_2$ are four times continuously differentiable, then hypothesis 2 of theorem \ref{HassardHopf} is also satisfied. Finally, since $(0,0,0)$ is an isolated equilibrium for all relevant values of $\mu$ for system \eqref{aug_system_abbr},
hypothesis 1 of theorem \ref{HassardHopf} is satisfied. The consequence is that the equilibrium $(0,0,0)$ undergoes a Hopf bifurcation at $\mu = \mu_{c_2}$, after which cycles appear in the phase portrait of system \eqref{aug_system_abbr}. Then the local diffeomorphism $H$ carries this portrait forward into the phase portrait of system \eqref{aug_system}. Thus, we have proved that cycles appear in our original system \eqref{NPZsys} when the parameter $\mu$ slightly exceeds $\mu_{c_2}$. \end{proof}
One can make similar observations viewing the Routh-Hurwitz expressions $a_1$, $a_2$, and $a_3$ as functions of $(D_1, D_2)$, but it seems that this only delivers ``loss of stability,'' which is not quite enough. One can also play around with derivatives of $J\bigl(E_2(\mu, D_1, D_2)\bigr)$ with respect to $D_1$ and $D_2$. One might get information about how far $(D_1, D_2)$ can vary from $(D,D)$ and still have a result.
\section{Simulations and Examples} \label{Examples} We consider now system \eqref{NPZsys} and take $f_1$ and $f_2$ to be Michaelis-Menten functions, also called Holling type II functions. We choose notations as follows. \begin{equation}
f_1(N) = m_1 N/(\alpha_1 + N) \quad \text{and} \quad f_2(P) = m_2P/(\alpha_2 + P). \label{HIIdefs}
\end{equation} With these choices explicit formulas can be given for many quantities studied in earlier sections. For example, we obtain formulas for the numbers $\lambda_P(D_1)$ and $\lambda_Z(D_2)$ defined in equation \eqref{breakeven}. For $\lambda_P(D_1)$ we have \begin{equation}\label{HIIlambdaP}
\gamma_1 \cdot \bigl(m_1 N/(\alpha_1 + N)\bigr) = D_1 \quad \text{with solution} \quad N = D_1 \alpha_1/(\gamma_1 m_1 - D_1) \mathrel{\mathop :}= \lambda_P(D_1); \end{equation} for $\lambda_Z(D_2)$ we have \begin{equation} \label{HIIlambdaZ}
\gamma_2 \bigl( m_2 P/(\alpha_2 + P)\bigr) = D_2 \quad \text{with solution} \quad P = D_2 \alpha_2/(\gamma_2 m_2 - D_2) \mathrel{\mathop :}= \lambda_Z(D_2). \end{equation} For rate functions of this type, the equation \eqref{1steqnwithZ_2notzero} determining $N(\mu, D_1,D_2)$ reduces to a quadratic equation. In principle, one obtains explicit solutions for $N(\mu, D_1, D_2)$ and the nonnegative solution is easily identified. Then the equation \eqref{2ndeqnwithZ_2notzero} for $Z(\mu, D_1, D_2)$ is linear and easily solved for $Z(\mu, D_1, D_2)$.
With these remarks we can turn to an illustration of theorems \ref{stability_special} and \ref{stability_general} in a case using Holling type II rate functions. With these rate functions, the concavity of the function $f_2$ guarantees that the condition $D/\bigl(\gamma_2 \lambda_Z(D)\bigr) > f_2'\bigl(\lambda_Z(D)\bigr)$ is satisfied. First, we set $D{=}D_1{=}D_2{=}1.0 $ and the remaining parameters to the following values. \begin{align*}
m_1 &= 1 & \alpha_1 &= 0.2 & \gamma_1 &= 2 \\
m_2 &= 2 & \alpha_2 &= 0.5 & \gamma_2 &=1.5 \end{align*} With all quantities involved explicitly computed, the formula given in \eqref{A} for the real part of the complex conjugate pair of eigenvalues of the linearization of the system at the coexistence equilibrium can be made explicit, though messy, and is easily plotted by a computer algebra system. This is the solid curve in figure \ref{realpartcompare}, which shows that a Hopf bifurcation occurs in the vicinity of $\mu = 0.6$. Figure \ref{h2Dsequal} exhibits solutions of the system for $\mu$ slightly smaller and slightly larger than the bifurcation value $\mu_{c_2}(D, D) \approx 0.6$.
Next, keep $D{=}1$ and set $D_1{=}1.2$ and $D_2{=}1.3$. Interpreted graphically, theorem \ref{stability_general} says that the function defined by taking the real part of the complex conjugate pair of eigenvalues associated with the coexistence equilibrium is an increasing function whose graph lies in a neighborhood of the curve we discussed in the preceding paragraph. We do not have an explicit formula for \emph{this} function with the new values for $D_1$ and $D_2$, but we can estimate its values by numerically computing the eigenvalues of the linearization along a sequence of $\mu$-values. Figure \ref{realpartcompare} also shows a sequence of points derived from eigenvalue approximations when $D_1{=}1.2$ and $D_2{=}1.3$. Interpolating a curve through the plotted points, we see that a Hopf bifurcation occurs in the vicinity of $\mu = 0.9$. Figure \ref{h2Dsunequal} shows trajectories of this system for values of $\mu$ slightly smaller and slightly larger than the bifurcation value.
To provide an additional illustration of the result of theorem \ref{stability_general}, we consider a version of system \eqref{NPZsys} incorporating rate functions with the property that the graphs have inflection points. Consider \begin{equation}
f_1(N) = m_1 N^2/(\alpha_1 + N^2) \quad \text{and} \quad f_2(P) = m_2P^2/(\alpha_2 + P^2) \label{HIIIdefs}
\end{equation} with the parameter values \begin{equation*}
m_1 = 1.7, \quad \alpha_1 = 0.8, \quad m_2 = 1.6, \quad \alpha_2 = 0.9. \end{equation*} Further, set \begin{equation*}
\gamma_1 = 0.8, \quad \gamma_2 = 0.9, \quad D=1, \quad D_1=1.2, \quad D_2=1.1. \end{equation*}
Then the condition $D/\bigl(\gamma_2 \lambda_Z(D)\bigr) > f_2'\bigl(\lambda_Z(D)\bigr)$ of theorem \ref{stability_general} is satisfied, but this is not a consequence of the concavity of the graph of $f_2$.
Determining $\lambda_P(D_1)$ and $\lambda_Z(D_2)$ in this example requires solving quadratic equations, so obtaining exact values is quite easy. Consequently, one can compute explicitly from \eqref{mu_{c1}} the value $\mu_{c_1}(D_1, D_2)$ beyond which the coexistence equilibrium exists. Locating a coexistence equilibrium $E_2(\mu, D_1, D_2)$ requires solving a cubic equation for $N(\mu, D_1, D_2)$, for which it is more appropriate to use numerical methods. We choose a sequence of $\mu$ values starting beyond $\mu_{c_1}(D_1, D_2)$, approximate the coexistence equilibria and their Jacobian matrices $J\bigl(E_2(\mu, D_1, D_2)\bigr)$, and numerically compute the real part of the complex pair of eigenvalues of the Jacobian for each $\mu$ value.
Plotting the real part against $\mu$ produces figure~\ref{fig:hh3realpart}, which exhibits the expected change of sign and shows that a Hopf bifurcation occurs in the vicinity of $\mu = 7.25$; trajectories for parameters slightly smaller and slightly larger than the bifurcation value are shown in figure~\ref{h3Dsunequal}. \begin{figure}
\caption{Comparison of real parts. For the solid curve $D{=}D_1{=}D_2=1$; for the symbols $D{=}1$, $D_1{=}1.2$ and $D_2{=}1.3$.}
\label{realpartcompare}
\end{figure}
\begin{figure}
\caption{Before and after a Hopf bifurcation: $D{=}D_1{=}D_2{=}1$ and Holling type II rate functions}
\label{h2Dsequal}
\end{figure} \begin{figure}
\caption{Before and after Hopf bifurcation: $D{=}1$, $D_1{=}1.2$ and $D_2{=}1.3$ and Holling type II rate functions}
\label{h2Dsunequal}
\end{figure} \begin{figure}
\caption{Real parts using rate functions \eqref{HIIIdefs}}
\label{fig:hh3realpart}
\end{figure} \begin{figure}
\caption{Before and after Hopf bifurcation: $D{=}1$, $D_1{=}1.2$, and $D_2{=}1.1$ and using rate functions \eqref{HIIIdefs}}
\label{h3Dsunequal}
\end{figure}
\section{Uniform Approximation} \label{Appendix} For the proof of theorem \ref{stability_general}, we need lemma \ref{uniformapproximation}, which states that, when $(D_1,D_2)$ is close to $(D,D)$, $\gamma'(D_1,D_2)(\mu)$ is uniformly approximated by $\gamma'(D,D)(\mu) = A'(\mu)$ on an interval $[\mu_{c_2}- \delta_0, \mu_{c_2}+ \delta_0]$, where $\mu_{c_2}$ is a point where $A(\mu_{c_2}){=}0$ and $A'(\mu_{c_2}) > 0$.
To recapitulate theorem \ref{stability_special}, the working assumptions are that a value $D$ is fixed,
a coexistence equilibrium $E_2(\mu, D, D)$ exists, and that there is a range of parameters $\mu$ for which the linearizations at the coexistence equilibrium have a complex conjugate pair of eigenvalues. Moreover, at the parameter value $\mu_{c_2}$, the linearization of the system has a purely imaginary pair of eigenvalues. For any $\mu$ slightly smaller than $\mu_{c_2}$, the pair of complex eigenvalues has a negative real part and for any $\mu$ slightly larger than $\mu_{c_2}$ the pair of complex eigenvalues has a positive real part. By lemma \ref{NZsmoothness} there is an interval $I$ containing $\mu_{c_2}$ and a disc $\Delta$ centered at $(D,D)$ such that the functions $N(\mu, D_1, D_2)$ and $Z(\mu, D_1, D_2)$ defined on $I{\times}\Delta$ smoothly parametrize the locus of coexistence equilibria near $E_2(\mu, D,D)$.
Uniform approximation of $\gamma'(D_1, D_2)(\mu)$ by $\gamma'(D, D)(\mu)$ implies that, for the coexistence equilibrium $E_2(\mu, D_1, D_2)$, there is a $\mu$-interval in $I$ for which the eigenvalues of the linearizations exhibit the same qualitative behavior as described for $E_2(\mu,D,D)$, as shown in theorem~\ref{stability_general}. \begin{proposition}[Lemma \ref{uniformapproximation}]
\label{convergenceestimate} Assume $f_1$ is three times continuously differentiable and $f_2$ is two times continuously differentiable. Then there exists a $\mu$-interval $[\mu_{c_2}{-}\hat{\delta}, \mu_{c_2}{+}\hat{\delta}]$ on which the $\mu$-derivative $\gamma'(D_1,D_2)(\mu)$ is uniformly approximated by $\gamma'(D,D)(\mu) = A'(\mu)$. In fact, there exists a constant $C$ such that \begin{equation}
\label{derivativebound}
\abs{ \gamma'(D_1, D_2)( \mu) - \gamma'(D,D)(\mu)} \leq C \cdot \Dist{(D_1, D_2)}{(D, D)} \end{equation} for any $\mu \in [\mu_{c_2}{-}\hat{\delta}, \mu_{c_2}{+}\hat{\delta}]$. \end{proposition} The proposition follows from a sequence of lemmas and estimates, given below. In the course of proving these results, we find it necessary to impose the differentiability conditions on $f_1$ and $f_2$.
An essential ingredient in the process is to obtain bounds on magnitudes of the differences \begin{align*}
p_0(\mu, D_1, D_2) - p_0(\mu, D, D),& & p_1(\mu, D_1, D_2) - p_1(\mu, D, D),& &
p_2(\mu, D_1, D_2) - p_2(\mu, D, D), \intertext{and}
p'_0(\mu, D_1, D_2) - p'_0(\mu, D, D),& & p'_1(\mu, D_1, D_2) - p'_1(\mu, D, D),& &
p'_2(\mu, D_1, D_2) - p'_2(\mu, D, D) \end{align*} in terms of $\Dist{(D_1, D_2)}{(D, D)}$,
and where the $p_i(\mu, D_1, D_2)= -a_{3-i}(\mu, D_1, D_2)$, $0 \leq i \leq 2$ are given by the formulas \eqref{JE2coefficienta1}, \eqref{JE2coefficienta2}, and \eqref{JE2coefficienta3}.
We now explain the role played by these bounds. By the chain rule, we compute $\gamma'(D_1,D_2)(\mu)$ as the inner product of
a row vector $\nabla \gamma$ with a column vector $(p_0', p_1', p_2')$: \begin{equation*} \gamma'(D_1,D_2)(\mu) = \bigl \langle \nabla \gamma(p_0,p_1,p_2) , ( p_0', p_1', p_2') \bigr \rangle (\mu, D_1, D_2). \end{equation*} \begin{remark}
In order to avoid extremely long expressions in the following analysis,
we use abbreviations such as \begin{equation} \label{abbreviation} \bigl[D(M^{-1})\bigr](\mu, D_1,D_2) \mathrel{\mathop :}=
D(M^{-1})\bigl(p_0(\mu, D_1, D_2),p_1(\mu, D_1, D_2),p_2(\mu, D_1, D_2)\bigr). \end{equation} \end{remark} We can write \begin{align*} [\nabla \gamma(p_0, p_1,p_2)](\mu, D_1, D_2) &= \pi_3 \circ [D(M^{-1})(p_0, p_1,p_2)](\mu, D_1, D_2) \\
&=
\langle 0, 0, 1 \rangle \cdot
\begin{bmatrix}
\nabla \alpha (p_0, p_1,p_2) \\
\nabla \beta (p_0, p_1,p_2) \\
\nabla \gamma (p_0, p_1,p_2)
\end{bmatrix}(\mu, D_1, D_2), \end{align*} where $\langle 0, 0, 1 \rangle$ represents the projection $\pi_3$ to the third coordinate $\gamma$ in $P_1(x)^-{\times}P_2(x)^-$. The gradients $\nabla \alpha$, $\nabla \beta$, and $\nabla \gamma$ are evaluated at \begin{align*} p_0(\mu, D_1, D_2) &= -a_3(\mu, D_1, D_2), \\ p_1(\mu, D_1, D_2) &= -a_2(\mu, D_1, D_2), \intertext{and} p_2(\mu, D_1, D_2) &= -a_1(\mu, D_1, D_2), \end{align*} where explicit expressions for $a_i(\mu, D_1, D_2)$ are given in \eqref{JE2coefficienta1}, \eqref{JE2coefficienta2}, and \eqref{JE2coefficienta3}. The derivatives $p_0'$, $p_1'$, and $p_2'$ are evaluated at $(\mu, D_1, D_2)$.
Applying the convention of \eqref{abbreviation}, we have \begin{multline*}
\gamma'(D_1, D_2)( \mu) - \gamma'(D,D)(\mu)
\\ = \bigl[\pi_3 \circ D(M^{-1})(p_0, p_1, p_2)\cdot ( p_0', p_1', p_2')\bigr](\mu, D_1, D_2) \\
- \bigl[\pi_3 \circ D(M^{-1})(p_0, p_1, p_2)\cdot ( p_0',p_1', p_2' ) \bigr](\mu, D, D ). \end{multline*} Before we go farther, we compress taking the derivative $D(M^{-1})$ at $(p_0, p_1, p_2)$ and evaluating on the vector $( p_0', p_1', p_2' )$, writing \begin{equation*}
D(M^{-1})\cdot ( p_0', p_1', p_2' ) \mathrel{\mathop :}= D(M^{-1})(p_0, p_1, p_2)\cdot ( p_0', p_1', p_2'). \end{equation*} Then the previous equation becomes \begin{multline*}
\gamma'(D_1, D_2)( \mu) - \gamma'(D,D)(\mu)
\\ = \bigl[\pi_3 \circ D(M^{-1}) \cdot ( p_0', p_1', p_2' )\bigr](\mu, D_1, D_2)
- \bigl[\pi_3 \circ D(M^{-1}) \cdot ( p_0',p_1', p_2' ) \bigr](\mu, D, D ). \end{multline*} We can estimate using operator norms computed in terms of the Euclidean metrics. \begin{multline}\label{gammaprimedifference}
\abs{ \gamma'(D_1, D_2)( \mu) - \gamma'(D,D)(\mu)} \\ =
\abs{\bigl[ \pi_3 \circ D(M^{-1})\cdot ( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2)
- \bigl[\pi_3 \circ D(M^{-1})\cdot ( p_0',p_1', p_2') \bigr](\mu, D, D)} \\ \leq \norm{\pi_3} \bignorm {\bigl[ D(M^{-1})\cdot ( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2)-\bigl[D(M^{-1})\cdot ( p_0',p_1', p_2') \bigr](\mu, D, D)} \\ \leq \bignorm { \bigl[D(M^{-1}) \cdot ( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2)-\bigl[D(M^{-1})\cdot ( p_0',p_1', p_2') \bigr](\mu, D, D)}, \end{multline} since the norm of a projection is $1$. Now we use the triangle inequality to bound the last expression. \begin{multline} \label{gammaprimedifference1}
\bignorm{ \bigl[ D(M^{-1})\cdot ( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2)-\bigl[D(M^{-1})\cdot ( p_0',p_1', p_2') \bigr](\mu, D, D)} \\
\leq \bignorm{\bigl[ D(M^{-1})\cdot ( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2)
- \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)\cdot \bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D)} \\
+ \bignorm{ \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)\cdot \bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D)
- \bigl[D(M^{-1})\cdot ( p_0',p_1', p_2') \bigr](\mu, D, D) }. \end{multline} Explicit expansion of the first summand in \eqref{gammaprimedifference1} is given in the proof of lemma \ref{firstsummand}, where we will see the role of the bounds on \begin{equation*}
p'_0(\mu, D_1, D_2) - p'_0(\mu, D, D), \quad p'_1(\mu, D_1, D_2) - p'_1(\mu, D, D), \quad
p'_2(\mu, D_1, D_2) - p'_2(\mu, D, D). \end{equation*} Similarly, explicit expansion of the second summand in \eqref{gammaprimedifference1} is given in the proof of lemma \ref{secondsummand}, where we will see the role of the bounds on \begin{equation*}
p_0(\mu, D_1, D_2) - p_0(\mu, D, D), \quad p_1(\mu, D_1, D_2) - p_1(\mu, D, D), \quad
p_2(\mu, D_1, D_2) - p_2(\mu, D, D). \end{equation*} \begin{lemma}
\label{firstsummand} There is a constant $C_3$ such that the first summand in the expansion \eqref{gammaprimedifference1} satisfies \begin{multline}
\label{firstsummandbound}
\bignorm{\bigl[ D(M^{-1})\cdot ( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2)
- \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)\cdot\bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D)} \\ \leq C_3 \, \cdot \, \Dist{(D_1, D_2)}{(D,D)}. \end{multline} \end{lemma} \begin{proof} By the basic property of the operator norm, \begin{multline*}
\bignorm{\bigl[ D(M^{-1})\cdot( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2)
- \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)\cdot\bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D)} \\ \leq \bignorm{\bigl[ D(M^{-1})\bigr](\mu, D_1, D_2) }\cdot
\bignorm{\bigl[( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2) - \bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D) }. \end{multline*} Examining the factor coming from the operator norm, \begin{multline} \label{firstfactorfirstsummand}
\bignorm{\bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)} \\
= \bignorm{D(M^{-1})\bigl(p_0(\mu, D_1, D_2), p_1(\mu, D_1, D_2), p_2(\mu, D_1, D_2)\bigr)}\leq C_{DM^{-1}}, \end{multline} for some constant $C_{DM^{-1}}$. This is because, as $(D_1,D_2)$ ranges over any closed disc centered on $(D,D)$ and $\mu$ ranges over any closed interval containing $\mu_{c_2}$, the coordinates $(p_0, p_1, p_2)$ are contained in a compact set, so there is a constant $C_{DM^{-1}}$ that bounds the norm of $D(M^{-1})$ at any of these points.
Now we require a bound on the other factor, for which we have \begin{multline} \label{secondfactorfirstsummand}
\bignorm{[( p_0', p_1', p_2') ](\mu, D_1, D_2)- [(p_0', p_1', p_2') ](\mu, D, D)}^2 \\
= \bigabs{p_0'(\mu, D_1, D_2){-}p_0'(\mu, D, D)}^2
\!+\! \bigabs{p_1'(\mu, D_1, D_2){-}p_1'(\mu, D, D)}^2
\!+\! \bigabs{p_2'(\mu, D_1, D_2){-}p_2'(\mu, D, D)}^2 \\ \leq \bigl((C_0')^2{+}(C_1')^2{+}(C_2')^2\bigr)\,\cdot\,\Dist{(D_1, D_2)}{(D, D)}^2, \end{multline} provided by combining the results of propositions \ref{p0bounds}, \ref{p1bounds}, and \ref{p2bounds}. Combining this bound with the bound on the expression \eqref{firstfactorfirstsummand}, the first summand in \eqref{gammaprimedifference1} is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. \end{proof} Now we bound the second summand in \eqref{gammaprimedifference1}. \begin{lemma}
\label{secondsummand} There is a constant $C_4$ such that the second summand in the expansion \eqref{gammaprimedifference1} satisfies \begin{multline}
\label{secondsummandbound}
\bignorm{ \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)\cdot\bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D) - \bigl[D(M^{-1})\cdot( p_0',p_1', p_2') \bigr](\mu, D, D) } \\ \leq C_4 \, \cdot \, \Dist{(D_1, D_2)}{(D,D)}. \end{multline} \end{lemma} \begin{proof} To handle the second summand in \eqref{gammaprimedifference1}, we have the bound \begin{multline} \label{gammaprimesecondsummand}
\bignorm{ \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)\cdot\bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D) - \bigl[D(M^{-1})\cdot( p_0',p_1', p_2') \bigr](\mu, D, D) } \\ \leq
\bignorm{ \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)- \bigl[D(M^{-1})\bigr](\mu, D, D)} \bignorm{\bigl[ ( p_0',p_1', p_2') \bigr](\mu, D, D) }. \end{multline} To the first factor in the bounding term, apply the mean value theorem \cite[p.103, Corollary~1]{LangAnalysisII} for vector-valued functions of several variables, obtaining \begin{multline*}
\bignorm{[D(M^{-1})](\mu, D_1, D_2)- [D(M^{-1})](\mu, D, D)} \\
\leq \bignorm{[D(D(M^{-1}))](q_0,q_1,q_2)}\cdot\bignorm{[(p_0,p_1, p_2)](\mu, D_1, D_2)-[(p_0,p_1, p_2)](\mu, D, D)}. \end{multline*} The derivative of the map $P_3(x)^- \rightarrow M_{3,3}({\mathbf R})$, $(p_0,p_1,p_2) \mapsto D(M^{-1})(p_0,p_1,p_2)$ is continuous, because $M^{-1}$ is $C^{\infty}$. The evaluation point $(q_0, q_1,q_2)$ is on the line segment connecting $[(p_0,p_1, p_2)](\mu, D_1, D_2)$ and $[(p_0,p_1, p_2)](\mu, D, D)$. Again the possibilities range over a compact set, so the norm of the second derivative satisfies \begin{equation}
\label{firstfactorbound}
\bignorm{[D(D(M^{-1}))](q_0,q_1,q_2)} \leq C_{D^2(M^{-1})}, \end{equation} for a constant $C_{D^2(M^{-1})}$ independent of $(D_1,D_2, \mu)$. We compute \begin{multline} \label{secondfactorbound}
\bignorm{p(\mu, D_1, D_2)-p(\mu, D, D)}^2 = \\
\bigabs{p_0(\mu, D_1, D_2){-}p_0(\mu, D, D)}^2 + \bigabs{p_1(\mu, D_1, D_2){-}p_1(\mu, D, D)}^2 + \bigabs{p_2(\mu, D_1, D_2){-}p_2(\mu, D, D)}^2 \\ \leq \bigl((C_0)^2{+}(C_1)^2{+}(C_2)^2\bigr) \cdot \Dist{(D_1, D_2)}{(D, D)}^2, \end{multline}
by combining the results of propositions \ref{p0bounds}, \ref{p1bounds}, and \ref{p2bounds}. Assembling the bounds in \eqref{firstfactorbound} and \eqref{secondfactorbound}, $\bignorm{[D(M^{-1})](\mu, D_1, D_2)- [D(M^{-1})](\mu, D, D)}$ is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. This takes care of the first factor on the righthand side of \eqref{gammaprimesecondsummand}.
For the remaining factor in the bounding term in \eqref{gammaprimesecondsummand}, the norm of the tangent vector satisfies \begin{equation} \label{thirdfactorbound}
\bignorm{[( p_0', p_1', p_2') ](\mu, D, D)} \leq C_T, \end{equation} for some constant $C_T$, independent of $\mu$. Now that we have taken care of both factors on the righthand side of \eqref{gammaprimesecondsummand} the second summand in \eqref{gammaprimedifference1} is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$, as claimed. \end{proof} We can now prove the uniform convergence result. \begin{proof}
[Proof of proposition \ref{convergenceestimate}.] Combining the inequalities \eqref{gammaprimedifference} and \eqref{gammaprimedifference1}, we have \begin{multline*}
\abs{ \gamma'(D_1, D_2)( \mu) - \gamma'(D,D)(\mu)} \\ \leq \bignorm{\bigl[ D(M^{-1})\cdot ( p_0', p_1', p_2' ) \bigr](\mu, D_1, D_2)
- \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)\cdot \bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D)} \\
+ \bignorm{ \bigl[ D(M^{-1})\bigr](\mu, D_1, D_2)\cdot \bigl[ ( p_0', p_1', p_2' ) \bigr](\mu, D, D)
- \bigl[D(M^{-1})\cdot ( p_0',p_1', p_2') \bigr](\mu, D, D) }. \end{multline*} Using the observations detailed in lemmas \ref{firstsummand} and \ref{secondsummand}, \begin{equation*}
\abs{ \gamma'(D_1, D_2)( \mu) - \gamma'(D,D)(\mu)} \leq (C_3 + C_4) \, \cdot \, \Dist{(D_1, D_2)}{(D,D)}. \end{equation*} Thus, there is a constant $C$ such that \begin{equation*}
\abs{ \gamma'(D_1, D_2)( \mu) - \gamma'(D,D)(\mu)} \leq C\cdot \Dist{(D_1, D_2)}{(D,D)} \end{equation*} for any $\mu \in [\mu_{c_2}- \hat{\delta}, \mu_{c_2}+ \hat{\delta}]$. \end{proof} We have already used a technique of obtaining bounds by splitting quantities. As we will continue to exploit the technique in the following results, we formulate lemma \ref{elementary} for reference. \begin{lemma}
\label{elementary} Let $Q_1(\mu, D_1, D_2)$ and $Q_2(\mu, D_1, D_2)$ be quantities defined on a domain $I{\times}\Delta$ satisfying the following conditions. \begin{enumerate} \item There is a constant $c_1$ such that
\begin{equation*}
\abs{Q_1(\mu, D_1, D_2) - Q_1(\mu, D, D)} \leq c_1\Dist{(D_1, D_2)}{(D, D)}.
\end{equation*} \item There is a constant $c_2$ such that
\begin{equation*}
\abs{Q_2(\mu, D_1, D_2) - Q_2(\mu, D, D)} \leq c_2\Dist{(D_1, D_2)}{(D, D)}.
\end{equation*} \item There are constants $c_3$ and $c_4$ such that $\abs{Q_1(\mu, D_1, D_2)} \leq c_3$ for $(\mu, D_1, D_2) \in I{\times}\Delta$ and for $\Dist{(D_1, D_2)}{(D, D)}$ sufficiently small, and $\abs{Q_2(\mu,D,D)}\leq c_4$ for $\mu \in I$. \end{enumerate} Then there is a constant $c_5$ such that, for $\Dist{(D_1, D_2)}{(D, D)}$ sufficiently small, \begin{equation*}
\abs{Q_1(\mu, D_1, D_2)\cdot Q_2(\mu, D_1, D_2) - Q_1(\mu, D, D)\cdot Q_2(\mu, D, D)}
\leq c_5 \Dist{(D_1, D_2)}{(D, D)}. \qed \end{equation*} \end{lemma} As has been seen, the proof of proposition \ref{convergenceestimate} depends on the following three propositions. \begin{proposition}
\label{p0bounds} There are constants $C_0$ and $C_0'$ such that \begin{align}
\abs{p_0(\mu, D_1, D_2) - p_0(\mu, D, D)} &\leq C_0\cdot \Dist{(D_1, D_2)}{(D, D)}, \label{p0bound} \\
\abs{p_0'(\mu, D_1, D_2) - p_0'(\mu, D, D)} &\leq C_0'\cdot \Dist{(D_1, D_2)}{(D, D)}. \label{p0primebound} \end{align} \end{proposition} \begin{proposition}
\label{p1bounds} There are constants $C_1$ and $C_1'$ such that \begin{align}
\abs{p_1(\mu, D_1, D_2) - p_1(\mu, D, D)} &\leq C_1\cdot \Dist{(D_1, D_2)}{(D, D)}, \label{p1bound} \\
\abs{p_1'(\mu, D_1, D_2) - p_1'(\mu, D, D)} &\leq C_1'\cdot \Dist{(D_1, D_2)}{(D, D)}. \label{p1primebound} \end{align} \end{proposition} \begin{proposition}
\label{p2bounds} There are constants $C_2$ and $C_2'$ such that \begin{align}
\abs{p_2(\mu, D_1, D_2) - p_2(\mu, D, D)} &\leq C_2\cdot \Dist{(D_1, D_2)}{(D, D)}, \label{p2bound} \\
\abs{p_2'(\mu, D_1, D_2) - p_2'(\mu, D, D)} &\leq C_2'\cdot \Dist{(D_1, D_2)}{(D, D)}. \label{p2primebound} \end{align} \end{proposition} The proofs of propositions \ref{p0bounds}, \ref{p1bounds}, and \ref{p2bounds} depend in turn on a number of elementary bounds and estimates, given below in lemma \ref{f2primebound} and propositions \ref{DDbounds}, \ref{muD1D2bounds}, and \ref{constituentbounds}. We give the quick proofs of lemma \ref{f2primebound} and propositions \ref{DDbounds} and \ref{muD1D2bounds}, because they are quite short, postponing the proof of the many parts of proposition \ref{constituentbounds} to the end of the section. After we state these results, we prove propositions \ref{p0bounds}, \ref{p1bounds}, and \ref{p2bounds}. \begin{lemma} \label{f2primebound}
Given $D$, there is an interval $J$ containing $\lambda_Z(D)$ such that
\begin{equation*}
f_2'(P) > f_2'\bigl(\lambda_Z(D)\bigr)/2 > 0
\end{equation*} for all $P \in J$. Thus, for all $P \in J$, $f_2'(P)$ is bounded away from zero, and, for all $D_2$ in the preimage $\lambda_Z^{-1}(J)$, $f_2'\bigl(\lambda_Z(D_2)\bigr)$ is bounded away from zero. \end{lemma} \begin{proof}
By assumption on $f_2$, $f_2'\bigl(\lambda_Z(D)\bigr) > 0$. By continuity of $f_2'$, there is an interval $J$ containing $\lambda_Z(D)$ such that, for all $P \in J$, \begin{equation*}
-f_2'\bigl(\lambda_Z(D)\bigr)/2 < f_2'(P) - f_2'\bigl(\lambda_Z(D)\bigr) < f_2'\bigl(\lambda_Z(D)\bigr)/2. \qedhere \end{equation*} \end{proof} \begin{proposition}
\label{DDbounds} Each of the quantities \begin{gather*}
f_1\bigl(N(\mu, D, D)\bigr), \quad f_1'\bigl(N(\mu, D, D)\bigr), \quad Z(\mu, D, D), \\ \quad f_1''\bigl(N(\mu, D, D)\bigr), \quad N'(\mu, D, D), \quad \text{and} \quad Z'(\mu, D, D) \end{gather*} is bounded by some constant on the interval $I$. \end{proposition} \begin{proof}
Each of the listed functions is continuous on the closed interval $I$, so each one is bounded. \end{proof} \begin{proposition} \label{muD1D2bounds}
Each of the quantities
\begin{gather*}
\lambda_Z(D_2), \quad f_1\bigl(N(\mu, D_1, D_2)\bigr), \quad f_1'\bigl(N(\mu, D_1, D_2)\bigr), \quad Z(\mu, D_1, D_2), \\
f_2'\bigl(\lambda_Z(D_2)\bigr), \quad f_1''\bigl(N(\mu, D_1, D_2)\bigr), \quad N'(\mu, D_1, D_2), \quad \text{and} \quad Z'(\mu, D_1, D_2)
\end{gather*} is bounded by some constant on the domain $I {\times} \Delta$. \end{proposition} \begin{proof}
Each of the listed functions is continuous on the compact set $I{\times}\Delta$, so each one is bounded. \end{proof} The proof of the next result depends on many more details of system \eqref{NPZsys} and consequences drawn from them. Some steps in the proof are quite lengthy, so we postpone these details to the end of the section. \begin{proposition}
\label{constituentbounds} Assume that the domain $I{\times}\Delta$ for which $N(\mu,D_1,D_2)$ and $Z(\mu,D_1,D_2)$ are defined is a subset of $I{\times}{\mathbf R}{\times}\lambda_Z^{-1}(J)$, where $J$ is as in lemma \ref{f2primebound}. Then for $\mu \in I$ and $(D_1, D_2) \in \Delta$, these differences are bounded by constants times $\Dist{(D_1, D_2)}{(D, D)}$. \begin{align*} &\text{1.} \; \lambda_Z(D) - \lambda_Z(D_2). &\text{2.} \; &f_1\bigl(N(\mu, D_1, D_2)\bigr) - f_1\bigl(N(\mu, D, D)\bigr). \\ &\text{3.} \; f_1'\bigl(N(\mu, D_1, D_2)\bigr) - f_1'\bigl(N(\mu, D, D)\bigr). &\text{4.} \; &Z(\mu, D_1, D_2) - Z(\mu, D, D). \\ &\text{5.} \; f_2'\bigl(\lambda_Z(D_2)\bigr) - f_2'\bigl(\lambda_Z(D)\bigr). &\text{6.} \; &f_1^{(2)}\bigl(N(\mu, D_1, D_2)\bigr) - f_1^{(2)}\bigl(N(\mu, D, D)\bigr). \intertext{Moreover,} &\text{7.}\; N'(\mu, D_1, D_2) - N'(\mu, D, D), &\text{8.}\; &Z'(\mu, D_1, D_2) - Z'(\mu, D, D), \end{align*} where the derivatives are taken with respect to $\mu$, are also bounded by constants times $\Dist{(D_1, D_2)}{(D, D)}$. \end{proposition} \begin{proof}[Proof of proposition \ref{p0bounds}] After some reorganization, we have from \eqref{JE2coefficienta3} \begin{multline} \label{p0difference}
p_0(\mu, D_1, D_2) - p_0(\mu, D, D) = -a_3(\mu, D_1, D_2) + a_3(\mu, D, D) \\
= -D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z(\mu, D_1, D_2)\Bigl( D + \lambda_Z(D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr)\Bigr) \\
+ D f_2'\bigl(\lambda_Z(D)\bigr) Z(\mu, D, D)\Bigl( D + \lambda_Z(D) f_1'\bigl(N(\mu, D, D)\bigr) \Bigr) \\
= \Bigl( D f_2'\bigl(\lambda_Z(D)\bigr) Z(\mu, D, D) -D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z(\mu, D_1, D_2)\Bigr) D \\
+ \Bigl[ D f_2'\bigl(\lambda_Z(D)\bigr) Z(\mu, D, D) \lambda_Z(D) f_1'\bigl(N(\mu, D, D)\bigr) \\
- D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z(\mu, D_1, D_2) \lambda_Z(D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr)\Bigr]. \end{multline} To bound $\abs{p_0(\mu, D_1, D_2) - p_0(\mu, D, D)}$, we bound the absolute values of summands in \eqref{p0difference} as follows. First, note $\abs{D{-}D_2} \leq \Dist{(D_1, D_2)}{(D, D)}$, so we bound \begin{equation*} \abs{ D f_2'\bigl(\lambda_Z(D)\bigr) Z(\mu, D, D) - D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z(\mu, D_1, D_2)} \end{equation*} by using lemma \ref{elementary}, proposition \ref{DDbounds}, and proposition \ref{muD1D2bounds} to combine the noted bound with bounds 4 and 5 from proposition \ref{constituentbounds}; bound \begin{multline*}
\abs{D f_2'\bigl(\lambda_Z(D)\bigr) Z(\mu, D, D) \lambda_Z(D) f_1'\bigl(N(\mu, D, D)\bigr)
\\
- D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z(\mu, D_1, D_2) \lambda_Z(D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr)} \end{multline*} using lemma \ref{elementary} to combine bounds 1, 3, 4 and 5 with $\abs{D{-}D_2} \leq \Dist{(D_1, D_2)}{(D, D)}$. We compute from \eqref{p0difference} \begin{multline*}
p_0'(\mu, D_1, D_2) - p_0'(\mu, D, D) \\ = \Bigl( D f_2'\bigl(\lambda_Z(D)\bigr) Z'(\mu, D, D) -D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z'(\mu, D_1, D_2)\Bigr) D \\ \shoveleft{\quad \quad \quad + \Bigl[ D f_2'\bigl(\lambda_Z(D)\bigr) Z'(\mu, D, D) \lambda_Z(D) f_1'\bigl(N(\mu, D, D)\bigr)} \\
- D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z'(\mu, D_1, D_2) \lambda_Z(D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr)\Bigr] \\
+\Bigl[ D f_2'\bigl(\lambda_Z(D)\bigr) Z(\mu, D, D) \lambda_Z(D) f_1^{(2)}\bigl(N(\mu, D, D)\bigr) N'(\mu, D, D) \\
- D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z(\mu, D_1, D_2) \lambda_Z(D_2) f_1^{(2)}\bigl(N(\mu, D_1, D_2)\bigr)N'(\mu, D_1, D_2) \Bigr]. \end{multline*} For $\abs{p_0'(\mu, D_1, D_2) - p_0'(\mu, D, D)}$, we bound from the first line of the expansion \begin{equation*}
\abs{D f_2'\bigl(\lambda_Z(D)\bigr) Z'(\mu, D, D) - D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z'(\mu, D_1, D_2)} \end{equation*} by combining bounds 5 and 8 with the bound $\abs{D{-}D_2} \leq \Dist{(D_1, D_2)}{(D, D)}$; bound from the second and third lines \begin{multline*} \abs{D f_2'\bigl(\lambda_Z(D)\bigr) Z'(\mu, D, D) \lambda_Z(D) f_1'\bigl(N(\mu, D, D)\bigr)
\\- D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z'(\mu, D_1, D_2) \lambda_Z(D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr)}
\end{multline*} by combining bounds 1, 3, 5, and 8 with the bound $\abs{D{-}D_2} \leq \Dist{(D_1, D_2)}{(D, D)}$; bound from the fourth and fifth lines \begin{multline*} \abs{D f_2'\bigl(\lambda_Z(D)\bigr) Z(\mu, D, D) \lambda_Z(D) f_1^{(2)}\bigl(N(\mu, D, D)\bigr) N'(\mu, D, D)
\\- D_2 f_2'\bigl(\lambda_Z(D_2)\bigr) Z(\mu, D_1, D_2) \lambda_Z(D_2) f_1^{(2)}\bigl(N(\mu, D_1, D_2)\bigr)N'(\mu, D_1, D_2)} \end{multline*} by combining bounds 1, 4, 5, 6, and 7 from proposition \ref{constituentbounds} with the bound $\abs{D{-}D_2} \leq \Dist{(D_1, D_2)}{(D, D)}$. \end{proof}
\begin{proof}[Proof of proposition \ref{p1bounds}] After some reorganization, placing terms belonging to $p_1(\mu, D_1, D_2)$ down the left side of the display, we have from \eqref{JE2coefficienta2} \begin{multline}
\label{p1difference}
p_1(\mu, D_1, D_2) - p_1(\mu, D, D) = -a_2(\mu, D_1, D_2) + a_2(\mu, D, D) \\
\shoveleft{\quad =-\lambda_{Z}(D_2) Z(\mu, D_1, D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr) f_2'\bigl(\lambda_{Z}(D_2)\bigr)} \\ \shoveright{+\lambda_{Z}(D) Z(\mu, D, D) f_1'\bigl(N(\mu, D, D)\bigr) f_2'\bigl(\lambda_{Z}(D)\bigr)} \\ \qquad \quad - D_2Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) + D Z(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr) \\ \qquad \qquad - D Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) + D Z(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr) \\ \qquad \qquad \quad - D_{1}\lambda_{Z}(D_2)f_1'\bigl(N(\mu, D_1, D_2)\bigr) + D \lambda_{Z}(D)f_1'\bigl(N(\mu, D, D)\bigr) \\ \shoveright{ + D \gamma_{1}f_{1}\bigl(N(\mu, D_1, D_2)\bigr) - D \gamma_{1}f_{1}(N(\mu, D, D)\bigr)} \\ - D D_{1} + D^2. \end{multline} To bound $\abs{p_1(\mu, D_1, D_2) - p_1(D,D,\mu)}$ by a constant multiple of $\Dist{(D_1, D_2)}{(D,D)}$, first observe that both $\abs{D{-}D_1}$ and $\abs{D{-}D_2}$ are bounded by $\Dist{(D_1, D_2)}{(D,D)}$. Similarly bound \begin{multline*} \abs{-\lambda_{Z}(D_2) Z(\mu, D_1, D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr) f_2'\bigl(\lambda_{Z}(D_2)\bigr) \\
+ \lambda_{Z}(D) Z(\mu, D, D) f_1'\bigl(N(\mu, D, D)\bigr) f_2'\bigl(\lambda_{Z}(D)\bigr)} \end{multline*} by combining bounds 1, 4, 3, and 5 from proposition \ref{constituentbounds}; bound \begin{align*}
\abs{-D_2 Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) &+ D Z(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr)} \intertext{by combining bounds 4 and 5 with $\abs{D{-}D_2} \leq \Dist{(D_1, D_2)}{(D,D)}$; bound} \abs{- D Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) &+ D Z(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr)} \intertext{by combining bounds 4 and 5; bound} \abs{- D_1\lambda_{Z}(D_2)f_1'\bigl(N(\mu, D_1, D_2)\bigr) &+ D \lambda_{Z}(D)f_1'\bigl(N(\mu, D, D)\bigr)} \intertext{by combining bounds 1 and 3 with $\abs{D{-}D_1}\leq \Dist{(D_1, D_2)}{(D,D)}$; and} \abs{D \gamma_{1}f_{1}\bigl(N(\mu, D_1, D_2)\bigr) &- D \gamma_{1}f_{1}\bigl(N(\mu, D, D)\bigr)} \end{align*} is taken care of in bound 2 of proposition \ref{constituentbounds}. Finally, $\abs{-DD_1 + D^2}$ has already been taken care of. Adding all these bounds, $\abs{p_1(\mu, D_1, D_2) - p_1(D,D,\mu)}$ is bounded by a constant multiple of $\Dist{(D_1, D_2)}{(D,D)}$.
Rather than exhibit a complete formula for $p_1'(\mu, D_1, D_2) - p_1'(\mu, D, D)$, we pick apart equation \eqref{p1difference} to express this difference as a sum of expressions. From the first two lines, \begin{multline} \label{firsttwolines} -\lambda_{Z}(D_2) Z'(\mu, D_1, D_2) f_1'\bigl(N(\mu, D_1, D_2)\bigr) f_2'\bigl(\lambda_{Z}(D_2)\bigr) \\
-\lambda_{Z}(D_2) Z(\mu, D_1, D_2) f_1^{(2)}\bigl(N(\mu, D_1, D_2)\bigr) N'(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) \\ +\lambda_{Z}(D) Z'(\mu, D, D) f_1'\bigl(N(\mu, D, D)\bigr) f_2'\bigl(\lambda_{Z}(D)\bigr) \\ +\lambda_{Z}(D) Z(\mu, D, D) f_1^{(2)}\bigl(N(\mu, D, D)\bigr) N'(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr) \end{multline} is involved in the sum. From lines three through six, the expressions involved are \begin{gather}
- D_2Z'(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) + DZ'(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr), \label{linethree} \\ - D Z'(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) + D Z'(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr), \label{linefour} \\ - D_{1}\lambda_{Z}(D_2)f_1^{(2)}\bigl(N(\mu, D_1, D_2)\bigr)N'(\mu, D_1, D_2)
+ D \lambda_{Z}(D)f_1^{(2)}\bigl(N(\mu, D, D)\bigr)N'(\mu, D, D), \label{linefive} \\
D \gamma_{1}f_{1}'\bigl(N(\mu, D_1, D_2)\bigr)N'(\mu, D_1, D_2) - D \gamma_{1}f_{1}'\bigl(N(\mu, D, D)\bigr)N'(\mu, D, D). \label{linesix} \end{gather} Making several applications of lemma \ref{elementary}, proposition \ref{DDbounds}, proposition \ref{muD1D2bounds}, and proposition \ref{constituentbounds}, we find that the absolute value of each of the quantities displayed in \eqref{firsttwolines}, \eqref{linethree}, \eqref{linefour}, \eqref{linefive}, and \eqref{linesix} is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. Consequently, there is a constant $C_1'$ such that \begin{equation*}
\abs{ p_1'(\mu, D_1, D_2) - p_1'(\mu, D, D) } \leq C_1' \cdot \Dist{(D_1, D_2)}{(D, D)}. \qedhere \end{equation*} \end{proof}
\begin{proof}[Proof of proposition \ref{p2bounds}] Using the formula \eqref{JE2coefficienta1} and organizing the difference $p_2(\mu, D_1, D_2) - p_2(\mu, D, D)$ to display terms belonging to $p_2(\mu, D_1, D_2)$ down the left side of the display, we have \begin{multline} \label{p2difference}
p_2(\mu, D_1, D_2) - p_2(\mu, D, D) = -a_1(\mu, D_1, D_2) + a_1(\mu, D, D)
\\ = - Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) + Z(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr) \\ \qquad \quad -\lambda_{Z}(D_2)f_1'\bigl(N(\mu, D_1, D_2)\bigr) + \lambda_{Z}(D)f_1'\bigl(N(\mu, D, D)\bigr)
\\ \qquad \qquad +\gamma_{1}f_{1}\bigl(N(\mu, D_1, D_2)\bigr)- \gamma_{1}f_{1}\bigl(N(\mu, D, D)\bigr) \\
- D_{1} + D. \qquad \qquad \qquad \end{multline} The bound on $\abs{p_2(\mu, D_1, D_2) - p_2(\mu, D, D)}$ in \eqref{p2bound} follows from bounds on summands in \eqref{p2difference}, as follows. Apply lemma \ref{elementary}, proposition \ref{DDbounds}, proposition \ref{muD1D2bounds}, and bounds 4 and 5 from proposition \ref{constituentbounds} to bound the term \begin{align*}
\bigabs{ - Z(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) &+ Z(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr)}; \intertext{bound} \bigabs{-\lambda_{Z}(D_2)f_1'\bigl(N(\mu, D_1, D_2)\bigr) &+ \lambda_{Z}(D)f_1'\bigl(N(\mu, D, D)\bigr)} \intertext{in the same manner, using bounds 1 and 3 from proposition \ref{constituentbounds}. Then bound}
\bigabs{ \gamma_{1}f_{1}\bigl(N(\mu, D_1, D_2)\bigr) &- \gamma_{1}f_{1}\bigl(N(\mu, D, D)\bigr)} \end{align*}
using bound 2 from proposition \ref{constituentbounds}. Finally, $\abs{ - D_1 + D}$ is bounded by $\Dist{(D_1, D_2)}{(D, D)}$.
From \eqref{p2difference}, \begin{multline} \label{p2primedifference}
p_2'(\mu, D_1, D_2) - p_2'(\mu, D, D) = \\ - Z'(\mu, D_1, D_2) f_2'\bigl(\lambda_{Z}(D_2)\bigr) + Z'(\mu, D, D) f_2'\bigl(\lambda_{Z}(D)\bigr) \\
-\lambda_{Z}(D_2)f_1^{(2)}\bigl(N(\mu, D_1, D_2)\bigr)\cdot N'(\mu, D_1, D_2) + \lambda_{Z}(D)f_1^{(2)}\bigl(N(\mu, D, D)\bigr)\cdot N'(\mu, D, D) \\
+\gamma_{1}f_{1}'\bigl(N(\mu, D_1, D_2)\bigr)\cdot N'(\mu, D_1, D_2)- \gamma_{1}f_{1}'\bigl(N(\mu, D, D)\bigr)\cdot N'(\mu, D, D). \end{multline} For \eqref{p2primebound}, the same sort of building block approach on terms in \eqref{p2primedifference} using bounds 5 and 8 from proposition \ref{constituentbounds}, then bounds 1, 6, and 7, and finally bounds 3 and 7 delivers the bound \eqref{p2primebound}. \end{proof} Now we embark on the proof of proposition \ref{constituentbounds}. Part of this work is made easier by the fact that the rate functions in \eqref{NPZsys} are explicitly linear in $D$, $D_1$, and $D_2$. On the other hand, because other quantities such as $N$, $P$, and $Z$ depend implicitly on $D$, $D_1$, and $D_2$, the details of the analyses are somewhat lengthy. \begin{proof}[Proof of bound 1]
By definition and the mean value theorem applied to $f_2$, we have
\begin{equation*}
(D_2 - D)/\gamma_2 = f_2\bigl( \lambda_Z(D_2)\bigr) - f_2\bigl(\lambda_Z(D)\bigr) = f_2'(P_1)\cdot\bigl(\lambda_Z(D_2) - \lambda_Z(D)\bigr)
\end{equation*} for some number $P_1$ between $\lambda_Z(D_2)$ and $\lambda_Z(D)$. Consequently, \begin{equation*} \abs{\lambda_Z(D_2) - \lambda_Z(D)} = \frac{\abs{D_2-D}}{\gamma_2\cdot f_2'(P_1)}
\leq \frac{\Dist{(D_1, D_2)}{(D, D)}}{\gamma_2\cdot f_2'(P_1)}. \end{equation*} Since $P_1$ is also close to $\lambda_Z(D)$, we may assume by lemma \ref{f2primebound} that $f_2'(P_1) > f_2'\bigl(\lambda_Z(D)\bigr)/2$. Thus, for $D_2$ sufficiently close to $D$ \begin{equation*}
\abs{\lambda_Z(D_2) - \lambda_Z(D)}
\leq \frac{\Dist{(D_1, D_2)}{(D, D)}}{\gamma_2\cdot f_2'(P_1)} < \frac{2 \cdot \Dist{(D_1, D_2)}{(D, D)}}{\gamma_2\cdot f_2'\bigl(\lambda_Z(D)\bigr)} \qedhere \end{equation*} \end{proof} \begin{proof}
[Proof of bound 2] Fix $\mu$ in the interval $I$. We may use the mean-value theorem \cite[p.103, Corollary~1]{LangAnalysisII} for functions of variables $(D_1, D_2)$, obtaining \begin{equation*}
\bigabs{f_1\bigl(N(\mu, D_1, D_2)\bigr) - f_1\bigl(N(\mu, D, D)\bigr)}
\leq \bignorm{\nabla (f_1 \circ N)(\widehat{D}_1, \widehat{D}_2)}\cdot \Dist{(D_1, D_2)}{(D, D)}, \end{equation*} where $(\widehat{D}_1, \widehat{D}_2)$ is a point on the line segment connecting $(D_1, D_2)$ and $(D,D)$. Therefore, we have to bound the magnitude of the gradient $\norm{\nabla (f_1 \circ N)(\widehat{D}_1, \widehat{D}_2)}$ in a disc surrounding $(D,D)$ by a constant.
To obtain information about the partial derivatives $\partial(f_1 \circ N)/\partial D_1 = f_1'(N)\cdot (\partial N / \partial D_1)$ and $\partial(f_1 \circ N)/\partial D_2 = f_1'(N)\cdot (\partial N / \partial D_2)$, we return to the defining equation \begin{equation*}
0 =G_1(N, Z, \mu, D_1, D_2) = D(\mu - N ) - f_1(N)\lambda_Z(D_2) \end{equation*}
and differentiate with respect to $D_1$ and $D_2$. We obtain
\begin{align*}
0 &= \partial G_1/ \partial D_1
= \partial G_1/\partial N \cdot \partial N/ \partial D_1
\\
&= \bigl( -D - f_1'(N) \cdot \lambda_Z(D_2)\bigr) \cdot (\partial N / \partial D_1) \intertext{and} 0 &= \partial G_1/ \partial D_2
= \partial \bigl(D(\mu{-}N )\bigr) / \partial D_2 - \partial \bigl( f_1(N)\lambda_Z(D_2)\bigr)/\partial D_2
\\
&= -D \cdot (\partial N / \partial D_2) - \bigl( f_1'(N)\cdot (\partial N /\partial D_2) \cdot \lambda_Z(D_2) + f_1(N) \lambda_Z'(D_2)\bigr).
\end{align*} From the first of these equations \begin{align}
\frac{\partial N}{\partial D_1}(\mu, D_1, D_2) &= 0, \label{PND1} \intertext{since $D + f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2) > 0$, and from the second} \frac{\partial N}{\partial D_2}(\mu, D_1, D_2)
&= - \frac{f_1\bigl(N(\mu, D_1, D_2)\bigr) \lambda_Z'(D_2)}{D + f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)}\label{PND2} \end{align} Now we obtain a bound on the gradient via \begin{align} \biggabs{\frac{\partial N}{\partial D_2}(\mu, D_1, D_2)}
&= \frac{f_1\bigl(N(\mu, D_1, D_2)\bigr) \lambda_Z'(D_2)}{D + f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)} \\ &< \frac{f_1\bigl(N(\mu, D_1, D_2)\bigr) \lambda_Z'(D_2)}{D}
= \frac{f_1\bigl(N(\mu, D_1, D_2)\bigr)}{D \cdot \gamma_2 \cdot f_2'\bigl(\lambda_Z(D_2)\bigr)}, \notag \intertext{since $f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2) > 0$ and because the defining relation $ f_2\bigl(\lambda_Z(D_2)\bigr) = D_2/\gamma_2$ implies $\lambda_Z'(D_2) = 1/\bigl( \gamma_2 \cdot f_2'\bigl(\lambda_Z(D_2)\bigr) \bigr)$,} &< \frac{2\cdot f_1\bigl(N(\mu, D_1, D_2)\bigr)}{D \cdot \gamma_2 \cdot f_2'\bigl(\lambda_Z(D)\bigr)}, \label{PND2bound} \end{align} since $f'_2\bigl(\lambda_Z(D_2)\bigr) > f_2'\bigl(\lambda_Z(D)\bigr)/2$ by choice of $\Delta$ and lemma \ref{f2primebound}. Also, $f_1\bigl(N(\mu, D_1, D_2)\bigr)$ is bounded by $\lim_{N \rightarrow \infty}f_1(N)$, since $N(\mu, D_1,D_2)$ is unbounded as $\mu$ tends to infinity according to theorem \ref{boundedness of Z_2}. Thus, $\abs{\partial N/\partial D_2(\mu, D_1, D_2)}$ is bounded by a constant in the disc $\Delta$.
We also observe that $f_1'\bigl(N(\mu, \widehat{D}_1, \widehat{D}_2)\bigr)$ is bounded by a constant depending only on $I{\times}\Delta$, because of the convexity of the closed disc $\Delta$ centered at $(D,D)$ from which we choose $(D_1, D_2)$.
Combining all this information, $\norm{\nabla (f_1 \circ N)(\widehat{D}_1, \widehat{D}_2)}$ is bounded by a constant depending on $I{\times}\Delta$. Therefore, $\bigabs{f_1\bigl(N(\mu, D_1, D_2)\bigr) - f_1\bigl(N(\mu, D, D)\bigr)} $ is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. \end{proof} \begin{proof}
[Proof of bound 3] We again use the mean-value theorem \cite[p.103, Corollary~1]{LangAnalysisII} for functions of variables $(D_1, D_2)$, obtaining \begin{equation*}
\bigabs{f'_1\bigl(N(\mu, D_1, D_2)\bigr) - f'_1\bigl(N(\mu, D, D)\bigr)}
\leq \bignorm{\nabla (f'_1 \circ N)(\widehat{D}_1, \widehat{D}_2)}\cdot \Dist{(D_1, D_2)}{(D, D)}, \end{equation*} where $(\widehat{D}_1, \widehat{D}_2)$ is a point on the line segment connecting $(D_1, D_2)$ and $(D,D)$. Therefore, we have to bound the magnitude of the gradient $\norm{\nabla (f'_1 \circ N)(\widehat{D}_1, \widehat{D}_2)}$ in a disc surrounding $(D,D)$.
To obtain information about the partial derivatives $\partial(f_1' \circ N)/\partial D_1 = f_1^{(2)}(N)\cdot (\partial N / \partial D_1)$ and $\partial(f_1' \circ N)/\partial D_2 = f_1^{(2)}(N)\cdot (\partial N / \partial D_2)$, we cite \eqref{PND1} for the vanishing of $\partial N/\partial D_1$ and the bound on $\partial N/\partial D_2$ obtained in \eqref{PND2bound}. We also observe that $f_1^{(2)}\bigl(N(\mu, \widehat{D}_1, \widehat{D}_2)\bigr)$ is bounded by a constant, assuming a continuous second derivative of $f_1$. The constant depends on the $\mu$-interval $I$ and the closed disc $\Delta$ centered at $(D,D)$ from which we choose $(D_1, D_2)$, plus the convexity of the disc.
Combining all this information, $\norm{\nabla (f_1' \circ N)(\widehat{D}_1, \widehat{D}_2)}$ is bounded by a constant depending on $I{\times}\Delta$. Therefore, $\bigabs{f_1'\bigl(N(\mu, D_1, D_2)\bigr) - f_1'\bigl(N(\mu, D, D)\bigr)} $ is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. \end{proof} \begin{proof}
[Proof of bound 4] Since $\mu$ is a fixed number in $I$, we use again the mean value theorem for functions of $(D_1, D_2)$. \begin{equation*}
\abs{Z(\mu, D_1, D_2) - Z(\mu, D, D)} \leq \bignorm{\nabla Z(\widehat{D}_1, \widehat{D}_2)}\cdot \Dist{(D_1, D_2)}{(D, D)}, \end{equation*} where $(\widehat{D}_1, \widehat{D}_2)$ is a point on the line segment connecting $(D_1, D_2)$ and $(D,D)$. Therefore, we have to bound the magnitude of the gradient $\norm{\nabla Z(\widehat{D}_1, \widehat{D}_2)}$ in a disc surrounding $(D,D)$.
To obtain information about $\partial Z/\partial D_1$ and $\partial Z/ \partial D_2$ we return to the defining equation \begin{equation*}
0 = G_2(N, Z, \mu, D_1, D_2) = \gamma_1f_1(N)\lambda_Z(D_2) - D_1\lambda_Z(D_2) - (D_2/\gamma_2) Z. \end{equation*} Differentiating with respect to $D_1$, we obtain \begin{align*}
0 &= \partial G_2/\partial D_1 \\ & = \partial \bigl(\gamma_1f_1(N)\lambda_Z(D_2) \bigr)/ \partial D_1
- \partial \bigl( (D_2/\gamma_2) Z \bigr)/ \partial D_1
- \partial \bigl( D_1\lambda_Z(D_2) \bigr)/\partial D_1 \\
&= \gamma_1f_1'(N)\lambda_Z(D_2)\cdot \partial N/ \partial D_1
- (D_2/\gamma_2) \partial Z / \partial D_1
- \lambda_Z(D_2) \\ &= - (D_2/\gamma_2) \partial Z / \partial D_1
- \lambda_Z(D_2), \end{align*} since $\partial N/\partial D_1 = 0$ by equation \eqref{PND1}. Differentiating with respect to $D_2$, we obtain \begin{align*}
0 &= \partial G_2/\partial D_2 \\
& = \partial \bigl(\gamma_1f_1(N)\lambda_Z(D_2) \bigr)/ \partial D_2
- \partial \bigl( (D_2/\gamma_2) Z \bigr)/\partial D_2
- \partial \bigl( D_1\lambda_Z(D_2) \bigr)/\partial D_2 \\
&=\gamma_1\bigl[f_1'(N)\lambda_Z(D_2)\cdot \partial N/ \partial D_2 {+}f_1(N)\lambda_Z'(D_2)\bigr]
- \bigl[(D_2/\gamma_2) \partial Z / \partial D_2 {+} Z/\gamma_2\bigr]
- D_1\lambda_Z'(D_2). \end{align*} Rewriting these equations, we obtain \begin{align}
\frac{\partial Z}{\partial D_1}(\mu, D_1, D_2) &= - \frac{\lambda_Z(D_2)\cdot \gamma_2}{D_2} \label{PZD1} \intertext{and}
\frac{\partial Z}{\partial D_2}(\mu, D_1, D_2)
&= \frac{\gamma_1\gamma_2}{D_2}f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \frac{\partial N}{\partial D_2}(\mu, D_1, D_2)\cdot \lambda_Z(D_2) \notag \\ &+ \frac{\gamma_2}{D_2}\cdot\bigl( \gamma_1 f_1\bigl( N(\mu, D_1, D_2) \bigr) -D_1\bigr)\cdot \lambda_Z'(D_2)
- \frac{Z(\mu, D_1, D_2)}{D_2}. \label{PZD2} \end{align} To bound $\partial Z/\partial D_1$, we require a bound on $\lambda_Z(D_2)$. By definition $f_2\bigl(\lambda_Z(D_2)\bigr) = D_2/\gamma_2$, so
restricting $D_2$ to be close to $D$ prevents $D_2/\gamma_2$ from approaching $\lim_{P \rightarrow \infty} f_2(P)$. Consequently, $\lambda_Z(D_2)$ is a bounded distance from $\lambda_Z(D)$.
To bound $\partial Z/ \partial D_2$, we discuss the terms on the righthand side of \eqref{PZD2} in reverse order. The term $D_2^{-1}Z(\mu, D_1, D_2)$ is bounded if $(D_1,D_2)$ is close to $(D,D)$, for it will be close to $D_2^{-1}Z(\mu, D,D)$. In turn $Z(\mu, D,D)$ is bounded by $\lim_{\mu \rightarrow \infty}Z(\mu, D,D)$, which exists by theorem \ref{boundedness of Z_2}. Concerning the second term, $f_2'\bigl(\lambda_Z(D_2)\bigr)\cdot \lambda_Z'(D_2) = 1/\gamma_2$, so \begin{multline*}
\frac{\gamma_2}{D_2}\cdot\bigl( \gamma_1 f_1\bigl( N(\mu, D_1, D_2) \bigr) -D_1\bigr)\cdot \lambda_Z'(D_2) \\ = \frac{\gamma_2}{D_2}\cdot\bigl( \gamma_1 f_1\bigl( N(\mu, D_1, D_2) \bigr) -D_1\bigr)\cdot \frac{1}{\gamma_2\cdot f_2'\bigl(\lambda_Z(D_2)\bigr)} = \frac{\bigl( \gamma_1 f_1\bigl( N(\mu, D_1, D_2) \bigr) -D_1\bigr)}{D_2\cdot f_2'\bigl(\lambda_Z(D_2)\bigr)}, \end{multline*} where $f_1\bigl(N(\mu, D_1, D_2)\bigr)$ can be bounded in terms of $\lim_{N \rightarrow \infty}f_1(N)$, and $f_2'\bigl(\lambda_Z(D_2)\bigr)$ is bounded away from zero by lemma \ref{f2primebound}. Concerning the first term, substitute the expression for $\partial N/\partial D_2$ given in \eqref{PND2}, obtaining \begin{multline*}
\frac{\gamma_1\gamma_2}{D_2}f_1'\bigl(N(\mu, D_1, D_2)\bigr)\frac{\partial N}{\partial D_2}(\mu, D_1, D_2)\cdot \lambda_Z(D_2) \\
= -\frac{\gamma_1\gamma_2}{D_2}f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot
\frac{f_1\bigl(N(\mu, D_1, D_2)\bigr) \lambda_Z'(D_2)}{D + f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)}\cdot \lambda_Z(D_2) \\
= -\frac{\gamma_2 \gamma_1}{D_2}\cdot
\frac{ f_1\bigl(N(\mu, D_1, D_2)\bigr) \cdot \lambda_Z'(D_2)}{\Bigl(\frac{D}{\lambda_Z(D_2)\cdot f_1'\bigl(N(\mu, D_1, D_2)\bigr)} + 1\Bigr)} \end{multline*} Consequently, \begin{multline*}
\Bigabs{\frac{\gamma_1\gamma_2}{D_2}f_1'\bigl(N(\mu, D_1, D_2)\bigr)\frac{\partial N}{\partial D_2}(\mu, D_1, D_2)\cdot \lambda_Z(D_2)} \\ \leq \frac{\gamma_2 \gamma_1}{D_2} \cdot f_1\bigl(N(\mu, D_1, D_2)\bigr) \cdot \lambda_Z'(D_2)=\frac{\gamma_1 f_1\bigl(N(\mu, D_1, D_2)\bigr)}{D_2 f_2'\bigl(\lambda_Z(D_2)\bigr)}, \end{multline*} where we use again the fact that $\lambda_Z'(D_2) = 1/\gamma_2 f_2'\bigl(\lambda_Z(D_2)\bigr)$. Arguing as above, we conclude this term
can be bounded by a constant, and, therefore, $\abs{\partial Z/\partial D_2(\mu, D_1, D_2)}$ itself is bounded by a constant depending only on the domain $I{\times}\Delta$.
Combining these bounds $\norm{\nabla Z(\widehat{D}_1, \widehat{D}_2)}$ is bounded by a constant, so we conclude that $\abs{Z(\mu, D_1, D_2) - Z(\mu, D, D)}$ is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. \end{proof} \begin{proof}
[Proof of bound 5] By the mean value theorem \begin{equation*}
f_2'\bigl(\lambda_Z(D_2)\bigr) - f_2'\bigl(\lambda_Z(D)\bigr) = f_2^{(2)}(P_2)\cdot\bigl(\lambda_Z(D_2) - \lambda_Z(D)\bigr) \end{equation*} for some $P_2$ between $\lambda_Z(D_2)$ and $\lambda_Z(D)$. Moreover, \begin{equation*}
D_2/\gamma_2 - D/ \gamma_2 = f_2\bigl(\lambda_Z(D_2)\bigr) - f_2\bigl(\lambda_Z(D)\bigr) = f_2'(P_1')\bigl(\lambda_Z(D_2) - \lambda_Z(D)\bigr), \end{equation*} for some $P_1'$ between $\lambda_Z(D_2)$ and $\lambda_Z(D)$. We may combine to obtain \begin{equation*}
f_2'\bigl(\lambda_Z(D_2)\bigr) - f_2'\bigl(\lambda_Z(D)\bigr) = \frac{f_2^{(2)}(P_2)}{\gamma_2 f_2'(P_1')}\cdot(D_2 - D). \end{equation*} Since we have control of the continuous derivatives $f_2'$ and $f_2^{(2)}$ on the interval $J$ around $\lambda_Z(D)$, the difference $\abs{ f_2'\bigl(\lambda_Z(D_2)\bigr) - f_2'\bigl(\lambda_Z(D)\bigr)}$ is indeed bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. \end{proof} \begin{proof}
[Proof of bound 6] This is precisely parallel to the proofs of bounds 2 and 3. We may use the mean-value theorem for functions of variables $(D_1, D_2)$, obtaining \begin{equation*}
\bigabs{f_1^{(2)}\bigl(N(\mu, D_1, D_2)\bigr) - f_1^{(2)}\bigl(N(\mu, D, D)\bigr)}
\leq \bignorm{\nabla (f_1^{(2)} \circ N)(\widehat{D}_1, \widehat{D}_2)}\cdot \Dist{(D_1, D_2)}{(D, D)}, \end{equation*} where $(\widehat{D}_1, \widehat{D}_2)$ is a point on the line segment connecting $(D_1, D_2)$ and $(D,D)$. Therefore, we have to bound the magnitude of the gradient $\norm{\nabla (f_1^{(2)} \circ N)(\widehat{D}_1, \widehat{D}_2)}$ in a disc surrounding $(D,D)$.
To bound the partial derivatives \begin{equation*}
\partial(f_1^{(2)} \circ N)/\partial D_1 = f_1^{(3)}(N)\cdot (\partial N / \partial D_1) \; \text{and} \; \partial(f_1^{(2)} \circ N)/\partial D_2 = f_1^{(3)}(N)\cdot (\partial N / \partial D_2), \end{equation*} we have the vanishing of $\partial N/\partial D_1$ by \eqref{PND1} and a bound on $\abs{\partial N/\partial D_2}$ from \eqref{PND2bound}. We also observe that $f_1^{(3)}\bigl(N(\mu, \widehat{D}_1, \widehat{D}_2)\bigr)$ is bounded by a constant, assuming a continuous third derivative of $f_1$.
Combining all this information, $\norm{\nabla (f_1^{(2)} \circ N)(\widehat{D}_1, \widehat{D}_2)}$ is bounded by a constant depending on $I{\times}\Delta$. Therefore, $\abs{f_1^{(2)}\bigl(N(\mu, D_1, D_2)\bigr) - f_1^{(2)}\bigl(N(\mu, D, D)\bigr)} $ is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. \end{proof} \begin{proof}
[Proof of bound 7] For this proof, return to the defining relation for $N(\mu, D_1, D_2)$, namely, \begin{equation*}
0 =G_1(N, Z, \mu, D_1, D_2) = D(\mu - N ) - f_1(N)\lambda_Z(D_2), \end{equation*} and differentiate with respect to $\mu$, obtaining \begin{equation*}
0 = D + \bigl(-D - f_1'\bigl(N(\mu, D_1, D_2)\bigr) \cdot \lambda_Z(D_2) \bigr) \cdot N'(\mu, D_1, D_2), \end{equation*} so \begin{equation*}
N'(\mu, D_1, D_2) = \frac{D}{D + f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)}. \end{equation*} Consequently, \begin{multline}
\abs{N'(\mu, D_1, D_2) - N'(\mu, D, D) } \\ = \Bigabs{\frac{D}{D + f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)}
- \frac{D}{D + f_1'\bigl(N(\mu, D, D)\bigr)\cdot \lambda_Z(D)} } \\
= \Bigabs{\frac{D f_1'\bigl(N(\mu, D, D)\bigr)\cdot \lambda_Z(D) - D f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)}
{\bigl(D + f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)\bigr)\cdot\bigl( D + f_1'\bigl(N(\mu, D, D)\bigr)\cdot \lambda_Z(D)\bigr)} } \\ \leq \Bigabs{\frac{f_1'\bigl(N(\mu, D, D)\bigr)\cdot \lambda_Z(D) - f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)}{D}}. \label{Nprimedifference} \end{multline} Applying lemma \ref{elementary} to \eqref{Nprimedifference} with bounds 1 and 3 as input, we find that $\abs{N'(\mu, D_1, D_2) - N'(\mu, D, D) }$ is bounded by a constant times $\Dist{(D_1, D_2)}{(D, D)}$. \end{proof} \begin{proof}
[Proof of bound 8] For this proof, return to the defining relation for $Z$, namely, \begin{equation*}
0 = G_2(N, Z, \mu, D_1, D_2) = \gamma_1f_1(N)\lambda_Z(D_2) - D_1\lambda_Z(D_2) - (D_2/\gamma_2) Z, \end{equation*} and differentiate with respect to $\mu$, obtaining \begin{equation*}
0 = \gamma_1f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)\cdot N'(\mu, D_1, D_2) - (D_2/\gamma_2)\cdot Z'(\mu, D_1, D_2). \end{equation*} Thus, \begin{equation*}
Z'(\mu, D_1, D_2) = \frac{\gamma_1\gamma_2}{D_2} \cdot f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)\cdot N'(\mu, D_1, D_2), \end{equation*} and \begin{equation} \begin{split}
Z'(\mu, D_1, D_2) &- Z'(\mu, D, D) \\
&= \frac{\gamma_1\gamma_2}{D_2} \cdot f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)\cdot N'(\mu, D_1, D_2) \\ &\quad \quad - \frac{\gamma_1\gamma_2}{D} \cdot f_1'\bigl(N(\mu, D, D)\bigr)\cdot \lambda_Z(D)\cdot N'(\mu, D, D) \\
&= \frac{\gamma_1\gamma_2}{D_2 D}\cdot\bigl[ D f_1'\bigl(N(\mu, D_1, D_2)\bigr)\cdot \lambda_Z(D_2)\cdot N'(\mu, D_1, D_2) \\ &\hspace{10em} - D_2 f_1'\bigl(N(\mu, D, D)\bigr)\cdot \lambda_Z(D)\cdot N'(\mu, D, D) \bigr] \end{split} \end{equation} Obviously $\abs{D - D_2} \leq \Dist{(D_1, D_2)}{(D, D)}$, so we make several applications of lemma \ref{elementary} to combine this fact with bounds 1, 3, and 7 to deduce that $\abs{ Z'(\mu, D_1, D_2) - Z'(\mu, D, D) } $ is bounded by a constant times $\Dist{(D_1, D_2)}{(D,D)}$. \end{proof}
\end{document} | arXiv | {
"id": "1812.09964.tex",
"language_detection_score": 0.5986999869346619,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title {The Index of a Vector Field on an Orbifold with Boundary}
\author{Elliot Paquette, Kalamazoo College \thanks{The first author was supported by a Kalamazoo College Field Experience grant.} \\ 1880 Sumac Ave. \\ Boulder, CO 80304 \\ elliot.paquette@gmail.com \\\\ Christopher Seaton \thanks{The second author was supported by a Rhodes College Faculty Development Endowment Grant.} \\ Department of Mathematics and Computer Science \\ Rhodes College \\2000 N. Parkway \\ Memphis, TN 38112 \\ seatonc@rhodes.edu }
\maketitle
Subject class: Primary 57R25, 57R12, 55R91
Keywords: orbifold, orbifold with boundary, Euler-Satake characteristic, Poincar\'{e}-Hopf theorem, vector field, vector field index, Morse Index, orbifold double
\begin{abstract}
A Poincar\'{e}-Hopf theorem in the spirit of Pugh is proven for compact orbifolds with boundary. The theorem relates the index sum of a smooth vector field in generic contact with the boundary orbifold to the Euler-Satake characteristic of the orbifold and a boundary term. The boundary term is expressed as a sum of Euler characteristics of tangency and exit-region orbifolds. As a corollary, we express the index sum of the vector field induced on the inertia orbifold to the Euler characteristics of the associated underlying topological spaces.
\end{abstract}
\section{Introduction} \label{sec-intro}
Pugh gave a generalization (see \cite{Pugh}) of the Poincar\'e-Hopf Theorem for manifolds with boundary for continuous vector fields in generic contact with the boundary. This generalization bears the elegance of associating the index sum with a sum of Euler characteristics only. Here, we will show that, in the case of a compact orbifold with boundary and a smooth vector field in generic contact with the boundary, Pugh's result extends naturally. A proper introduction to orbifolds and the precise definition we will use are available as an appendix in \cite{chenruangwt}. Note that this definition of an orbifold requires group actions to have fixed-point sets of codimension 2 as opposed to, e.g., \cite{thurston}; we make this requirement as well. By ``smooth," we will always mean ${\mathcal C}^\infty$.
The main result we will prove is as follows.
\begin{theorem} \label{thrm-mainresult}
Let $Q$ be an $n$-dimensional smooth, compact orbifold with boundary. Let $Y$ be a smooth vector field on $Q$ that is in \emph{generic contact} with $\partial Q$, and then \begin{equation} \label{eq-mainformula}
\mathfrak{Ind}^{orb}(Y; Q) = \chi_{orb}(Q,
\partial Q) + \sum_{i = 1}^n \chi_{orb}(R_{-}^i, \Gamma^i). \end{equation}
\end{theorem}
The expressions $\mathfrak{Ind}^{orb}$ and $\chi_{orb}$ are the orbifold analogues of the manifold notions of the topological index of a vector field and the Euler characteristic, respectively. The definitions of both of these, along with the orbifolds $R_-^i$, $\Gamma^i$, and generic contact, will be reviewed in Section \ref{sec-preliminaries}.
In this paper, we will follow a procedure resembling Pugh's original technique, and we will show that many of the same techniques applicable to manifolds can be applied to orbifolds as well. In Section \ref{sec-preliminaries} we will explain our notation and review the result of Satake's which relates the orbifold index to the Euler-Satake characteristic for closed orbifolds. We give the definition of each of these terms. In Section \ref{sec-double}, we will show that a neighborhood of the boundary of an orbifold may be decomposed as a product $\partial Q \times [0, \epsilon)$. We will then construct the double of $Q$ and charts near the boundary respecting this product structure. This will generalize well-known results and constructions for manifolds with boundary.
Section \ref{sec-morseindex} provides elementary results relating the topological index of an orbifold vector field to an orbifold Morse Index. The orbifold Morse Index is defined in terms of the Morse Index on a manifold in a manner analogous to Satake's definition of the topological vector field index. These results generalize corresponding results for manifolds. In Section \ref{sec-mainresult}, we will use the above constructions to show that a smooth vector field on $Q$ may be perturbed near the boundary to form a smooth vector field on the double whose index can be computed in terms of the data given by the original vector field. We use this to prove Theorem \ref{thrm-mainresult}. We also prove Corollary \ref{cor-inertiaversion}, which gives a similar formula where the left-hand side is the orbifold index of the induced vector field on the inertia orbifold and on the right-hand side, the Euler-Satake characteristics are replaced with the Euler characteristics of the underlying topological spaces.
Another generalization of the Poincar\'{e}-Hopf Theorem to orbifolds with boundary follows as a corollary to Satake's Gauss-Bonnet Theorem for orbifolds with boundary in \cite{satake2}; this and related results are explored in \cite{seaton1}. In each of these cases, the boundary term is expressed by evaluation of an auxiliary differential form representing a global topological invariant of the boundary pulled back via the vector field. The generalization given here expresses the boundary term in terms of Euler-Satake characteristics of suborbifolds determined by the vector field.
The first author would like to thank Michele Intermont for guiding him through much of the background material required for this work. The second author would like to thank Carla Farsi for helpful conversations and suggesting this problem.
\section{Preliminaries and Definitions} \label{sec-preliminaries}
Satake proved a Poincar\'{e}-Hopf Theorem for closed orbifolds; however, he worked with a slightly different definition of orbifold, the so-called $V$-manifold (see \cite{satake1} and \cite{satake2}). A $V$-manifold corresponds to modern day \emph{effective} or \emph{reduced} (codimension-2) orbifold. An effective orbifold is such that the group in each chart acts effectively (see \cite{chenruangwt}). We will adapt the language of his result and use it here.
\begin{theorem}[Satake's Poincar\'{e}-Hopf Theorem for Closed Orbifolds] \label{thrm-satakeindex}
Let $Q$ be an effective, closed orbifold, and let $X$ be a vector field on $Q$ that has isolated zeroes. Then the following relationship holds. \[
\mathfrak{Ind}^{orb}(X; Q) =
\chi_{orb}(Q) \]
\end{theorem}
Note that the requirement that $Q$ is effective is unnecessary; as mentioned in \cite{chenruangwt}, an ineffective orbifold can be replaced with an effective orbifold $Q_{red}$, and the differential geometry of the tangent bundle (or any other \emph{good} orbifold vector bundle) is unchanged.
The \emph{orbifold index} $\mathfrak{Ind}^{orb}(X; p)$ at a zero $p$ of the vector field $X$ is defined in terms of the topological index of a vector field on a manifold. Let a neighborhood of $p$ be uniformized by the chart $\{V, G, \pi\}$ and choose $x \in V$ with $\pi(x) = p$. Let $G_x \leq G$ denote the isotropy group of $x$. Then $\pi^\ast X$ is a $G$-equivariant vector field on $V$ with a zero at $x$. The orbifold index at $p$ is then defined as \[
\mathfrak{Ind}^{orb}(X; p)
=
\frac{1}{|G_x|}\mathfrak{Ind}\left(\pi^\ast X;
x\right), \] where $\mathfrak{Ind}\left(\pi^\ast X; x\right)$ is the usual topological index of the vector field $\pi^\ast X$ on the manifold $V$ at $x$ (see \cite{GuilleminPollack} or \cite{Milnor}). Note that this definition does not depend on the chart, nor on the choice of $x$. We use the notation \[
\mathfrak{Ind}^{orb}(X; Q) = \sum\limits_{p \in Q, X(p) = 0} \mathfrak{Ind}^{orb} (X;
p). \]
The \emph{Euler-Satake characteristic} $\chi_{orb}(Q)$ is most easily defined in terms of an appropriate simplicial decomposition of $Q$. In particular, let ${\mathcal T}$ be a simplicial decomposition of $Q$ so that the isomorphism class of the isotropy group is constant on the interior of each simplex (such a simplicial decomposition always exists; see \cite{moerdijkpronk}). For the simplex $\sigma$, the (isomorphism class of the) isotropy group on the interior of $\sigma$ will be denoted $G_\sigma$. The Euler-Satake characteristic of $Q$ is \[
\chi_{orb}(Q)
= \sum_{\sigma \in {\mathcal T}}(-1)^{\mbox{\scriptsize dim\:} \sigma}\frac{1}{|G_\sigma|}. \] This coincides with Satake's \emph{Euler characteristic of $Q$ as a $V$-manifold}. Note that it follows from this definition that if $Q = Q_1 \cup Q_2$ for orbifolds $Q_1$ and $Q_2$ with $Q_1 \cap Q_2$ a suborbifold, then \begin{equation} \label{eq-additiveeulerchar}
\chi_{orb}(Q) = \chi_{orb}(Q_1) + \chi_{orb}(Q_2) - \chi_{orb}(Q_1 \cap Q_2). \end{equation} In the case that $Q$ has boundary, $\chi_{orb}(Q)$ is defined in the same way. We let \[
\chi_{orb} (Q, \partial Q) = \chi_{orb}(Q) - \chi_{orb}(\partial Q). \] This coincides with Satake's \emph{inner Euler characteristic of $Q$ as a $V$-manifold with boundaries}. The reader is warned that there are many different Euler characteristics defined for orbifolds; both the topological index of a vector field and Euler-Satake characteristic used here are generally rational numbers.
Vector fields in \emph{generic contact} have orbifold exit regions, which we will now describe. Let $Q$ be a compact $n$-dimensional orbifold with boundary. In Section \ref{sec-double} Lemma \ref{lm-normalspace}, we will show that, as with the case of manifolds, there is a neighborhood of $\partial Q$ in $Q$ diffeomorphic to $\partial Q \times [0, \epsilon)$. Given a metric, the tangent bundle of $Q$ on the boundary decomposes with respect to this product so that there is a well-defined normal direction at the boundary. Let $R_{-}^1$ be the closure of the subset of $\partial Q$ where the vector field points out of $Q$. Analogously, let $R_{+}^1$ be the closure of the subset of $\partial Q$ where the vector field points into $Q$. We require that $R_-^1$ and $R_+^1$ are orbifolds with boundary of dimension $n - 1$. The subset of $\partial Q$ where the vector field is tangent to $\partial Q$ is denoted $\Gamma^1$; we require that $\Gamma^1$ is a suborbifold of $\partial Q$ of codimension $1$. Note that, by the continuity of the vector field, the component of the vector field pointing outward must approach zero near the boundary of $R_{-}^i$ and $R_{+}^i$. Hence $\Gamma^1 = \partial R_{-}^1 = \partial R_{+}^1$.
The vector field is tangent to $\Gamma^1$, and so it may be considered a vector field on the orbifold $\Gamma^1$. We again require this vector field to have orbifold exit regions. Call $R_{-}^2$ the closure of the subset of $\partial \Gamma^1$ where the vector field points out of $R_-^1$, and $R_{+}^2$ the closure of the subset where it points into $R_-^1$. The subset of $\Gamma^1$ where the vector field is tangent to $\Gamma^1$ is denoted $\Gamma^2$, and is required to be a codimension-$1$ suborbifold of $\Gamma^1$.
In the same way, we define $\Gamma^i$, $R_{-}^i$, $R_{+}^i$, requiring that these sets form a chain of closed suborbifolds $\{\Gamma^i\}_{i=1}^n$ and compact orbifolds with boundary $\{R_{-}^i\}_{i=1}^n$. We require that $\mbox{dim}\: R_{-}^i = \mbox{dim}\: R_{+}^i = n - i$ and $\mbox{dim}\:\Gamma^i = n - i - 1$. Since each successive $\Gamma^i$ will have strictly smaller dimension, we eventually run out of space, and so both of these sequences terminate. The last entry in the sequence of $\Gamma^i$ will be $\Gamma^n$, which is necessarily the empty set.
\section{Formation of the Double Orbifold} \label{sec-double}
In the proof of Theorem \ref{thrm-mainresult}, we will pass from an orbifold with boundary to a closed orbifold in order to employ Theorem \ref{thrm-satakeindex}. In this section we will construct the double of an orbifold with boundary. In the process, we will develop charts near the boundary of a specific form which will be required in the sequel. The construction of the double is similar to the case of a manifold; see \cite{munkresdifftop}.
Let $\mathbf{B}_x(r)$ denote the ball of radius $r$ about $x$ in $\mathbb{R}^n$ where $\mathbb{R}^n$ has basis $\{ e_i \}_{i=1}^n$. For convenience, $\mathbf{B}_0$ will denote the ball of radius $1$ centered at the origin in $\mathbb{R}^n$. We let $\mathbb{R}_+^n = \{ x_1, \ldots , x_n : x_n \geq 0 \}$ where the $x_i$ are the coordinates with respect to the basis $\{ e_i \}$, $\mathbf{B}_x^+(r) = \mathbf{B}_x(r) \cap \mathbb{R}_+^n$, and $\mathbf{B}_0^+ = \mathbf{B}_0 \cap \mathbb{R}_+^n$. Also $\mathbf{B}_0^k$ will denote the ball of radius $1$ about the origin in $\mathbb{R}^k$.
Let $Q$ be a compact orbifold with boundary. For each point $p \in Q$, we choose an orbifold chart $\{ V_p, G_p, \pi_p \}$ where $V_p$ is $\mathbf{B}_0$ or $\mathbf{B}_0^+$ and $\pi_p(0) = p$. Let $U_p$ denote $\pi_p(V_p) \subseteq Q$ for each $p$, and then the $U_p$ form an open cover of $Q$. Choose a finite subcover of the $U_p$, and on each $V_p$ corresponding to a $U_p$ in the subcover, we put the standard Riemannian structure on $V_p$ so that the $\left\{\frac{\partial}{\partial x_i} \right\}$ form an orthonormal basis. Endow $Q$ with a Riemannian structure by patching these Riemannian metrics together using a partition of unity subordinate to the finite subcover of $Q$ chosen above.
Now, let $p \in Q$, and then there is a geodesic neighborhood $U_p$ about $p$ uniformized by $\{ V_p, G_p, \pi_p \}$ where $V_p = \mathbf{B}_0(r)$ or $\mathbf{B}_0^+(r)$ for some $r > 0$ where $G_p$ acts as a subgroup of $O(n)$ (see \cite{chenruangwt}). Identifying $V_p$ with a subset of $T_0 V_p$ via the exponential map, we can assume as above that $\{ e_i \}$ forms an orthonormal basis with respect to which coordinates will be denoted $\{x_i \}$. In the case with boundary, $\mathbf{B}_0^+(r)$ corresponds to points with $x_n \geq 0$. We call such a chart a \emph{geodesic chart of radius $r$ at $p$}. Note that in such charts, the action of $\gamma \in G_p$ on $V_p$ and the action of $d\gamma = D(\gamma)_0$ on a neighborhood of $0$ in $T_0 V_p$ (or in half-space in the case with boundary) are identified via the exponential map.
We may now introduce the following lemma.
\begin{lemma} \label{lm-normalspace} At every point $p$ in $\partial Q$, there is a geodesic chart at $p$ of the form $\{V_p, G_p, \pi_p\}$ where $G_p$
fixes $e_n$. On the boundary, the tangent space $TQ|_{\partial Q}$ is decomposed orthogonally into $(T\partial Q) \oplus \nu$ where $\nu$ is a trivial $1$-bundle on which each group acts trivially.
\end{lemma}
{\it \noindent Proof:} Let $p \in \partial Q$, and let a neighborhood of $p$ be uniformized by the geodesic chart $\{ V_p, G_p, \pi_p \}$ so that $V_p = \mathbf{B}_0^+(r)$. Let $\langle \cdot , \cdot \rangle_0$ denote the inner product on $T_0 V_p$. Let $T_0^+$ denote the half-space in $T_0 V_p$ consisting of vectors with non-negative $\frac{\partial}{\partial x_n}$-component. The exponential map identifies an open ball about $0 \in T_0^+$ with $V_p$.
Suppose that $\gamma$ is an arbitrary element of $G_p$ so that $d\gamma$ acts on $T_0 V_p$. Any $v \in T_0^+$ satisfies $\left\langle v, \frac{\partial}{\partial x_n} \right\rangle_0 \geq 0$. Furthermore, $(d\gamma) v \in T_0^+$, so $\left\langle (d\gamma) v, \frac{\partial}{\partial x_n} \right\rangle_0 \geq 0$, or equivalently, $\left\langle v, d\gamma^{-1} \frac{\partial}{\partial x_n} \right\rangle_0 \geq 0$ for all $v \in T_0^+$.
It will be shown that this implies $G_p$ fixes $\frac{\partial}{\partial x_n}$. Pick $j\neq n$; since $\frac{\partial}{\partial x_j} \in T_0^+$, $\left\langle \frac{\partial}{\partial x_j}, d\gamma^{-1} \frac{\partial}{\partial x_n} \right\rangle_0 \geq 0$. However, $-\frac{\partial}{\partial x_j}$ is also a vector in $T_0^+$, and so $\left\langle -\frac{\partial}{\partial x_j}, d\gamma^{-1}\frac{\partial}{\partial x_n} \right\rangle_0 \geq 0$. By the linearity of the inner product, this is only possible if $\left\langle \frac{\partial}{\partial x_j}, d\gamma^{-1}\frac{\partial}{\partial x_n} \right\rangle_0 = 0$. Furthermore, since $j \neq n$ was arbitrary, this implies that $d\gamma^{-1} \frac{\partial}{\partial x_n}$ has no component in the direction of any $\frac{\partial}{\partial x_j}$, $j\neq n$. Since $d\gamma^{-1}$ is an isometry, $d\gamma^{-1} \frac{\partial}{\partial x_n} = \pm \frac{\partial}{\partial x_n}$, but because $d\gamma^{-1}T_0^+ = T_0^+$, it must be the case that $d\gamma^{-1}\frac{\partial}{\partial x_n} = \frac{\partial}{\partial x_n}$. As $\gamma \in G_p$ was arbitrary, this implies $G_p$ fixes $\frac{\partial}{\partial x_n}$.
Now, for each $p \in \partial Q$, pick a geodesic chart $\{V_p, G_p, \pi_p \}$ at $p$, and let $N_p$ denote the constant vector field $\frac{\partial}{\partial x_n}$ on $V_p$. Recall from \cite{satake2} that $\tilde{T}_0V_p$ denotes the $dG_p$-invariant tangent space of $T_0V_p$ on which the differential of $\pi_p$ is invertible. If $q \in \pi_p (V_p) \subset Q$ with geodesic chart $\{ V_q, G_q, \pi_q \}$ at $q$, then the fact that $D(\pi_q)_p^{-1} \circ D(\pi_p)_0 : \tilde{T}_0 V_p \rightarrow \tilde{T}_0 V_q$ maps $\tilde{T}_0 \partial V_p$ to $\tilde{T}_0 \partial V_q$ and preserves the metric ensures that the value of $N_q(0)$ coincides with that of $D(\pi_q)_p^{-1} \circ D(\pi_p)_0[N_p(0)]$ up to a sign. The sign is characterized by the property that for any curve $c : (-1, 1) \rightarrow V_p$ with derivative $c^\prime(t) = N_p$, there is an $\epsilon > 0$ such that $c(t)$ is in the interior of $V_p$ for $t \in (0, \epsilon)$; a curve in $V_q$ with derivative
$D(\pi_q)_p^{-1} \circ D(\pi_p)_0[N_p(0)]$ has the same property. With this, we see that the $N_p$ patch together to form a nonvanishing section of $TQ|_{\partial Q}$ that is orthogonal to $T\partial Q$ at every point; hence, it defines a trivial subbundle $\nu$ orthogonal to $T\partial Q$. Clearly, $TQ = (T\partial Q)\oplus \nu$.
{
$\square$}
Let $Q^\prime$ be an identical copy of $Q$. In order to form a closed orbifold from the two, the boundaries of these two orbifolds will be identified via \begin{equation}
\label{eq-glue}
\partial Q \ni x \longleftrightarrow x^\prime \in \partial Q^\prime \end{equation} The resulting space inherits the structure of a smooth orbifold from $Q$ as will be demonstrated below.
First, note that for each point $p \in \partial Q$, by Lemma \ref{lm-normalspace}, a geodesic chart $\{ V_p, G_p, \pi_p \}$ can be restricted to a chart $\{ C_p^+, G_p, \phi_p \}$ where $C_p^{+} = \mathbf{B}_0^{n-1}(r/2) \times [0,\epsilon_p)$, $\phi_p$ is the restriction of $\pi_p$ to $C_p^+$, and $\phi_p \left(\mathbf{B}_0^{n-1} \times \{ 0 \} \right) = \partial \phi_p(C_p^+)$. We will refer to such a chart as a \emph{boundary product chart} for $Q$.
It follows, in particular, that there is a neighborhood of $\partial Q$ in $Q$ that is diffeomorphic to $\partial Q \times [0, \epsilon]$ for some $\epsilon > 0$ and that the metric respects the product structure. This can be shown by forming a cover of $\partial Q$ by sets uniformized by charts of the form $\{C_p^{+}, G_p, \phi_p\}$, choosing a finite subcover, and setting $\epsilon = \min \{ \epsilon_p/2 \}$.
\begin{lemma} \label{lm-smoothifold}
The glued set $\hat{Q}$, i.e. the set of equivalence classes under the identification made by Equation \ref{eq-glue}, may be made into a smooth orbifold containing diffeomorphic copies of both $Q$ and $Q^\prime$ such that $Q \cap Q^\prime = \partial Q = \partial Q^\prime$.
\end{lemma}
{\it \noindent Proof:} For each point $p \in \partial Q$, form a boundary product chart $\{ C_p^+, G_p, \phi_p \}$. Then glue each chart of the boundary of $Q$ to its corresponding chart of $Q^\prime$ in the following way. Let $\alpha: \mathbb{R}^n \to \mathbb{R}^n$ be the reflection that sends $e_n \mapsto -e_n$ and fixes all other coordinates. A point $p$ in the boundary is uniformized by two corresponding boundary product charts on either side of $\partial Q$, $\{C_p^+, G_p, \phi_p \}$ and $\{C_p^{+\prime}, G_p^\prime, \phi_p^\prime \}$. From these two charts, a new chart $\{C_p, G_p, \psi_p \}$ for a neighborhood of $p$ in $\hat{Q}$ is constructed where $C_p = \mathbf{B}_{0}^{n-1}(r/2) \times (-\epsilon_p, \epsilon_p)$, and \[
\psi_p (x) = \left\{\begin{array}{ll}
\phi_p (x), & x_n \geq 0, \\
\phi_p^\prime\circ\alpha (x), & x_n < 0.
\end{array}\right. \]
These charts cover a neighborhood of $\partial Q = \partial Q^\prime$ in $\hat{Q}$. By taking a geodesic chart at each point on the interiors of $Q$ and $Q^\prime$ together with these new charts, the entire set $\hat{Q}$ is covered. Injections of charts at points in the interior of $Q$ or $Q^\prime$ into charts of the form $\{ C_p^+, G_p, \phi_p \}$ induce injections into charts $\{ C_p, G_p, \psi_p \}$. Hence, $\hat{Q}$ is given the structure of a smooth orbifold with the desired properties.
{
$\square$}
Again, it follows that a neighborhood of $\partial Q \subset \hat{Q}$ admits a tubular neighborhood diffeomorphic to $\partial Q \times [-\epsilon, \epsilon]$ such that the metric respects this product structure.
\section{The Morse Index of a Vector Field on an Orbifold} \label{sec-morseindex}
The definitions of Morse Index and relation to the topological index of a vector field extend readily to orbifolds, which we now describe.
Let $Q$ be a compact orbifold with or without boundary, and let $X$ be a vector field on $Q$ that does not vanish on the boundary. If $X(p) = 0$ for $p \in Q$, then we say that $p$ is a \emph{non-degenerate} zero of $X$ if there is a chart $\{ V, G, \pi \}$ for a neighborhood $U_p$ of $p$ and an $x \in V$ with $\pi(x) = p$ such that $\pi^\ast X$ has a non-degenerate zero at $x$; i.e. $D(\pi^\ast X)_x$ has trivial kernel. As in the manifold case, non-degenerate zeros are isolated in charts and hence isolated on $Q$. The Morse Index $\lambda (\pi^\ast X; x)$ of $\pi^\ast X$ at $x$ is defined to be the number of negative eigenvalues of
$D(\pi^\ast X)_x$ (see \cite{Milnormorse}). Since the Morse Index is a diffeomorphism invariant, this index does not depend on the choice of chart nor on the choice of $x$. Since the isomorphism-class of the isotropy group does not depend on the choice of $x$, the expression $|G_p|$ is well-defined. Hence, for simplicity, we may restrict to charts of the form $\{ V_p, G_p, \pi_p \}$ where $\pi_p(0) = p$ and $G_p$ acts linearly. We define the \emph{orbifold Morse Index} of $X$ at $p$ to be \[
\lambda^{orb}(X; p) = \frac{1}{|G_p|} \lambda (\pi_p^\ast X; 0). \] Note that this index differs from that recently defined in \cite{hepworth}; however, it is sufficient for our purposes. We have \[ \begin{array}{rcl}
\mathfrak{Ind}^{orb}(X;p) &=& \frac{1}{|G_p|} \mathfrak{Ind}(\pi_p^\ast X; 0) \\\\
&=& \frac{1}{|G_p|} (-1)^{\lambda(\pi_p^\ast X; 0)}. \end{array} \]
Suppose $X$ has only non-degenerate zeros on $Q$. For each $\lambda \in \{ 0, 1, \ldots , n\}$, we let $\{ p_i : i = 1, \ldots , k_\lambda \}$ denote the points in $Q$ at which the pullback of $X$ in a chart has Morse Index $\lambda$. Then we let \[
C_\lambda = \sum\limits_{i=1}^{k_\lambda} \frac{1}{|G_{p_i}|} \] count these points, where the orbifold-contribution of each zero
$p_i$ is $\frac{1}{|G_{p_i}|}$. Note that as non-degenerate zeros are isolated, there is a finite number on $Q$.
Then, as in the manifold case, if we define \[
\Sigma^{orb}(X; Q) = \sum\limits_{\lambda = 0}^n (-1)^\lambda
C_\lambda, \] we have \[ \begin{array}{rcl}
\Sigma^{orb}(X;Q)&=& \sum\limits_{\lambda = 0}^n (-1)^\lambda \sum\limits_{i=1}^{k_\lambda} \frac{1}{|G_{p_i}|}
\\\\
&=& \sum\limits_{p \in Q, X(p) = 0} \frac{1}{|G_p|} (-1)^{\lambda(\pi_p^\ast X; 0)}
\\\\
&=& \sum\limits_{p \in Q, X(p) = 0}
\mathfrak{Ind}^{orb}(X;p) \\\\
&=& \mathfrak{Ind}^{orb}(X;Q). \end{array} \] In the case that $Q$ is closed, this quantity is equal to $\chi_{orb}(Q)$ by Theorem \ref{thrm-satakeindex}.
We summarize these results as follows.
\begin{proposition} \label{prop-morseindex}
Let $X$ be a smooth vector field on the compact orbifold $Q$ that has non-degenerate zeros only, none of which occurring on $\partial Q$. Then \[
\Sigma^{orb}(X; Q) = \mathfrak{Ind}^{orb}(X; Q). \] If $\partial Q = \emptyset$, then \[
\Sigma^{orb}(X; Q) = \chi_{orb}(Q). \]
\end{proposition}
\begin{remark} \label{rem-approximation}
If $Q$ is a compact orbifold (with or without boundary) and $X$ a smooth vector field on $Q$ that is nonzero on some compact subset $\Gamma$ of the interior of $Q$, then $X$ may be perturbed smoothly outside of a neighborhood of $\Gamma$ so that it has only isolated, nondegenerate zeros. This is shown in \cite{wanerwu} for the case of a smooth global quotient $M/G$ using local arguments, and so it extends readily to the case of a general orbifold by working in charts.
\end{remark}
\section{Proof of Theorem \ref{thrm-mainresult}} \label{sec-mainresult}
Let $Y$ be a vector field in generic contact with $\partial Q$ that has isolated zeroes on the interior of $Q$. Define $\hat{Y}$ on $\hat{Q}$ by letting $\hat{Y}$ be $Y$ on each copy of $Q$. Unfortunately, $\hat{Y}$ has conflicting definitions along the old boundary $\partial Q$. However, as in the manifold case treated in \cite{Pugh}, the vector field may be perturbed near the boundary to form a well-defined vector field using the product structure. We give an adaptation of Pugh's result to orbifolds.
\begin{proposition} \label{prop-approxvfieldconstruction}
Given a smooth vector field $Y$ in generic contact with $\partial Q$ and with isolated zeros, none of which lie on $\partial Q$, there is a smooth vector field $X$ on the double $\hat{Q}$ such that \begin{itemize} \item Outside of a tubular neighborhood $P_\epsilon$ of $\partial Q$ containing none of the zeros of $Y$,
$X$ coincides with $Y$ on $Q$ and $Q^\prime$;
\item $X|_{\partial Q}$ is tangent to $\partial Q$,
\item On $\Gamma^1$, $X$ coincides with $Y$ and in particular,
defines the same $\Gamma^i$, $R_-^i$, and $R_+^i$ for $i > 1$; and
\item The zeros of $X$ are those of $Y$ on the interior of $Q$ and
$Q^\prime$ and a collection of isolated zeros on
$\partial Q$ which are non-degenerate as zeros of $X|_{\partial Q}$. \end{itemize}
\end{proposition}
{\it \noindent Proof:} As above, $\hat{Y}$ is defined everywhere on $\hat{Q}$ except on the boundary. Let $P_\epsilon$ be a normal tubular $\epsilon$-neighborhood of $\partial Q$ in $\hat{Q}$ of the form $\partial Q \times [-\epsilon, \epsilon]$ which we parameterize as $\{ (x, v) : x \in \partial Q, v \in [-\epsilon, \epsilon] \}$. We assume that $P_\epsilon$ is small enough so that it does not contain any of the zeros of $\hat{Y}$. On $P_\epsilon$, decompose $\hat{Y}$ respecting the product structure of $P_\epsilon$ into \[ \hat{Y} = \hat{Y}_h + \hat{Y}_v \] These are the horizontal and vertical components of $\hat{Y}$, respectively. The horizontal component $\hat{Y}_h$ is well-defined, continuous, and smooth when restricted to the boundary. However, $\hat{Y}_v$ has conflicting definitions on the boundary, although they only differ by a sign. Note that the restriction of $\hat{Y}_h$ to $\partial Q$ may not have isolated zeros. However, as $Y$ does not have zeros on $\partial Q$ and $\hat{Y}_h \equiv Y$ on
$\Gamma^1$, none of the zeros of $\hat{Y}_h|_{\partial Q}$ occur on $\Gamma^1$.
Define $Z_h$ to be a smooth vector field on $\partial Q$ that coincides with $\hat{Y}_h$ on an open subset of $\partial Q$ containing $\Gamma^1$ and has only non-degenerate zeros (see Remark \ref{rem-approximation}). Let $f(x, v)$ be the parallel transport of $Z_h(x, 0)$ along the geodesic from $(x,0)$ to $(x,v)$, so that $f$ is a horizontal vector field on $P_\epsilon$. For $s \in (0, \epsilon)$, let $\phi_s:\mathbb{R}\to [0, 1]$ be a smooth bump function which is one on $[-s/2, s/2]$ and zero outside of $[-s, s]$.
Define the vector field $X_s$ to be $\hat{Y}$ outside of $P_\epsilon$ and \[
X_s(x, v) = \phi_s(v) \left(f(x,v) + |v|\hat{Y}_v(x,v)\right)
+ (1-\phi_s(v))\hat{Y}(x,v) \] on $P_\epsilon$. Note that $X_s$ is smooth. By picking $s$
sufficiently small, it may be ensured that the zeroes of $X_s$ are the zeroes of $\hat{Y}$ and the zeroes of $Z_h|_{\partial{Q}}$ only. We prove this as follows.
At points $(x, v)$ where $x \in \Gamma^1$ and $|v| \leq s$, the horizontal component of $X_s$ is $\phi_s(v) f(x, v) + (1 - \phi_s(v))\hat{Y}_h(x, v)$. Note that $f(x, 0) = \hat{Y}_h(x, 0)$ for $x \in \Gamma^1$ and $f(x, 0) \neq 0$ on $\Gamma^1$. Let $m
> 0$ be the minimum value of $\| f(x, 0) \|$ on the compact set $\Gamma^1$, and then as $\Gamma^1 \times [-\epsilon, \epsilon]$ is compact and $\hat{Y}_h(x, v)$ continuous, there is an $s_0$ such that \[
\| \hat{Y}_h(x, 0) - \hat{Y}_h(x, v) \|
= \| f(x, 0) - \hat{Y}_h(x, v) \| < m/2 \]
whenever $|v| < s_0$. Hence, for such $v$ and for any $t \in [0, 1]$, \[\ \begin{array}{rcl}
\left\| t f(x, v) + (1 - t)\hat{Y}_h(x, v) \right\|
&=& \left\| \hat{Y}_h(x, v) + t [f(x, v) - \hat{Y}_h(x, v)] \right\|
\\\\
&\geq& \left \|\hat{Y}_h(x, v) \right\| - t \left\|f(x, v) - \hat{Y}_h(x, v) \right\|
\\\\
&>& m - \frac{tm}{2}
\\\\
&\geq& \frac{m}{2} \; > \; 0. \end{array} \] Therefore, the horizontal component is nonvanishing, implying that $X_s(x, v)$ does not vanish here.
Now let $\{ x_i : i = 1, \ldots, k \}$ be the zeros of $Z_h$ on $\partial Q$. Each $x_i$ is contained in a ball $B_{\epsilon_i} \subset \partial Q$ whose closure does not intersect $\Gamma^1$. Hence, $\hat{Y}_v(x, 0) \neq 0$ on each $B_{\epsilon_i}$. Therefore, for each $i$, there is an $s_i$ such that $\hat{Y}_v(x, v) \neq 0$ on $B_{\epsilon_i} \times (-s_i, s_i)$. This implies that the vertical component of $X_s(x, v)$, and hence $X_s(x,v)$ itself, does not vanish on $B_{\epsilon_i} \times (-s_i, s_i)$ except where $v = 0$; i.e. on $\partial Q$.
Letting $s$ be less than the minimum of $\{ s_0, s_1, \ldots, s_k\}$, we see that $X_s$ does not vanish on $P_\epsilon$ except on $\partial Q$, where it coincides with $Z_h$. Therefore, $X = X_s$ is the vector field which was to be constructed.
{
$\square$}
It follows that the index of the vector field $X$ constructed in the proof of Proposition \ref{prop-approxvfieldconstruction} is \begin{equation} \label{eq-indexstep1}
\mathfrak{Ind}^{orb}(X;\hat{Q}) = 2\mathfrak{Ind}^{orb}(Y;Q)
+ \sum_{p \in \partial Q}\mathfrak{Ind}^{orb}(X;p) \end{equation} Let $p$ be a zero of $X$ on $\partial{Q}$, i.e. it is a zero of $Z_h$. We will write the index of $X$ at $p$ in terms of the index of $Z_h$.
Because of Lemma \ref{lm-normalspace}, the isotropy group of $p$ as an element of $Q$ is the same as the isotropy group of $p$ as an element of $\partial{Q}$, and so we may refer to $G_p$ without ambiguity. About a neighborhood of $p$ in $Q$ small enough to contain no other zeros of $X$, choose a boundary product chart $\{C_p^+, G_p, \phi_p \}$. Then, as in Lemma \ref{lm-smoothifold}, $\{ C, G_p, \psi_p \}$ forms a chart about $p$ in $\hat{Q}$. The product structure $(y, w)$ of $C_p = \mathbf{B}_0^{n - 1}(r/2) \times (-\epsilon_p, \epsilon_p)$ coincides with that of $P_\epsilon$ near the boundary, so within the preimage of $\partial Q \times [-s/2, s/2]$ by $\psi_p$, we have that \[
\psi_p^\ast X = \psi_p^\ast f + |w| \psi_p^\ast \hat{Y}_v. \] Note that $\psi_p(0,0) = p$, and then \[ \begin{array}{rcl}
D(\psi_p^\ast X)_{(0, 0)} &=& \left( \begin{array}{cc}
D(\psi_p^\ast Z_h)_0
& \left(\frac{\partial \psi_p^\ast f}{\partial w} \right)_0
\\\\
D\left((|w| \psi_p^\ast \hat{Y}_v)|_{\partial C_p }\right)_0
& \left(\frac{\partial}{\partial w} |w| \psi_p^\ast \hat{Y}_v \right)_0
\end{array} \right)
\\\\
&=& \left( \begin{array}{cc}
D(\psi_p^\ast Z_h)_0
& 0 \\\\
0 & \psi_p^\ast \hat{Y}_v(0, 0)
\end{array} \right) . \end{array} \] As $\psi_p^\ast \hat{Y}_v(0, 0)$ is positive if $p \in R_+^1$ and negative if $p \in R_-^1$, it is seen that \[
\lambda\left(\psi_p^\ast X ; (0,0) \right) =
\left\{\begin{array}{ll}
\lambda\left(\psi_p^\ast X|_{\partial C_p }; 0 \right),
& p \in R_{+}, \\
\lambda\left(\psi_p^\ast X|_{\partial C_p }; 0 \right) + 1,
& p \in R_{-} . \end{array}\right. \] Hence \[
\mathfrak{Ind}\left(\psi_p^\ast X ;(0,0) \right) =
\left\{\begin{array}{ll}
\mathfrak{Ind}\left(\psi_p^\ast Z_h|_{\partial C_p }; 0\right),
& p \in R_{+}, \\
-\mathfrak{Ind}\left(\psi_p^\ast Z_h|_{\partial C_p }; 0 \right),
& p \in R_{-} . \end{array}\right. \] Therefore, for $p \in R_+$, \[ \begin{array}{rcl}
\mathfrak{Ind}^{orb}(X, p)
&=& \frac{1}{|G_p|} \mathfrak{Ind}\left(\psi_p^\ast X ;0 \right)
\\\\
&=& \frac{1}{|G_p|} \mathfrak{Ind}\left(\psi_p^\ast Z_h|_{\partial C^+}; 0 \right)
\\\\
&=& \mathfrak{Ind}^{orb}\left(Z_h ;p \right) \end{array} \] and similarly \[
\mathfrak{Ind}^{orb}\left(X ;p \right)
= - \mathfrak{Ind}^{orb}\left(Z_h ;p \right) \] for $p \in R_-$.
With this, Equation \ref{eq-indexstep1} becomes \[
\mathfrak{Ind}^{orb}(X; \hat{Q})
= 2\mathfrak{Ind}^{orb}(Y;Q)
+ \mathfrak{Ind}^{orb}(Z_h; R_+)
- \mathfrak{Ind}^{orb}(Z_h; R_-). \]
By Theorem \ref{thrm-satakeindex} and Equation \ref{eq-additiveeulerchar}, $\mathfrak{Ind}^{orb}(X; \hat{Q}) = 2\chi_{orb}(Q) - \chi_{orb}(\partial Q)$, so that \[
2\chi_{orb}(Q) - \chi_{orb}(\partial Q)
=
2\mathfrak{Ind}^{orb}(Y;Q)
+ \mathfrak{Ind}^{orb}(Z_h; R_+)
- \mathfrak{Ind}^{orb}(Z_h; R_-). \] Note that $\partial Q$ is also a closed orbifold, so \[ \begin{array}{rcl}
\chi_{orb}(\partial Q)
&=& \mathfrak{Ind}^{orb}(X; \partial Q) \\\\
&=& \mathfrak{Ind}^{orb}(X; R_{+}) - \mathfrak{Ind}^{orb}(X;
R_{-}). \end{array} \] Hence, \begin{equation} \label{eq-indexstep2} \begin{array}{rcl} \mathfrak{Ind}^{orb}(Y;Q)
&=& \chi_{orb}(Q) + \frac{1}{2}\left(-\chi_{orb}(\partial Q) + \mathfrak{Ind}^{orb}(X; R_{-})
- \mathfrak{Ind}^{orb}(X; R_{+}) \right)
\\\\
&=& \chi_{orb}(Q) + \frac{1}{2}\left[-\chi_{orb}(\partial Q) + 2\mathfrak{Ind}^{orb}(X; R_{-}) \right.
\\\\
&& \left.
- \left(\mathfrak{Ind}^{orb}(X; R_{+}) + \mathfrak{Ind}^{orb}(X; R_{-}) \right)\right]
\\\\
&=& \chi_{orb}(Q) + \frac{1}{2}\left(-2\chi_{orb}(\partial Q) + 2\mathfrak{Ind}^{orb}(X;R_{-})\right)
\\\\
&=& \chi_{orb}(Q) - \chi_{orb}(\partial Q) + \mathfrak{Ind}^{orb}(X; R_{-})
\\\\
&=& \chi_{orb}(Q, \partial Q) + \mathfrak{Ind}^{orb}(X; R_{-}) \end{array} \end{equation}
Because $X$ coincides with $Y$ on $\Gamma^1$, it defines the same $\Gamma^{i}$ that $Y$ does. Since $X$ is a smooth vector field defined on $R_{-}^1$ that does not vanish on $\partial R_-^1 = \Gamma^1$, we may recursively apply this formula to higher and higher orders of $R^{i}_{-}$ until $R^{i}_{-}$ is empty, and there will no longer be an index sum term. Hence, \[
\mathfrak{Ind}^{orb}(X; R_{-}) = \sum_{i = 1}^n \chi_{orb}(R_{-}^i,
\Gamma^i). \] Along with Equation \ref{eq-indexstep2}, this completes the proof of Theorem \ref{thrm-mainresult}.
{
$\square$}
Let $\tilde{Q}$ denote the inertia orbifold of $Q$ and $\pi : \tilde{Q} \rightarrow Q$ the projection (see \cite{chenruanorbcohom}). It is shown in \cite{seaton1} that a vector field $Y$ on $Q$ induces a vector field $\tilde{Y}$ on $\tilde{Q}$, and that $\tilde{Y}(p, (g)) = 0$ if and only if $Y(p) = 0$.
For each point $p \in Q$ and $g \in G_p$, a chart $\{ V_p, G_p, \pi_p\}$ induces a chart $\{ V_p^g, C(g), \pi_{p, g} \}$ at $(p, (g)) \in \tilde{Q}$ where $V_p^g$ denotes the points in $V_p$ fixed by $g$ and $C(g)$ denotes the centralizer of $g$ in $G_p$. Clearly, $\partial V_p^g = (\partial V_p) \cap V_p^g$. An atlas for $\tilde{Q}$ can be taken consisting of charts of this form, so it is clear that $\partial \tilde{Q} = \widetilde{\partial Q}$.
Let $p \in \partial Q$ and pick a boundary product chart $\{ C_p^+, G_p, \phi_p \}$. Then for $g \in G_p$, there is a chart $\{ (C_p^+)^g, C(g), \phi_{p, g} \}$ for $(p, (g)) \in \tilde{Q}$. As the normal component to the boundary of $C_p^+$ is $G_p$-invariant, \[ \begin{array}{rcl}
(C_p^+)^g &=& \left(\mathbf{B}_0^{n-1}(r/2) \times [0, \epsilon_p)
\right)^g \\\\
&=& \left(\mathbf{B}_0^{n-1}(r/2) \right)^g \times [0,\epsilon_p), \end{array} \] and so \[
T_0 (C_p^+)^g = T_0 \left(\mathbf{B}_0^{n-1}(r/2)
\right)^g \times \mathbb{R}. \] It follows that $\tilde{Y}$ points out of $\partial \tilde{Q}$ at $(p, (g))$ if and only if $Y$ points out of $\partial Q$ at $p$. With this, applying Theorem \ref{thrm-mainresult} to $\tilde{Y}$ yields \begin{equation} \label{eq-inert} \begin{array}{rcl}
\mathfrak{Ind}^{orb}(\tilde{Y}; \tilde{Q})
&=& \chi_{orb}(\tilde{Q}, \partial \tilde{Q}) +
\sum\limits_{i=1}^n \chi_{orb}\left(\widetilde{R_-^i},
\widetilde{\Gamma^i}\right) \\\\
&=& \chi_{orb}(\tilde{Q}) - \chi_{orb}(\partial \tilde{Q}) +
\sum\limits_{i=1}^n
\left[ \chi_{orb}\left(\widetilde{R_-^i}\right) - \chi_{orb}\left(
\widetilde{\Gamma^i}\right) \right]. \end{array} \end{equation} Each of the $\Gamma^i$ and $\partial Q$ is a closed orbifold, so it follows from the proof of Theorem 3.2 in \cite{seaton1} (note that the assumption of orientability is not used to establish this result) that \[
\chi_{orb}(\widetilde{\Gamma^i}) = \chi(\mathbb{X}_{\Gamma^i}) \] and \begin{equation} \label{eq-orbtopboundary}
\chi_{orb}(\partial \tilde{Q}) = \chi_{orb}(\widetilde{\partial Q}) = \chi(\mathbb{X}_{\partial Q}) \end{equation} where $\mathbb{X}_{\Gamma^i}$ and $\mathbb{X}_{\partial Q}$ denote the underlying topological spaces of $\Gamma^i$ and $\partial Q$, respectively, and $\chi$ the usual Euler characteristic.
Letting $\hat{Q}$ denote, as above, the double of $Q$, it is easy to see that $\hat{\tilde{Q}} = \tilde{\hat{Q}}$. Hence, applying the same result to $\hat{\tilde{Q}}$ yields \begin{equation} \label{eq-doubleinert} \begin{array}{rcl}
\chi(\mathbb{X}_{\hat{Q}})
&=& \chi_{orb}\left( \tilde{\hat{Q}} \right) \\\\
&=& \chi_{orb} \left( \hat{\tilde{Q}} \right) \\\\
&=& 2\chi_{orb}(\tilde{Q}) - \chi_{orb}(\partial \tilde{Q}) \end{array} \end{equation} However, as \[ \begin{array}{rcl}
\chi(\mathbb{X}_{\hat{Q}}) &=& 2\chi(\mathbb{X}_{Q}) -
\chi(\mathbb{X}_{\partial Q})
\\\\
&=& 2\chi(\mathbb{X}_{Q}) - \chi_{orb}(\partial \tilde{Q}) \end{array} \] it follows from Equation \ref{eq-doubleinert} that $\chi_{orb}(\tilde{Q}) = \chi(\mathbb{X}_{Q})$. The same holds for each $R_-^i$ so that Equation \ref{eq-inert} becomes the following.
\begin{corollary} \label{cor-inertiaversion}
Let $Q$ be an $n$-dimensional smooth, compact orbifold with boundary, and let $Y$ be a smooth vector field on $Q$. If $\tilde{Y}$ denotes the induced vector field on $\tilde{Q}$, then \[
\mathfrak{Ind}^{orb}(\tilde{Y}; \tilde{Q})
= \chi(\mathbb{X}_Q, \mathbb{X}_{\partial Q}) +
\sum\limits_{i=1}^n \chi(\mathbb{X}_{R_-^i},
\mathbb{X}_{\Gamma^i}). \]
\end{corollary}
\end{document} | arXiv | {
"id": "0806.2113.tex",
"language_detection_score": 0.7719813585281372,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{{\bf History and Physics of The Klein Paradox}} \author{{\bf A\ Calogeracos}} \author{NCA\ Research Consultants, PO Box 61147, Maroussi 151 22, Greece} \author{(acal@hol.gr)} \author{{\bf N Dombey}} \author{Centre for Theoretical Physics, University of Sussex, Brighton BN1 9QJ,UK} \author{(normand@sussex.ac.uk)} \date{SUSX-TH-99-032} \maketitle
\begin{abstract} \newline \noindent The early papers by Klein, Sauter and Hund which investigate scattering off a high step potential in the context of the Dirac equation are discussed to derive the 'paradox' first obtained by Klein. The explanation of this effect in terms of electron-positron production is reassessed. It is shown that a potential well or barrier in the Dirac equation can become supercritical and emit positrons or electrons spontaneously if the potential is strong enough. If the well or barrier is wide enough, a seemingly constant current is emitted. This phenomenon is transient whereas the tunnelling first calculated by Klein is time-independent. It is shown that tunnelling without exponential suppression occurs when an electron is incident on a high barrier, even when the barrier is not high enough to radiate. Klein tunnelling is therefore a property of relativistic wave equations and is not necessarily connected to particle emission. The Coulomb potential is investigated and it is shown that a heavy nucleus of sufficiently large $Z$ will bind positrons. Correspondingly, as $Z$ increases the Coulomb barrier should become increasingly transparent to positrons. This is an example of Klein tunnelling. Phenomena akin to supercritical positron emission may be studied experimentally in superfluid $^3$He \end{abstract}
\section{Some History}
\subsection{Introduction to the Klein Paradox(es)}
\noindent Seventy years ago Klein \cite{klein} published a paper where he calculated the reflection and transmission coefficients for electrons of energy $E,$ mass $m$ and momentum $k$ incident on the potential step (Fig. 1)
\begin{equation} \label{step} V(x)=V,\;x>0;\,V(x)=0,\;x<0 \end{equation}
\noindent within the context of the new relativistic equation which had just been published by Dirac\cite{dirac}. He found (see Section 2 below) that the reflection and transmission coefficients $R_S,T_S$ if $V$ was large were given by \begin{equation} R_S=
\left(\frac{1-\kappa }{1+\kappa }\right)
^2\qquad T_S=\frac{4\kappa }{(1+\kappa )^2} \label{Rs/Ts} \end{equation} \noindent where $\kappa $ is the kinematic factor \begin{equation} \kappa =\frac pk\frac{E+m}{E+m-V} \label{kappa} \end{equation} \noindent and $p$ is the momentum of the transmitted particle for $x>0.$ It is easily seen from Eq. (\ref{kappa}) that when $E<V-m$, $\kappa $ seems to be negative with the paradoxical result that the reflection coefficient $ R_S>1$ while $T_S<0$. So more particles are reflected by the step than are incident on it. This is what many articles and books call the Klein Paradox. It is not, however, what Klein wrote down.
\
\noindent Klein noted that Pauli had pointed out to him that for $x>0$, the particle momentum is given by $p^2=(V-E)^2-m^2$ while the group velocity $ v_g $ was given by
\begin{equation} \label{group} v_g=dE/dp=p/(E-V) \end{equation}
\noindent So if the transmitted particle moved from left to right, $v_g$ was positive implying that $p$ had to be assigned its negative value
\begin{equation} \label{neg} p=-\sqrt{(V-E)^2-m^2} \end{equation}
\noindent With this choice of $p$
\begin{equation} \kappa =\sqrt{\frac{(V-E+m)(E+m)}{(V-E-m)(E-m)}} \label{kappa2} \end{equation}
\noindent and $\kappa \geq 1$ ensuring that both $R_S$ and $T_S$ are positive or zero and satisfy $R_S+T_S=1$ for $m$ $\leq E\leq V-m.$ Is there still a paradox? The general consensus both now and for the authors who followed Klein and did the calculation correctly is that there is. Let the potential step $V\rightarrow \infty $ for fixed $E$ then from Eq. (\ref {kappa2})$\,$ $\kappa $ tends to a finite limit and hence $T_S$ tends to a non-zero limit. The physical essence of this paradox thus lies in the prediction that according to the Dirac equation, fermions can pass through strong repulsive potentials without the exponential damping expected in quantum tunnelling processes. We have called this process Klein tunnelling \cite{kleinf}.
\
\noindent We begin with a summary of the Dirac equation in one dimension in the presence of a potential $V(x)$ and show how Klein's original result for $ R_S$ and $T_S$ is obtained. We go on to the papers of Sauter in 1931, who replaced Klein's potential step with a barrier with a finite slope, and then to Hund in 1940 who realised that the Klein potential step gives rise to the production of pairs of charged particles when the potential strength is sufficiently strong. This result although not well known is a precursor of the famous results of modern quantum field theory of Schwinger \cite{schwing} and Hawking \cite{hawk} which show that particles are spontaneously produced in the presence of strong electric and gravitational fields. In Part II we turn to the underlying physics of the Klein paradox and show that particle production and Klein tunnelling arise naturally in the Dirac equation: when a potential well is deep enough it becomes supercritical (defined as the potential strength for which the bound state energy $E=-m)$ and positrons will be spontaneously produced. Supercriticality is well-understood \cite {zeld}, \cite{grein2} and can occur in the Coulomb potential with finite nuclear size when the nuclear charge $Z$ $>137$. Positron production via this mechanism has been the subject of experimental investigations in heavy ion collisions for many years. We then show that if a potential well is wide enough, a steady but transient current will flow when the potential becomes supercritical. In order to analyse these processes it is necessary to introduce the concept of vacuum charge. We consider the implications of these concepts for the Coulomb potential and for other physical phenomena and we end by pointing out that Klein was unfortunate in that the example he chose to calculate was pathological.
\subsection{The Dirac Equation in One Dimension}
\noindent In one dimension it is unnecessary to use four-component Dirac spinors. It is much easier to use two-component Pauli spinors instead \cite{bruce}. We adopt the convention $\gamma _0=\sigma _z$, $\gamma _1=i\sigma _x$. The above choice agrees with $\gamma _i\gamma _j+\gamma _j\gamma _i=2g_{ij}.$ The free Dirac Hamiltonian in one dimension is \[ H_0=-\sigma _yp+\sigma _zm \] and so the Dirac equation takes the form \begin{equation} (\sigma _x\frac \partial {\partial x}-E\sigma _z+m)\psi =0 \label{frd} \end{equation}
In what follows ${\bf k}$ stands for the wavevector, $k$ for its magnitude and $\varepsilon =\left| E\right| =+\sqrt{k^2+m^2}$. We try a plane wave of the form \begin{equation} \left( \begin{array}{c} A \\ B \end{array} \right) e^{ikx-iEt} \label{frs} \end{equation} and substitute in (\ref{frd}). The equation is satisfied by $A=ik,B=E-m$ where $E=\pm \varepsilon $. The positive energy (or particle) solutions have the form \begin{equation} N_{+}(\varepsilon )\left( \begin{array}{c} ik \\ \varepsilon -m \end{array} \right) e^{ikx-i\varepsilon t} \label{frp} \end{equation} and the negative energy (or hole) solutions are \begin{equation} N_{-}(\varepsilon )\left( \begin{array}{c} ik \\ -\varepsilon -m \end{array} \right) e^{ikx+i\varepsilon t} \label{frn} \end{equation} where $N_{\pm }(\varepsilon )$ are appropriate normalization factors. If we take the particle to be in a box of length 2$L$ with periodic boundary conditions at $x=-L$ and $x=L$ we obtain
\begin{equation} N_{+}(\varepsilon )=\frac 1{\sqrt{2L}\sqrt{2\varepsilon (\varepsilon -m)}} ,N_{-}(\varepsilon )=\frac 1{\sqrt{2L}\sqrt{2\varepsilon (\varepsilon +m)}} \label{norm} \end{equation}
\noindent Alternatively we can use continuum states and energy normalisation; then
\begin{equation} \label{en} N_{+}(\varepsilon )=\frac 1{\sqrt{2\pi }\sqrt{2\varepsilon (\varepsilon -m)}} ,N_{-}(\varepsilon )=\frac 1{\sqrt{2\pi }\sqrt{2\varepsilon (\varepsilon +m)} } \end{equation}
\subsection{The Klein Result}
\noindent In the presence of the Klein step, the Hamiltonian is \[ H_0=-\sigma _yp+V(x)+\sigma _zm \] where $V(x)$ is now given by Eq.(\ref{step}). The Dirac equation reads \begin{equation} (\sigma _x\frac \partial {\partial x}-(E-V(x))\sigma _z+m)\psi =0 \label{eqn} \end{equation}
\noindent Consider an electron incident from the left. The corresponding wavefunction is \begin{equation} \left( \begin{array}{c} ik \\ E-m \end{array} \right) e^{ikx}+B\left( \begin{array}{c} -ik \\ E-m \end{array} \right) e^{-ikx} \label{sa} \end{equation} for $x<0,$ and \begin{equation} F\left( \begin{array}{c} -ip \\ V-E-m \end{array} \right) e^{-ipx} \label{sc} \end{equation}
\noindent for $x>0$ since that state is a hole state (see Fig. 2). It is easy to see from Eqs. (\ref{sa}, \ref{sc}) that for continuity at $x=0$ we require
\begin{eqnarray} ik(1-B) &=&-ipF \\ (E-m)(1+B) &=&(V-E-m)F \nonumber \end{eqnarray}
\noindent giving
\[ \frac{1-B}{1+B}=\frac{-p}k\frac{E-m}{V-E-m}=\frac 1\kappa \]
\noindent in terms of the quantity $\kappa $ defined by Eq. (\ref{kappa}). This gives the expression for $R_S=\left| B\right| ^2$ of Eq. (\ref{Rs/Ts}) above while that for $T_S$ follows from $R_S+T_S=1$.
\subsection{Sauter's Contribution}
\noindent Klein's surprising result was widely discussed by theoretical physicists at the time. Bohr thought that the large transmission coefficient that Klein found was because the Klein step was so abrupt. He discussed this with Heisenberg and Sommerfeld and as a result Sommerfeld's assistant Sauter \cite{saut} in Munich calculated the transmission coefficient for a potential of the form
\begin{equation} V(x)=vx\,\qquad 0<x<L \label{saut} \end{equation}
\noindent with $V(x)=0$ for $x<0$ and $V(x)=vL$ for $x>L$ (Fig. 3). In order to obtain negative energy states (holes) to propagate through the barrier as in the Klein problem, we require $vL>2m$. Sauter's potential thus should reduce to the Klein step if $v$ were very large. Sauter's potential is of course more physical than Klein's: it simply represents a constant electric field $E=-v$ in a finite region of space. Klein tunnelling in this case would imply that low energy electrons could pass through a repulsive constant electric field without exponential damping. Bohr conjectured that the Klein result would only be reproduced if the Sauter field were so strong that the potential difference $\Delta V>2m$ would be attained at distances of the order of the Compton wavelength of the electron; that is to say that the electric field strength $\left| E\right| =\left| v\right| >2m^2$.
\
\noindent After a lengthy calculation involving the appropriate hypergeometric functions, Sauter obtained the result he was seeking: he obtained an expression for the reflection and transmission coefficients $R$ and $T$ which reduced to the Klein values $R_S$ and $T_S$ for $\left| v\right| \sim m^2$; nevertheless, for weaker fields he obtained
\begin{equation} R\simeq 1\qquad T=e^{-\pi m^2/v}=e^{-(\pi m^2L/\Delta V)} \label{sautres} \end{equation}
\noindent a non-paradoxical result since it shows the exponentially-suppressed tunnelling typical of quantum phenomena. What no one realised at the time is that Sauter had anticipated Schwinger's~\cite{schwing} result of quantum electrodynamics by twenty years (see next Section). Note also that Eq.~(\ref{sautres}) shows explicitly that Bohr's conjecture is correct: in order to violate the rule that tunnelling in quantum mechanics is exponentially suppressed we require electric fields of field strength $\left| E\right| =\left| v\right| \sim \pi m^2$.
\subsection{Hund's Contribution}
\noindent The next major contribution to the subject came ten years later. Hund \cite{hund} looked again at the Klein step potential but from the viewpoint of quantum field theory, not just the one particle Dirac equation. He concentrated on charged scalar fields rather than spinor fields. He considered both the Klein step potential and a sequence of step potentials. His result was as surprising as Klein's original result. Hund found that provided $\Delta V>2m$ where $\Delta V=V(\infty )-V(-\infty )$, then a non-zero constant electric current $j$ had to be present where the current was given by an integral over the transmission coefficient $T(E)$ with respect to energy $E$. The current had to be interpreted as spontaneous production out of the vacuum of a pair of oppositely charged particles. Hund attempted to derive the same result for a spinor field but was unsuccessful: it was left to Hansen and Ravndal \cite{hans} forty years later to generalise this result to spinors (for a good discussion of the difference between scalar and spinor fields incident on a Klein step see Manogue\cite {mano}). We show in the Appendix for a Klein step or more general step potential such as those considered by Hund and Sauter in the Dirac equation that there is indeed a spontaneous current of electron-positron pairs produced given by
\begin{equation}
\left\langle 0\right| j\left| 0\right\rangle =-\frac 1{2\pi }\int dET(E) \label{pprod} \end{equation}
\noindent in agreement with Hund's result for scalars. Eq. (\ref{pprod}) is very powerful: it is a sort of optical theorem. If spontaneous pair production occurs at a constant rate, then the time-independent reflection and transmission coefficients must incorporate this process. If Sauter had known of Eq. (\ref{pprod}), he would have been able to predict Schwinger's \cite{schwing} result on spontaneous pair production by a constant electric field simply by using the value of the transmission coefficient he had calculated in Eq. (\ref{sautres}).
\section{The Underlying Physics}
\subsection{Scattering by a Square Barrier}
\noindent We now investigate the underlying physics behind these phenomena. Why is it that electrons can tunnel so easily through a high potential barrier? Why are particles produced in strong potentials? Are these two questions the same question; that is to say, is the result that particles are produced by a Klein step or other strong field the reason for Klein tunnelling? To answer these questions we turn our attention to a potential barrier which is not the Klein step but is similar and has better-defined properties. This is the square barrier (Fig. 4)
\begin{equation}
V(x)=V,|\,x\,|<a;V(x)=0,|\,x\,|>a. \label{barreq} \end{equation}
\noindent Electrons incident from the left would not be expected to be able to distinguish between a wide barrier (i.e.\ $ma\gg 1$) and a Klein step. The results are in fact not identical but they do display the same characteristics.
\
\noindent It is easy to show that the reflection and transmission coefficients are given for a square barrier by \cite{jens} \begin{eqnarray} R &=&\frac{(1-\kappa ^2)^2\sin ^2(2pa)}{4\kappa ^2+(1-\kappa ^2)^2\sin ^2(2pa)}\qquad \label{barr} \\ T &=&\frac{4\kappa ^2}{4\kappa ^2+(1-\kappa ^2)^2\sin ^2(2pa)} \end{eqnarray}
\noindent Note that tunnelling is easier for a barrier than a step: if
\begin{equation} \label{quant} 2pa=N\pi \end{equation}
\noindent corresponding to $E_N=V-\sqrt{m^2+N^2\pi ^2/4a^2}$ then the electron passes right through the barrier with no reflection: this is called a transmission resonance \cite{cdi}.
\
\noindent As $a$ becomes very large for fixed $m,E$ and $V$, $pa$ becomes very large and $\sin (pa)$ oscillates very rapidly. In those circumstances we can average over the phase angle $pa$ using $\sin ^2(pa)=\cos ^2(pa)= \frac 12$ to find the limit
\begin{equation} \label{inf} R_\infty =\frac{(1-\kappa ^2)^2}{8\kappa ^2+(1-\kappa ^2)^2}\qquad T_\infty = \frac{8\kappa ^2}{8\kappa ^2+(1-\kappa ^2)^2} \end{equation}
\noindent It may seem unphysical that $R_\infty $ and $T_\infty $ are not the same as $R_S$ and $T_S$ but it is not: it is well known in electromagnetic wave theory \cite{stratton} that reflection off a transparent barrier of large but finite width (with 2 sides) is different from reflection off a transparent step (with 1 side). The square barrier thus demonstrates Klein tunnelling but it now arises in a more physical problem than the Klein step. The zero of potential is properly defined for a barrier whereas it is arbitrary for a step and the energy spectrum of a barrier (which attracts positrons) or well (which attracts electrons) is easily calculable. Particle emission from a barrier or well is described by supercriticality: the condition when the ground state energy of the system overlaps with the continuum ($E=m$ for a barrier; $E=-m$ for a well) and so any connection between particle emission and the time-independent scattering coefficients $R$ and $T$ can be investigated.
\subsection{Fermionic Emission from a Narrow Well}
\noindent We discussed the field theoretic treatment of this topic in a previous paper \cite{cdi} which we refer to as CDI. We quickly review the argument of that paper. Spontaneous fermionic emission is a non-static process and in the case of a seemingly static potential, it is necessary to ask how the potential was switched on from zero. We follow CDI in turning on the potential adiabatically. We will consider the square well
\begin{equation}
V(x)=-V,|\,x\,|<a;V(x)=0,|\,x\,|>a \label{well} \end{equation}
\noindent but it is easiest to begin with the very narrow potential $ V(x)=-\lambda \delta (x)$ which is the limit of a square well with $\lambda =2Va$. The bound states are then very simple: for a given value of $\lambda $ there is just one bound state corresponding to either the even $(e)$ or odd ( $o)$ wave functions \cite{cdi} with energy given by
\begin{equation} \label{delta} E=m\cos \lambda \quad (e)\qquad E=-m\cos \lambda \quad (o) \end{equation}
\noindent When the potential is initially turned on and $\lambda $ is small the bound state is even and its energy $E$ is just below $E=m$. As $\lambda $ increases, $E$ decreases and at $\lambda =\pi /2$, $E$ reaches zero. For $\lambda >\pi /2$, $E$ becomes negative. Assuming that we started in the vacuum state and therefore that the well was originally vacant, we now have for $\lambda >\pi /2$ the absence of a negative energy state which must be interpreted as the presence of a (bound) positron according to Dirac's hole theory. Let $\lambda $ increase further and $E$ decreases further until at $\lambda =\pi $, $E=-m$, which is the supercriticality condition. So for $\lambda >\pi $, the bound positron acquires sufficient energy to escape from the well. This is the phenomenon of spontaneous positron production as described originally by Gershtein and Zeldovich \cite{zeld} and Pieper and Greiner \cite{grein2}. Note that this picture requires that positrons (as well as electrons) are bound by potential wells when the potential strength is large enough: we return to this point later when we discuss the Coulomb potential.
\subsection{Digression on Vacuum Charge}
\noindent How is it possible to conserve charge and produce positrons out of the vacuum? This question has been a fruitful ground for theorists in recent years. The key point is that the definition of the vacuum state of the system (and of the other states) depends on the background potential: this leads to the concept of vacuum charge \cite{stone}, \cite{blank}. At this point a single particle interpretation of a potential in the Dirac equation is insufficient and field theory becomes necessary (as is also seen in the discussion of radiation from the Klein step in the Appendix). But nevertheless it turns out that once the concept of vacuum charge is introduced, first quantisation is all that is necessary to determine its value. We shall refer the reader to CDI for a proper treatment of vacuum charge; we just write down the essential equations here.
\
\noindent The total charge is defined by (according to our conventions the electron charge is $-$1)\ \begin{equation} Q(t)=\int dx\rho (x,t)=-\frac 12\int dx\left[ \psi ^{\dagger }(x,t),\psi (x,t)\right] \label{ch1} \end{equation} Writing the wave function $\psi (x,t)$ in terms of creation and annihilation operators we eventually find that \begin{equation} Q=Q_p+Q_0 \label{ch} \end{equation} where the particle charge $Q_p$ is an operator which counts the number of electrons in a state minus the number of positrons while the vacuum charge $ Q_0$ is just a number which is defined by the difference in the number of positive energy and negative energy states of the system: \begin{equation} Q_0=\frac 12\left\{ \sum_k(\text{states with }E>0)-\sum_k(\text{states with } E<0)\right\} \label{qo} \end{equation} \noindent Given the definition of the vacuum we immediately get \begin{equation}
\left\langle 0\right| Q\left| 0\right\rangle =Q_0 \label{vgch} \end{equation}
\noindent We illustrate the use of the vacuum charge by returning to the delta function potential $V(x)=-\lambda \delta (x)$. For $\lambda $ just larger than $\pi /2$, $Q_p=+1$ because a positron has been created, but now the vacuum charge $Q_0=-1$ because the number of positive energy states has decreased by one while the number of negative energy states has increased by one. So the total charge $Q$ is in fact conserved. As the potential is increased further, $\lambda $ will reach $\pi $, where $E=-m$, and the bound positron reaches the continuum and becomes free. Note that at supercriticality, there is no change in vacuum charge; the change occurs when $E$ crosses the zero of energy. Note also that at supercriticality the even bound state disappears and the first odd state appears.
\
\noindent We can continue to increase $\lambda $ and count positrons: the total number of positrons produced for a given $\lambda $ is the number of times $E$ has crossed $E=0;$ that is \begin{equation} Q_p=Int\,[\frac \lambda \pi +\frac 12] \label{charge0} \end{equation}
\noindent and $Q_0=$ -$Q_p$ where $Int[x]$ denotes the integer part of $x.$ For positron emission the more interesting quantity is the number of supercritical positrons $Q_S$, that is the number of states which have crossed $E=-m$. This is given by
\begin{equation} \label{super} Q_S=Int\,[\frac \lambda \pi ] \end{equation}
\subsection{Wide Well}
\noindent We can now return to the case that we are interested in which is that of a wide well or barrier. So let us consider the general case of a square well potential of strength $V>2m$ and then look at a wide well for which $ma>>1$ most closely corresponding to the Klein step. We follow the discussion given in our papers CDI and CD \cite{kleinf}. We must find first the condition for supercriticality and then the number of bound and supercritical positrons produced for a given $V.$
\
\noindent The bound state spectrum for the well $V(x)=-V,|\,x\,|<a;V(x)=0,|
\,x\,|>a$ is easily obtained: there are even and odd solutions given by the equations
\begin{equation} \tan pa=\sqrt{\frac{(m-E)(E+V+m)}{(m+E)(E+V-m)}} \label{even} \end{equation}
\begin{equation} \tan pa=-\sqrt{\frac{(m+E)(E+V+m)}{(m-E)(E+V-m)}} \label{odd} \end{equation}
\noindent where now the well momentum is given by $p^2=(E+V)^2-m^2$. We have changed the sign of $V$ so that it is now attractive to electrons rather than positrons in order to conform with other authors who have studied supercritical positron emission rather than electron emission.
\
\noindent
From Eq (\ref{even}) we see that the ground state becomes supercritical when $pa=\pi /2$ and therefore $V_1^c=m+\sqrt{m^2+\pi ^2/4a^2}.$ From Eq (\ref {odd}) the first odd state becomes supercritical when $pa=\pi $ and $V_2^c=m+ \sqrt{m^2+\pi ^2/a^2}.$ Clearly the supercritical potential corresponding to the Nth positron is
\begin{equation} \label{Nth} V_N^c=m+\sqrt{m^2+N^2\pi ^2/4a^2} \end{equation}
\noindent It follows from Eq (\ref{Nth}) that $V=2m$ is an accumulation point of supercritical states as $ma\rightarrow \infty $. Furthermore it is a threshold: a potential $V$ is subcritical if $V<2m$. It is not difficult to show for a given $V>2m$ that the number of supercritical positrons is given by
\begin{equation} \label{super2} Q_S=Int[(2a/\pi )\sqrt{V^2-2mV}] \end{equation}
\noindent The corresponding value of the total positron charge $Q_p$ can be shown using Eqs (\ref{even},\ref{odd}) to satisfy
\begin{equation} \label{charge} Q_p-1\leq Int[(2a/\pi )\sqrt{V^2-m^2}]\leq Q_p \end{equation}
\noindent so for large $a$ we have the estimates
\begin{equation} \label{est} Q_p\sim (2a/\pi )\sqrt{V^2-m^2};\quad Q_S\sim (2a/\pi )\sqrt{V^2-2mV} \end{equation} $\qquad $
\noindent Now we can build up an overall picture of the wide square well $ ma>>1$. When $V$ is turned on from zero in the vacuum state an enormous number of bound states is produced. As $V$ crosses $m$ a very large number $ Q_p$ of these states cross $E=0$ and become bound positrons. As $V$ crosses $ 2m$ a large number $Q_S$ of bound states become supercritical together. This therefore gives rise to a positively charged current flowing from the well. But in this case, unlike that of the Klein step, the charge in the well is finite and therefore the particle emission process has a finite lifetime. Nevertheless, for $ma$ large enough the transient positron current for a wide barrier is approximately constant in time for a considerable time as we shall see in the next section.\
\subsection{Emission Dynamics}
\noindent We now restrict ourselves to the case $V=2m+\Delta $ with $\Delta <<m$. This is not necessary but it avoids having to calculate the dynamics of positron emission while the potential is still increasing beyond the critical value. We can assume all the positrons are produced almost instantaneously as the potential passes through $V=2m.$ It also means that the kinematics are non-relativistic. Hence for a sufficiently wide well so that $\Delta a$ is large, $Q_S\sim (2a/\pi )\sqrt{2m\Delta }$. The well momentum of the Nth supercritical positron is still given by Eq (\ref{quant} ) $p_Na=N\pi /2$ which corresponds to an emitted positron energy $
|\,E_N\,|=2m+\Delta -\sqrt{p_N^2+m^2}>m$. Note that the emitted energies have discrete values although for $a$ large, they are closely spaced.
\
\noindent The lifetime $\tau $ of the supercritical well is given by the time for the slowest positron to get out of the well. The slowest positron is the deepest lying state with $N=1$ and momentum $p_1=\pi /2a$. Hence $ \tau \approx ma/p_1=2ma^2/\pi .$ So the lifetime is finite but scales as $ a^2 $. But a large number of positrons will have escaped well before $\tau $ . There are $Q_S$ supercritical positrons initially and their average momentum $\overline{p}$ corresponds to $N=Q_S/2$; hence $\overline{p}=\sqrt{ m\Delta /2}$ which is independent of $a$. Thus a transient current of positrons is produced which is effectively constant in time for a long time of order $\overline{\tau }$ $=a\sqrt{2m/\Delta }$. We thus see that the square well (or barrier) for $a$ sufficiently large behaves just like the Klein step: it emits a seemingly constant current with a seemingly continuous energy spectrum. But initially the current must build up from zero and eventually must return to zero. So the well/barrier is a time-dependent physical entity with a finite but long lifetime for emission of supercritical positrons or electrons.
\
\noindent Note again that the transmission resonances of the time-independent scattering problem coincide with the energies of particles emitted by the well or barrier. It is therefore tempting to use the Pauli principle to explain the connection. Following Hansen and Ravndal \cite{hans} , we could say that $R$ must be zero at the resonance energy because the electron state is already filled by the emitted electron with that energy. But it is easy to show that the reflection coefficient is zero for bosons as well as fermions of that energy, and no Pauli principle can work in that case. Furthermore emission ceases after time $\tau $ whereas $R=0$ for times $t>\tau $ . It follows that we must conclude that Klein tunnelling is a physical phenomenon in its own right, independent of any emission process. It seems that Klein tunnelling is indeed distinct from the particle emission process: to show this is so we return to the square barrier to show that Klein tunnelling occurs even when the barrier is subcritical.
\subsection{Klein Tunnelling and the Coulomb Barrier}
\noindent It is clear from Eq.~(\ref{barr}) that while the reflection coefficient $R$ for a square barrier cannot be $0$, neither is the transmission coefficient $T$ exponentially small for energies $E<V$ when $V>2m$ even though the scattering is classically forbidden. The simplest way to understand this is to consider the negative energy states under the potential barrier as corresponding to physical particles which can carry energy in exactly the same way that positrons are described by negative energy states which can carry energy. It follows from Eq.~(\ref{Rs/Ts}) that $R_S$ and $T_S$ correspond to reflection and transmission coefficients in transparent media with differing refractive indices: thus $\kappa $ is nothing more than an effective fermionic refractive index corresponding to the differing velocities of propagation by particles in the presence and absence of the potential. On this basis, tuning the momentum $p$ to obtain a transmission resonance for scattering off a square barrier is nothing more than finding the frequency for which a given slab of refractive material is transparent. This is not a new idea. In Jensen's words ``A potential hill of sufficient height acts as a Fabry-Perot etalon for electrons, being completely transparent for some wavelengths, partly or completely reflecting for others'' \cite{bak}.
\
\noindent We can now look in more detail at Klein tunnelling: both in terms of our model square well/barrier problem and at the analogous Coulomb problem. The interesting region is where the potential is strong but subcritical so that emission dynamics play no role and sensible time independent scattering parameters can be defined. For electron scattering off the square barrier $V(x)=V$ we would thus require $V<$ $V_1^c=m+\sqrt{ m^2+\pi ^2/4a^2}$ together with $V>2m$ so that positrons can propagate under the barrier. For the corresponding square well $V(x)=-V$ there are negative energy bound states $0>E>-m$ provided that $V>$ $\sqrt{m^2+\pi ^2/4a^2}$ [cf. Eq.(\ref{charge})]. So when the potential well is deep enough, it will in fact bind positrons. Correspondingly, a high barrier will bind electrons. It is thus not surprising that electrons can tunnel through the barrier for strong subcritical potentials since they are attracted by those potential barriers. Another way of seeing this phenomenon is by using the concept of effective potential $V_{eff}(x)$ which is the potential which can be used in a Schrodinger equation to simulate the properties of a relativistic wave equation. For a potential $V(x)$ introduced as the time-component of a four-vector into a relativistic wave equation (Klein-Gordon or Dirac), it is easy to see that $2mV_{eff}(x)=2EV(x)-V^2(x).$ Hence as the energy $E$ changes sign, the effective potential can change from repulsive to attractive.
\
\noindent For the pure Coulomb potential, it is well known that there is exponential suppression of the wave functions for a repulsive potential compared with an attractive potential. For example, if $\rho =\left| \psi (0)\right| _{pos}^2/\left| \psi (0)\right| _{el}^2$ is the ratio of the probability of a positron penetrating a Coulomb barrier to reach the origin compared with the probability of an electron of the same energy, then if the particles are non-relativistic \begin{equation} \rho =e^{-2\pi Z\alpha E/p} \label{nonrel} \end{equation} \noindent where $p$ and $E$ are the particle momenta and energies and this is exponentially small as $p\rightarrow 0$ \cite{LL}. But if the particles are relativistic \cite{rose} \begin{equation} \rho =fe^{-2\pi Z\alpha } \label{rel} \end{equation} \noindent where $f$ is a ratio of complex gamma-functions and is approximately unity for large $Z$. So $\rho \sim e^{-2\pi Z\alpha }\approx 10^{-3}$ for $Z\alpha \sim 1$, which is not especially small although it still decreases exponentially with $Z$.
\
\noindent In order to demonstrate Klein tunnelling for a Coulomb potential we require first the inclusion of nuclear size effects so that the potential is not singular at $r=0$ and second that $Z$ is large enough so that bound positron states are present. This means that $Z$ must be below its supercritical value $Z_c$ of around $170$ but large enough for the $1s$ state to have $E<0$. The calculations of references \cite{zeld} and \cite {grein2} which depend on particular models of the nuclear charge distribution give this region as $150<Z<Z_c$ which unfortunately will be difficult to demonstrate experimentally. Nevertheless, the theory seems to be clear: in this subcritical region positrons should no longer obey a tunnelling relation which decreases exponentially with Z such as that of Eq. (\ref{rel}). Instead the Coulomb barrier should become more transparent as $ Z $ increases, at least for low energies. By analogy with the square barrier we may expect that maximal transmission for positron scattering on a Coulomb potential should occur around $Z=Z_c$ although the onset of supercriticality implies that time independent scattering quantities may no longer be well-defined. We are now carrying out further detailed calculations to clarify the situation for positron scattering off nuclei with $Z$ near $Z_c$ to see if we can simulate Klein tunnelling.
\section{Conclusions}
\noindent It seems that Klein was very unfortunate in that the potential step he considered is pathological and therefore a misleading guide to the underlying physics. Klein's step represents a limit in which time-dependent emission processes become time-independent and therefore a relationship between the emitted current and the transmission coefficient exists, as we show in the Appendix. In general no direct relationship would exist between the transient current emitted and the time-independent transmission coefficient. The physics of the Dirac equation which underlies Klein's result is rich: it includes spontaneous fermionic production by strong potentials and the separate phenomenon of Klein tunnelling by means of the negative energy states characteristic of relativistic wave equations, similar to interband tunnelling in semiconductors \cite{semi}. Spontaneous positron production due to supercriticality has not yet been unambiguously demonstrated experimentally in heavy ion collisions but experiments on superfluid $^3$He-B~\cite{lanc1}, \cite{lanc2} have displayed anomalous effects when the velocity of a body moving in the fluid exceeds the critical Landau velocity $v_L$. These experiments have now been interpreted in the same way as supercritical positron production~\cite{cv}. It may well be that fermionic many-body systems can be used to demonstrate the fundamental quantum processes which Klein unearthed seventy years ago.
\
\noindent We would like to thank A. Anselm, G. Barton, J. D. Bjorken, B. Garraway, R. Hall, L. B. Okun, R. Laughlin, G. E. Volovik and D. Waxman for advice and help.
\section{Appendix: Pair Production by a Step Potential}
\noindent Consider the Klein step of Eq.~(\ref{step}) for $V>2m$. We will show that the expectation value of the current in the vacuum state in the presence of the step is non-zero, which means that the Klein step produces electron-positron pairs out of the vacuum at a constant rate. The derivation hinges on a careful definition of the vacuum state. We use the derivation of CD2~\cite{kleinf}.
\subsection{The normal modes in the presence of the Klein step.}
\noindent An energy-normalised positive energy or particle solution to the Dirac equation can be written from eq. (\ref{en})
\begin{equation} \label{pos} \sqrt{
{\displaystyle {\varepsilon +m \over 2k}}
}\left( \begin{array}{c} i \\
{\displaystyle {k \over E+m}}
\end{array} \right) e^{ikx} \end{equation}
\noindent A negative energy or hole solution reads \[ \sqrt{
{\displaystyle {\varepsilon -m \over 2k}}
}\left( \begin{array}{c} i \\
{\displaystyle {k \over E+m}}
\end{array} \right) e^{ikx} \]
\noindent Scattering is usually described by a solution describing a wave incident (say from the left) plus a reflected wave (from the right) plus a transmitted wave (to the right). It is convenient here to use waves of different form either describing a wave (subscript $L)$ incident from the left with no reflected wave or describing a wave (subscript $R$) incident from the right with no reflected wave. Particle and hole wavefunctions will be denoted by $u$ and $v$ respectively. It is clear that the nontrivial result we are seeking arises from the overlap of the hole continuum $E<V-m$ on the right with the particle continuum $E>m$ on the left. We are thus concerned with wavefunctions with energies in the range $m<E<V-m.$ The expressions for $u_L,u_R$ in this energy range are given below.
\
\begin{equation} \begin{array}{c} \sqrt{2\pi }u_L(E,x)=
{\displaystyle {\sqrt{2\kappa } \over \kappa +1}}
\sqrt{
{\displaystyle {E+m \over k}}
}\left( \begin{array}{c} i \\
{\displaystyle {k \over E+m}}
\end{array} \right) e^{ikx}\theta (-x)+ \\ \\ \left\{
{\displaystyle {\kappa -1 \over \kappa +1}}
\sqrt{
{\displaystyle {V-E-m \over 2\left| p\right| }}
}\left( \begin{array}{c} i \\
{\displaystyle {\left| p\right| \over E+m-V}}
\end{array}
\right) e^{i\left| p\right| x}+\sqrt{
{\displaystyle {V-E-m \over 2\left| p\right| }}
}\left( \begin{array}{c} i \\
{\displaystyle {-\left| p\right| \over E+m-V}}
\end{array}
\right) e^{-i\left| p\right| x}\right\} \theta (x) \end{array} \label{w1} \end{equation}
\begin{equation} \begin{array}{c} \sqrt{2\pi }u_R(E,x)=\left\{
{\displaystyle {1-\kappa \over 1+\kappa }}
\sqrt{
{\displaystyle {E+m \over 2k}}
}\left( \begin{array}{c} i \\
{\displaystyle {k \over E+m}}
\end{array} \right) e^{ikx}+\sqrt{
{\displaystyle {E+m \over 2k}}
}\left( \begin{array}{c} i \\
{\displaystyle {-k \over E+m}}
\end{array} \right) e^{-ikx}\right\} \theta (-x)+ \\ \\ +
{\displaystyle {\sqrt{2\kappa } \over \kappa +1}}
\sqrt{
{\displaystyle {V-E-m \over \left| p\right| }}
}\left( \begin{array}{c} i \\
{\displaystyle {\left| p\right| \over E+m-V}}
\end{array}
\right) e^{i\left| p\right| x}\theta (x) \end{array} \label{w2} \end{equation}
\noindent We write $\left| p\right| $ rather than $p$ in these equations since the group velocity is negative for $x>0$ (cf. Eq. (\ref{sc})).
\
\noindent We need to evaluate the currents corresponding to the solutions of Eqs (\ref{w1},\ref{w2}). According to our conventions $\alpha _x\,=\gamma _0\gamma _x=-\sigma _y$ so \begin{equation} j_L\equiv -u_L^{\dagger }(E,x)\sigma _yu_L(E,x)=-\frac{2\kappa /\pi }{ (\kappa +1)^2} \label{c1} \end{equation} \begin{equation} j_R\equiv -u_R^{\dagger }(E,x)\sigma _yu_R(E,x)=-\frac{2\kappa /\pi }{ (\kappa +1)^2} \label{c2} \end{equation}
\subsection{The definition of the vacuum and the vacuum expectation value of the current.}
Now expand the wave function $\psi $ in terms of creation and annihilation operators which refer to our left- and right-travelling solutions: \begin{equation} \begin{array}{c} \psi (x,t)=\int dE\{a_L(E)u_L(E,x)e^{-iEt}+a_R(E)u_R(E,x)e^{-iEt}+ \\ +b_L^{\dagger }(E)v_L(E,x)e^{iEt}+b_R^{\dagger }(E)v_R(E,x)e^{iEt}\} \end{array} \label{exp} \end{equation}
\noindent with $\psi ^{\dagger }$ given by the Hermitian conjugate expansion. We must now determine the appropriate vacuum state in the presence of the step. States described by wavefunctions $u_L(E,x)$ and $v_L(E,x)$ correspond to (positive energy) electrons and positrons respectively coming from the left. Hence with respect to an observer to the left (of the step) such states should be absent from the vacuum state, so
\begin{equation} \label{a1}
a_L(E)\left| 0\right\rangle =0,\,b_L(E)\left| 0\right\rangle =0 \end{equation}
\noindent Wavefunctions $u_R(E,x)$ for $E>m+V$ describe for an observer to the right, electrons incident from the right. These are not present in the vacuum state hence \begin{equation} \label{a2}
a_R(E)\left| 0\right\rangle =0\text{ for }E>m+V \end{equation}
\noindent Wavefunctions $v_R(E,x)$ describe, again with respect to an observer to the right, positrons incident from the right; again \begin{equation} \label{b2}
b_R(E)\left| 0\right\rangle =0\text{ } \end{equation}
\noindent The wavefunctions that play the crucial role in the Klein problem belong to the set $u_R(E,x)$ for $m<E<V-m.$ For an observer to the right these states are positive energy positrons and hence they should be filled in the vacuum state, i.e.
\begin{equation} \label{vac}
a_R^{\dagger }(E)a_R(E^{\prime })\left| 0\right\rangle =\delta (E-E^{\prime
})\left| 0\right\rangle \text{ , }m<E<V-m \end{equation}
\noindent Having specified the vacuum, the next and final step is the calculation of the vacuum expectation value of the current: \begin{equation} \label{vev}
\left\langle 0\right| j\left| 0\right\rangle =\frac 12\left( -\left\langle 0\right| \psi ^{\dagger }\sigma _y\psi \left| 0\right\rangle +\left\langle 0\right| \psi \sigma _y\psi ^{\dagger }\left| 0\right\rangle \right) \end{equation}
\noindent Substituting (\ref{exp}) in (\ref{vev}) and noticing that all terms involving $v_L$ and $v_R$ can be dropped since the corresponding energies lie outside the interesting range $m<E<V-m$ we end up with \begin{equation} \label{sum} \begin{array}{c}
\left\langle 0\right| j\left| 0\right\rangle =-\frac 12\int dEdE^{\prime
}\{\left\langle 0\right| a_L^{\dagger }(E)a_L(E^{\prime })\left| 0\right\rangle u_L^{\dagger }(E,x)\sigma _yu_L(E^{\prime },x)+ \\ \\
+\left\langle 0\right| a_L(E)a_L^{\dagger }(E^{\prime })\left|
0\right\rangle u_L^{\dagger }(E^{\prime },x)\sigma _yu_L(E,x)-\left\langle 0\right| a_R^{\dagger }(E)a_R(E^{\prime })\left| 0\right\rangle u_R^{\dagger }(E,x)\sigma _yu_R(E^{\prime },x)+ \\ \\
+\left\langle 0\right| a_R(E)a_R^{\dagger }(E^{\prime })\left| 0\right\rangle u_R^{\dagger }(E^{\prime },x)\sigma _yu_R(E,x)\} \end{array} \end{equation}
\
\noindent The first term in (\ref{sum}) vanishes due to (\ref{a1}). The second term becomes
$u_L^{\dagger }(E^{\prime },x)\sigma _yu_L(E,x)\delta (E-E^{\prime })$ if we use the anticommutation relations and (\ref{a1}). The third term yields $ -u_R^{\dagger }(E,x)\sigma _yu_R(E,x)\delta (E-E^{\prime })$ using (\ref{vac}
) and the fourth term vanishes using the anticommutation relations (i.e. the exclusion principle; the state $\left| 0\right\rangle $ already contains an electron in the state $u_R$ hence we get zero when we operate on it with $ a_R^{\dagger }$). One energy integration is performed immediately using the $ \delta $ function. We obtain
\begin{equation}
\left\langle 0\right| j\left| 0\right\rangle =\frac 12\int dE(-j_L+j_R)=- \frac 1{2\pi }\int dE\frac{4\kappa (E)}{(\kappa (E)+1)^2}=-\frac 1{2\pi } \int dET_S(E) \label{res} \end{equation}
\noindent where the energy integration is over the Klein range $m<E<V-m$.
\
\noindent It is now straightforward to generalise Eq (\ref{res}) to any step potential for which $V(x<0)=V_1;\,V(x>L)=V_2$ and $V_2-V_1>2m$ such as those considered by Sauter and Hund to obtain Eq (\ref{pprod}) linking the pair production current with the transmission coefficient.
\begin{references} \bibitem{klein} O.Klein, Z.Phys. ${\bf 53},157\,(1929)$
\bibitem{dirac} P A M Dirac, Proc. Roy. Soc. ${\bf 117},612(1928)$
\bibitem{kleinf} A\ Calogeracos and N Dombey, Int J Mod Phys A $(1999)$. This paper is referred to as CD in the text.
\bibitem{saut} F Sauter, Z.Phys ${\bf 69},742$ $(1931)$
\bibitem{hund} F Hund, Z.Phys ${\bf 117},1$ $(1941)$
\bibitem{schwing} J Schwinger, Phys. Rev. ${\bf 82}$ $664\,(1951)$
\bibitem{hawk} S Hawking, Nature, ${\bf 284,}30(1974)$
\bibitem{zeld} Ya B\ Zeldovich and S S Gershtein, Zh . Eksp. Teor. Fiz. $ {\bf 57},654(1969)$; Ya B\ Zeldovich and V\ S\ Popov, Uspekhi Fiz Nauk, $ {\bf 105},403(1971)$
\bibitem{grein2} W Pieper and W Greiner, Z. Phys.${\bf 218,}327\,(1969)$
\bibitem{bruce} S A Bruce, Am. J. Phys. ${\bf 54},446\,(1986)$
\bibitem{hans} A Hansen and F Ravndal, Physica Scripta, ${\bf 23} ,1033\,(1981)$
\bibitem{mano} C A Manogue, Ann. Phys.(N.Y.) ${\bf 181},261$ $(1988)$
\bibitem{jens} H G Dosch, J H D Jensen and V F Muller, Phys. Norvegica $ {\bf 5},151(1971)$
\bibitem{cdi} A\ Calogeracos, N Dombey and K Imagawa, Yadernaya Fizika $ {\bf 159},1331(1996)$ (Phys.At.Nucl ${\bf 159},1275(1996)$). This paper is referred to as CDI in the text.
\bibitem{stratton} J A Stratton, Electromagnetic Theory, McGraw-Hill, New York, 1941, p.512. See also D S Jones, The Theory of Electromagnetism, Pergamon, Oxford 1964
\bibitem{stone} M\ Stone, Phys Rev {\bf B31}, 6112 (1985).
\bibitem{blank} R Blankenbecler and D Boyanovsky, Phys. Rev {\bf D31}, 2089 (1985)
\bibitem{bak} Quoted by F Bakke and H Wergeland, Phys. Scripta {\bf 25}, 911 (1982)
\bibitem{LL} L D Landau and E M Lifshitz, Quantum Mechanics (3rd Edition), Pergamon, Oxford 1977, p. 182
\bibitem{rose} M E Rose, Relativistic Electron Theory, Wiley, New York, p.191 1961
\bibitem{semi} E O Kane and E I Blount, Interband Tunneling, in
E Burstein and S Lundqvist Eds. Tunneling Phenomena in Solids, Plenum (New York) 1969, p.79.
\bibitem{lanc1} C\ A\ M\ Castelijns, K\ F\ Coates, A\ M\ Guenault, S\ G\ Mussett and G\ R\ Pickett, Phys Rev Lett {\bf 56}, 69 (1986).
\bibitem{lanc2} J\ P\ Carney, A\ M\ Guenault, G\ R\ Pickett and G\ F\ Spencer, Phys Rev Lett {\bf 62}, 3042 (1989)..
\bibitem{cv} A\ Calogeracos and G\ E Volovik, ZhETF {\bf 115}, 1 (1999). \end{references}
\begin{figure}
\caption{The potential $V(x)$ of the Klein step}
\end{figure}
\begin{figure}
\caption{An electron of energy $E$ scattering off a Klein step of height $V>2m$. The electrons are shown with solid arrowheads; the hole state has a hollow arrowhead. The particle continuum (slant background) and the hole continuum (shaded background) overlap when $m<E<V-m.$}
\end{figure}
\begin{figure}
\caption{A potential $V(x)$ of the Sauter form representing constant electric field in the region $0<x<L$.}
\end{figure}
\begin{figure}
\caption{A potential $V(x)$ representing a square barrier of height $V$ in the region $-a<x<a$.}
\end{figure}
\end{document} | arXiv | {
"id": "9905076.tex",
"language_detection_score": 0.8208939433097839,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\thispagestyle{empty} \title{On the Precision to Sort Line-Quadric Intersections}
\begin{abstract}
To support exactly tracking a neutron moving along a given line
segment through a CAD model with quadric surfaces, this paper
considers the arithmetic precision required to compute the order of
intersection points of two quadrics along the line segment. When the
orders of all but one pair of intersections are known, we show that a
resultant can resolve the order of the remaining pair using only
half the precision that may be required to eliminate radicals by
repeated squaring. We compare the time and accuracy of our technique
with converting to extended precision to calculate roots. \end{abstract}
\section{Introduction} In this work, we are concerned with ordering the points of line-quadric intersections in 3 dimensions, where the inputs are representable exactly using $w$-bit fixed-point numbers. We will actually use floating point in storage and computation, but our guarantees will be for well-scaled inputs, which are easiest described as fixed-point. A {\it representable point}~$q$ or {\it representable
vector}~$v$ is a $3$-tuple of representable numbers $(x, y, z)$. The line segment from point~$q$ to~$q+v$ is defined parametrically for $t\in [0,1]$ as $\ell(t)=q+tv$; note that there may be no representable points on line $\ell$ except its endpoints (and even $q+v$ may not be representable, if the addition carries to $w+1$~bits.)
A quadric is an implicit surface defined by its 10 representable coefficients, \begin{align*}Q(x, y, z)=q_{xx} x^2 &+ q_{xy} xy + q_{xz} xz + q_x x + \dots \\ &+ q_{zz} z^2 + q_{z} z + q_c = 0. \end{align*} For more accuracy, we can allow more precision for the linear and quadratic coefficients, since we will need $3w$~bits to exactly multiply out the quadratic terms, or we can use a representable symmetric $3{\times} 3$ matrix~$M$, a representable vector~$v$, and a $3w$-bit constant~$R$ to give a different set of quadrics $\tilde Q(p) = (p-v)^TM(p-v) = R$ that is closed under representable translations of~$v$. Whichever definition of quadrics is chosen, the parameter values for line-quadric intersections are the roots of $Q(\ell(t))=0$, which can be expressed as a quadratic $at^2+2bt+c=0$ whose coefficients can have at most $3w+4$~bits. (Four carry bits suffice to sum the $3w$-bit products; $w=16$ allows exact coefficient computation as IEEE 754 doubles; $w=33$ as pairs of doubles.)
These definitions are motivated by a problem from David Griesheimer, of Bettis Labs: rather than tracking a particle through quadric surfaces in a CAD model, would it be more robust to compute the intervals of intersections with a segment? We compare three methods to order line-quadric intersections. Our methods, particularly the third, are developed and tested for the case where only one pair of roots has a difference that is potentially overwhelmed by the rounding errors in the computation. We comment at the end how to handle pairs of quadric surfaces that have more than one pair of ambiguous roots.
\section{Methods} This section outlines three methods---Approximate Comparison, Repeated Squaring, and Resultant---to sort the intersections with two quadrics, $Q_1$ and $Q_2$, with a given line $\ell(t)$, or equivalently, the roots of two quadratics, $a_1t^2+2b_1t+c_1=0$ and $a_2t^2+2b_2t+c_2=0$. For each, we evaluate correctness, precision, and floating-point arithmetic operations (FLOPs) required.
\subsection{Approximate Comparison} The approximate comparison method computes, for $i\in\{1, 2\}$, the roots~$r_i^\pm=({b_i\pm\sqrt{b_i^2-a_ic_i}})/{a_i}$ approximately by computing each operation in IEEE 754 double precision or in extended precision. Actually, to avoid subtractive cancellation, we calculate one of the two roots as $r_i^{-\sign
b_i}=-c_i/({b_i+(\sign{b_i})\sqrt{b_i^2-a_ic_i}})$. The order of any two chosen approximate roots can be calculated exactly as~$\sign(r_1^\pm-r_2^\pm)$.
The rounding of floating point arithmetic means that even with representable input, the correct order is not guaranteed unless we establish a gap or separation theorem (which are also established using resultants~\cite{brownawell2009lower,emiris2010dmm}) and compute with sufficient precision. Determining this precision is a longstanding open problem~\cite{demaine33open}. Without a guarantee, this method requires very little computation. Computing both roots takes $12$ FLOPs, with one more to compute the sign of the difference. Moreover, the roots can be reused in a scene of many quadrics.
We also use extended precision, where the multiplications and addition in the discriminants are calculated with $6w$ bits, square root and addition at $12w$ bits, and divisions at $24w$ bits. To actually perform the comparison, one final subtraction is required at 24 times the initial precision -- 1 FLOP, with an initialization cost of 10 FLOPs per quadric intersecting the line.
\subsection{Repeated Squaring} The repeated squaring method computes $\sign(r_1^\pm-r_2^\pm)$ by algebraic manipulations to eliminate division and square root operations, leaving multiplications and additions whose precision requirements can be bounded. It uses, for $x\ne 0$, the property that $\sign(y)=\sign(x)\sign(x\cdot y)$. Divisions can be removed directly, since $\sign(r_1^\pm-r_2^\pm)=\sign(a_1 a_2)\sign(a_1 a_2 (r_1^\pm-r_2^\pm))$. One square root can be eliminated by multiplying by $r_1^\pm-r_2^\mp$, giving~$\sign(a_1 a_2)\sign(a_1 a_2 (r_1^\pm-r_2^\mp))\cdot\sign(a_1^2 a_2^2 (r_1^\pm - r_2^\pm) (r_1^\pm - r_2^\mp))$. When simplified, the final sign is computed from~$a_2^2b_1^2-2a_1a_2^2c_1+2a_1^2a_2c_2-a_1a_2b_1b_2\pm \sqrt{(a_1a_2b_2-a_2^2b_1)^2(b_1^2-4a_1c_1)}$.
The expression under the radical is correctly computed with $8\times$ the input precision; the remaining expression can be evaluated to a little more than $4\times$ input precision in floating point, or can be evaluated in fixed point in $8\times$ input precision by isolating the radical and squaring one last time.
This method not only requires high precision, but also a large number of FLOPs. Computing the unambiguous sign of the difference of the roots requires 15 FLOPs total, and correctly computing the final sign requires another 24 FLOPs. Unfortunately, many of the computed terms require coefficients from both polynomials; only the discriminants, squares, and products can be precomputed, which reduces the number of FLOPs by 14. This brings us to 25 FLOPs per comparison, with an initialization cost of 14 FLOPs per quadric.
Note that this method uses our assumption that we know $\sign(r_1^\pm-r_2^\mp)$ when computing $\sign(r_1^\pm-r_2^\pm)$, but we can learn this from a lower precision test against $-b_2/a_2$, since $r_2^- \le -b_2/a_2 \le r_2^+$.
\subsection{Resultant} This method was previously described in \cite{fastaccuratefp}, but a description is included here for completeness.
The resultant method computes the order of two intersections from the resultant for their polynomials, which can be written as the determinant of their Sylvester Matrix~\cite[Section~3.5]{cheeyap}. The general Sylvester Matrix for polynomials~$P(t)=p_m t^m + \dots + p_0$ and~$Q(t)=q_n t^n + \dots + q_0$ is defined as in Equation \ref{eq:sylv}.
\begin{equation}
res(P, Q)=\begin{pmatrix}
p_m & \dots & & p_0 & 0 & & 0\\
0 & p_m & \dots & & p_0 & & 0\\
& \ddots & & & & \ddots\\
0 & 0 & & p_m & \dots & & p_0\\
q_n & \dots & & q_0 & 0 & & 0\\
0 & q_n & \dots & & q_0 & & 0\\
& \ddots & & & & \ddots\\
0 & & q_n & \dots & & q_0\\
\end{pmatrix}
\label{eq:sylv} \end{equation}
The resultant is also the product of the differences of $P$'s roots, $a_1$, \dots, $a_n$, and $Q$'s roots, $b_1$, \dots, $b_m$, as in Equation~\ref{eq:resultant}.~\cite[Section~6.4]{cheeyap} \begin{equation}
res(P, Q)=p_m^n q_n^m \prod_{i=1}^m\prod_{j=1}^n (a_i-b_j)
\label{eq:resultant} \end{equation}
\begin{figure*}\label{eq:signroot}
\end{figure*}
The two expressions for the resultant provide us with another method of computing the sign of one of the differences of the two roots. Under our assumption that we know the order of all pairs of roots except, say, $a_1$ and $b_1$, we can compute $\sign(a_1-b_1)$ from the determinant and known signs, as in Equation \ref{eq:signroot} at the top of the next page. The signs need not be multiplied; we simply count the negatives. With quadratics, $m=n=2$, so the signs of the leading~$p_2^2$ and~$q_2^2$ will be positive and can be ignored.
The determinant can be computed with half the precision and fewer floating point operations than repeated squaring to correctly compute the sign of the differences of roots of the polynomials.
Computing a general $4{\times} 4$ determinant takes about 120 multiplications, and computing the determinant of the Sylvester matrix itself would naively take~$35$ FLOPs for each comparison. We can do better in Equation~\ref{eq:sylvpoly} by writing the determinant in terms of the discriminants and other precomputed $2{\times} 2$ minors from each polynomial. This brings us to 11 FLOPs per comparison, with an initialization cost of 7 FLOPs per intersection.
\begin{figure*}\label{eq:sylvpoly}
\end{figure*}
\section{Experimental Evaluation} We experimentally evaluated the resultant method and the approximate computation method with both machine precision and extended precision. Repeated Squaring is dominated by the other methods so was not tested.
We created two types of test scenes that had touching surfaces so that random lines might have some chance (albeit small) to give incorrect orders under approximation, and count the number of disagreements. We evaluated time per comparison for each method on computers with different processors. Finally, by varying the number of surfaces in the second type of scene, we could use linear regression to determine the contribution to running time from per quadric and per comparison terms.
\subsection{Experimental Setup} All methods were implemented in C++, and were tested by computing the line-quadric intersection orders along random lines in scenes of quadric surfaces. The creation of these lines and quadric surfaces is described in the next subsection. Machine precision tests were performed in IEEE 754, with quadratic coefficients and discriminants stored as single precision floats, with all machine precision computations performed as floats. MPFR\cite{mpfr} was used to support arbitrary precision in both the approximate comparison and the resultant methods. The approximate comparison method used $24\times$ the precision of a float. The resultant comparison method also used $24\times$ the precision of a float, to account for the range of exponents in the inputs.
The first step of the evaluation for a line $\ell$ and quadric $Q$ was to determine if there was a real intersection by evaluating the discriminant of the quadratic~$p(t)=Q(\ell(t))$. This evaluation was done in machine precision, so there is a small chance that near tangent intersections may have been missed due to numeric error in calculating the discriminant. (In our application, missing near tangent intersections was allowed, but getting orders wrong had been known to trap particles into repeatedly trying to cross the same pair of surfaces, which tends to worry a physicist.)
If the intersections are deemed to exist, the second step is to compute the roots at machine precision. These roots are needed to determine if the order of a pair of intersections is ambiguous or not. Finally, the stl sort algorithm is used to sort the intersections. The full process was timed in nanoseconds with the POSIX clock\_gettime function.
The comparison function used for sorting came from the method being evaluated. The machine precision approximate comparison just returns the difference of the previously computed roots. In the increased precision approximation and the resultant method, the difference of the roots is compared against a threshold. If the difference was smaller than a threshold of $2^{-16}$, the more accurate method provided is used to determine the order, and an appropriate value is returned. This occurred infrequently for a random line, and is only expected to occur a few times for every 100k lines.
We ran tests on two computers with different speeds and operating systems; we name them by their operating systems.
\noindent{\bf Arch} was a Core i3 M370 processor with 2 cores, a 3 MB cache, and 4 GB of DDR3 memory clocked at 1 GHz. It ran an up-to-date installation of Arch Linux, kernel version 4.4, and GCC 6.0 was used to compile the code. For the tests, the performance manager was set to keep the CPU clock at 2.4 GHz, and the process was run with a nice value of $-20$.
\noindent{\bf Gentoo} was a Core 2 Duo E6550 processor with two cores, a 4 MB cache, and 8 GB of DDR2 memory clocked at 667 MHz. It ran an up-to-date installation of Gentoo Linux, kernel version 4.1 and GCC 4.9 was used to compile the code. For the tests, the performance manager was set to keep the CPU clock at 2.3 GHz, with a nice value of $-20$.
A Geekbench benchmark was employed to estimate the floating point processor speeds, Arch 1702, and Gentoo 1408. Thus, on average, Arch was capable of about 1.2 times more FLOPS than the Gentoo computer.
\subsection{Test Scenes} We created two types of test scenes: a single scene of Packed Spheres and a set of scenes of Nested Spheres. The test scenes consisted of quadric surfaces stored as IEEE754 single precision floating point numbers. We preferred spheres and ellipsoids, since any intersecting line would intersect twice, possibly with a repeated root. Sorting isolated single roots is easier, since, for example, the intersection with a plane requires less precision. The quadric surfaces were constructed from the unit cube that has one corner at the origin and the opposite corner at $(1.0, 1.0, 1.0)$.
The single scene of Packed Spheres consisted of 1331 spheres in a hexagonal close packing lattice shown in Fig.~\ref{fig:testScenes}. This ensures that the spheres each have 12 intersecting or nearly intersecting neighbors. The spheres each have a radius of about~$0.05$ units, and are spaced about~$0.05$ units from each other. The initial sphere is centered at the origin, and one of the axes of the lattice is aligned with the $y$ axis of the coordinate frame. The coefficients of the spheres are scaled so that the coefficients of the squared terms were all $1.0$. This caused the exponent range for the non-zero coefficients of the spheres to be between~$-8$ and~$1$, which is well within the limits required for the resultant method to return correct results.
The random lines generated for the scenes of Packed Spheres were generated with an intersect from a uniform distribution over the unit cube. The directions were generated by normalizing a vector chosen from a uniform distribution over the cube with opposite corners at $(-1.0, -1.0, -1.0)$ and $(1.0, 1.0, 1.0)$. To ensure that we are able to compute the order of intersections exactly with the resultant method, the exponents of the non-zero terms were constrained between -20 and 0.
We used eleven scenes of Nested Spheres. One, shown in Fig.~\ref{fig:testScenes}, had $n=10$ spheres, the others had $n=100i$, for $1\leq i \leq 10$. The first sphere was centered at~$x_0=0.5, y_0=0.5, z_0=0.5$ units with a radius of~$R_0=0.5$ units. The radius of successive spheres decreased linearly so that the final sphere's radius was $R_n=2^{-16}$ units. Thus, $R_i=R_{i-1}-(R_0-R_n)/n$. The $x$ position of successive spheres increased linearly to fix the minimum distance at~$\epsilon=2^{-19}$ units. Thus, $x_i=x_{i-1}+(R_0-R_n)/n-\epsilon$. The exponent range for the non-zero coefficients of the spheres was chosen to be between~$-1$ and~$0$, which is well within the limits required for the resultant method to return correct results.
The random lines generated for the scenes of Nested Spheres were generated with intersects~$p_i$ from a uniform distribution over the unit cube. The directions were set as~$(1.0, 0.5, 0.5)-p_i$, where $(1.0, 0.5, 0.5)$ is a point very close to the points of minimum distance for the sets of spheres. This made it very probable that increased precision would be required to correctly compute the order of intersections. To ensure that we are able to compute the order of intersections exactly with the resultant method, the exponents of the non-zero terms were constrained between -20 and 0.
\begin{figure}
\caption{Test Scenes of 1331 Packed Spheres and 10 Nested Spheres,
which is smallest of a family of eleven. Random lines in Packed
Spheres have some chance of being near sphere contacts. Random
lines in Nested Spheres are unlikely to, unless they are biased to
pass by the near tangency.}
\label{fig:testScenes}
\end{figure}
\subsection{Analysis}
The time that it takes to compute the order of intersections between a given line and a scene of quadric surfaces is expected to be linear in both the number of quadric surfaces and the number of accurate comparisons made. Because performing accurate comparisons is so much more expensive than normal comparisons, we expect there to be a clear linear relation between the number of accurate comparisons performed and the time it takes to perform the sorting.
The number of quadrics, on the other hand, can significantly affect the number of intersections in the list to be sorted, especially in antagonistic scenes. However, most of the time spent sorting will be accounted for by the time spent making accurate comparisons, which we have already accounted for. Thus, the remaining time will instead come from computing the approximate roots, which is linear.
To analyze the Packed Spheres timing data, we used least squares to fit a line to the number of comparisons made and the timing data. A constant term was also computed for the time taken computing the approximate roots.
To analyze the set of Nested Spheres scenes, we aggregated the test results for the scenes so that we could use least squares to fit a plane to the number of comparisons made, the number of quadric surfaces, and the timing data. A constant term was also computed to catch any hidden initialization costs, though we expect this to contain mostly noise.
\section{Experimental Results} The results of the experiments are shown in Table~\ref{tab:times}. The first thing to notice is that increasing the precision of a computation is not enough to guarantee that the result will be computed correctly. Despite increasing the precision of the computations to~$24\times$ the initial precision, the increased precision approximation still fails for~$1044/11000$ of the random lines in the Nested Spheres scenes. It did, however, perform significantly better than the original calculation, which failed for~$8272/11000$ of the lines. More lines are needed to find examples that cause errors in the Packed Spheres scene, but based on previous experiments, we can expect several to occur by the $100\text{k}^\text{th}$ test.
In addition to guaranteeing correctness, the resultant method also performed well against the generic increased precision method. For the set of Nested Spheres scenes, it cost slightly more to compute the order of intersections on a time per quadric basis. The approximate computation with increased precision can cache intermediate values more effectively, reducing its cost.
The resultant method performed extremely well on the time per comparison basis, as it actually beat the increased precision method by more than it lost out on in the time per quadric basis in the Nested Spheres scenes, and the Packed Spheres scene on the Gentoo machine.
After removing the time per quadric basis in the tests with the Nested Spheres scenes, the constant term appears somewhat nonsensical. From previous experiments, we have concluded that this is mostly noise, suggesting that we obtained most of the useful information from the measured times. This suggests the time per quadric is the main contributor to the constant time in the tests with the Packed Spheres scene as we expected.
\begin{figure}
\caption{Evaluation Time for a Line (ms) vs. the Number of
Comparisons; Sorting the intersections of 1k lines in each of the
Nested Spheres test scenes on the Gentoo machine; {\bf Red Dot's
(above the bars): Approximation Method at $24\times$ the Input
Precision}; {\bf Blue Bars (beneath the dots): Resultant
Method}. The least squares coefficient for the time per
quadrics has been subtracted out to better show the actual
fit. The lines show the respective least squares fits without the
quadric term.}
\label{fig:linefit}
\end{figure}
Figure~\ref{fig:linefit} shows a plot of the results from one of the tests. It appears to confirm our expectation that the time required is linearly correlated with the number of precision increases.
\begin{table*}
\caption{Analysis of the timing of the Approximate Comparison and
Resultant Comparison. Timing data for 11k lines was analyzed for
the Packed Spheres scene to find the coefficients of the best
fitting lines. Timing data for 1k lines was analyzed for each of
the set of 11 Nested Spheres scenes to find the coefficients of
the best fitting planes. The dimensions are the number of quadric
surfaces and the number of increased precision comparisons made.}
\label{tab:times}
\centering
\begin{tabular}{|l|l|ll|lll|l|} \hline Scene & Machine & Method & Errors & ms/Quadric & ms/Comp & Const ms & $\sum$ Residual ($\text{ms}^2$)\\
\hhline{|=|=|==|===|=|} Nested & Arch & Approximate & 8272 & 0.00425 & \hphantom{-}0.000361 & -0.0693 & \hphantom{000}\hphantom{-}149.084\\ Spheres & & Increased Prec. & 1044 & 0.00554 & \hphantom{-}0.105 & -0.567 & \hphantom{0}\hphantom{-}87655.1\\
& & Resultant & \hphantom{---}--- & 0.00670 & \hphantom{-}0.100 & -0.746 & \hphantom{0}\hphantom{-}80544.6\\
\hhline{|~|-|--|---|-|}
& Gentoo & Approximate & 8244 & 0.00379 & \hphantom{-}0.000313 & -0.0519 & \hphantom{0000}\hphantom{-}34.4705\\
& & Increased Prec. & 1042 & 0.00484 & \hphantom{-}0.146 & -0.110 & \hphantom{0}\hphantom{-}11944.9\\
& & Resultant & \hphantom{---}--- & 0.00584 & \hphantom{-}0.141 & \hphantom{-}0.00485 & \hphantom{0}\hphantom{-}19872.5\\
\hhline{|-|-|--|---|-|} Packed & Arch & Approximate & \hphantom{000}0 & -- & \hphantom{-}0.00738 & \hphantom{-}4.54 & \hphantom{0000}\hphantom{-}21.7059\\ Spheres & & Increased Prec. & \hphantom{000}0 & -- & \hphantom{-}0.126 & \hphantom{-}4.49 & \hphantom{0000}\hphantom{-}22.4387\\
& & Resultant & \hphantom{---}--- & -- & \hphantom{-}0.130 & \hphantom{-}4.51 & \hphantom{0000}\hphantom{-}23.5822\\
\hhline{|~|-|--|---|-|}
& Gentoo & Approximate & \hphantom{000}0 & -- & \hphantom{-}0.00180 & \hphantom{-}4.37 & \hphantom{00000}\hphantom{-}3.75176\\
& & Increased Prec. & \hphantom{000}0 & -- & \hphantom{-}0.156 & \hphantom{-}4.37 & \hphantom{00000}\hphantom{-}3.76225\\
& & Resultant & \hphantom{---}--- & -- & \hphantom{-}0.155 & \hphantom{-}4.41 & \hphantom{00000}\hphantom{-}3.83604\\ \hline \end{tabular}
\end{table*}
\section{Conclusion} In this paper we showed how the resultant method can guarantee the correct order of line-quadric intersections at a similar cost to using an increased precision approximation method. We have also shown that naively using increased precision to improve accuracy is not enough to eliminate errors, and that one must take into account the operations being used and the ranges of the input.
We have assumed that we know the order of all roots except one pair. Even if one's application does not provide this information, for quadratic equations it is relatively easy to obtain using lower precision than it takes to compare roots. The zero of the derivative $x_i=-b_i/a_i$ separates $r_i^-$ and $r_i^+$ by value of the discriminant. If $x_1=x_2$ then comparing squared discriminants tells us all we need to know about root orders. When, wlog, $x_1<x_2$, we use the signs of both quadratics at $x_1$ and $x_2$ to bound roots to intervals, and can again compare squared discriminants to reveal the order for all but one pair.
\section{Acknowledgment} We thank David Griesheimer for discussions on this problem, and both NSF and Bettis Labs for their support of this research.
\end{document} | arXiv | {
"id": "1605.05629.tex",
"language_detection_score": 0.8830905556678772,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{A Bourgain-Pisier construction for general Banach spaces} \author{J. Lopez-Abad}
\address{Instituto de Ciencias Matematicas (ICMAT), CSIC-UAM-UC3M-UCM, Madrid, Spain.} \email{abad@icmat.es}
\subjclass[2010]{46B03,\,46B26}
\keywords{$\mc L_\infty$ spaces, Schur property, non-separable spaces.}
\begin{abstract} We prove that every Banach space, not necessarily separable, can be isometrically embedded into a $\mc L_{\infty}$-space in a way that the corresponding quotient has the Radon-Nikodym and the Schur properties. As a consequence, we obtain $\mc L_\infty$ spaces of arbitrarily large densities with the Schur and the Radon-Nikodym properties. This extends the result by J. Bourgain and G. Pisier in \cite{BP} for separable spaces. \end{abstract}
\maketitle
\section{Introduction}
The main question considered in this paper is the largeness of the class of $\mc L_\infty$ spaces
in terms of embeddability. Recall that a Banach space $X$ is called $\mc L_{\infty,\lambda}$ when for every
finite dimensional subspace $F$ of $X$ there is a subspace $G$ of $X$ $\lambda$-isomorphic to $\ell_\infty^{\dim
G}$ containing $F$. $\mc L_\infty$ just means $\mc L_{\infty,\lambda}$ for some $\lambda$. There are two remarkable results for the class of separable $\mc L_\infty$ spaces. The first by J. Bourgain and G. Pisier in \cite{BP} states that every separable Banach space $X$ can be isometrically embedded into a $\mc L_\infty$-space $Y_X$ in such a way that the corresponding quotient space $Y_X/X$ has the Radon-Nikodym property (RNP) and the Schur property. The second, more recent one, by D. Freeman, E. Odell and Th. Schlumprecht \cite{FOS} states that every space with separable dual can be isomorphically embedded into a $\mc L_\infty$-space with separable dual (therefore an $\ell_1$-predual). Both constructions are the natural extensions of the work of J. Bourgain and F. Delbaen \cite{BD} and Bourgain \cite{B}. There are several other recent examples. Perhaps the most impressive one is the $\mc L_\infty$-space by S. A. Argyros and R. G. Haydon \cite{AH} where every operator is the sum of a multiple of the identity and a compact one.
In the non-separable context much less is known. Spaces of functions on a non-metrizable compactum, or non-separable Gurarij spaces are non-separable $\mc L_\infty$-spaces. There is a wide variety of structures in the non-separable level for spaces in these two classes. Based on combinatorial axioms outside ZFC, there are non-separable spaces in these two classes without uncountable biorthogonal systems or where every operator is the sum of a multiple of the identity and an operator with separable range (see \cite{LT} for more information). On the other hand, the separable structure of the known examples is too simple: either they are $c_0$-saturated, that is, every infinite dimensional subspace of it contains an isomorphic copy of $c_0$, or universal for the separable spaces. So it is natural to ask if there are examples of non-separable $\mc L_\infty$-spaces without isomorphic copies of $c_0$, with the (RNP) or with the Schur property. Our main result in Theorem \ref{maintheorem} is that the embedding Theorem by Bourgain and Pisier remains valid for any density. In particular, by embedding $\ell_1(\kappa)$ for an infinite cardinal number $\kappa$, we obtain examples of $\mc L_\infty$ spaces of arbitrary density with the Radon-Nikodym and the Schur properties.
For a given separable space $X$, the corresponding Bourgain-Pisier superspace $Y_X$ of it is built in such a way that $Y_X$ and the quotient $Y_X/X$ are both the inductive limit of \emph{linear} systems $(Z_n,j_n)_{n\in {\mathbb N}}$ of a special type of isometrical embedding $j_n:Z_n\to Z_{n+1}$ ($\eta$-admissible embeddings, see Definition \ref{ioioijo4trt}), and such that, in addition, the corresponding $Z_n$'s are finite dimensional for the quotient space $Y_X/X$. The key fact to get the Radon-Nikodym and the Schur properties of the quotient space $Y_X/X$ is the metric property of $\eta$-admissible embeddings exposed here in Lemma \ref{kjdfklsdljfdw} and its consequence to inductive limits as above for finite dimensional spaces (see \cite[Theorem 1.6]{BP}).
In contrast to the separable case, the main difficulty in the non-separable case is the construction of the appropriate inductive limit. Indeed, if $X$ is non-separable, then it is unlikely to find a nice linear system having the space $Y_X$ as the corresponding limit. In general, every Banach space $X$ is naturally represented as the inductive limit of its finite dimensional subspaces together with the corresponding inclusions between them. Our inductive system $((E_s)_{s\in I}, (j_{t,s})_{t \subseteq s})$ to represent $Y_X$ is also based on the inclusion relation over the index set $I$ consisting of all finite subsets of the density of the space $X$. This provides a natural way to isometrically embed $X$ into $Y_X$. In addition, our inductive system is constructed in a way that its linear subsystems $((E_{s_n})_{n\in {\mathbb N}},(j_{s_n,s_{n+1}})_{n\in {\mathbb N}})$ are Bourgain-Pisier linear systems as above. In other words, every separable subspace $Z$ of $Y_X$ can be isometrically embedded into the separable Bourgain-Pisier extension $Y_Z$. So, taking into account that the Radon-Nikodym and Schur properties are separably determined, we readily have that the quotient $Y_X/X$ has the desired properties.
To construct the spaces $E_s$ and the corresponding embeddings $j_{s,t}:E_s\to E_t$ we define first finite linear systems $((E_{t}^{(s)})_{t\subseteq s}, (j_{u,t}^{(s)})_{u\prec t})$ of $\eta$-admissible embeddings $j_{u,t}^{(s)}:E_{u}^{(s)}\to E_{t}^{(s)}$, where $\prec$ is a natural well ordering extending the inclusion relation. Obviously, this raises a problem of coherence, since given $s\subseteq p\subseteq q$ we will have defined two ``$s$-extensions'' $E_{s}^{(p)}$ and $E_{s}^{(q)}$ of $E_\emptyset=X$ and therefore two isometric embeddings $X\to E_s^{(p)}$ and $X\to E_s^{(q)}$. This is corrected by defining simultaneously an infinite directed system $((E_{s}^{(p)})_{s\subseteq p}, (k_{s}^{(p,q)})_{s\subseteq p\subseteq q})$ of $\eta$-admissible embeddings making the appropriate diagrams commutative.
Finally, let us point out that nothing is known about how to remove the separability assumption in the Freeman-Odell-Schlumprecht embedding Theorem, or, even more basically, whether a non-separable Bourgain-Delbaen space exists, i.e. a non-separable $\mc L_\infty$-space not containing isomorphic copies of $c_0$ or $\ell_1$.
The paper is organized as follows. Section 2 is a survey of basic facts concerning Kisliakov's extension method and $\eta$-admissible embeddings; in particular we present a new extension fact concerning these embeddings in Lemma \ref{i4jrijeerdthtyolo}. The last section is devoted to the proof of Theorem \ref{maintheorem}.
\section{Background and basic facts} We use standard terminology in Banach space theory from the monographs \cite{AK} and \cite{LiTza}. The goal of this section is to present the basic notions of $\eta$-admissible diagrams and $\eta$-admissible embeddings introduced by Bourgain and Pisier. To complete the information we give here, especially for some proofs, we refer the reader to the original paper \cite{BP} or to the recent book by P. Dodos \cite{Do}.
Recall the Kisliakov's extension method \cite{K}: Given Banach spaces $S\subseteq B$ and $E$ and an operator $u:S\to E$ such that $\nrm{u}\le \eta\le 1$, let \begin{align*} N_u:= & \conj{(s,-u(s))\in B\times E}{s\in S},\\ i_B:B& \to (B\oplus_1 E)/N_u \\ b &\mapsto i_B(b)=(b,0)+N_u \\ i_E:E &\to (B\oplus_1 E )/N_u \\ e&\mapsto i_E(e)=(0,e)+N_u \end{align*} Then the diagram $(K)$ \begin{equation*} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex] { B & (B\oplus_1 E)/N_u \\ S & E \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$i_B$} (m-1-2)
(m-2-1) edge node[below] {$u$} (m-2-2)
(m-2-2) edge node[right] {$i_E$} (m-1-2)
(m-2-1) edge node[right=26pt] {$(K)$} (m-1-1)
;
\path[right hook->]
(m-2-1) edge (m-1-1)
;
\end{tikzpicture} \end{equation*} is commutative, $i_E$ is an isometrical embedding, and $\nrm{i_B}\le 1$. This diagram has several categorical properties such as minimality and uniqueness. \begin{defin}\rm We say that a diagram \begin{equation*} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex] { B & E_1 \\ S & E \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$\bar{u}$} (m-1-2)
(m-2-1) edge node[below] {$u$} (m-2-2)
(m-2-2) edge node[right] {$j$} (m-1-2)
;
\path[right hook->]
(m-2-1) edge (m-1-1)
;
\end{tikzpicture} \end{equation*} is a $\eta$-admissible diagram when there is an isometry $T:(B\oplus_1 E)/N_{u}\to E_1$ such that $j=T\circ i_E$ and $\bar{u}=T\circ i_B $. The \emph{canonical} $\eta$-admissible diagram associated to the triple $(S,B,u)$ is the Kisliakov's diagram $(K)$ above.
An isometrical embedding $j:E\to E_1$ is called \emph{$\eta$-admissible embedding} when there are $S\subseteq B$, $E_1$, $u:S\to E$, $\bar{u}: B\to E_1$ forming together with $j:E\to E_1$ an $\eta$-admissible diagram. \end{defin} Observe that $\eta$-admissible diagrams are always commutative.
\begin{defin}\rm\label{ioioijo4trt}\cite{BP} A surjective operator $\pi:E\to F$ is called a \emph{metric surjection} when the associated isomorphism $\bar{\pi}:E/\mathrm{Ker}(\pi)\to F$ is an isometry.
\end{defin} The following are useful known characterizations, not difficult to prove. \begin{propo}\label{jiejrijeijifjdd} \begin{enumerate} \item[(a)] Let $S\subseteq B$, $E$ and $E_1$ be normed spaces, and $\eta\le 1$. A diagram $(\Delta)$ \begin{equation} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex] { B & E_1 \\ S & E \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$\bar{u} $} (m-1-2)
(m-2-1) edge node[below] {$u$} (m-2-2)
(m-2-2) edge node[right] {$j$} (m-1-2)
;
\path[right hook->]
(m-2-1) edge node[right=13pt] {$(\Delta)$} (m-1-1)
;
\end{tikzpicture} \end{equation} is an \emph{$\eta$-admissible diagram} if and only if \begin{enumerate} \item[$(\alpha.1)$] $j$ is an isometry and $\nrm{u} \le \eta$, \item[$(\alpha.2)$] $\pi:B\oplus_1 E \to E_1$ defined for $(b,e)\in B\times E$ by $\pi(b,e):=\bar{u}(b)+j(e)$ is a metric surjection. \item[$(\alpha.3)$] $\mathrm{Ker} (\pi)=N_u=\conj{(s,-u(s))}{s\in S}$.
\end{enumerate} \item[(b)] An isometrical embedding $j:E\to E_1$ is $\eta$-admissible iff there is some Banach space $B$ and a metric surjection
$\pi:B\oplus_1 E \to E_1$ such that $\pi(0,e)=j(e)$ for every $e\in E$ and $\nrm{\pi(b,e)}\ge \nrm{e}-\eta\nrm{b}$ for every $(e,b)\in E\times B$. \qed \end{enumerate}
\end{propo} It follows from (b) above that the composition of two $\eta$-admissible embeddings is also $\eta$-admissible. Although we are not going to used them directly, two metric properties of $\eta$-admissible embeddings crucial for the Radon-Nikodym and Schur properties of the Bourgain-Pisier quotient $Y_X/X$. \begin{lemin}\label{kjdfklsdljfdw} Suppose that the diagram $(\Delta)$ above is $\eta$-admissible. Then, \begin{enumerate} \item[(a)] $\nrm{\overline{u}(b)}=\nrm{(b,0)+\mathrm{Ker}(\pi)}=\inf_{s\in S}\nrm{b+s}+\nrm{u(s)}$ for every $b\in B$. Consequently, $\nrm{\overline{u}}\le 1$, and if there is $\delta\le 1$ such that $\nrm{u(s)}\ge \delta\nrm{s}$ for every $s\in S$, then $\nrm{\overline{u}(b)}\ge \delta\nrm{b}$ for every $b\in B$. In other words, if $u$ is an isomorphic embedding then so is $\bar{u}$ with better isomorphic constant. \item[(b)] Let $q:E_1\to E_1/j(E)$ be the natural quotient map. Suppose that $x_0,\dots,x_n\in E_1$ are such that $x_0+\dots+x_n\in j(E)$. Then $$\sum_{i=0}^n \nrm{x_i}\ge \nrm{\sum_{i=0}^n x_i}+(1-\eta)\sum_{i=0}^n \nrm{q(x_i)}.$$ \end{enumerate} \end{lemin} The fact in (b) is taken from \cite{Do} and it has an equivalent probabilistic reformulation in \cite{BP}. It is the key to prove the following. \begin{teore}\cite[Theorem 1.6.]{BP}\label{ijisjfjdsss} Suppose that $(E_n)_n$ is a sequence of finite dimensional spaces, and suppose that $j_n:E_n\to E_{n+1}$ is an $\eta$-admissible embedding for each $n$. Then the inductive limit of $(E_n,j_n)_n$ has the Schur and the Radon-Nikodym properties. \end{teore}
\subsection{One step extension} We finish this section with the following result, somehow stating that an appropriate composition of $\eta$-admissible diagrams is again $\eta$-admissible. \begin{lem} \label{i4jrijeerdthtyolo} \label{khwe4iothjiogff} Suppose that \begin{equation} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=2em, column sep=2em, text height=1.5ex, text depth=0.25ex] { B_0 & X_0 & & & X_2 \\ & & S_1 & B_1 & \\ S_0 & E & & & X_1 \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$\bar{u_0} $} (m-1-2)
(m-3-1) edge node[below] {$u_0$} (m-3-2)
(m-3-2) edge node[right] {$j_0$} (m-1-2)
(m-1-2) edge node[above] {$j_2$} (m-1-5)
(m-3-2) edge node[below] {$j_1$} (m-3-5)
(m-3-5) edge node[right] {$j$} (m-1-5)
(m-2-3) edge node[right] {$u_2$} (m-1-2)
(m-2-4) edge node[left] {$\overline{u_2}$} (m-1-5)
(m-2-3) edge node[right] {$u_1$} (m-3-2)
(m-2-4) edge node[left] {$\overline{u_1}$} (m-3-5)
;
\path[right hook->]
(m-3-1) edge node[right=8pt] {$(\Delta_0)$} (m-1-1)
(m-2-3) edge node[above=8pt] {$(\Delta.2)$} node[below=8pt] {$(\Delta.1)$} (m-2-4)
; \end{tikzpicture} \end{equation} is a commutative diagram such that: \begin{enumerate} \item[(1)] $(\Delta.0)$, $(\Delta.1)$ and $(\Delta.2)$ are $\eta$-admissible diagrams. \item[(2)] $j:X_1\to X_2$ is an isometry. \end{enumerate} Then the diagram
\begin{equation} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex] { B_0 & X_2 \\ S_0 & X_1 \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$j_2\circ\bar{u_0} $} (m-1-2)
(m-2-1) edge node[below] {$j_1\circ u_0$} (m-2-2)
(m-2-2) edge node[right] {$j$} (m-1-2)
;
\path[right hook->]
(m-2-1) edge
(m-1-1)
;
\end{tikzpicture} \end{equation} is $\eta$-admissible. \end{lem}
\begin{proof} Let $\pi:B_0\oplus_1 X_1\to X_2$, $\pi(b_0,x):=j_2(\bar{u_0}(b_0))+j(x)$, and for $i=0,1,2$, let $\pi_i: B_i\oplus_1 E_i\to X_i$ be defined by $\pi_i(b,e):= \bar{u_i}(b)+j_i(e)$, where $E_0=E_1=E$, $E_2=X_0$ and $B_2=B_1$. We have to check that $(\alpha.1)$, $(\alpha.2)$ and $(\alpha.3)$ in Proposition \ref{jiejrijeijifjdd} (a) hold. By hypothesis $j$ is an isometry and clearly $\nrm{j_1\circ u_0}=\nrm{u_0}\le \eta$, so we get $(\alpha.1)$. \begin{claim}\label{nerhoifdohidgf} $\pi(b_0,\pi_1(b_1,e))=\pi_2(b_1,\pi_0(b_0,e))$ for every $b_0\in B_0$, $b_1\in B_1$ and $e\in E$. \end{claim} \begin{proof}[Proof of Claim:] \begin{align*}
\pi(b_0,\pi_1(b_1,e))=& j_2(\bar{u_0}(b_0))+j(\pi_1(b_1,e))=j_2(\bar{u_0}(b_0))+j(\bar{u_1}(b_1)+ j_1(e))=\\
=& j_2(\bar{u_0}(b_0)+j_0(e))+j(\bar{u_1}(b_1))= j_2(\bar{u_0}(b_0)+j_0(e))+ \bar{u_2}(b_1)=\\
=& \pi_2(b_1,\bar{u_0}(b_0)+j_0(e))=\pi_2(b_1,\pi_0(b_0,e)).
\end{align*} \end{proof} It follows from this that $\pi$ is onto. \begin{claim}\label{hieroioijdfgdf} $\mathrm{Ker} (\pi)= \conj{(s_0,-j_1(u_0(s_0)))}{s_0\in S_0}=\conj{(b_0,\pi_1(0,-u_0(b_0)))}{b_0\in S_0}$. \end{claim} \begin{proof}[Proof of Claim:] The last equality follows from the fact that by definition, $\pi_1(0,-u_0(b_0))=-j_1(u_0(b_0))$. We prove now the first equality. Fix $s_0\in S_0$, and we work to prove that $\pi(s_0,-j_1(u_0(s_0)))=0$. Using the commutativity of the diagram we obtain \begin{align*} \pi(s_0,-j_1(u_0(s_0)))= & j_2(\overline{u_0}(s_0))-j(j_1(u_0(s_0)))=j_2(\overline{u_0}(s_0))-j_2(j_0(u_0(s_0)))= \\ =& j_2(\overline{u_0}(s_0)-j_0(u_0(s_0)) )= j_2(0)=0. \end{align*} Now suppose that $\pi(b_0,g)=0$. Let $(b_1,e)\in B_1\times E$ be such that $\pi_1(b_1,e)=g$. Then, by Claim \ref{nerhoifdohidgf}, it follows that \begin{equation} (b_1,\pi_0(b_0,e))\in \mathrm{Ker} (\pi_2). \end{equation} And hence, $b_1\in S_1$ and $\pi_0(b_0,e)=-u_2(b_1)$. It follows that \begin{align*} 0= & \bar{u_0}(b_0)+j_0(e)+u_2(b_1)=\bar{u_0}(b_0)+j_0(e)+j_0(u_1(b_1))= \pi_0(b_0,e+u_1(b_1)) \end{align*} So, $b_0\in S_0$ and $e+u_1(b_1)=-u_0(b_0)$. By applying $j_1$ to the last equality, we obtain that \begin{align*} g:=j_1(e)+\bar{u_1}(b_1)= -j_1(u_0(b_0)), \end{align*} as desired. \end{proof} It follows readily that $(\alpha.3)$ holds. It rests to prove the property $(\alpha.2)$.
\begin{claim}\label{jioiojoijw34ef} $\nrm{\pi(b_0,g)}=\inf_{s_0\in S_0}\nrm{b_0+s_0}+\nrm{g-j_1(u_0(s_0) )}$. \end{claim} \begin{proof}[Proof of Claim:] Fix $(b_1,e)\in B_1\times E$ such that $\pi_1(b_1,e)=g$. Then, by Claim \ref{nerhoifdohidgf} it follows that $\pi(b_0,g)=\pi_2(b_1,\pi_0(b_0,e))$. Hence, \begin{align*} \nrm{\pi(b_0,g)}=& \nrm{\pi_2(b_1,\pi_0(b_0,e))}=\nrm{(b_1,\pi_0(b_0,e))+\mathrm{Ker}(\pi_2)}=\\ =& \inf_{s_1\in S_1}\left( \nrm{b_1+s_1}+\nrm{\pi_0(b_0,e)-u_2(s_1)}\right)=\\ = & \inf_{s_1\in S_1} \left(\nrm{b_1+s_1}+\nrm{\pi_0(b_0,e)-j_0(u_1(s_1))}\right)= \\ = & \inf_{s_1\in S_1} \left(\nrm{b_1+s_1}+\nrm{\pi_0(b_0,e-u_1(s_1))}\right)= \\ =& \inf_{s_1\in S_1}\left( \nrm{b_1+s_1}+\inf_{s_0\in S_0}\left( \nrm{b_0+s_0}+\nrm{e-u_1(s_1)-u_0(s_0)}\right)\right)= \\ =& \inf_{s_0\in S_0} \left( \nrm{b_0+s_0}+\inf_{s_1\in S_1}\left(\nrm{s_1+b_1}+\nrm{e-u_1(s_1)-u_0(s_0)} \right)\right)=\\ =& \inf_{s_0\in S_0} \left( \nrm{b_0+s_0}+\nrm{ (b_1,e-u_0(s_0))+\mathrm{Ker}(\pi_1)} \right)=\\ =& \inf_{s_0\in S_0} \left( \nrm{b_0+s_0}+\nrm{\pi_1(b_1,e)+\pi_1(0,-u_0(s_0))} \right)=\\ =& \inf_{s_0\in S_0} \left( \nrm{b_0+s_0}+\nrm{g-j_1(u_0(s_0))} \right). \end{align*} \end{proof} From this we prove that $\pi$ is a metric surjection: Fix $(b_0,g)\in B_0\times G$, and let $(b_1,e)\in B_1\times E$ be such that $g=\pi_1(b_1,e)$. Then by the Claim \ref{hieroioijdfgdf}, it follows that \begin{align*} \nrm{(b_0,g)+\mathrm{Ker}(\pi)}=& \nrm{(b_0,\pi_1(b_1,e))+\mathrm{Ker}(\pi)}=\\ =& \inf_{s_0\in S_0}\left(\nrm{b_0+s_0}+\nrm{\pi_1(b_1,e)+\pi_1(0,-u_0(s_0))}\right)=\\ =& \inf_{s_0\in S_0}\left(\nrm{b_0+s_0}+\nrm{\pi_1(b_1,e-u_0(s_0))}\right)=\\ =& \inf_{s_0\in S_0}\left(\nrm{b_0+s_0}+\inf_{s_1\in S_1}\left(\nrm{b_1+s_1}+\nrm{e-u_0(s_0)-u_1(s_1)}\right)\right)=\\ =& \inf_{s_0\in S_0} \left( \nrm{b_0+s_0}+\nrm{g-j_1(u_0(s_0))} \right)=\nrm{\pi(b_0,g)}, \end{align*} the last equality by Claim \ref{jioiojoijw34ef}. \end{proof}
\section{The main result}\label{dkjferjeijr} Our goal is to isometrically embed a given Banach space, not necessarily separable, into a $\mc L_\infty$-space in such a way that the corresponding quotient has the Schur and the Radon-Nikodym properties. Extending the approach of Bourgain and Pisier, we will find the $\mc L_\infty$-space as a direct, not necessarily linear, limit of $\eta$-admissible embeddings. The following is our main result.
\begin{teore} \label{maintheorem} Every infinite dimensional Banach space $X$ can be isometrically embedded into a $\mc L_\infty$-space $Y$ of the same density as $X$ such that the quotient $Y/X$ has the Radon-Nikodym and the Schur properties. \end{teore}
\begin{coro} For every infinite cardinal number $\kappa$ there is a $\mc L_\infty$-space of density $\kappa$ with the Radon-Nikodym and the Schur properties. \end{coro} \begin{proof} For a fixed infinite cardinal number $\kappa$, apply Theorem \ref{maintheorem} to $X=\ell_1(\kappa)$. Then the corresponding superspace $Y$ is the desired space, since the required properties are three-space properties. \end{proof}
For the proof of Theorem \ref{maintheorem} we need the following two concepts. \begin{defin}\rm Recall that the \emph{anti-lexicographical} ordering $\prec$ on the family $[\kappa]^{<\omega}$ of finite subsets of $\kappa$ is defined recursively as follows: $\emptyset \prec s$ for every non empty $s$, and \begin{center} $t\prec s $ if and only if $\left\{ \begin{array}{ll} \max t<\max s & \text{ or}\\ \max t=\max s &\text{and } t\setminus \{\max t\}\prec s \setminus \{\max s\} \end{array}\right.$ \end{center} \end{defin}
This is a well-ordering on $[\kappa]^{<\omega}$ that extends the inclusion relation $\varsubsetneq $. We introduce some notation: For each $\emptyset\varsubsetneq t\subseteq s$, we denote by $\bar{t}^{(s)}$ the immediate $\prec$-predecessor of $t$ in the family $\mc P(s)$ of subsets of $s$, i.e. $$\bar{t}^{(s)}:=\max_\prec\conj{u\subseteq s}{u\prec t}.$$ Obviously this is well defined since $\mc P(s)$ is finite. We write $\bar{t}$ to denote $\bar t^{(t)}$.
\begin{defin}\rm Recall that a \emph{directed system} is $((X_i)_{i\in I}, (j_{i_0,i_1})_{i_0\le _{I}i_1})$, where $X_i$ are Banach spaces, $<_I$ is a directed partial ordering, $j_{i_0,i_1}:X_{i_0}\to X_{i_1}$ are isometrical embeddings, such that if $i_0\le_I i_1\le_I i_2$, then $j_{i_0,i_2}=j_{i_1,i_2}\circ j_{i_0,i_1}$, and such that $j_{i,i}=\text{Id }_{X_i}$.
\end{defin}
From now on we fix an infinite dimensional Banach space $X$ of density $\kappa$, and a dense subset $D=\conj{d_\alpha}{\alpha<\kappa}$ of it. For each $s\in [\kappa]^{<\omega}$, let $X_s$ be the linear span of $\{d_\alpha\}_{\alpha\in s}$. Fix also $\lambda>1$ and $\eta<1$ such that $\lambda \cdot\eta < 1$. \begin{lem} \label{ni43hjoit4hjt} There exist a directed system $((E_s)_{s\in [\kappa]^{<\omega}},(j_{s,t})_{s\subseteq t,\, s,t\in [\kappa]^{<\omega}})$ and a family $(G_s)_{s\in [\kappa]^{<\omega}}$ such that: \begin{enumerate} \item[(1)] $G_s\subseteq E_s$ are Banach spaces, $E_\emptyset=X$. \item[(2)] Each $j_{s,t}:E_s\to E_t$ is an $\eta$-admissible isometrical embedding such that $j_{s,t}E_s$ has finite codimension in $E_t$. \item[(3)] $G_s$ is $\lambda$-isomorphic to $\ell_\infty^{\dim G_s}$. \item[(4)] $\bigcup_{t\varsubsetneq s}j_{t,s}(G_t)\cup j_{\emptyset,s}(X_s)\subseteq G_s$. \end{enumerate} \end{lem} We are ready now to give a proof of Theorem \ref{maintheorem} from this lemma.
\begin{proof}[Proof of Theorem \ref{maintheorem}] Fix $((E_s)_{s\in [\kappa]^{<\omega}},(j_{s,t})_{s\subseteq t,\, s,t\in [\kappa]^{<\omega}})$ and $(G_s)_{s\in [\kappa]^{<\omega}}$ as in Lemma \ref{ni43hjoit4hjt}. Let $E$ be the completion of the inductive limit of $((E_s)_{s\in [\kappa]^{<\omega}}, (j_{t,s})_{t\subseteq s,\, t,s\in [\kappa]^{<\omega}})$. Because of property (4) in Lemma \ref{ni43hjoit4hjt}, it follows that $((G_s)_{s\in \mathcal F},(j_{t,s}\upharpoonright G_t))_{t\subseteq s\in [\kappa]^{<\omega}}$ is also a directed system of finite dimensional normed spaces $G_s$ which are $\lambda$-isomorphic to $\ell_\infty^{\dim G_s}$. Let $Y$ be the completion of the corresponding direct limit $\lim_{s\in [\kappa]^{<\omega}}G_s$. It is clear that $Y$ can be isometrically imbedded into $E$, while there is a natural isometric embedding of $X$ into $Y$: $X$ is the completion of the direct limit $((X_s)_{s\in [\kappa]^{<\omega}},(i_{t,s})_{t\subseteq s\in [\kappa]^{<\omega}})$, where $i_{t,s}:X_t\to X_s$ is the inclusion map. For each finite subset $s$ of $\kappa$, let $g_{s}:X_s\to G_s$ be $g_s:=j_{\emptyset,s}\upharpoonright X_s$, which is well defined by (4). This is obviously an isometric embedding such that $j_{t,s}\circ g_{s}=g_s\circ i_{t,s}$ for every $t\subseteq s$, and hence $X$ isometrically embeds into $Y$.
If we denote by $j_{s,\infty}: G_s \to Y$ the corresponding limit of $(j_{s,t})_{s\subseteq t}$, then $\bigcup_{s\in [\kappa]^{<\omega}}j_{s,\infty} (G_s)$ is dense in $Y$. It follows that $Y$ is a $\mc L_{\infty,\lambda}$-space. Since each $G_s$ is finite dimensional, it follows that $Y$ has density at most
$|[\kappa]^{<\omega}|=\kappa$. Since $X$ isometrically embeds into $Y$, the density of $Y$ has to be $\kappa$. Let us see that $Y/X$ has the Radon-Nikodym and the Schur properties: We use that $Y/X$ is naturally isometrically embedded into $E/X$, and we prove that $E/X$ has these two properties. Observe that these two properties are properties of separable subspaces of $E/X$. So let $Z\subseteq E/X$ be a separable subspace of $E/X$. By construction, we can find a sequence $(s_n)_{n\in {\mathbb N}}$ of elements of $\mathcal F$ such that $s_n\subseteq s_{n+1}$, and such that $Z$ is a subspace of the closure of the quotient $$\left(\lim_{n\to \infty} ((E_{s_n})_{n\in {\mathbb N}},(j_{s_n s_{n+1}})_{n\in {\mathbb N}})\right)/X.$$ This quotient can be naturally isometrically identified with the inductive limit of finite dimensional spaces $((E_{s_n}/j_{\emptyset, s_n}X )_{n\in {\mathbb N}},(\overline{j_{s_n,s_{n+1}}})_{n\in {\mathbb N}})$, which, by Theorem \ref{ijisjfjdsss}, has the two required properties. \end{proof} The existence of the direct system in Lemma \ref{ni43hjoit4hjt} is based on the following local construction. \begin{lem}\label{j4irjtiojgghff} For every finite subset $s$ of $\kappa$ there are \begin{equation} \label{kjhuhurt}\text{$(E_t^{(s)})_{t\subseteq s}$, $(G_t^{(s)})_{t\subseteq s}$, $(j_{u,t}^{(s)})_{u\prec t,\,u,t\subseteq s}$ and $(k_{u}^{(t,s)})_{u\subseteq t\subseteq s}$} \end{equation} such that \begin{enumerate} \item[(A)] (Local directed system) For every finite subset $s$ of $\kappa$ one has that $$((E_t^{(s)})_{t\subseteq s}, (j_{u,t}^{(s)})_{u\prec t, \, u,t\subseteq s})$$
is a (finite) system of $\eta$-admissible isometrical embeddings such that $j_{u,t}^{(s)}E_u^{(s)}$
has finite codimension in $E_t^{(s)}$.
\item[(B)] (Transition directed system) For every finite subset $u$ of $\kappa$ one has that
$$((E_u^{(s)})_{u\subseteq s\in [\kappa]^{<\omega}}, (k_{u}^{(t,s)})_{u\subseteq t\subseteq s\in [\kappa]^{<\omega}})$$
is a system of $\eta$-admissible isometrical embeddings such that $k_u^{(t,s)}E_u^{(t)}$ has finite
codimension in $E_u^{(s)}$. \item[(C)] (Coherence property) For every $v\subseteq u \subseteq t\subseteq s$ one has that \begin{equation} \label{erjijgijgfj} k_{u}^{(t,s)}\circ j_{v,u}^{(t)}= j_{v,u}^{(s)}\circ k_{v}^{(t,s)}. \end{equation} \item[(D)] For every $t\subseteq s\in [\kappa]^{<\omega}$ one has that $G_t^{(s)}\subseteq E_t^{(s)}$ is finite dimensional and \begin{equation} \label{cassegffb} d(G_t^{(s)},\ell_\infty^{\dim G_t^{(s)}})\le \lambda. \end{equation} \item[(E)] For every $u\subseteq t\subseteq s$ one has that \begin{equation} \label{ljuhbbb} k_{u}^{(t,s)} (G_{u}^{(t)})=G_{u}^{(s)}. \end{equation} \item[(F)] For every $\emptyset \varsubsetneq t\subseteq s$ one has that \begin{equation} \label{rgjrjghrhhhhff} j_{\bar t,t}^{(s)} (G_{\bar t}^{(s)} )\cup j_{\emptyset,t}^{(s)} (X_{t})\subseteq G_t^{(s)}. \end{equation} \end{enumerate} \end{lem} We postpone its proof, and we pass to prove Lemma \ref{ni43hjoit4hjt}. \begin{proof}[Proof of Lemma \ref{ni43hjoit4hjt}] Fix $(E_t^{(s)})_{t\subseteq s}$, $(G_t^{(s)})_{t\subseteq s}$, $(j_{u,t}^{(s)})_{u\prec t,\,u,t\subseteq s}$ and $(k_{u}^{(t,s)})_{u\subseteq t\subseteq s}$ as in Lemma \ref{j4irjtiojgghff}. For each finite subset $s$ of $\kappa$ set $E_s:=E_s^{(s)}$ and $G_s:=G_s^{(s)}$. Given $t\subseteq s$ let $j_{t,s}: E_t\to E_s$ be \begin{equation} \label{ueuuurjrihjh} j_{t,s}:= j_{t,s}^{(s)}\circ k_{t}^{(t,s)}. \end{equation} It follows from properties (A) and (B) in Lemma \ref{j4irjtiojgghff} that $j_{t,s}$ is an $\eta$-admissible embedding such that $j_{t,s}E_t$ has finite codimension in $E_s$. \begin{claim}
$((E_{s})_{s\in [\kappa]^{<\omega}},(j_{t,s})_{t\subseteq s\in [\kappa]^{<\omega}})$ is a directed system of $\eta$-admissible isometrical
embeddings. \end{claim}
\begin{proof}[Proof of Claim:] Suppose that $u\subseteq t\subseteq s$. Then \begin{align*} j_{t,s}\circ j_{u,t}= & j_{t,s}^{(s)}\circ( k_{t}^{(t,s)}\circ j_{u,t}^{(t)})\circ k_{u}^{(u,t)}=_{\eqref{erjijgijgfj}} j_{t,s}^{(s)}\circ( j_{u,t}^{(s)}\circ k_{u}^{(t,s)})\circ k_{u}^{(u,t)}= \\ = & (j_{t,s}^{(s)}\circ j_{u,t}^{(s)})\circ (k_{u}^{(t,s)}\circ k_{u}^{(u,t)})= j_{u,s}^{(s)}\circ k_{u}^{(u,s)} =j_{u,s} \end{align*} \end{proof} For each $s\in [\kappa]^{<\omega}$, let $G_s=G_s^{(s)}$. \begin{claim} \begin{enumerate} \item[(a)] $d(G_s,\ell_\infty^{\dim G_s})\le \lambda$, i.e. $G_s$ is $\lambda$-isomorphic to $\ell_\infty^{\dim G_s}$. \item[(b)] For every $t\subseteq s$ one has that $j_{t,s}G_t\subseteq G_s$. \end{enumerate} \end{claim} \begin{proof}[Proof of Claim:]
(a) follows from \eqref{cassegffb} in (D). (b): By \eqref{ljuhbbb} and \eqref{rgjrjghrhhhhff} one has that $$j_{t,s} (G_t)= j_{t,s}^{(s)}\circ k_{t}^{(t,s)} (G_t^{(t)})= j_{t,s}^{(s)} G_t^{(s)}\subseteq G_s^{(s)}=G_s.$$
\end{proof} \end{proof} It only remains to give a proof of Lemma \ref{j4irjtiojgghff}. \begin{proof}[Proof of Lemma \ref{j4irjtiojgghff}]
Fix $s\in [\kappa]^{<\omega}$. We define $\preceq$-recursively on $s$ all the objects in \eqref{kjhuhurt} together with an integer $n_t\in {\mathbb N}$, $S_t\subseteq \ell_\infty^{n_t}$ and $\upsilon_t:S_t\to E_{\bar t}^{(t)}$ for each
$\emptyset\varsubsetneq t\subseteq s$ such that
\begin{enumerate} \item[(a)] $E_\emptyset^{(s)}=X$, $k_{\emptyset}^{(t,s)}=\mr{Id}_{X}$. \item[(b)] $\upsilon_t:S_t\to E_{\bar t}^{(t)}$ is an isomorphism with $\nrm{\upsilon_t}\le \eta$, $\nrm{\upsilon_t^{-1}}\le \lambda$, and \begin{equation} \label{jutrihgf} \upsilon_t(S_t)=\langle j_{u,\bar t}^{(t)}(G_u^{(t)})\cup j_{\emptyset,\bar{t}}^{(t)}(X_t)\rangle\subseteq E_{\bar t}^{(t)}, \end{equation}
where $u=\overline{\overline{t}}^{(t)}$ is the $\prec$-penultimate element in $\mc P(t)$, if $|t|>1$ and
$u=\emptyset$, if $|t|=1$ (and hence $\bar t=\emptyset$). \item[(c)] $ E_t^{(s)}=(\ell_\infty^{n_t}\oplus_1 E_{\overline{t}^{(s)}}^{(s)})/N_{v_t^{(s)}}$, the diagram \begin{equation*} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=2.5em, text height=1.5ex, text depth=0.25ex] {\ell_\infty^{n_t} & E_t^{(s)}=(\ell_\infty^{n_t}\oplus_1 E_{\overline{t}^{(s)}}^{(s)})/N_{v_t^{(s)}} \\ S_t & E_{\overline{t}^{(s)}}^{(s)} \\ E_{\overline{t}}^{(t)} & E_{\overline{t}}^{(s)} \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$\overline{v_t^{(s)}} $} (m-1-2)
(m-2-1) edge node[below] {$v_t^{(s)}$} (m-2-2)
edge node[left] {$v_t$} (m-3-1)
(m-2-2) edge node[right] {$j_{\overline{t}^{(s)},t}^{(s)}$} (m-1-2)
(m-3-1) edge node[below] {$k_{\overline{t}}^{(t,s)}$} (m-3-2)
(m-3-2) edge node[right] {$j_{\overline{t},\overline{t}^{(s)}}^{(s)}$} (m-2-2)
;
\path[right hook->]
(m-2-1) edge node[right=45pt] {$(\Delta)$} (m-1-1)
;
\end{tikzpicture} \end{equation*} is commutative,$(\Delta)$ is a canonical $\eta$-admissible diagram, and \begin{equation} G_{t}^{(s)}= \overline{v_t^{(s)}}(\ell_\infty^{n_t})=\conj{(x,0)+N_{t}^{(s)}}{x\in \ell_\infty^{n_t}}.\label{rjtirjirjr3} \end{equation}
\item[(d)] For every $u\prec t$ subset of $s$, we have that \begin{equation} \label{rtkjritjjhjjgg} j_{u,t}^{(s)}=j_{\overline{t}^{(s)},t}^{(s)}\circ j_{u,\overline{t}^{(s)}}^{(s)}. \end{equation} \item[(e)] For every $u\subseteq t\subseteq s$, $k_{u}^{(t,s)}:E_{u}^{(t)}\to E_{u}^{(s)}$ is given, for every $(x,y)\in \ell_\infty^{n_u}\times E_{\bar{u}^{(t)}}^{(t)}$, by \begin{equation} \label{lejritjgjg} k_u^{(t,s)}((x,y)+N_u^{(t)})=(x,j_{\bar{u}^{(t)},\bar{u}^{(s)}}^{(s)}\circ k_{\bar u^{(t)}}^{(t,s)}(y))+N_u^{(s)}. \end{equation} The requirement in (e) can be fulfilled because of the commutativity of the following diagram: \begin{equation*} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=2.5em, text height=1.5ex, text depth=0.25ex] {E_{\overline{u}}^{(u)}& E_{\overline{u}}^{(t)} & E_{\overline{u}^{(t)}}^{(t)} & E_{\overline{u}^{(t)}}^{(s)} \\ & E_{\overline{u}}^{(s)} & & \\ & & & E_{\overline{u}^{(s)}}^{(s)} \\ }; \path[->,font=\normalsize]
(m-1-1) edge node[auto] {$ k_{\overline{u}}^{(u,t)}$} (m-1-2)
(m-1-2) edge node[auto] {$j_{\overline{u},\overline{u}^{(t)}}^{(t)}$} (m-1-3)
(m-1-3) edge node[auto] {$ k_{\overline{u}^{(t)}}^{(t,s)} $} (m-1-4)
(m-1-4) edge node[auto] {$j_{\overline{u}^{(t)},\overline{u}^{(s)}}^{(s)}$} (m-3-4)
(m-1-1) edge node[ below=4pt ] {$k_{\overline{u}}^{(u,s)}$} (m-2-2)
(m-2-2) edge node[below=4pt] {$j_{\overline{u},\overline{u}^{(s)}}^{(s)}$} (m-3-4)
(m-2-2) edge node[below] {$j_{\overline{u},\overline{u}^{(t)}}^{(s)}$} (m-1-4)
(m-1-2) edge node[auto] {$k_{\overline{u}}^{(t,s)}$} (m-2-2) ;
\path[->,dotted]
(m-1-1) edge[bend right=60] node[descr]{$v_{u}^{(s)}$} (m-3-4)
(m-1-1) edge [bend left=60] node[descr] {$v_{u}^{(t)}$} (m-1-3) ; \end{tikzpicture} \end{equation*} \end{enumerate} It remains to check that the conditions (A)--(F) hold:
\noindent (A): It is clear from the definition that $j_{\bar{t}^{(s)},t}^{(s)}$ is an $\eta$-admissible isometrical embedding, and it follows from the equality in \eqref{rtkjritjjhjjgg} that $j_{u,t}^{(s)}=j_{v,t}^{(s)}\circ j_{u,v}^{(s)}$ for every $u,v\subseteq s$ with $u\prec v \prec t$.
\noindent (C): Let $v\varsubsetneq u \subseteq t\subseteq s$. We want to prove that $ k_{u}^{(t,s)}\circ j_{v,u}^{(t)}= j_{v,u}^{(s)}\circ k_{v}^{(t,s)}$. Using that, by inductive hypothesis, the left side of the following diagram is commutative, $$\begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex] {E_v^{(s)} & E_{\overline{u}^{(t)}}^{(s)} & E_{\overline{u}^{(s)}}^{(s)} & E_u^{(s)}\\ E_v^{(t)} & E_{\overline{u}^{(t)}}^{(t)} & & E_{u}^{(t)} \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$j_{v,\overline{u}^{(t)}}^{(s)}$} (m-1-2)
(m-1-2) edge node[above] {$j_{\overline{u}^{(t)},\overline{u}^{(s)}}^{(s)}$} (m-1-3)
(m-1-3) edge node[above] {$j_{\overline{u}^{(s)},u}^{(s)}$} (m-1-4)
(m-1-2) edge[bend left=60] node[descr] {$j_{\overline{u}^{(t)},u}^{(s)}$} (m-1-4)
(m-2-1) edge node[left] {$k_v^{(t,s)}$} (m-1-1)
(m-2-2) edge node[left] {$k_{\overline{u}^{(t)}}^{(t,s)}$} (m-1-2)
(m-2-1) edge node[below] {$j_{v,\overline{u}^{(t)}}^{(t)}$} (m-2-2)
(m-2-2) edge node[below] {$j_{\overline{u}^{(t)},u}^{(t)}$} (m-2-4)
(m-2-4) edge node[right] {$k_u^{(t,s)}$} (m-1-4)
; \end{tikzpicture} $$ it suffices to prove that $k_{u}^{(t,s)}\circ j_{\bar{u}^{(t)},u}^{(t)}=j_{\bar{u}^{(t)},u}^{(s)}\circ k_{\bar{u}^{(t)}}^{(t,s)}$. Let $x\in E_{\bar u^{(t)}}^{(t)}$. Then by (e), \begin{align*} k_{u}^{(t,s)}\circ j_{\bar{u}^{(t)},u}^{(t)}(x)= & k_u^{(t,s)}((0,x)+N_u^{(t)})=(0,j_{\bar u^{(t)},\bar{u}^{(s)}}^{(s)}\circ k_{\bar u^{(t)}}^{(t,s)}(x))+N_u^{(s)}\\ j_{\bar{u}^{(t)},u}^{(s)}\circ k_{\bar{u}^{(t)}}^{(t,s)}(x)=& j_{\bar{u}^{(s)},u}^{(s)} \circ j_{\bar{u}^{(t)},\bar{u}^{(s)}}^{(s)}\circ k_{\bar{u}^{(t)}}^{(t,s)}(x)= (0,j_{\bar{u}^{(t)},\bar{u}^{(s)}}^{(s)}\circ k_{\bar{u}^{(t)}}^{(t,s)}(x) )+ N_{u}^{(s)}. \end{align*}
\noindent (B): Suppose that $v\subseteq u\subseteq t\subseteq s$. We have to see that $k_{v}^{(u,s)}= k_{v}^{(t,s)}\circ k_{v}^{(u,t)}$. Recall that from (e) it follows that for every $(x,y)\in \ell_\infty^{n_v}\times E_{\bar v^{(u)}}$ and for every $(x,z)\in \ell_\infty^{n_v}\times E_{\bar v^{(t)}}$ one has that \begin{align} \label{rtjrtjgijgjgg} k_v^{(u,t)}((x,y)+N_v^{(u)})=&(x, j_{\bar v^{(u)},\bar v^{(t)}}^{(t)}\circ k_{\bar v^{(u)}}^{(u,t)}(y))+N_v^{(t)} \\ k_v^{(u,s)}((x,y)+N_v^{(u)})=&(x, j_{\bar v^{(u)},\bar v^{(s)}}^{(s)}\circ k_{\bar v^{(u)}}^{(u,s)}(y))+N_v^{(s)} \\ k_v^{(t,s)}((x,z)+N_v^{(t)})=&(x, j_{\bar v^{(t)},\bar v^{(s)}}^{(s)}\circ k_{\bar v^{(t)}}^{(t,s)}(z))+N_v^{(s)}. \end{align} Hence, using inductively (C), \begin{align*} \label{rtjrtjgijgjgg} k_v^{(t,s)}\circ k_v^{(u,t)}((x,y)+N_v^{(u)})=&(x, j_{\bar v^{(t)},\bar v^{(s)}}^{(s)} \circ k_{\bar v^{(t)}}^{(t,s)}\circ j_{\bar v^{(u)},\bar v^{(t)}}^{(t)}\circ k_{\bar v^{(u)}}^{(u,t)}(y))+N_v^{(t)}= \\ = & (x, j_{\bar v^{(t)},\bar v^{(s)}}^{(s)} \circ j_{\bar v^{(u)},\bar v^{(t)}}^{(s)}\circ k_{\bar v^{(u)}}^{(t,s)}\circ k_{\bar v^{(u)}}^{(u,t)}(y))+N_v^{(t)} \\ = & (x, j_{\bar v^{(u)},\bar v^{(s)}}^{(s)}
\circ k_{\bar v^{(u)}}^{(u,s)}(y))+N_v^{(t)} \\ = & k_v^{(u,s)}((x,y)+N_v^{(u)}).\\ \end{align*} We now prove that $k_u^{(t,s)}$ is an $\eta$-admissible isometrical embedding: By inductive hypothesis the composition $j:E_{\bar u^{(t)}}^{(t)}\to E_{\bar u^{(s)}}^{(s)}$, $j:=j_{\bar u^{(t)},\bar{ u}^{(s)}} ^{(s)}\circ k_{\bar u^{(t)}}^{(t,s)}$, is an $\eta$-admissible isometrical embedding. We then fix $S\subseteq B$,
$\nu:S\to E_{\bar u^{(t)}}^{(t)}$ and $\overline{\nu}:B\to E_{\overline{u}^{(s)}}^{(s)}$ such that
\begin{equation*} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex] { B & E_{\bar u^{(s)}}^{(s)} \\ S & E_{\bar u^{(t)}}^{(t)} \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$\bar{\nu} $} (m-1-2)
(m-2-1) edge node[below] {$\nu$} (m-2-2)
(m-2-2) edge node[right] {$j=j_{\bar u^{(t)},\bar{ u}^{(s)}} ^{(s)}\circ k_{\bar u^{(t)}}^{(t,s)}$} (m-1-2)
;
\path[right hook->]
(m-2-1) edge node[right=13pt] {$(\Delta_0)$} (m-1-1)
;
\end{tikzpicture} \end{equation*} is an $\eta$-admissible diagram. It follows by (C), that the following diagram is commutative:
\begin{equation*} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=2em, column sep=2em, text height=1.5ex, text depth=0.25ex] { B & E_{\bar{u}^{(s)}}^{(s)} & & & E_u^{(s)}=(\ell_\infty^{n_u}\oplus_1 E_{\bar u^{(s)}}^{(s)})/N_u^{(s)} \\ & & S_u & \ell_\infty^{n_u} & \\ S & E_{\bar u^{(t)}}^{(t)} & & & E_u^{(t)}=(\ell_\infty^{n_u}\oplus_1 E_{\bar u^{(t)}}^{(t)})/N_u^{(t)} \\ }; \path[->,font=\normalsize]
(m-1-1) edge node[above] {$\bar{u_0} $} (m-1-2)
(m-3-1) edge node[below] {$u_0$} (m-3-2)
(m-3-2) edge node[right] {$j$} (m-1-2)
(m-1-2) edge node[above] {$j_{\overline{u}^{(s)},u}^{(s)}$} (m-1-5)
(m-3-2) edge node[below] {$j_{\overline{u}^{(t)},u}^{(t)}$} (m-3-5)
(m-3-5) edge node[right] {$k_{u}^{(t,s)}$} (m-1-5)
(m-2-3) edge node[right=3pt] {$v_u^{(s)}$} (m-1-2)
(m-2-4) edge node[left=18pt] {$\overline{v_u^{(s)}}$} (m-1-5)
(m-2-3) edge node[right] {$v_u^{(t)}$} (m-3-2)
(m-2-4) edge node[right=2pt, above=1pt] {$\overline{v_u^{(t)}}$} (m-3-5)
; \path[right hook->]
(m-3-1) edge node[right=8pt] {$(\Delta_0)$} (m-1-1)
(m-2-3) edge node[above=8pt] {$(\Delta.2)$} node[below=8pt] {$(\Delta.1)$} (m-2-4) ; \end{tikzpicture} \end{equation*} Since $(\Delta_0)$, $(\Delta.1)$ and $(\Delta.2)$ are $\eta$-admissible diagrams, we conclude from Lemma \ref{khwe4iothjiogff} that $k_{u}^{(t,s)}$ is an $\eta$-admissible embedding.
\noindent (D) is clear by definition of $G_t^{(s)}$.
\noindent (E): Fix $u\subseteq t\subseteq s$. Then \begin{align*} k_{u}^{(t,s)}(G_u^{(t)})=& \conj{ k_{u}^{(t,s)}((x,0)+N_{u}^{(t)})}{x\in \ell_\infty^{n_u}}= \conj{ (x,0)+N_{u}^{(s)}}{x\in \ell_\infty^{n_u}}=G_{u}^{(s)}. \end{align*} \noindent (F): Let $t\subseteq s$. We have to prove the inclusion in \eqref{rgjrjghrhhhhff}. Notice that the diagram \begin{equation*} \begin{tikzpicture}[descr/.style={fill=white,inner sep=2pt}] \matrix (m) [matrix of math nodes, row sep=3em, column sep=3em, text height=1.5ex, text depth=0.25ex] {\ell_{\infty}^{n_t} & E_t^{(s)}=(\ell_\infty^{n_t}\oplus_1 E_{\overline{u}^{(s)}}^{(s)})/N_t^{(s)}\\ S_t & E_{\overline{u}^{(t)}}^{(s)} \\ };
\path[->,font=\normalsize]
(m-1-1) edge node[above] {$\overline{v_t^{(s)}}$} (m-1-2)
(m-2-1) edge node[below] {$v_t^{(s)}$} (m-2-2)
(m-2-2) edge node[right] {$j_{\overline{t}^{(s)},t}^{(s)}$} (m-1-2)
;
\path[right hook->]
(m-2-1) edge (m-1-1)
;
\end{tikzpicture} \end{equation*}
is commutative. Let $u\subseteq t$ be the immediate $\prec$-predecessor of $\overline{t}$ in $t$, if $|t|>1$, and let $u=\emptyset$ otherwise. Then by (b), \begin{align*} G_t^{(s)}=& \conj{(x,0)+N_t^{(s)}}{x\in \ell_\infty^{n_t}}= \overline{v_t^{(s)}}(\ell_\infty^{n_t}) \supseteq \\ \supseteq & \overline{v_t^{(s)}}(S_t)= j_{\overline{t}^{(s)},t}^{(s)}\circ v_t^{(s)}(S_t)= \\ = & j_{\overline{t}^{(s)},t}^{(s)}\circ j_{\bar{t},\bar{t}^{(s)}}^{(s)}\circ k_{\bar{t}}^{(t,s)}\circ \upsilon_t (S_t)= j_{\bar{t},t}^{(s)}\circ k_{\bar{t}}^{(t,s)}\circ \upsilon_t(S_t)= k_{t}^{(t,s)}\circ j_{\bar{t},t}^{(t)}\circ \upsilon_t(S_t)=\\ = & k_{t}^{(t,s)}\circ j_{\bar{t},t}^{(t)}\left\langle j_{u,\bar{t}}^{(t)}(G_u^{(t)})\cup j_{\emptyset,\bar{t}}^{(t)}(X_t)\right\rangle =\\ =& \left\langle k_{t}^{(t,s)}\circ j_{\bar{t},t}^{(t)}\circ j_{u,\bar{t}}^{(t)}(G_u^{(t)})\cup k_{t}^{(t,s)}\circ
j_{\bar{t},t}^{(t)}\circ j_{\emptyset,\bar{t}}^{(t)}(X_t)\right\rangle= \\ =& \left\langle k_{t}^{(t,s)}\circ j_{u,t}^{(t)}(G_u^{(t)})\cup k_{t}^{(t,s)}\circ j_{\emptyset,t}^{(t)}(X_t)\right\rangle=\\ = & \left\langle j_{u,t}^{(s)}\circ k_{u}^{(t,s)}(G_u^{(t)})\cup j_{\emptyset,t}^{(s)}\circ k_{\emptyset}^{(t,s)}(X_t)\right\rangle= \\ = & \left\langle j_{u,t}^{(s)}(G_u^{(s)})\cup j_{\emptyset,t}^{(s)}(X_t)\right\rangle. \end{align*} \end{proof}
\end{document} | arXiv | {
"id": "1210.5728.tex",
"language_detection_score": 0.5414274334907532,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Coefficients of univalent harmonic mappings]{Coefficients of univalent harmonic mappings}
\thanks{ File:~\jobname .tex,
printed: \number\year-\number\month-\number\day,
\thehours.\ifnum\theminutes<10{0}\fi\theminutes}
\author{Saminathan Ponnusamy
} \address{S. Ponnusamy, and A. Sairam Kaliraj, Indian Statistical Institute (ISI), Chennai Centre, SETS, MGR Knowledge City, CIT Campus, Taramani, Chennai 600 113, India. } \email{samy@iitm.ac.in, samy@isichennai.res.in; sairamkaliraj@gmail.com}
\author{Anbareeswaran Sairam Kaliraj}
\author{Victor V. Starkov} \address{V.V. Starkov, Petrozavodsk State University, 33, Lenin Str., 185910, Petrozavodsk, Republic of Karelia, Russia.} \email{vstarv@list.ru}
\subjclass[2010]{Primary: 31A05; Secondary: 30C45, 30C50, 30C55} \keywords{Harmonic functions, harmonic univalent functions, linear invariant family, affine invariant family, coefficient bounds, and partial sums.
}
\date{\today }
\begin{abstract}
Let $\mathcal{S}_H^0$ denote the class of all functions $f(z)=h(z)+\overline{g(z)}=z+\sum^\infty_{n=2} a_nz^n +\overline{\sum^\infty_{n=2} b_nz^n}$ that are sense-preserving, harmonic and univalent in the open unit disk $|z|<1$. The coefficient conjecture for $\mathcal{S}_H^0$ is still \emph{open} even for $|a_2|$. The aim of this paper is to show that if $f=h+\overline{g} \in \mathcal{S}^0_H$ then $ |a_n| < 5.24 \times 10^{-6} n^{17}$ and $|b_n| < 2.32 \times 10^{-7}n^{17}$ for all $n \geq 3$. Making use of these coefficient estimates, we also obtain radius of univalence of sections of univalent harmonic mappings. \end{abstract} \thanks{ }
\maketitle \pagestyle{myheadings} \markboth{S. Ponnusamy, A. Sairam Kaliraj, and V. V. Starkov}{Coefficients of univalent harmonic mappings}
\section{Preliminaries and main results}\label{PSS8Sec1} Let ${\mathbb D}$ denote the open unit disk centered at the origin of the complex plane. The theory of univalent and sense-preserving complex-valued harmonic functions in ${\mathbb D}$ has attracted a lot of attention since the appearance of the paper by Clunie and Sheil-Small \cite{Clunie-Small-84} which brought the theory a large step forward. They pointed out that many of the classical results for conformal mappings have clear analogues for harmonic mappings although only few of them have been addressed and used by a number of authors, while others were not because of the higher difficulty level. Besides its interest from the point of view of analysis, it has been recently shown to be of relevance in some problems related to fluid flows. This applied mathematics connection brings new relevance to the issue of coefficient estimates for a family of sense-preserving harmonic mappings since these maps provide an approach towards obtaining explicit solutions to the incompressible two-dimensional Euler equations. For example, A.~Aleman and A.~Constantin \cite{AleConst2012} pointed out the importance of harmonic mappings in the Eulerian description of fluid flows and developed a method which is largely based on a detailed study of the governing equations using analytic function theory, and an important role played by the univalence of the labelling map. Also, the authors in \cite{AleConst2012} presented several examples to illustrate how the classical solutions can be obtained from the more general solution formulas via univalent harmonic mappings. More recently, O.~Constantin and M.J.~Mart\'{i}n \cite{ConstMartin2017} continued this investigation and proposed a different approach that provides a complete solution to the original problem of classifying all two-dimensional ideal fluid flows with harmonic Lagrangian labelling mappings. 
This approach is based on the ideas from the theory of planar harmonic mappings and thus, provides an illustration of the deep link between the sense-preserving harmonic mappings and fluid flow problems. This newly explored connection renewed our interest in this topic.
In this article, we consider the class $\mathcal{S}_H$ of all univalent, sense-preserving harmonic functions $f$ of ${\mathbb D}$ normalized by $f(0)=0=f_z(0)-1$. Every such function has a unique canonical representation of the form $f=h+\overline{g}$, with $h$ and $g$ analytic in ${\mathbb D}$ and $g(0)=0$. Here $h$ and $g$ are often referred to as analytic and co-analytic parts of $f$. Let $\mathcal{S}^0_{H} = \{f=h+\overline{g} \in \mathcal{S}_{H}:\, g'(0)=0 \}$. Clearly, $\mathcal{S}= \{f=h+\overline{g} \in \mathcal{S}_{H}:\, g(z)\equiv 0 \}$ is the class of normalized univalent analytic functions in ${\mathbb D}$. A typical element $f \in \mathcal{S}^0_H$ has the form \begin{equation}\label{PSSerRep} f(z)=h(z)+\overline{g(z)}:=z+\sum _{k=2}^{\infty}a_k z^k + \sum _{k=2}^{\infty}\overline{b_k z^k}, ~z \in \mathbb{D}. \end{equation} Throughout the discussion we shall use this representation. Clunie and Sheil-Small \cite{Clunie-Small-84} proved that both $\mathcal{S}_{H}$ and $\mathcal{S}^0_{H}$ are normal whereas only $\mathcal{S}^0_{H}$ is compact with respect to the topology of uniform convergence on compact subsets of ${\mathbb D}$. A function $f \in \mathcal{S}^0_H$ is said to belong to the class $\mathcal{S}^{*0}_{H}$, $\mathcal{K}^0_{H}$ and $\mathcal{C}^0_{H}$ if $f(\mathbb{D})$ is starlike with respect to the origin, convex and close-to-convex, respectively. The corresponding notations for the analytic case are $\mathcal{S}^*$, $\mathcal{K}$ and $\mathcal{C}$, respectively. For basic information about $\mathcal{S}^0_{H}$ and related geometric subfamilies, one can refer to \cite{Clunie-Small-84,Duren,Duren:Harmonic} and the recent expository article of Ponnusamy and Rasila \cite{PonRasi2013}.
This article is organized as follows. In Section \ref{PSS8SubSec1}, we present a preliminary information on the coefficient conjecture of Clunie and Sheil-Small \cite{Clunie-Small-84} and present a coefficient estimate for a family of sense-preserving harmonic mappings, which contains the class $\mathcal{S}^0_H$ (Theorem \ref{PS8Thm1}). In Section \ref{PSS8SubSec2}, we recall some known results on the sections of functions in certain geometric subclasses of univalent harmonic mappings and present our result on the radius of univalence of partial sums of functions in $\mathcal{S}^0_H$ (Theorem \ref{PS8Thm2}). Few basic lemmas that are needed for the proofs of these two results are recalled in Section \ref{PSS8Sec2}. The proofs of our main results are presented in Section \ref{PSS8Sec3}. Some consequences of them are discussed in Section \ref{PSS8Sec4} (Theorems \ref{spec_coeff_bound} and \ref{part_spec_case}).
\subsection{Coefficient conjecture of Clunie and Sheil-Small}\label{PSS8SubSec1} Using the method of shearing, Clunie and Sheil-Small \cite{Clunie-Small-84} obtained an important member of so-called slit mapping $K=H+\overline{G}$,
where $$H(z)=\frac{z-\frac{1}{2}z^2+\frac{1}{6}z^3}{(1-z)^3}= z+\sum_{n=2}^{\infty}A_nz^n ~\mbox{ and }~G(z)=\frac{\frac{1}{2}z^2+\frac{1}{6}z^3}{(1-z)^3}=\sum_{n=2} ^{\infty}B_nz^n $$ with $$A_n=\frac{(n+1)(2n+1)}{6} ~\mbox{ and }~ B_n=\frac{(n-1)(2n-1)}{6} ~\mbox{ for $n\geq 2$.} $$ The function $K$ is called the harmonic Koebe function and it maps the unit disk one-to-one onto the slit domain ${\mathbb C}\backslash\{u+iv:\, u\leq -1/6,\,v=0\}$ which is indeed convex along horizontal direction, and it plays an extremal role for several extremal problems in $\mathcal{S}^{*0}_{H}$ and $\mathcal{C}_{H}^0$, such as coefficient bounds and covering theorems (see \cite{Clunie-Small-84, Duren:Harmonic, Sheil-Small}). Due to the extremal role of the harmonic Koebe function in these families, it was natural for Clunie and Sheil-Small \cite{Clunie-Small-84} to conjecture that if $f=h+\overline{g}\in {\mathcal S}_{H}^{0}$ is given by \eqref{PSSerRep}, then for all $n\geq2$,
$$|a_n|\leq A_n,~ |b_n|\leq B_n~ \mbox{ and }~\big||a_n|-|b_n|\big|\leq n $$ and equality occurs for $f(z)=K(z)$. In \cite{Clunie-Small-84}, they also showed that
$|b_2|\leq 1/2$ which is sharp, and the non-sharp estimate $|a_2|<12172$. Later in 1990, Sheil-Small \cite{Sheil-Small} improved it to $|a_2|<57$, and then Duren \cite[p.~96]{Duren:Harmonic} improved it further to $|a_2|<49$ which is again far from the conjectured bound
$|a_2|\leq 5/2$. The above conjecture remains \emph{open} and little is known for $n\geq 3$ for the class ${\mathcal S}_{H}^{0}$. In \cite{Starkov}, it has been proved that \begin{equation}\label{coeff_bound_starkov}
|a_n| < \frac {(2e^2)^\alpha}{2\alpha} n^\alpha ~\mbox{ and }~ |b_n| < \frac {(2e^2)^\alpha}{2\alpha} n^\alpha ~\mbox{ for all }~ n\in {\mathbb N},
\end{equation} where $\alpha: = {\rm ord} \,\mathcal{S}_H = \sup_{f\in \mathcal{S}_H} |a_2|$. However, finding the explicit value of $\alpha$ or even finding a good upper bound itself seems to be a difficult task. Very recently, Abu Muhanna et al.\ \cite{AAP-PP2015} obtained the following result, which is the best known bound so far and this could be used in \eqref{coeff_bound_starkov}.
\begin{Lem} {\rm\cite{AAP-PP2015}}\label{samy_abu_bound}
If $f=h+\overline{g} \in \mathcal{S}^0_H$, then $|a_2|=|h''(0)/2|\leq 16.5$ and $\alpha = {\rm ord}~\mathcal{S}_H < 17$. \end{Lem}
In \cite{PonSai5}, it was remarked that the coefficient conjecture of Clunie and Sheil-Small is true if $\mathcal{S}^0_H(\mathcal{S})=\mathcal{S}^0_H$ holds, where \begin{equation}\label{eq-SS1} \mathcal{S}^0_H(\mathcal{S}) = \left\{h+\overline{g} \in \mathcal{S}^0_H :\, h+e^{i \theta}g \in \mathcal{S}~
\mbox{for some}~~\theta \in \mathbb{R} \right\} \end{equation} and $\mathcal{S}^0_H(\mathcal{S})$ contains the class of harmonic mappings convex in one direction. However, this conjecture remains \emph{open}.
Let $\mathscr{F}_H$ be a family of sense-preserving harmonic mappings $f$ with the power series representation as in \eqref{PSSerRep}. Then, the family $\mathscr{F}_H$ is called a linear invariant family, if for each $f\in \mathscr{F}_H$, the function $F$ defined by
$$ F(z) = \frac{f(e^{i\theta}\frac{z+a}{1+\overline{a}z})-f(ae^{i\theta})}{(1-|a|^2)h'(ae^{i\theta})e^{i\theta}} $$ also belongs to the class $\mathscr{F}_H$ for all $\theta \in {\mathbb R}$ and $a \in {\mathbb D}$. A family $\mathscr{F}_H$ is called an affine invariant family, if, in addition, for each $f \in \mathscr{F}_H$, the function $A(f(z))$ defined by $$ A(f(z)) = \frac{f(z)+\epsilon \overline{f(z)}}{1+\epsilon \overline{f_{\overline{z}}(0)}} $$
also belongs to the class $\mathscr{F}_H$ for all $\epsilon \in {\mathbb D}$. The order of an affine and linear invariant family $\mathscr{F}_H$ is defined as ${\rm ord}~ \mathscr{F}_H = \sup_{f \in \mathscr{F}_H}|a_2|$. Three well-known affine and linear invariant families are the class $\mathcal{S}_H$, its subclasses $\mathcal{K}_H$ of convex and $\mathcal{C}_H$ of close-to-convex harmonic mappings. It is well known that ${\rm ord}~\mathcal{K}_H = 2$ and ${\rm ord}~\mathcal{C}_H = 3$. In 2004, Starkov \cite{Starkov_2004} (for details see \cite{Starkov_2011}) introduced the order of a linear invariant family (which is not necessarily affine invariant family) $\mathscr{F}_H$ which is defined as follows:
$$ \overline{{\rm ord}}~\mathscr{F}_H = \sup_{f\in \mathscr{F}_H} \frac{|a_2-\overline{b_1}b_2|}{1-|b_1|^2}. $$ Corresponding to a linear invariant family $\mathscr{F}_H$, we define the family $\mathscr{F}^0_H$ as $$\mathscr{F}^0_H = \left\{F=\frac{f+\epsilon \overline{f}}{1+\epsilon \overline{f_{\overline{z}}(0)}}:\, f\in \mathscr{F}_H, \epsilon \in {\mathbb D}, F_{\overline{z}}(0)=0 \right\}. $$ The following lemma is useful in determining the $\overline{{\rm ord}}~\mathscr{F}_H$.
\begin{Lem}\label{new_ord}{\rm \cite{Starkov_Ganenkova}} Let $\mathscr{F}_H$ be a linear invariant family of harmonic mappings. Then
$$\overline{{\rm ord}}~\mathscr{F}_H = \sup_{f\in \mathscr{F}_H^0} |a_2| = {\rm ord}~\mathscr{F}^0_H.$$ \end{Lem}
The family $\mathscr{U}_H(\alpha)$ is defined as the union of all affine and linear invariant families $\mathscr{F}_H$ of harmonic functions such that $\overline{{\rm ord}}~{\mathscr{F}_H} \leq \alpha.$ Set $\mathscr{U}^0_H(\alpha) :=\{f \in \mathscr{U}_H(\alpha): f_{\overline z}(0)=0 \}$. It is now appropriate to state our first main result.
\begin{thm}\label{PS8Thm1} Let $f= h+\overline{g} \in \mathscr{U}^0_H(16.5)$ with series representation as in \eqref{PSSerRep}. Then we have \begin{equation}\label{coeff_bound_all_1}
|a_n| < 5.24 \times 10^{-6} n^{17} ~\mbox{ and }~ |b_n| < 2.32 \times 10^{-7}n^{17} ~~\mbox{for all}~~ n \geq 3. \end{equation} \end{thm}
\begin{rem}
{\rm We remark that ${\mathcal S}^0_{H} \subset \mathscr{U}^0_H(16.5)$. The new bounds in \eqref{coeff_bound_all_1} clearly improve the earlier bounds in \eqref{coeff_bound_starkov}. From the proof of Theorem \ref{PS8Thm1}, we observe that the number $5.24$ in \eqref{coeff_bound_all_1} could be replaced by $4.1006$ for $n \geq 19$ and $2.32$ by $2.25$ for $n \geq 18$. The proof of Theorem \ref{PS8Thm1} relies on the bound $|a_2| \leq 16.5$ for $f\in \mathscr{U}^0_H(16.5)$. If we use the conjectured bound $|a_2| \leq 5/2$, then Theorem \ref{PS8Thm1} takes an improved version which is stated in Section \ref{PSS8Sec4}. } \end{rem}
\subsection{Injectivity of sections of univalent harmonic functions}\label{PSS8SubSec2} For an analytic function $h(z)=\sum _{k=1}^{\infty}a_k z^k$ in the unit disk $\mathbb{D}$, the $n$-th section/partial sum $s_n(h)$ of $h$ is defined by \begin{equation}\label{AnPSerRep} s_n(h)(z)=\sum _{k=1}^{n}a_k z^k.
\end{equation} In \cite{Szego}, Szeg\"{o} proved that every section $s_n(h)$ of $h\in {\mathcal S}$ is univalent in $|z| < 1/4$ for all $n \geq 2$. The constant $1/4$ is sharp. If $h \in \mathcal{K}$ (resp. $\mathcal{S}^*$, and $\mathcal{C}$), then the $n$-th section $s_n(h)$ is known to be univalent and convex (resp. starlike and close-to-convex)
in the disk $|z| < 1-3n^{-1}\log n$ for all $n \geq 5$ (cf. \cite[Exercise 7, p.~272]{Duren}). However, the exact radius of univalence $r_n$ of $s_n(h)$, $h \in \mathcal{S}$, remains an \emph{open} problem. By making use of Goluzin's inequality, Jenkins \cite{Jenkins} proved that $s_n(h)$ is univalent in $|z|<r_n$ for $h\in\mathcal{S}$, where $r_n$
is at least $ 1-n^{-1}(4\log n - \log(4\log n))$ for $n \geq 8$. It is worth pointing out that the result of Jenkins could be improved if we use de Branges \cite{de_Branges} coefficient estimates $|h^{(n)}(0)/n!|\leq n$ for $h\in {\mathcal S}$. More precisely, we can easily obtain that $s_n(h)$ is univalent in $|z|<r_n$ for $h\in\mathcal{S}$, where $r_n$ is at least $ 1-n^{-1}(4\log n - 2\log(\log n))$ for $n \geq 7$, which seems to be the best known radius so far. We avoid the technical details of this fact for obvious reasons. For related investigations on this topic, see the recent articles \cite{ObSamy13,Hiroshi-Samy-2014, PonSaiStarkov1} and the references therein.
For $f=h+\overline{g} \in \mathcal{S}^0_H$, $n\geq 1$ and $m\geq 2$, the sections/partial sums $s_{n,m}(f)$ of $f$ are defined as $$s_{n,m}(f)(z)=s_n(h)(z)+\overline{s_m(g)(z)}. $$
However, the special case $m=n\geq 2$ seems to be interesting in its own merit. In 2013, Li and Ponnusamy \cite{LiSamyNA1, LiSamyNA2, LiSamyCzM} determined the radius of univalence of sections of functions from certain classes of univalent harmonic mappings. For $f$ belonging to ${\mathcal S}_H^{*0}$, ${\mathcal C}_H^0$, $\mathcal{S}^0_H(\mathcal{S})$ or the class of harmonic mappings convex in one direction, in \cite{PonSaiStarkov1}, the present authors proved that $s_{n,m}(f)$ is univalent in the disk $|z|<r_{n,m}$, where $r_{n,m}$ is the zero of a rational function. In the special case $m=n$, $s_{n,n}(f)$ is univalent in the disk $|z|<r_{n,n}$, where $$r_{n,n} > r^L_{n,n}:=1- \frac{(7\log n - 4\log(\log n))}{n} ~\mbox{ for }~ n \geq 15. $$ Moreover, it was also pointed out that $r_{n,m} \geq r^L_{l,l}$, where $l=\min\{n,m\}\geq 15$.
In \cite{PonSaiStarkov1}, it was also proved that for $f \in \mathcal{K}^0_H$, each partial sum $s_{n,m}(f)$ is univalent in the disk $|z|<r_{n,m}$, where $$r_{n,m} \geq 1-\frac{4\log l - 2\log(\log l)}{l} ~ \mbox{ and } ~ l=\min\{n,m\} \geq 7 . $$ In view of the lack of information on the coefficients of the analytic and co-analytic parts of functions in $\mathcal{S}^0_H$, in contrast to the analytic case, determining the radius of univalence of sections of functions in the class $\mathcal{S}^0_H$ seems to be a difficult task. Nevertheless, in the present article we attempt to consider this problem for the class $\mathcal{S}^0_H$. This is achieved as an application of Theorem \ref{PS8Thm1}.
\begin{thm}\label{PS8Thm2} Suppose that $f=h+\overline{g} \in \mathcal{S}^0_H$ with the series representation as in \eqref{PSSerRep}. For $r \in (0, 1)$, define $$U(r)=\frac{1}{\log r}\left\{-28.5 + \log(r (\log (1/r))^{19})+\log\left[\left(\frac{1-r}{1+r}\right)^{17} - \left(\frac{1-r}{1+r}\right)^{51}\right] \right\}. $$
Then, for $n \geq 2$, each section $s_{n,n}(f)$ is univalent in the disk $|z|<r_{n,n}$, where $$r_{n,n} = \max \{r \in (0, 1):\, 18 \log n = -(n - U(r)) \log r\}. $$
On the other hand, for fixed $r \in (0.016155, 1)$, $s_{n,n}(f)$ is univalent in $|z| < r$ for all $n \geq N(r)$, where $$N(r):=\min \{n \ge U(r):\, 18 \log n \leq -(n - U(r)) \log r\}. $$ \end{thm}
For example, a routine computation gives the following and so we omit the details.
\begin{cor}\label{PSS8_cor1} For $f \in \mathcal{S}^0_H$, we have \begin{enumerate}
\item $s_{n,n}(f)$ is univalent in the disk $|z|<1/4$ whenever $n \geq 81$.
\item $s_{n,n}(f)$ is univalent in the disk $|z|<1/e\approx 0.36788$ whenever $n \geq 131$.
\item $s_{n,n}(f)$ is univalent in the disk $|z|<1/2$ whenever $n \geq 220$. \end{enumerate} \end{cor}
From the proof of Theorem \ref{PS8Thm2}, it is also clear that the result could be improved, if we knew the exact upper bounds on
$|a_n|$ and $|b_n|$ for $f \in \mathcal{S}^0_H$. Therefore it is natural to state an improved form of this result with the assumption on the order of the family considered. This is done in Section \ref{PSS8Sec4}.
\section{Basic lemmas}\label{PSS8Sec2}
The following results together with Lemma \ref{samy_abu_bound} are useful in the proofs of our main results.
\begin{Lem}{\rm\cite{Starkov}}\label{uni_nec_suf} A sense-preserving harmonic function $f=h+\overline{g}$ of the form \eqref{PSSerRep} is univalent in ${\mathbb D}$ if and only if for each $z \in {\mathbb D}\setminus\{0\}$ and each $t\in(0, \pi/2]$, \begin{equation}\label{PS7inteq2} \frac {f(re^{i\eta})-f(re^{i\psi})} {re^{i\eta}-re^{i\psi}} = \sum_{k=1}^{\infty}\left[ (a_k z^k - \overline{b_k z^k})\frac{\sin kt}{\sin t} \right] \ne 0, \end{equation} where $a_1=1$, $t=(\eta-\psi)/2$ and $z=re^{i(\eta+\psi)/2}$. \end{Lem}
\begin{Lem} {\rm\cite{Graf-samy}}\label{two_po_dis} If $f=h+\overline{g} \in \mathcal{S}^0_H$, $r \in (0, 1)$, $t, \psi \in {\mathbb R}$, then \begin{equation}\label{lower_bound}
\left|\frac {f(re^{it})-f(re^{i\psi})} {re^{it}-re^{i\psi}}\right|\ge \frac {1}{4\alpha r} \left(\frac {1-r}{1+r}\right)^\alpha \left[1-\left(\frac {1-r}{1+r}\right)^{2\alpha}\right], \end{equation} where $\alpha = {\rm ord}~\mathcal{S}_H$. \end{Lem}
\begin{Lem}\label{Thm_abs_h_g}{\rm \cite{Graf}}
Suppose that $f = h+\overline{g} \in \mathscr{U}_H(\alpha_0)$ with $b_1=f_{\overline{z}}(0)$. For $z \in \mathbb{D}$ with $|z|=r$, $h$ and $g$ satisfy the bounds $$
|h'(z)| \leq (1+r|b_1|)\frac{(1+r)^{\alpha_0 - 3/2}}{(1-r)^{\alpha_0 + 3/2}} ~\mbox{ and }~
|g'(z)| \leq (r+|b_1|)\frac{(1+r)^{\alpha_0 - 3/2}}{(1-r)^{\alpha_0 + 3/2}}. $$
\end{Lem}
\section{Proofs of Main Theorems}\label{PSS8Sec3}
\subsection{Proof of Theorem \ref{PS8Thm1}} Let $f= h+\overline{g} \in \mathscr{U}^0_H(16.5)$. From the power series representation of $h(z)$ given by \eqref{PSSerRep} and Lemma \Ref{Thm_abs_h_g}, we obtain that \begin{equation}\label{growth_an}
|a_n| = \left|\frac 1{2\pi i}\int_{|z|=r} \frac {h'(z)} {n z^{n}} dz\right| \leq \frac {1}{nr^{n-1}} \frac{(1+r)^{15}}{(1-r)^{18}} =: \psi_n(r), \end{equation} where $0 < r < 1$. In particular,
$$|a_n| \leq \min_{r \in (0, 1)} \psi_n(r). $$ In order to obtain the minimum value of the right hand side of the inequality, we need to find the point of minimum of the function $\log \psi_n(r)$. We see that $$(\log \psi_n(r))'= \frac{15}{1+r} - \frac{n-1}{r} + \frac{18}{1-r} = 0 \Longleftrightarrow r^2+ \frac {33r}{n+2}-\frac{n-1}{n+2}=0. $$ It follows that $$\tau_n = \frac{-33+\sqrt{4n^2+4n+1081}}{2(n+2)} $$ is the point of minimum and thus, \begin{equation}\label{PSS8_eq4}
|a_n|\leq \psi_n (\tau_n) = A(\tau_n) B(\tau_n) ~\mbox{ for all }~ n \geq 3, \end{equation} where $$A(\tau_n)= \left(\frac{2(n+2)}{\sqrt{4n^2+4n+1081}-33}\right)^{n-1} < \left(\frac{2(n+2)}{\sqrt{4n^2+4n+1081}-33}\right)^n $$ and $$B(\tau_n)= \frac 1 n \left(\frac{2n + \sqrt{4n^2+4n+1081}-29}{2(n+2)}\right)^{15} \left(\frac{2(n+2)}{2n+37-\sqrt{4n^2+4n+1081}}\right)^{18}. $$ First, we shall prove that $A(\tau_n) \leq e^{18}$ for all $n \geq 2$.
Now, we let $$ \Psi(x) = \frac{2(x+2)e^{-18/x}}{\sqrt{4x^2+4x+1081}-33}. $$ Differentiating $\Psi$ with respect to $x$ we get that $$\Psi'(x) =\frac{6 e^{-18/x}q(x)}{ x^2 t(x)\left(t(x)-33\right)^2}, $$ where $t(x)=\sqrt{4x^2+4x+1081}$ and $q(x)=q_1(x) - q_2(x)$ with $$q_1(x) = 12972 + 6534 x + 431 x^2 + 22 x^3 ~\mbox{ and }~ q_2(x) = 396 t(x) + 198 x t(x) + 11 x^2 t(x). $$ As $q_1(x) > 0 $ and $q_2(x) > 0 $ for $x \ge 0$, it is clear that $q_1(x) - q_2(x)$ and $q^2_1(x) - q^2_2(x)$ will have the same sign whenever $x \ge 0$. Computation shows that $$ q^2_1(x) - q^2_2(x) = 24 (x+2)^2 (44 x^3 + 5381 x^2 + 6438 x -12972) > 0 ~\mbox{ for all }~ x \ge 2, $$
These observations show that $\Psi'(x) > 0$ for $x \ge 2$ and hence, $\Psi(x)$ is an increasing function of $x$, whenever $x \ge 2$. As $\lim_{x \rightarrow \infty} \Psi(x) = 1$, we deduce that $\Psi(x) \leq 1$ for all $x \geq 2$, which is equivalent to $$\left(\frac{2(x+2)}{\sqrt{4x^2+4x+1081}-33}\right)^x \leq e^{18} ~\mbox{ for all }~ x \geq 2. $$ In particular, this observation gives \begin{equation}\label{bound_A_tau} A(\tau_n) \leq e^{18} ~\mbox{ for all $n \geq 2$.} \end{equation}
Now, we set $n = N+3$, $p(N) = (2N + \sqrt{4N^2+28N+1129} +43)^{18}$ and $$ T(N) = \sqrt{1+\frac{7}{N}+\frac{1129}{4N^2}}. $$ A simple calculation shows that
\noindent $\displaystyle \frac{B(\tau_{N+3})p(N)}{p(N)}$ \begin{eqnarray*} &=& \frac{2^{-21}3^{-36}N^{17}}{(1+3/N)} \left(1+T(N)+\frac{43}{2N} \right)^3 \left(\frac{1+T(N)+\frac{1}{2N}(17+10T(N)) +\frac{140}{8N^2}}{1+5/N}\right)^{15}\\ & < & \frac{1}{2^{21}3^{36}} \left(1+T(N)+\frac{43}{2N} \right)^3 \left(1+T(N)+\frac{1}{2N}(17+10T(N)) +\frac{140}{8N^2}\right)^{15}N^{17} \\ &\leq& \frac{1}{2^{21}3^{36}} \left(1+T(16)+\frac{43}{32} \right)^3 \left(1+T(16)+\frac{1}{32}(17+10T(16)) +\frac{140}{8 \times {16}^2}\right)^{15}N^{17}
\end{eqnarray*} for all $N \geq 16$. Since $T(16) \approx 1.59375$, the last inequality then gives that \begin{eqnarray}\label{bound_B_tau} B(\tau_{N+3}) &\leq& (3.17691 \times 10^{-24}) \times 61.0466 \times (3.22016 \times 10^8) N^{17} \nonumber \\ &\leq& 6.2452 \times 10^{-14} N^{17} \mbox{ for all $ N \geq 16$.} \end{eqnarray} Hence, by \eqref{bound_A_tau} and \eqref{bound_B_tau}, one obtains that $$A(\tau_{N+3})B(\tau_{N+3}) \leq e^{18} \times 6.2452 \times 10^{-14} N^{17} \approx 4.1006 \times 10^{-6} N^{17} ~\mbox{ for all $ N \geq 16$}. $$ By a direct but lengthy computation or by Mathematica, we can easily see that $$A(\tau_n) B(\tau_n) \leq 5.24 \times10^{-6} n^{17} ~\mbox{ for }~ 3 \leq n \leq 18. $$ Therefore, using these two estimates, the inequality \eqref{PSS8_eq4} reduces to
$$|a_n| \leq 5.24 \times10^{-6} n^{17} ~\mbox{ for all $n \geq 3$.} $$ Similarly, from the power series representation of $g$ given by \eqref{PSSerRep}, one sees that
$$|b_n| = \left|\frac 1{2\pi i}\int_{|z|=r} \frac {g'(z)} {n z^{n}} dz\right| \leq \frac {1}{nr^{n-2}} \frac{(1+r)^{15}}{(1-r)^{18}} =: \phi_n(r) = \frac{n-1}{n}\psi_{n-1}(r), $$ where $0 < r < 1$ and $\psi_n(r)$ is defined as in \eqref{growth_an}. In particular,
$$|b_n| \leq \min_{r \in (0, 1)} \phi_n(r). $$ Using similar arguments as above, we get that
$$|b_n| \leq A_1(\rho_n) B_1(\rho_n) ~\mbox{ for all }~ n \geq 3, $$ where $$ \rho_n = \frac{-33+\sqrt{4n^2-4n+1081}}{2(n+1)}, $$ $$A_1(\rho_n)= \left(\frac{2(n+1)}{\sqrt{4n^2-4n+1081}-33}\right)^{n-2} = A(\tau_{n-1}) \leq e^{18} ~\mbox{ for all }~ n \geq 3 $$ and $$B_1(\rho_n)= \frac 1 n \left(\frac{2n + \sqrt{4n^2-4n+1081}-31}{2(n+1)}\right)^{15} \left(\frac{2(n+1)}{2n+35-\sqrt{4n^2-4n+1081}}\right)^{18}. $$ Setting $$ l(n) = \sqrt{1+\frac{1081}{4n^2}-\frac 1 n}, $$ we obtain that $$B_1(\rho_n)= \frac{8}{n 144^{18}(n+1)^{15}} \left[8n^2(1+l(n))+4n(1+2l(n)) - 4\right]^{15} \left[2n(1+l(n))+35 \right]^3. $$ Using the fact that $\sqrt{1+x} \leq 1 + \sqrt{x}$ for $x \geq 0$, we get that $$ B_1(\rho_n) \leq \frac{n^{17}}{2^{21}3^{36}} \left(2 + \frac{18}{n} + \frac{16}{n^2} \right)^{15} \left(2 + \frac{34}{n} \right)^3 \leq 3.425 \times 10^{-15} n^{17}
$$ for all $n \geq 18$. Therefore, $$A_1(\rho_n)B_1(\rho_n) \leq 3.425 \times 10^{-15} e^{18} n^{17} \approx 2.25 \times 10^{-7} n^{17} ~\mbox{ for all } n \geq 18. $$ By a direct computation with the help of Mathematica, one can see that $$A_1(\rho_n)B_1(\rho_n) \leq 2.32 \times 10^{-7} n^{17} ~\mbox{ for }~ 3 \leq n \leq 18.$$
Therefore, $|b_n| \leq 2.32 \times 10^{-7} n^{17} $ for all $n \geq 3$.
$\Box$
\subsection{Proof of Theorem \ref{PS8Thm2}} Suppose that $f=h+\overline{g}$ belongs to ${\mathcal S}_H^{0}$. Set $F_r(z)=f(rz)/r$ for $0 < r < 1$. Then $F_r(z) \in \mathcal{S}^0_H$. In view of Lemma \Ref{uni_nec_suf}, it is clear that $s_{n,m}(f)$
is univalent in $|z|<r$ if and only if $s_{n,m}(F_r)(z)$ is sense-preserving in ${\mathbb D}$ and the associated harmonic polynomial $P_{n,m,r}(z)$ has the property that $$P_{n,m,r}(z) := \sum_{k=1}^{\infty}\left[ (a'_k z^k - \overline{b'_k z^k})\frac{\sin kt}{\sin t} \right] \ne 0 ~\mbox{ for all } z\in{\mathbb D}\setminus\{0\} \mbox{ and } t\in(0,\pi/2], $$ where $$ a'_k = a_k r^{k-1} ~\mbox{ for }~ k\in \{1,2,\ldots,n\} ~\mbox{ and }~ a'_k = 0 ~\mbox{ if }~ k >n $$ and $$ b'_k = b_k r^{k-1} ~\mbox{ for }~ k\in \{1,2,\ldots,m\} ~\mbox{ and }~ b'_k = 0 ~\mbox{ if }~ k >m. $$
Now, we set $t=(\eta-\psi)/2$ and $z=\rho e^{i(\eta+\psi)/2}\in{\mathbb D}$ in \eqref{PS7inteq2}. Note that the function on the right-hand side of the inequality \eqref{lower_bound} in Lemma \Ref{two_po_dis} decreases with increasing value of $\alpha$, where $\alpha = {\rm ord}\,\mathcal{S}_H$. As $F_r \in \mathcal{S}^0_H$ and $\alpha < 17$, we apply Lemma \Ref{two_po_dis} to the function $F_r$ and get that $$
\left|\sum_{k=1}^{\infty}\left[ (a_k z^k - \overline{b_k z^k})r^{k-1}\frac{\sin kt}{\sin t} \right]\right| \geq \frac {1}{68 r} \left(\frac {1-r}{1+r}\right)^{17} \left[1-\left(\frac {1-r}{1+r}\right)^{34}\right]. $$
In order to find a lower bound for $|P_{n,m,r}(z)|$, we need to find an upper bound for
$$\left|R_{n,m,r}(z)\right| = \left| \sum_{k=n+1}^{\infty}\left[ a_k r^{k-1} z^k \frac{\sin kt}{\sin t} \right] - \sum_{k=m+1}^{\infty}\left[\overline{(b_k r^{k-1} z^k)} \frac{\sin kt}{\sin t} \right]\right|. $$
Using Theorem \ref{PS8Thm1} and the fact that $|\sin kt| \le k \sin t$ for all $t \in [0, \pi/2]$ and $k \in {\mathbb N}$, we get that \begin{eqnarray}\label{PSS8_eq2}
|R_{n,m,r}(z)| &\leq& \sum_{k=n+1}^{\infty}5.24 \times 10^{-6} k^{18} r^{k-1} + \sum_{k=m+1}^{\infty} 2.32 \times 10^{-7} k^{18} r^{k-1} \\ \nonumber
&=:&R_{n, r} + T_{m, r}. \end{eqnarray} Set $\psi(n,m,r) = C_{17}(r) - (R_{n, r} + T_{m, r})$, where $$ C_{17}(r) = \frac {1}{68 r} \left(\frac {1-r}{1+r}\right)^{17} \left[1-\left(\frac {1-r}{1+r}\right)^{34}\right]. $$
The inequality $|P_{n,m,r}(z)|>0$ holds for all $z\in {\mathbb D}\setminus\{0\}$, whenever $\psi(n,m,r)>0$. In \cite[Lemma 1]{Graf-samy} it was shown that $r \mapsto C_{\alpha}(r)$ is strictly decreasing on $(0, 1)$. This fact implies that
$r \mapsto \psi(n,m,r)$ is decreasing on $(0, 1)$ and thus $\psi(n,m,r)>0$ for all $r\in(0, r_{n,m})$, where $r_{n,m}$ is the unique positive root of the equation $\psi(n,m,r)=0$ which is less than $1$. It is easy to see that $s_{n,m}(F_r)(z)$ is sense-preserving in ${\mathbb D}$ provided $r\in(0, r_{n,m})$ (see e.g.~\cite{PonSaiStarkov1}) and hence, $s_{n,m}(f)$ is univalent in $|z| < r_{n,m}$.
Now, let us consider the special case $m=n$. In this case $\psi(n,m,r)$ reduces to $$\psi(n,n,r)=\frac {1}{68 r} \left(\frac {1-r}{1+r}\right)^{17} \left[1-\left(\frac {1-r}{1+r}\right)^{34}\right] - \sum_{k=n+1}^{\infty} 54.72 \times 10^{-7} k^{18} r^{k-1}. $$
From our discussion, it is clear that $s_{n,n}(f)$ is univalent in $|z|<r_{n,n}$, where $r_{n,n}$ is the unique positive root of the equation $\psi(n,n,r)=0$ which is less than $1$. In order to compute the lower bound for $r_{n,n}$, we consider the function $T(x) = x^{18}r^{x-1}$. It follows that $T(x)$ is a decreasing function of $x$ in the interval $[-18/\log r, \infty)$. Whenever $-n \log r > 18$, we have $$ \sum_{k=n+1}^{\infty} k^{18} r^{k-1} < \int_{n}^{\infty} T(x)\, \mathrm{d}x = \int_{n}^{\infty} x^{18}r^{x-1}\, \mathrm{d}x. $$ Applying integration by parts repeatedly, we obtain that
$$\int_{n}^{\infty} x^{18}r^{x-1}\, \mathrm{d}x = \frac{1}{r}\left\{\frac{n^{18}r^{n}}{|\log r|} + \frac{18n^{17}r^{n}}{|\log r|^2} + \frac{18 \times 17n^{16}r^{n}}{|\log r|^3} + \cdots + \frac{18!r^{n}}{|\log r|^{19}} \right\}. $$ Choose $n$ large enough so that $-n \log r > 18a \log n \geq 18$, where $a \in (1, \infty)$, which implies that $$ r^{n} n^j \leq r^{n} n^{18} \leq r^{n((a-1)/a)} ~\mbox{ for all }~ j=1, 2, \ldots, 18. $$ Hence, we get that \begin{eqnarray*}
\int_{n}^{\infty} x^{18}r^{x-1}\, \mathrm{d}x &\leq& \frac{18! ~r^{n((a-1)/a)-1}}{|\log r|^{19}} \left\{\frac{|\log r|^{18}}{18!} + \frac{|\log r|^{17}}{17!} + \cdots + \frac{|\log r|}{1!} + r^{n/a} \right\}\\
&\leq& \frac{18! ~r^{n((a-1)/a)-2}}{|\log r|^{19}}. \end{eqnarray*} Using the above inequality and \eqref{PSS8_eq2}, we obtain that \begin{eqnarray}\label{PSS8_eq3}
|R_{n,n,r}| &\leq& R_n + T_n = \sum_{k=n+1}^{\infty}(52.4+2.32) \times 10^{-7} k^{18} r^{k-1}\\ \nonumber
&\leq& 54.72 \times 10^{-7} ~\frac{18! ~r^{n((a-1)/a) - 2} }{|\log r|^{19}} \end{eqnarray} provided $\displaystyle r \leq n^{-18a/n}$ for some $a \in (1, \infty)$. Therefore, $\psi(n,n,r) > 0$ whenever $$\frac {1}{68} \left(\frac {1-r}{1+r}\right)^{17} \left[1-\left(\frac {1-r}{1+r}\right)^{34}\right]
- 54.72 \times 10^{-7} \frac{r^{n((a-1)/a) - 1}18!}{|\log r|^{19}} \geq 0. $$ This gives that $$ u(a, r):= \frac{a}{a-1} U(r) \le n, $$ where
$$U(r)=\frac{1}{\log r}\left\{-28.5 + \log(r |\log r|^{19})+\log\left[\left(\frac{1-r}{1+r}\right)^{17}- \left(\frac{1-r}{1+r}\right)^{51}\right] \right\}. $$ The lower bound for $r_{n,n}$ is obtainable for all $n \geq 2$ and this follows from the fact that $aU(r)/(a-1) \rightarrow 2^-$ as $a \rightarrow \infty$ and $r \rightarrow 0^+$. Similarly, $U(r) \rightarrow \infty$ as $r \rightarrow 1^-$. Therefore, $U(r)$ attains all values from $(2-\delta, \infty)$ if $r \in (0, 1)$, where $\delta$ is some positive constant.
From the above discussion, it is clear that $s_{n,n}(f)$ is univalent in $|z|<r$, whenever $u(a, r)\leq n$ and $r \leq n^{-18a/n}$ for some $a \in (1, \infty)$. The inequality $u(a, r)\leq n$ holds for any $r \in (0, 1)$, such that $U(r) < n$, if we choose $a = a^* = n/(n-U(r))$. The inequality $r \leq n^{-18a/n}$ holds true if we choose $r = R_{n,n}$ with $a = a^*$, where $$ R_{n,n} = \max \{r \in (0, 1):\, 18 \log n \le -(n - U(r)) \log r ~\}.
$$ In this expression, the maximum is reached, because for fixed $n$, $-(n - U(r)) \log r \rightarrow -\infty$ as $r \rightarrow 1^-$ and $-(n - U(r)) \log r \rightarrow \infty$ as $r \rightarrow 0^+$. For every $n \ge 2$, the function $r \mapsto -(n - U(r)) \log r$ depends continuously on $r$. Thus, $$r_{n,n} = \max \{r \in (0, 1):\, 18 \log n = -(n - U(r)) \log r ~\}. $$
We remark here that $r \mapsto U(r)$ is strictly increasing on $(0.016155, 1)$. In order to prove that, it is enough to show that
$$U_1(r)= - \log(r |\log r|^{19})-\log\left[\left(\frac{1-r}{1+r}\right)^{17}- \left(\frac{1-r}{1+r}\right)^{51}\right] $$ is strictly increasing on $(0.016155, 1)$. A computation shows that $$ U'_1(r) = \frac{1}{r}\left(-1 - \frac{19}{\log r} \right) + \frac{34}{1-r^2}\left( 1 - \frac{2}{\left(\frac{1+r}{1-r}\right)^{34} - 1} \right) > 0 ~\mbox{for}~ r \ge 0.016155. $$
Next, for a given $r \in (0.016155, 1)$, we consider the problem of finding the least positive integer $N(r)$ such that $s_{n,n}(f)$ is univalent in the disk $|z|<r$ for all $n \geq N(r)$. In order to guarantee the univalency of $s_{n,n}(f)$ for all $n \geq N(r)$, the number $N(r)$ must be greater than or equal to $u(a, r)$ and $r \leq n^{-18a/n}$ for all $n \ge N(r)$. Using the above arguments, we obtain that $$ N(r) = \min\{n \ge U(r):\, 18 \log n \leq -(n - U(r)) \log r \}. $$ This completes the proof.
$\Box$
\section{Concluding remarks}\label{PSS8Sec4}
It is known that the inequality $|a_2| \leq 5/2$ holds for functions in $\mathcal{C}^0_{H}$ and from Lemma \Ref{new_ord}, it follows that $\mathcal{C}_{H} \subset \mathscr{U}_H(5/2)$. Theorems \ref{spec_coeff_bound} and \ref{part_spec_case} below are the analogues of Theorems \ref{PS8Thm1} and \ref{PS8Thm2} for the families $\mathscr{U}^0_H(5/2)$ and $\mathscr{U}^0_H(5/2) \cap \mathcal{S}^0_H$, respectively. If $|f''(0)/2| \leq 5/2$ for all harmonic mappings $f \in \mathcal{S}^0_{H} $ as conjectured by Clunie and Sheil-Small, then $\mathcal{S}^0_{H} \subset \mathscr{U}^0_H(5/2)$.
\begin{thm}\label{spec_coeff_bound} Suppose that $f= h+\overline{g} \in \mathscr{U}^0_H(5/2)$ with series representation as in \eqref{PSSerRep}. Then for all $n \geq 3$,
$$|a_n| \leq \frac{8 (2 + n)^3 (\sqrt{4 n^2+ 4 n+17}+ 2 n-1)}{n (\sqrt{4 n^2+ 4 n+17}- 2 n-9)^4} \left(\frac{\sqrt{4 n^2+ 4 n+17}-5}{2(2+n)}\right)^{1-n} $$ and
$$|b_n| \leq \frac{8 (1 + n)^3 (\sqrt{4 n^2- 4 n+17}+ 2 n-3)}{n (\sqrt{4 n^2- 4 n+17}- 2 n-7)^4} \left(\frac{\sqrt{4 n^2- 4 n+17}-5}{2(1+n)}\right)^{2-n}. $$ In particular, the following bounds hold: $$
|a_n| \leq \frac{3n^3}{4} ~\mbox{ and }~ |b_n| \leq \frac{43 n^3}{100} ~\mbox{ for all }~ n \geq 3. $$ \end{thm}
As the proof of Theorem \ref{spec_coeff_bound} is similar to that of Theorem \ref{PS8Thm1}, we omit the details here. As an application of Theorem \ref{spec_coeff_bound}, we prove the following result:
\begin{thm}\label{part_spec_case}
Suppose that $f=h+\overline{g} \in \mathscr{U}^0_H(5/2) \cap \mathcal{S}^0_H$. Then the partial sum $s_{n,m}(f)$ is univalent in the disk $|z|<r_{n,m}$. Here $r_{n,m}$ is the unique positive root of the equation $\varphi(n,m,r)=0$, where \begin{equation}\label{PSS8_thm3_eq1} \varphi(n,m,r) = \frac {1}{12 r} \left(\frac {1-r}{1+r}\right)^3 \left[1-\left(\frac {1-r}{1+r}\right)^{6}\right] - R_{n,r} - T_{m,r}, \end{equation} with $$ R_{n,r} = \sum_{k=n+1}^{\infty}\frac{3k^4}{4} r^{k-1} ~\mbox{ and }~ T_{m,r} = \sum_{k=m+1}^{\infty}\frac{43k^4}{100} r^{k-1}. $$
In particular, each section $s_{n,n}(f)$ is univalent in the disk $|z|<r_{n,n}$, where \begin{equation}\label{PSS8_eq5} r_{n,n} > r^L_{n,n}:=1- \frac{(8\log n - 4\log(\log n))}{n} ~\mbox{ for }~ n \geq 20. \end{equation} Moreover, $r_{n,m} \geq r^L_{l,l}$, where $l=\min\{n,m\}\geq 20$. \end{thm} \begin{pf} The first part of the proof is similar to the proof of Theorem \ref{PS8Thm2}. Following the proof of Theorem \ref{PS8Thm2}, under the hypothesis of Theorem \ref{part_spec_case}, we get that
$s_{n,m}(f)$ is univalent in the disk $|z|<r_{n,m}$, where $r_{n,m}$ is the unique positive root of the equation $\varphi(n,m,r)=0$. Here $\varphi(n,m,r)$ is given by \eqref{PSS8_thm3_eq1}.
For $m=n$, we have $$ \varphi(n,n,r)=\phi(r) - (R_{n,r} + T_{n,r}), $$ where $$ \phi(r) = \frac{(1-r)^3 (3 + 10 r^2 + 3 r^4)}{3(1+r)^9} $$ and \begin{eqnarray*}
R_{n,r} + T_{n,r} &=& \sum_{k=n+1}^{\infty} \frac{59}{50}k^{4} r^{k-1}\\
& =& \frac{59 r^n}{50(1-r)^5} \left\{1 + 4 n^3 (1 - r)^3 + n^4 (1 - r)^4 + 11 r + 11 r^2 + r^3 \right. \\
&~~& \left. + 6 n^2 (1 - r)^2 (1 + r) - 4 n (r^3+ 3 r^2- 3 r-1 )\right\}. \end{eqnarray*} The inequality $\varphi(n,n,r) \geq 0$ holds if and only if $0 < k(n, r) \le 1$, where $$ k(n, r) := \frac{R_{n,r} + T_{n,r}}{\phi(r)}.$$ Now, we show that for every fixed integer $n \ge 2$, $k(n, r)$ is an increasing function of $r$ in the interval $[0, 1)$. In order to show that $k(n, r)$ is an increasing function of $r$, it is enough to show that $\phi_1(r) = (1-r)^3 (3 + 10 r^2 + 3 r^4)$ is a decreasing function of $r$ in the interval $[0, 1)$. Since $$\phi_1'(r) = (1-r)^2 (-9 + 20 r - 50 r^2 + 12 r^3 - 21 r^4) \leq (1-r)^2 (-9 + 20 r - 38 r^2) < 0 $$ for all $r \in [0, 1)$, $\phi_1(r)$ is decreasing and hence $k(n, r)$ is an increasing function and $k(n, r) > 0 $ for $r \in (0, 1)$. As $\lim_{n\rightarrow\infty}(R_{n,r} + T_{n,r}) =0$, it is clear that the radius $r_{n,n}$ of univalence approaches $1$. This suggests that $r_{n,n} \geq r_{n,n}^L:=1-x_n/n$, where $x_n$ is a positive and increasing sequence of real numbers such that $x_n=o(n)$.
Let us compute the approximate value of $r_{n,n}$ for large values of $n$. By setting $r = 1-x/n$ in $k(n, r)$, and making use of the fact that $(1-x/n)^n \leq e^{-x}$ for $x \geq 0$, we get that $k(n, 1-x/n) \leq t(x, n)$, where
$$t(x, n) := \frac{177e^{-x}n^8}{50 x^8}\left(2 - \frac{x}{n}\right)^9\frac{q(x,n)}{16 n^4 - 32 n^3 x + 28 n^2 x^2 - 12 n x^3 + 3 x^4}, $$ with $$q(x, n) = n[n^3 (24 + 24 x + 12 x^2 + 4 x^3 + x^4) - 6 n^2 x (6 + 4 x + x^2)+ 2 n x^2 (7 + 2 x) -x^3]. $$
We may set $x_n=8\log n - 4\log(\log n)$ and we observe that $1 - x_n/n > 0$ only when $n \geq 20$. Therefore, we consider the case $n \ge 20$. In order to show that $\varphi(n,n,r) \geq 0$ for all $r \in (0, 1-x_n/n)$, it suffices to prove that $t(x_n, n) \leq 1$ for all $n \ge 20$.
Set $$ t(x_n, n) = T_1(n) T_2(n) T_3(n), $$ where \begin{eqnarray*} T_1(n) &=& \frac{177}{50 \times 2^7 \times \left(2 - \frac{\log(\log n)}{\log n}\right)^8} \left(1 - \frac{4\log n - 2 \log(\log n)}{n} \right)^9\\
&\le& \frac{177}{50 \times 2^7 \times\left(2 - \frac{\log(\log 20)}{\log 20}\right)^8} \approx 0.000555~\mbox{ for all }~ n \ge 20, \end{eqnarray*} \begin{eqnarray*} T_2(n) &=& \frac{q(x_n, n)}{(n \log n)^4} \le \frac{24}{(\log n)^4} + \frac{8}{(\log n)^3}\left(24-\frac{36}{n} \right)+ \frac{8^2}{(\log n)^2}\left(\frac{14}{n^2} - \frac{24}{n}\right)\\
&~~& \hspace{2cm} +~~~~ \frac{8^3}{(\log n)}\left(4 - \frac{6}{n} +\frac{4}{n^2} - \frac{1}{n^3}\right) + 8^4\\
&\le& \frac{24}{(\log n)^4} + \frac{8\times 24}{(\log n)^3} + \frac{8^3 \times 4}{(\log n)} + 8^4 = : S_2(n)\\
&\le& S_2(20) \approx 4787.08 ~\mbox{ for all }~ n \ge 20 \end{eqnarray*} and \begin{eqnarray*} T_3(n) &=& \frac{1}{16 - 32 (x_n/n) + 28 (x_n/n)^2 - 12 (x_n/n)^3 + 3 (x_n/n)^4} = : S_3(x_n/n)\\
&\le& S_3(x_{20}/20) \approx 0.333 ~\mbox{ for all }~ n \ge 20. \end{eqnarray*} Therefore, $$ t(x_n, n) \le 0.000555 \times 4787.08 \times 0.333 \approx 0.885 < 1 ~\mbox{ for all }~ n \ge 20. $$
This completes the proof of the theorem. \end{pf}
A rough estimate on $r_{n,n}$ gives the following which may be compared with Corollary \ref{PSS8_cor1}.
\begin{cor}\label{PSS8_cor2} Suppose that $f=h+\overline{g} \in \mathscr{U}^0_H(5/2) \cap \mathcal{S}^0_H$. Then $s_{n,n}(f)$ is univalent in the disk
$|z|<r$, where {\rm (i)} $r=1/4$ whenever $n \geq 10$; {\rm (ii)} $r=1/2$ whenever $n \geq 29$; and {\rm (iii)} $r=3/4$ whenever $n \geq 98$.
\end{cor}
Better lower bounds for the radius of univalence $r_{n,n}$ of $s_{n,n}(f)$ (under the assumptions of Theorem \ref{part_spec_case}) for certain values of $n$ are listed in Table \ref{tab2}. They are obtained by solving the equation $\varphi(n,n,r)=0$.
\begin{table}[htp] \begin{center}
\begin{tabular}{|l|l||l|l|}
\hline
Value of $n$ & Lower bound for $r_{n,n}$ &Value of $n$ &Lower bound for $r_{n,n}$\\
\hline
2 & 0.0635798 & 10 & 0.269796\\
\hline
3 & 0.0952634 & 50 & 0.625779\\
\hline
4 & 0.12535 & 100 & 0.753905\\
\hline
5 & 0.153603& 354 & 0.900055\\
\hline \end{tabular} \end{center} \caption{Values of $r_{n,n}$ for certain values of $n$\label{tab2}} \end{table}
\end{document} | arXiv | {
"id": "1703.02371.tex",
"language_detection_score": 0.6364213228225708,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Gaussian entanglement revisited}
\author{Ludovico Lami} \affiliation{F\'{\i}sica Te\`{o}rica: Informaci\'{o} i Fen\`{o}mens Qu\`{a}ntics, Departament de F\'{i}sica, Universitat Aut\`{o}noma de Barcelona, ES-08193 Bellaterra (Barcelona), Spain}
\author{Alessio Serafini} \affiliation{Department of Physics \& Astronomy, University College London, Gower Street, London WC1E 6BT, United Kingdom}
\author{Gerardo Adesso} \affiliation{Centre for the Mathematics and Theoretical Physics of Quantum Non-Equilibrium Systems, School of Mathematical Sciences, The University of Nottingham, University Park, Nottingham NG7 2RD, United Kingdom}
\begin{abstract} We present a novel approach to the separability problem for Gaussian quantum states of bosonic continuous variable systems. We derive a simplified necessary and sufficient separability criterion for arbitrary Gaussian states of $m$ vs $n$ modes, which relies on convex optimisation over marginal covariance matrices on one subsystem only. We further revisit the currently known results stating the equivalence between separability and positive partial transposition (PPT) for specific classes of Gaussian states. Using techniques based on matrix analysis, such as Schur complements and matrix means, we then provide a unified treatment and compact proofs of all these results. In particular, we recover the PPT-separability equivalence for: (i) Gaussian states of $1$ vs $n$ modes; and (ii) isotropic Gaussian states. In passing, we also retrieve (iii) the recently established equivalence between separability of a Gaussian state and its complete Gaussian extendability.
Our techniques are then applied to progress beyond the state of the art. We prove that: (iv) Gaussian states that are invariant under partial transposition are necessarily separable; (v) the PPT criterion is necessary and sufficient for separability for Gaussian states of $m$ vs $n$ modes that are symmetric under the exchange of any two modes belonging to one of the parties; and (vi) Gaussian states which remain PPT under passive optical operations can not be entangled by them either. This is not a foregone conclusion per se (since Gaussian bound entangled states do exist) and settles a question that had been left unanswered in the existing literature on the subject.
This paper, enjoyable by both the quantum optics and the matrix analysis communities, overall delivers technical and conceptual advances which are likely to be useful for further applications in continuous variable quantum information theory, beyond the separability problem. \end{abstract}
\maketitle
\section{Introduction}
Gaussian states have played a privileged role in quantum optics and bosonic field theories, essentially since the very early steps of such theories, due to their ease of theoretical description and relevance to experimental practice. Over the last twenty years, such a privilege has carried over to quantum information science, where Gaussian states form the core of the `continuous variable' toolbox~\cite{introeisert,biblioparis,adesso07,weedbrook12,adesso14,bucco}. The analysis of quantum Gaussian states from the information theoretic standpoint brought up new subtle elements and much previously unknown insight into their structure. For, while Gaussian dynamics may essentially be dealt with entirely at the phase space level (typically by normal mode decomposition, so that Gaussian dynamics are often trivialised as `quasi-free' in field theory), the analysis of quantum information properties requires one to confront the Hilbert space description of the quantum states. Hence, while Gaussian dynamics might well be exactly solvable with elementary tools, the properties of Gaussian states related to the Hilbert space and tensor product structures are far from being equally transparent.
The problem of {\em Gaussian separability}, that is, determining whether a bipartite Gaussian state is separable or entangled~\cite{adesso07}, exemplifies such a situation very well. Necessary and sufficient conditions for Gaussian separability in the general $m$ vs $n$-mode case are available~\cite{Werner01,Giedke01}, yet they are recast in terms of convex optimisation problems whose solution (albeit numerically efficient) does not admit, in general, a closed analytical form. For non-separable states, a closely related question is whether their entanglement is distillable or bound~\cite{HorodeckiBound}. In the case of arbitrary multimode bipartite Gaussian states, while entanglement can never be distilled by Gaussian operations alone~\cite{nogo1,nogo2,nogo3}, it is known that entanglement distillability under general local operations and classical communications is equivalent to violation of the positivity of the partial transposition (PPT) criterion~\cite{Giedke01,GiedkeQIC}. In turn, the PPT criterion, which is as well efficiently computable at the level of covariance matrices for Gaussian states, and is in general only necessary for separability~\cite{Peres}, has been proven to be also sufficient in some important cases, notably when the bipartite Gaussian state under examination pertains to a $1$ vs $n$-mode system~\cite{Simon00, Werner01}, when it is `bi-symmetric'~\cite{Serafini05}, i.e.~invariant under local permutations of any two modes within any of the two subsystems, and when it is `isotropic', i.e.~with a fully degenerate symplectic spectrum of its covariance matrix~\cite{holwer,botero03,giedkemode}. Outside of these special families, bound entangled Gaussian states can occur, as first shown in the $2$ vs $2$-mode case in~\cite{Werner01}.
In this paper we provide significant advances towards the characterisation of separability and entanglement distillability in Gaussian quantum states. On one hand, we revisit the existing results, providing in particular a new compact proof for the equivalence between PPT and separability in $1$ vs $n$-mode Gaussian states, which encompasses the seminal $1$ vs $1$-mode case originally tackled by Simon~\cite{Simon00} and its extension settled by Werner and Wolf~\cite{Werner01}. Key to our proof is the intensive use of Schur complements, which have enjoyed applications in various areas of (Gaussian) quantum information theory~\cite{Giedke01,giedkemode,nogo1,nogo2,nogo3,eisemi,gian,Simon16,Lami16}, and --- as further reinforced by this work --- may be appreciated as a mathematical cornerstone for continuous variable quantum technology.
On the other hand, we derive a number of novel results. In particular, a marginal extension of the techniques applied in the aforementioned proof allow us to prove that Gaussian states invariant under partial transposition are necessarily separable, a result previously known only for the partial transposition of qubit subsystems \cite{sep 2xN}. We then show that the $1$ vs $n$-mode PPT-separability equivalence can be further extended to a class of arbitrary bipartite multimode Gaussian states that we call `mono-symmetric', i.e., invariant under local exchanges of any two modes on one of the two subsystems (see Fig.~\ref{mononucleosi}). This result, which (to the best of our knowledge) is observed and proven here for the first time, generalises the case of bi-symmetric states studied in~\cite{Serafini05}, providing as a byproduct a simplified proof for the latter as well.
As for isotropic Gaussian states, in the traditional approach the sufficiency of PPT for their separability follows from a well known `mode-wise' decomposition of pure-state covariance matrices~\cite{holwer,botero03,giedkemode}, and from the fact that the covariance matrix of an isotropic state is just a multiple of the covariance matrix of a pure Gaussian state.
Here, we derive the sufficiency of the PPT criterion for isotropic Gaussian states following a completely different and arguably more direct approach. Main ingredients of this novel proof are advanced matrix analysis tools such as the operator geometric mean, already found to be useful in the context of quantum optics~\cite{Lami16}.
We also consider the well known class of Gaussian passive operations (i.e., the ones that preserve the average number of excitations of the input state, such as beam splitters and phase shifters), which play a central role in quantum optics~\cite{introeisert,adesso14,bucco}, and we prove that a bipartite Gaussian state that always remains PPT under such a set of operations must also always stay separable. This novel result complements the seminal study of~\cite{passive}, in that the latter only considered the possibility of turning a PPT state into a non-PPT one through passive operations --- essentially, the question of generating distillable entanglement --- which is not the same as the question of generating inseparability, because Gaussian PPT bound entangled states do exist~\cite{Werner01}. Here we settle the latter, more general and fundamental question.
All the previous results enable us to substantially extend the range of equivalence between Gaussian separability and PPT in contexts of strong practical relevance. Last but not the least --- in fact, first and foremost in the paper --- we address the separability problem directly, and derive a simplified necessary and sufficient condition for Gaussian separability. For a bipartite state, this requires convex optimisation over marginal covariance matrices on one subsystem only, yielding a significant simplification over the existing criteria, which instead require optimisation on both parties~\cite{Werner01,Giedke01,eisemi}.
\begin{figure}
\caption{Mono-symmetric Gaussian states of two parties $A$ (with $m$ modes) and $B$ (with $n$ modes) are invariant under exchange of any two modes within party $A$. By means of a suitable symplectic transformation on subsystem $A$, these states can be reduced to a $1$ vs $n$-mode Gaussian state and a collection of $m-1$ uncorrelated single-mode states on $A$'s side. Since PPT is equivalent to separability for $1$ vs $n$-mode Gaussian states, it follows that PPT is necessary and sufficient for separability of all $m$ vs $n$-mode mono-symmetric Gaussian states. In the schematics, entanglement between pairs of modes from the same party is depicted as a single solid (black) line, while entanglement across a mode from $A$ and a mode from $B$ is depicted as a double (dark red) line. }
\label{mononucleosi}
\end{figure}
This paper is organised as follows: in Sec.~\ref{methods}, the definition and basic properties of Schur complements and matrix means that will be used in our derivations are recalled, and the Gaussian notation is set; Sec.~\ref{secp} contains our main novel finding: a simplified necessary and sufficient condition for Gaussian separability in the general $m$ vs $n$-mode case; Sec.~\ref{ppt} contains a new proof of the sufficiency of the PPT criterion for separability of $1$ vs $n$-mode Gaussian states achieved in a few, swift Schur complements' manipulation; Sec.~\ref{inva} shows that invariance under partial transposition implies separability; in Sec.~\ref{symm} we prove that mono-symmetric states can be reduced, under local unitary operations, to $1$ vs $n$-mode states, which implies that the PPT condition is sufficient for them too (see Fig.~\ref{mononucleosi}); in Sec.~\ref{mode-wise} we provide the reader with a new proof of the sufficiency of PPT for separability of isotropic Gaussian states, not relying on their mode-wise decomposition; Sec.~\ref{pass} contains our novel analysis of entanglement generation under passive operations.
Sec.~\ref{outro} concludes the paper with a brief summary and some future perspectives related to this work.
\section{The toolbox: Schur complements, matrix means and Gaussian states} \label{methods}
One of the messages of the present paper is to lend further support to the fact that methods based on Schur complements and matrix means can be successfully employed to derive fundamental results in continuous variable quantum information, following a streak of applications to various contexts including separability, distillability, steerability, entanglement monogamy, characterisation of Gaussian maps, and related problems~\cite{Giedke01,giedkemode,nogo1,nogo2,nogo3,eisemi,gian,Simon16,Lami16}. As a divertissement to set the stage, let us present a compact, essential compendium of such methods.
\subsection{Schur complements}\label{secSC} Given a square matrix $M$ partitioned into blocks as \begin{equation} M\, =\, \begin{pmatrix} A & X \\ Y & B \end{pmatrix}\, , \label{block} \end{equation} the {\it Schur complement} of its (square, invertible) principal submatrix $A$, denoted by $M/A$, is defined as \begin{equation} M/A \coloneqq B - YA^{-1} X\, . \label{schur} \end{equation} A useful reference on Schur complements is the monograph~\cite{ZHANG05}. Here we limit ourselves to stress some of the properties we will make use of in the present paper. As it turns out, Schur complements are the answer~\cite{42} to a number of questions that arise pretty naturally in matrix analysis. Many of these applications stem from the fact that the positivity conditions of $2\times 2$ hermitian block matrices can be easily written in terms of Schur complements.
\begin{lemma} \label{pos cond} Consider a hermitian matrix \begin{equation} H = \begin{pmatrix} A & X \\ X^\dag & B \end{pmatrix} . \label{H part} \end{equation} Then $H$ is strictly positive definite ($H>0$) if and only if $A > 0$ and $H/A=B-X^\dag A^{-1}X > 0$. Then, by taking suitable limits, $H$ is semidefinite positive ($H\geq 0$) if and only if $A\geq 0$ and $B-X^{\dag}(A+\varepsilon\mathds{1})^{-1} X\geq 0$ for all $\varepsilon>0$. \end{lemma}
A consequence of this result that will be relevant to us is the following.
\begin{cor} Let $H$ be a hermitian matrix partitioned as in~\eqref{H part}. Then, if $A>0$, \begin{equation} H/A\,=\, \sup \Big\{ \tilde{B}=\tilde{B}^\dag:\ H > 0\oplus \tilde{B} \Big\}\, . \label{variational} \end{equation} Here we mean that the matrix set on the right-hand side has a supremum (i.e.~a minimum upper bound) with respect to the L\"owner partial order ($X\geq Y$ if and only if $X-Y$ is positive semidefinite), and that this supremum is given by the Schur complement on the left-hand side. \end{cor}
We note in passing that from the above variational representation it follows immediately that $H/A$ is monotone and concave in $H>0$.
\subsection{Matrix means}\label{secMM} { Somehow related to Schur complements are the so-called matrix means. As one might expect from their name, these are functions taking two positive matrices as inputs and yielding another positive matrix as output. For an excellent introduction to this topic, we refer the reader to~\cite[Chapter 4]{BHATIA}. Here, we review only some basic facts that we will find useful throughout the paper. Given two strictly positive matrices $A,B>0$, the simplest mean one can define is the {\it arithmetic mean} $(A+B)/{2}$, whose generalisation from scalars to matrices does not present difficulties. Another easily defined object is the {\it harmonic mean}~\cite{parallel sum, ando79}, denoted by $A!B$ and given by \begin{equation} A!B\, \coloneqq\, \left(\frac{A^{-1}+B^{-1}}{2}\right)^{-1}\, . \label{harmonic} \end{equation} Incidentally, the harmonic mean can be also defined as a Schur complement, with the help of the identity $A!B=A-A(A+B)^{-1}A=\begin{pmatrix} A & A \\ A & A+B \end{pmatrix} \Big/ (A+B)$, which immediately implies that $A!B$ is monotone and jointly concave in $A$ and $B$, i.e.~concave in the pair $(A,B)$.
The least trivially defined among the elementary means is undoubtedly the {\it geometric mean} $A\# B$ between strictly positive matrices $A,B>0$~\cite{geometric mean, ando79}, which can be constructed as \begin{equation} A\# B\, \coloneqq\, \max\{X=X^{\dag}:\ A\geq XB^{-1} X\}\, , \label{geometric} \end{equation} where the above maximisation is with respect to the L\"owner partial order, and the fact that the particular set of matrices we chose admits an absolute maximum is already nontrivial. With a bit of work one can show that $A\# B$ is explicitly given by \begin{equation} A\# B\, =\, A^{1/2} \left( A^{-1/2} B A^{-1/2} \right)^{1/2} A^{1/2}\, . \label{geom expl} \end{equation} Having multiple expressions for a single matrix mean is always useful, as some properties that are not easy to prove within one formulation may become apparent when a different approach is taken. For instance, the fact that $A\# B$ is covariant under congruences, i.e.~$(MAM^{\dag})\#(MBM^{\dag})=M(A\#B)M^{\dag}$ for all invertible $M$, is far from transparent if one looks at~\eqref{geom expl}, while it becomes almost obvious when~\eqref{geometric} is used. On the contrary, the fact that $A\#B=(AB)^{1/2}$ when $[A,B]=0$ is not easily seen from~\eqref{geometric}, but it is readily verified employing~\eqref{geom expl}.
As it happens with scalars, the inequality \begin{equation} A!B\, \leq\, A\#B\, \leq\, \frac{A+B}{2} \label{hga} \end{equation} holds true for all $A,B>0$. In view of the above inequality, it is natural to wonder how the geometric mean between the leftmost and rightmost sides of~\eqref{hga} compares to $A\#B$. That this could be a fruitful thought is readily seen by asking the same question for real numbers. In fact, when $0<a,b\in\mathds{R}$ it is elementary to verify that $\sqrt{ab}=\sqrt{\frac{a+b}{2}\, \left( \frac{1/a+1/b}{2}\right)^{-1}}$. Our first result is a little lemma extending this to the non-commutative case. We were not able to find a proof in the literature, so we provide one.
\begin{lemma} \label{lemma ha=g} For $A,B>0$ strictly positive matrices, the identity \begin{equation} A\#B\, =\, \left(\frac{A+B}{2}\right)\#\left( A!B \right) \label{ha=g} \end{equation} holds true. \end{lemma}
\begin{proof} We start by defining $\tilde{A}\coloneqq \left(A+B\right)^{-1/2}A\left(A+B\right)^{-1/2}$, $\tilde{B}\coloneqq\left(A+B\right)^{-1/2}B\left(A+B\right)^{-1/2}$. It is easy to see that $[\tilde{A},\tilde{B}]=0$, for instance because $\tilde{A}+ \tilde{B}=\left(A+B\right)^{-1/2}(A+B)\left(A+B\right)^{-1/2}=\mathds{1}$. Therefore, the identity $\tilde{A}\#\tilde{B}=(\tilde{A}\tilde{B})^{1/2}$ holds. Now, on the one hand the congruence covariance of the geometric mean implies that \begin{equation*} \tilde{A}\#\tilde{B}\, =\, \left( \left(A+B\right)^{-1/2}A\left(A+B\right)^{-1/2} \right) \# \left( \left(A+B\right)^{-1/2}B\left(A+B\right)^{-1/2} \right)\, =\, \left(A+B\right)^{-1/2}(A\# B)\left(A+B\right)^{-1/2}\, . \end{equation*} On the other hand, \begin{align*} \tilde{A}\tilde{B}\, &=\, \left(A+B\right)^{-1/2}A\left(A+B\right)^{-1}B\left(A+B\right)^{-1/2}\, =\\ &=\, \left(A+B\right)^{-1/2}\left(B^{-1}(A+B)A^{-1}\right)^{-1}\left(A+B\right)^{-1/2} \, =\, \frac12\, \left(A+B\right)^{-1/2}(A!B)\left(A+B\right)^{-1/2}\, . \end{align*} Putting all together, we see that \begin{equation*} \left(A+B\right)^{-1/2}(A\# B)\left(A+B\right)^{-1/2}\, =\, \tilde{A}\#\tilde{B}\, =\, (\tilde{A}\tilde{B})^{1/2}\, =\, \frac{1}{\sqrt{2}}\, \left(\left(A+B\right)^{-1/2}(A!B)\left(A+B\right)^{-1/2}\right)^{1/2}\, . \end{equation*} Conjugating by $(A+B)^{1/2}$, we obtain \begin{equation} A\# B\, =\, \frac{1}{\sqrt{2}}\, \left(A+B\right)^{1/2} \left(\left(A+B\right)^{-1/2}(A!B)\left(A+B\right)^{-1/2}\right)^{1/2} \left(A+B\right)^{1/2}\, =\, \left(\frac{A+B}{2}\right)\#\left( A!B \right) , \end{equation} where the last step is an application of~\eqref{geom expl}. \end{proof} }
\subsection{Gaussian states}\label{secGS} In the remainder of this Section, we provide a brief introduction to the main concepts of the Gaussian formalism. Quantum continuous variables just describe quantum mechanics applied to an infinite-dimensional Hilbert space equipped with position and momentum operators $x_j,p_k$ ($j,k=1,\ldots, n$) satisfying the so-called canonical commutation relations $[x_j,p_k]=i\delta_{jk}$ (in natural units, $\hbar=1$). Such a Hilbert space describes, for instance, a collection of $n$ quantum harmonic oscillators, or modes of the electromagnetic radiation field. The operators $x_j,p_k$ are often grouped together to form a single vector of $2n$ operators $r \coloneqq (x_1,p_1,\ldots,x_n,p_n)$. The canonical commutation relations then take the form \begin{equation} [r,r^T]\, =\, i\ \Omega\, \coloneqq\, i \ \omega^{\oplus n}\,, \quad \omega \coloneqq \begin{pmatrix} 0 & {1} \\ - {1} & 0 \end{pmatrix}.
\label{CCR} \end{equation} An important object one can form is the displacement operator. For any $z\in\mathds{R}^{2n}$, we define \begin{equation} D_z\, \coloneqq\, e^{iz^T\Omega r}\, . \label{displacement} \end{equation}
It turns out that Nature has a special preference for quadratic Hamiltonians. A prominent example is the free-field Hamiltonian $\mathcal{H}_0=\frac{1}{2} r^T r$. Not surprisingly, thermal states of quadratic Hamiltonians are extremely easily produced in the lab, in fact so easily that they deserve a special name: Gaussian states~\cite{introeisert,biblioparis,adesso07,weedbrook12,adesso14,bucco}. As the name suggests, they can be fully described by a real displacement vector $w\in\mathds{R}^{2n}$ and a real, $2n\times 2n$ {\it quantum covariance matrix} (QCM) $V$, defined respectively as $w=\langle r \rangle$ and $V = \langle \{r-w,r^T-w^T\}\rangle$. By quantum covariance matrix we mean a real, symmetric, strictly positive matrix $V>0$ that moreover satisfies the Heisenberg uncertainty relation~\cite{simon94} \begin{equation} V+i\Omega \geq 0\, . \label{Heisenberg} \end{equation} Note that (\ref{Heisenberg}) can equivalently be written as $V-i\Omega \geq 0$ upon applying transposition (as $V^T=V$, $\Omega^T=-\Omega$).
The Gaussian state $\rho^G(V,w)$ with QCM $V$ and displacement vector $w$ admits the representation \begin{equation} \rho^G(V,w)\, =\, \int \frac{d^{2n} u}{(2\pi)^n} \ e^{-\frac{1}{4} u^T V u - iw^T u} D_{\Omega u}\,, \end{equation} which justifies the alternative definition of Gaussian states as the continuous variable states associated with a Gaussian characteristic function.
Clearly, linear transformations $r \rightarrow S r$ that preserve the commutation relations~\eqref{CCR} play a special role within this framework. Any such transformation is described by a {\it symplectic} matrix, i.e.~a matrix $S$ with the property that $S\Omega S^T=\Omega$. Symplectic matrices form a non-compact, connected Lie group that is additionally closed under transposition, and is typically denoted by $\mathrm{Sp}(2n,\mathds{R})$~\cite{pramana}. The importance of these operations arises from the fact that for any symplectic $S$ there is a unitary evolution $U_S$ on the Hilbert space such that $U_S^\dag r U_S = Sr$. Most importantly, such a unitary is the product of a finite number of factors $e^{i\mathcal{H}_Q}$, where $\mathcal{H}_Q$ is a quadratic Hamiltonian, and as such it can be easily implemented in laboratory. Under conjugation by $U_{S}$, Gaussian states transform as \begin{equation} U_S^\dag\, \rho^G(V,w)\, U_S\, =\, \rho^G\left(SVS^T,\, Sw \right)\, . \label{Gauss transform U_S} \end{equation}
It turns out that all Gaussian states can be brought into a remarkably simple normal form via unitary transformations induced by quadratic Hamiltonians. In fact, a theorem by Williamson~\cite{willy,willysim} implies that for all strictly positive matrices $V>0$ there is a symplectic transformation $S$ and a diagonal matrix $N>0$ such that \begin{equation} S^{-1} VS^{-T}\, =\, N \coloneqq \text{diag} (\nu_1,\nu_1,\ldots,\nu_n,\nu_n) \, . \label{Williamson} \end{equation} The diagonal elements $\nu_i>0$, each taken with multiplicity one, are called symplectic eigenvalues of $V$, and are uniquely determined by $V$ (up to their order, which can be assumed decreasing by convention with no loss of generality). Accordingly, we will refer to $\vec{\nu}=(\nu_1,\nu_2,\ldots,\nu_n)$ as the {\em symplectic spectrum} of $V$. Notably, Heisenberg uncertainty relation~\eqref{Heisenberg} can be conveniently restated as $N\geq \mathds{1}$, or equivalently $\nu_{i}\geq 1$ for all $i=1,\ldots, n$. { A Gaussian state $\rho^G(V,w)$ can be shown to be pure if and only if all of its symplectic eigenvalues are equal to $1$, which corresponds to the matrix equality $V\Omega V\Omega=-{\mathds{1}}$. Correspondingly, a QCM $V$ satisfying $\nu_{j}=1$ for all $j=1,\ldots,n$ (or equivalently $\det V = 1$) will be called a {\it pure} QCM. Note that pure QCMs $V$ are themselves symplectic matrices, $V=(S^T S)^{-1} \in \mathrm{Sp}(2n,\mathds{R})$, and are the extremal elements in the convex set of QCMs.}
Finally, note that displacement vector $w$ is often irrelevant since it can be made to vanish by local unitaries, resulting from the action of the displacement operator of (\ref{displacement}) on each individual mode. Since all the physically relevant informational properties such as purity and entanglement are invariant under local unitaries, all the results we are going to present will not depend on the first moments. Therefore, in what follows, we will completely specify any Gaussian state under our investigation as $\rho^G(V)$ in terms of its QCM $V$ alone.
\section{Simplified separability criterion for M vs N-mode Gaussian states}\label{secp}
The QCM $V_{AB}$ of a Gaussian state $\rho^G_{AB}$ pertaining to a $(m+n)$-mode bipartite system $AB$ can be naturally written in block form according to the splitting between the subsystems $A$ and $B$: \begin{equation} V_{AB}\, =\, \begin{pmatrix} V_A & X \\ X^T & V_B \end{pmatrix}\, . \label{V explicit} \end{equation} According to the same splitting, the matrix $\Omega$ appearing in (\ref{CCR}) takes the form \begin{equation} \Omega_{AB}\, =\, \begin{pmatrix} \Omega_A & 0 \\ 0 & \Omega_B \end{pmatrix}\, =\, \Omega_A\oplus\Omega_B\, , \end{equation} with $\Omega_A = \omega^{\oplus m}$ and $\Omega_B=\omega^{\oplus n}$.
The entanglement properties of a bipartite Gaussian state can thus be conveniently translated at the level of QCMs. Recall that, in general, a bipartite quantum state $\rho_{AB}$ is separable if and only if it can be written as a convex mixture of product states, $\rho_{AB} = \sum_k p_k ({\sigma_k}_A \otimes {\tau_k}_B)$, with $p_k$ being probabilities~\cite{Werner89}. For a Gaussian state $\rho^G_{AB}$ of a bipartite continuous variable system, we have then the following.
\begin{lemma}[Proposition 1 in~\cite{Werner01}] \label{sep} $ \\ $ A Gaussian state $\rho^G_{AB}(V_{AB})$ with $(m+n)$-mode QCM $V_{AB}$ is separable if and only if there exist an $m$-mode QCM $\gamma_A \geq i \Omega_A$ and an $n$-mode QCM $\gamma_B \geq i \Omega_B$ such that \begin{equation} V_{AB} \geq \gamma_A\oplus\gamma_B\, . \label{sep eq} \end{equation} \end{lemma}
In view of the above result, a QCM $V_{AB}$ satisfying~\eqref{sep eq} for some marginal QCMs $\gamma_{A}$, $\gamma_{B}$ will itself be called {\it separable} from now on. The criterion in (\ref{sep eq}) is necessary and sufficient for separability of QCMs, and can be evaluated numerically via convex optimisation~\cite{Giedke01,eisemi}, however such optimisation runs over both marginal QCMs, hence scaling (polynomially) with both $m$ and $n$.
The first main result of this paper is to show that the necessary and sufficient separability condition~\eqref{sep eq}, for any $m$ and $n$, can be further simplified. This result is quite neat and of importance in its own right. In particular, it allows us to recast the Gaussian separability problem as a convex optimisation over the marginal QCM of {\it one} subsystem only (say $A$ without loss of generality), resulting in an appreciable reduction of computational resources, especially in case party $A$ comprises a much smaller number of modes than party $B$.
\begin{thm}[Simplified separability condition for an arbitrary QCM] \label{simp sep lemma} $ \\ $ A QCM $V_{AB}$ of $m+n$ modes is separable if and only if there exists an $m$-mode QCM $\gamma_A \geq i \Omega_A$ such that \begin{equation} V_{AB} \geq \gamma_A\oplus i\Omega_B\, . \label{simp sep 1} \end{equation} In terms of the block form (\ref{V explicit}) of $V_{AB}$, when $V_{B}>i\Omega_{B}$ the above condition is equivalent to the existence of a real matrix $\gamma_A$ satisfying \begin{equation} i\Omega_A \leq \gamma_A \leq V_A - X (V_B-i\Omega_B)^{-1} X^T\, . \label{simp sep 2} \end{equation} If $V_{B}-i\Omega_{B}$ is not invertible, we require instead $i\Omega_A \leq \gamma_A \leq V_A - X (V_B+\varepsilon\mathds{1}_{B}-i\Omega_B)^{-1} X^T$ for all $\varepsilon>0$. \end{thm}
\begin{proof} Since both sets of QCMs $V_{AB}$ defined by~\eqref{sep eq} and~\eqref{simp sep 1} are clearly topologically closed, we can just show without loss of generality that their interiors coincide. This latter condition can be rephrased as an equivalence between the two following statements: (i) $V_{AB}>\gamma_{A}\oplus \gamma_{B}$ for some QCMs $\gamma_{A},\gamma_{B}$; and (ii) $V_{AB} > \gamma_A\oplus i\Omega_B$ for some QCM $\gamma_{A}$.
Now, once $\gamma_A<V_{A}$ is fixed, the supremum of all the matrices $\gamma_B$ satisfying $V_{AB}>\gamma_{A}\oplus \gamma_{B}$ is given by the Schur complement $(V_{AB}-(\gamma_A\oplus 0_B))/(V_A-\gamma_A)$, as the variational characterisation~\eqref{variational} reveals. Therefore, statement (i) is equivalent to the existence of $i\Omega_{A}\leq\gamma_A<V_{A}$ such that $(V_{AB}-(\gamma_A\oplus 0_B))/(V_A-\gamma_A)> i\Omega_B$. This is the same as to require $V_{AB}> \gamma_A\oplus i\Omega_B$, as the positivity conditions of Lemma~\ref{pos cond} immediately show.
Until now, we have proven that the separability of $V_{AB}$ can be restated as $V_{AB} \geq \gamma_A\oplus i\Omega_B$ for some appropriate QCM $\gamma_{A}$. Employing Lemma~\ref{pos cond}, we see that this is in turn equivalent to~\eqref{simp sep 2}, or to its $\varepsilon$-modified version when $V_{B}-i\Omega_{B}$ is not invertible. \end{proof}
\begin{rem} It has been recently observed~\cite{Bhat16} that condition~\eqref{simp sep 2} is equivalent to the corresponding Gaussian state $\rho^G_{AB}(V_{AB})$ with QCM $V_{AB}$ being completely extendable with Gaussian extensions. We remind the reader that a bipartite state $\rho_{AB}$ is said to be {\it completely extendable} if for all $k$ there exists a state $\rho_{AB_{1}\cdots B_{k}}$ that is: (i) symmetric under exchange of any two $B_{i}$ systems; and (ii) an extension of $\rho_{AB}$ in the sense that $\text{Tr}_{B_{2}\cdots B_{k}}\rho_{AB_{1}\cdots B_{k}}=\rho_{AB}$. When the original state $\rho_{AB}^G$ is Gaussian, it is natural to consider extensions $\rho^G_{AB_{1}\cdots B_{k}}$ of Gaussian form as well. Interestingly enough, the above Theorem~\ref{simp sep lemma} provides a simple alternative proof of the remarkable fact (also proven in~\cite{Bhat16}) that Gaussian states are separable if and only if completely extendable with Gaussian extensions. \end{rem}
{ \begin{rem}\label{remulti} It is worth noticing that both Lemma~\ref{sep} and Theorem~\ref{simp sep lemma} extend straightforwardly to encompass the case of full separability of multipartite Gaussian states. In the case of Lemma~\ref{sep}, this extension was already formulated in~\cite{Werner01,3-mode sep}. As for Theorem~\ref{simp sep lemma}, the corresponding necessary and sufficient condition for the full separability of a $k$-partite QCM $V_{A_{1}\cdots A_{k}}$ would read $V_{A_{1}\cdots A_{k}}\geq \gamma_{A_{1}}\oplus\ldots\oplus \gamma_{A_{k-1}}\oplus i\Omega_{A_{k}}$ for appropriate QCMs $\gamma_{A_{1}},\ldots, \gamma_{A_{k-1}}$. \end{rem} }
\section{PPT implies separability for 1 vs N-mode Gaussian states -- Revisited}\label{ppt}
We now focus on investigating known and new conditions under which separability becomes equivalent to PPT for Gaussian states, so that the problem of deciding whether a given QCM is separable or not admits a handy formulation.
For any bipartite state $\rho_{AB}$, recall that the PPT criterion provides a useful necessary condition for separability~\cite{Peres}: \begin{equation}\label{PeresPPT}\mbox{$\rho_{AB}$ is separable $\ \Rightarrow\ $ $\rho_{AB}^{T_B} \geq 0$}\,, \end{equation} where the suffix $T_B$ denotes transposition with respect to the degrees of freedom of subsystem $B$ only. In finite-dimensional systems, PPT is also a sufficient condition for separability when $\dim(A) \cdot \dim(B)\leq 6$~\cite{H3}.
In continuous variable systems, the PPT criterion turns out to be also sufficient for separability of QCMs when either $A$ or $B$ is composed of one mode only.
\begin{thm}[PPT is sufficient for Gaussian states of $1$ vs $n$ modes~\cite{Simon00,Werner01}] \label{PPT thm} $ \\ $ Let $V_{AB}$ be a bipartite QCM such that either $A$ or $B$ is composed of one mode only. Then $V_{AB}$ is separable if and only if \begin{equation} V_{AB}\, \geq\, \begin{pmatrix} i\Omega_A & 0 \\ 0 & \pm i\Omega_B\end{pmatrix}\, =\, i\Omega_A \oplus (\pm i\Omega_B)\, , \label{PPT} \end{equation} which amounts to the corresponding Gaussian state being PPT, ${\rho^{G}}^{T_B}_{AB} \geq 0$. \end{thm}
For completeness, we recall that the partial transpose of an $(m+n)$-mode QCM $V_{AB}$ (i.e., the covariance matrix of the partially transposed density operator ${\rho^{G}}^{T_B}_{AB}$) is given by $V_{AB}^{T_B}= \Theta_B V_{AB} \Theta_B$, where with respect to a mode-wise decomposition on the $B$ subsystem the matrix $\Theta_B$ can be written as $\Theta_B \coloneqq {\mathds{1}}_A \oplus \big( \bigoplus_{j=1}^n \zeta\big)_B$, with $\zeta \coloneqq \left(\begin{smallmatrix}1 & 0 \\ 0 & -1 \end{smallmatrix}\right)$~\cite{Simon00}. Accordingly, we can say that the QCM $V_{AB}$ is PPT if and only if $V_{AB}^{T_B}$ is a valid QCM obeying~\eqref{Heisenberg}, which is equivalent to~\eqref{PPT}.
The original proof of Theorem~\ref{PPT thm} came in two steps. Firstly, Simon~\cite{Simon00} proved it in the particular case when both $A$ and $B$ are made of one mode only by performing an explicit analysis of the symplectic invariants of $V_{AB}$; this seminal analysis is quite straightforward to follow and particularly instructive, but eventually a bit cumbersome, since it requires to distinguish between three cases, according to the sign of $\det X$, where $X$ is the off-diagonal block of the QCM $V_{AB}$ partitioned as in (\ref{V explicit}). Later on, Werner and Wolf~\cite{Werner01} reduced the problem for the $1$ vs $n$-mode case with arbitrary $n$ to the $1$ vs $1$-mode case; the proof of this reduction is geometric in nature and rather elegant, but also relatively difficult.
Our purpose in this Section is to use Schur complements to provide the reader with a simple, direct proof of Theorem~\ref{PPT thm}. Before coming to that, there is a preliminary lemma we want to discuss.
\begin{lemma} \label{2x2 interval} Let $M,N$ be $2\times 2$ hermitian matrices. There is a real symmetric matrix $R$ satisfying $M\leq R\leq N$ if and only if $M\leq N, N^*$, where $*$ denotes complex conjugation. \end{lemma}
\begin{proof} The only complex entry in a $2\times 2$ hermitian matrix is in the off-diagonal element. Suppose without loss of generality that $\Im M_{12}\ge0$ and $\Im N_{12}\le0$ (both conditions in the statement are in fact symmetric under complex conjugation of $M$ or $N$). It is easy to verify that a $p$ such that $0 \leq p \leq 1$ and $\Im (pM+(1-p)N)_{12}=0$ always exists, and we see that $R\coloneqq pM+(1-p)N$ is a real symmetric matrix. Moreover, since $R$ belongs to the segment joining $M$ and $N\geq M$ we conclude that $M\leq R\leq N$. \end{proof}
\begin{rem} Lemma~\ref{2x2 interval} admits an appealing physical interpretation which also leads to an intuitive proof. This interpretation is based on the fact that $2\times 2$ hermitian matrices can be seen as events in $4$-dimensional Minkowski space-time through the correspondence $x_0 \mathds{1} + \vec{x}\cdot\vec{\sigma} \leftrightarrow (x_0, \vec{x})$. Furthermore, $M\leq N$ translates in Minkowski space-time to `$N$ is in the absolute future of $M$', since the remarkable determinantal identity $\det (x_0 \mathds{1} + \vec{x}\cdot\vec{\sigma}) = x_0^2 - \vec{x}^2$ holds true. Now, the complex conjugation at the matrix level becomes nothing but a spatial reflection with respect to a fixed spatial plane in Minkowski space-time. Thus, our original question is: is it true that whenever both an event $N$ and its spatial reflection $N^*$ are in the absolute future of a reference event $M$ then there is another event $R$ which is: (i) in the absolute future of $M$; (ii) in the absolute past of both $N$ and $N^*$; and (iii) lies right on the reflection plane? The answer is clearly yes, and there is a simple way to obtain it. Start from $M$ and shoot a photon to the location of that event between $N$ and $N^*$ that will happen on the other side of the reflection plane. After some time the photon hits the plane, and this event $R$ clearly satisfies all requirements. \end{rem}
Now we are ready to give our direct proof of the equivalence between PPT and separability for $1$ vs $n$-mode Gaussian states, leveraging the simplified separability condition of Theorem~\ref{simp sep lemma}.
\begin{proof}[Proof of Theorem~\ref{PPT thm}] Suppose without loss of generality that $A$ is composed of one mode only. As in the proof of Theorem~\ref{simp sep lemma}, since both sets of QCMs $V_{AB}$ defined by~\eqref{sep eq} and~\eqref{PPT} are topologically closed, we can assume that $V_{AB}$ is in the interior of the PPT set, i.e.~that $V_{AB}>i\Omega_{A}\oplus (\pm i\Omega_{B})$. Our goal will be to show that in this case $V_{AB}$ belongs to the separable set, as characterized by Theorem~\ref{simp sep lemma}. Since $V_{B}-i\Omega_{B}$ is taken to be invertible, the PPT condition reads \begin{equation*} V_A - X (V_B \mp i\Omega_B)^{-1} X^T\, \geq\, i\Omega_{A}\, . \end{equation*} Now, define $M=i\Omega_{A}$ and $N=V_A - X (V_B + i\Omega_B)^{-1} X^T$, and observe that $N^{*}=V_A - X (V_B - i\Omega_B)^{-1} X^T$. Thanks to Lemma~\ref{2x2 interval}, we can find a real matrix $\gamma_{A}$ such that \begin{equation*} V_A - X (V_B \mp i\Omega_B)^{-1} X^T\, \geq\, \gamma_{A}\, \geq\, i\Omega_{A}\, . \end{equation*} Choosing the negative sign in the above inequality, we see that the second condition~\eqref{simp sep 2} in Theorem~\ref{simp sep lemma} is met, and therefore $V_{AB}$ is separable. \end{proof}
\section{Gaussian states that are invariant under partial transpose are separable}\label{inva}
As a further example of application of Theorem~\ref{simp sep lemma}, we study here the separability of a special class of PPT Gaussian states, i.e.~those that are \emph{invariant} under partial transposition of one of the subsystems. This problem has an analogue in finite-dimensional quantum information, already studied in~\cite{sep 2xN}, where it was shown that bipartite states on $\mathds{C}^{2}\otimes \mathds{C}^{d}$ that are invariant under partial transpose on the first system are necessarily separable~\footnote{The proof reported in~\cite{sep 2xN} is rather long, so here we provide a shorter one, again based on Schur complements. A state on $\mathds{C}^{2}\otimes \mathds{C}^{d}$ that is invariant under partial transposition on the first subsystem can be represented in block form as $\rho=\left( \begin{smallmatrix} A & X \\ X & B \end{smallmatrix}\right)$. By a continuity argument, we can suppose without loss of generality that $A>0$. Rewrite $\rho = \left( \begin{smallmatrix} A & X \\ X & XA^{-1}X \end{smallmatrix}\right) + \ket{1}\!\!\bra{1}\otimes (B-XA^{-1}X)$. Both terms are positive by Lemma~\ref{pos cond}. Since the second one is separable, let us deal only with the first one, call it $\tilde{\rho}$. We have $\tilde{\rho} = {\mathds{1}}_2 \otimes A^{1/2} \left( \begin{smallmatrix} {\mathds{1}} & Y \\ Y & Y^{2} \end{smallmatrix}\right) {\mathds{1}}_2 \otimes A^{1/2}$, where $Y\coloneqq A^{-1/2}X A^{-1/2}$ is hermitian. Denoting by $Y = \sum_{i} y_{i} \ket{e_{i}}\!\!\bra{e_{i}}$ its spectral decomposition, we obtain the following manifestly separable representation of $\tilde{\rho}$: \begin{equation*} \tilde{\rho} = {\mathds{1}}_2 \otimes A^{1/2} \bigg( \sum_{i} \left( \begin{smallmatrix} 1 & y_{i} \\ y_{i} & y_{i}^{2} \end{smallmatrix} \right) \otimes \ket{e_i}\!\!\bra{e_i}\bigg) {\mathds{1}}_2 \otimes A^{1/2}\, . \end{equation*}}. 
Here we show that for Gaussian states an even stronger statement holds, in that invariance under partial transposition implies separability for any number of local modes.
\begin{cor} A bipartite Gaussian state $\rho_{AB}^{G}$ that is invariant under partial transposition of one of the two subsystems is necessarily separable. \end{cor}
\begin{proof} Without loss of generality, we can assume that the partial transpose on the $B$ system leaves the state invariant. We now show that under this assumption the separability condition~\eqref{simp sep 2} is immediately satisfied, since the rightmost side is already a real, symmetric matrix. In fact, equating the original QCM~\eqref{V explicit} with the one obtained after partial transpose on the $B$ system, we get the identities $X=X \Theta_B$ and $V_{B}=\Theta_B V_{B}\Theta_B$, where, as previously set, ${\Theta}_B = \bigoplus_{j=1}^{n} \zeta$ according to a mode-wise decomposition of the $B$ system, and $\zeta = \left(\begin{smallmatrix}1 & 0 \\ 0 & -1 \end{smallmatrix}\right)$. As a consequence, \begin{equation*} X(V_{B}-i\Omega_{B})^{-1} X^{T} = X \Theta_{B} (V_{B} - i\Omega_{B})^{-1} \Theta_{B} X^{T} = X \left(\Theta_B (V_{B} - i\Omega_{B}) \Theta_B \right)^{-1} X^{T} = X \left(V_{B} + i\Omega_{B} \right)^{-1} X^{T}\, , \end{equation*} where we used also $\Theta_B \Omega_B \Theta_B = -\Omega_B$. This shows that $X(V_{B}-i\Omega_{B})^{-1} X^{T}$ is equal to its complex conjugate, and is therefore (despite appearances) a real symmetric matrix. Hence the separability condition~\eqref{simp sep 2} is satisfied with $\gamma_{A} = V_A - X(V_{B}-i\Omega_{B})^{-1} X^{T}$, which is a legitimate QCM as follows from the bona fide condition~\eqref{Heisenberg} together with Lemma~\ref{pos cond}. \end{proof}
\section{PPT implies separability for multimode mono-symmetric Gaussian states}\label{symm}
Throughout this Section, we show how the PPT criterion is also necessary and sufficient for deciding the separability of bipartite Gaussian states of $m$ vs $n$ modes that are symmetric under the exchange of any two among the first $m$ modes. These states will be referred to as {\em mono-symmetric} (with respect to the first party $A$). As can be easily seen, this novel result (see Fig.~\ref{mononucleosi} for a graphical visualisation) is a generalisation of both Theorem~\ref{PPT thm} and of one of the main results in~\cite{Serafini05}, where the subclass of bi-symmetric states was considered instead, bi-symmetric meaning that they are invariant under swapping any two modes either within the first $m$ or within the last $n$ (that is, they are mono-symmetric in both $A$ and $B$).
\begin{thm}[Symplectic localisation of mono-symmetric states] \label{PPt sym} Let $\rho^G_{AB}(V_{AB})$ be a mono-symmetric Gaussian state of $m+n$ modes, i.e.~specified by a QCM $V_{AB}$ that is symmetric under the exchange of any two of the $m$ modes of subsystem $A$. Then there exists a local unitary operation on $A$ corresponding to a symplectic transformation $S_A \in \mathrm{Sp}(2m, \mathds{R})$ that transforms $\rho^G_{AB}$ into the tensor product of $m-1$ uncorrelated single-mode Gaussian states $\tilde{\rho}^G_{A_j}(\tilde{V}_{A_j})$ ($j=2,\ldots,m$) and a bipartite Gaussian state $\tilde{\rho}^G_{A_1B}(\tilde{V}_{A_1B})$ of $1$ vs $n$ modes. At the QCM level, this reads \begin{equation}\label{monolocale} (S_A \oplus \mathds{1}_B) V_{AB} (S_A^T \oplus \mathds{1}_B) = \bigg(\bigoplus_{j=2}^m \tilde{V}_{A_j}\bigg) \oplus \tilde{V}_{A_1B} \,. \end{equation} The separability properties of $V_{AB}$ and $\tilde{V}_{A_1 B}$ are equivalent, in particular $\rho^G_{AB}(V_{AB})$ is separable if and only if it is PPT. \end{thm}
\begin{proof} We will prove~\eqref{monolocale} directly at the QCM level, by constructing a suitable local symplectic $S_A$. By virtue of the symmetry under the exchange of any two modes of subsystem $A$, if we decompose $V_{AB}$ as in~\eqref{V explicit}, the submatrices $V_A$ and $X$ have the following structure:
\begin{equation}\label{strucaz} V_{A}\, =\, \begin{pmatrix} \alpha & \varepsilon & \ldots & \varepsilon \\[-1ex] \varepsilon & \alpha & & \vdots \\[-1ex] \vdots & & \ddots & \varepsilon \\[-0.5ex] \varepsilon & \ldots & \varepsilon & \alpha \end{pmatrix}\, ,\qquad X\, =\, \begin{pmatrix} \kappa_{1} & \kappa_{2} & \ldots & \kappa_{n} \\ \kappa_{1} & \kappa_{2} & \ldots & \kappa_{n} \\ \vdots & & & \vdots \\ \kappa_{1} & \kappa_{2} & \ldots & \kappa_{n} \end{pmatrix} \, , \end{equation} where each one of the blocks $\alpha,\varepsilon,\kappa_{j}$ in (\ref{strucaz}) is a $2\times 2$ real matrix, with $\alpha$ and $\varepsilon$ symmetric~\cite{adescaling}.
We can now decompose the real space of the first $m$ modes as $\mathds{R}^{2m}=\mathds{R}^{m} \otimes \mathds{R}^{2}$. According to this decomposition, we may rewrite $V_A$ and $X$ as follows: \begin{equation} V_{A}\, =\, \mathds{1}_m\otimes (\alpha-\varepsilon) + m\ket{+}\!\!\bra{+}\otimes \varepsilon\, ,\qquad X\, =\, \sqrt{m}\,\sum_{j=1}^{n} \ket{+}\!\!\bra{j}\otimes \kappa_{j}\, , \end{equation} where $\ket{+}=\frac{1}{\sqrt{m}}\sum_{i=1}^{m}\ket{i}$, with $\{\ket{i}\}_{i=1}^m$ denoting the standard basis for $\mathds{R}^m$. Observe that the symplectic form $\Omega_A$ on subsystem $A$ decomposes accordingly as $\Omega_A = \mathds{1}_m\otimes \omega$. If $O$ is an $m\times m$ orthogonal matrix such that $O\ket{+}=\ket{1}$, we easily see that on the one hand $O\otimes\mathds{1}_2\ \Omega_A\ O^{T}\otimes\mathds{1}_2 = \Omega_A$, i.e.~$O\otimes\mathds{1}_2$ is symplectic, while on the other hand \begin{align} O\otimes\mathds{1}_2\ V_{A}\ O^{T}\otimes\mathds{1}_2\, &=\, \ket{1}\!\!\bra{1} \otimes (\alpha + (m-1)\varepsilon) + \sum_{i=2}^{m} \ket{i}\!\!\bra{i}\otimes (\alpha-\varepsilon)\, =\, \begin{pmatrix} \alpha+(m-1)\varepsilon & 0 & \ldots & 0 \\[-1ex] 0 & \alpha-\varepsilon & & \vdots \\[-1ex] \vdots & & \ddots & 0 \\[-0.5ex] 0 & \ldots & 0 & \alpha-\varepsilon \end{pmatrix}\, , \\[2ex] O\otimes\mathds{1}_2\ X \, &=\, \sqrt{m}\, \sum_{j=1}^{n} \ket{1}\!\!\bra{j}\otimes \kappa_{j}\, =\, \begin{pmatrix} \sqrt{m}\,\kappa_{1} & \sqrt{m}\, \kappa_{2} & \ldots & \sqrt{m}\,\kappa_{n} \\ 0 & 0 & \ldots & 0 \\ \vdots & & & \vdots \\ 0 & 0 & \ldots & 0 \end{pmatrix}\, . \end{align} Therefore, the initial QCM $V_{AB}$ has been decomposed as a direct sum of $m-1$ one-mode QCMs $\tilde{V}_{A_j}=\alpha-\varepsilon$, and of one $(1+n)$-mode QCM $\tilde{V}_{A_1B}$, via a local symplectic operation on subsystem $A$, given precisely by $S_A = O \otimes \mathds{1}_2$. This proves (\ref{monolocale}) constructively. 
Applying Theorem~\ref{PPT thm}, one then gets immediately that the PPT condition is necessary and sufficient for separability in this case. \end{proof}
\begin{rem} This original result yields a substantial enlargement of the domain of validity of PPT as a necessary and sufficient criterion for separability of multimode Gaussian states, reaching beyond any existing literature. In practice, Theorem~\ref{PPt sym} tells us that, in any mono-symmetric Gaussian state, all the correlations (including and beyond entanglement) shared among the whole $m$ modes of $A$ and the whole $n$ modes of $B$ can be localised onto correlations between a single mode $A_1$ of $A$ vs the whole $B$, by means of a local unitary (symplectic at the QCM level) operation at $A$'s side only. Being unitary, this operation is fully reversible, meaning that the correlations with $B$ can be redistributed back and forth between $A_1$ and the whole set of $A$ modes with no information loss. This also means that quantitative results on any measure of such correlations between $A$ and $B$ encoded in $V_{AB}$ can be conveniently evaluated in the much simpler $1$ vs $n$-mode normal form $\tilde{V}_{A_1B}$ constructed in the proof of Theorem~\ref{PPt sym}, ignoring the $m-1$ uncorrelated modes.
In the special case of $V_{AB}$ being the QCM of a bi-symmetric state, i.e.~with full permutation symmetry within both $A$ and $B$, it is immediate to observe that applying a similar construction by means of a local unitary at $B$'s side as well fully reduces $V_{AB}$ to a two-mode QCM $\tilde{V}_{A_1 B_1}$, with the same entanglement properties as the original $V_{AB}$, plus a collection of $m+n-2$ uncorrelated single modes. This reproduces the findings of~\cite{Serafini05}.
Similarly to what was discussed in Remark~\ref{remulti}, the results of Theorem~\ref{PPt sym} can also be straightforwardly extended to characterise full separability and, conversely, multipartite entanglement of arbitrary multimode Gaussian states which are partitioned into $k$ subsystems, with the requirement of local permutation invariance within some of these subsystems. It is clear that, by suitable local symplectic transformations, each of those locally symmetric parties can be localised onto a single mode correlated with the remaining parties, thus removing the redundancy in the QCM. Gaussian states of this sort generalise the so-called multi-symmetric states studied in~\cite{moleculo}, where local permutation invariance was enforced within all of the subsystems, resulting in a direct multipartite analogue of bi-symmetric states.
\end{rem}
\section{PPT implies separability for multimode isotropic Gaussian states -- Revisited} \label{mode-wise}
{ It is well known that the PPT criterion is in general sufficient, as well as obviously necessary, for pure bipartite states to be separable~\cite{Peres}. This may be seen by a direct inspection of the Schmidt decomposition of a pure state. Let us note, incidentally, that a stronger statement holds, namely any bound entangled state (in any dimension) must have at least rank $4$~\cite{chen08}.
The Schmidt decomposition theorem is in fact so important that a Gaussian version of it, that is, the determination of a normal form of pure QCMs under local symplectic operations, is of central importance in continuous variable quantum information. As can be shown at the covariance matrix level~\cite{holwer,giedkemode} or at the density operator level~\cite{botero03}, every pure bipartite Gaussian state $\rho^G_{AB}(V_{AB})$ can be brought into a tensor product of two-mode squeezed vacuum states and single-mode vacuum states by means of local unitaries with respect to the $A$ vs $B$ partition. In particular, by acting correspondingly with local symplectic transformations, any pure QCM $V_{AB}$ (where pure means $\det V_{AB} = 1$) can be transformed into a direct sum of (pure) two-mode squeezed vacuum QCMs and (pure) single-mode vacuum QCMs. More precisely, at the level of QCMs, one can formulate this fundamental result as follows.
\begin{thm}[Mode-wise decomposition of pure Gaussian states~\cite{holwer,botero03,giedkemode}] \label{mode-wise thm} Let $V_{AB}$ be a bipartite QCM of $m+n$ modes $A_1,\ldots,A_m,B_1,\ldots,B_n$, assuming $m \leq n$ (with no loss of generality). If $V_{AB}$ is a pure QCM, i.e.~all its symplectic eigenvalues are equal to $1$ (which amounts to $\det V_{AB}=1$), then there exist local symplectic transformations $S_A \in \mathrm{Sp}(2m,\mathds{R})$, $S_B \in \mathrm{Sp}(2n,\mathds{R})$ mapping $V_{AB}$ into the following normal form: \begin{equation}\label{modewise} (S_A \oplus S_B) V_{AB} (S_A^T \oplus S_B^T) = \bigoplus_{j=1}^m \bar{V}_{A_jB_j}(r_j)\, \oplus \bigoplus_{k=m+1}^n \mathds{1}_{B_k}\,, \end{equation} where $\bar{V}_{A_jB_j} = \begin{pmatrix} c_j {\mathds{1}} & s_j {\zeta} \\ s_j {\zeta} & c_j {\mathds{1}} \end{pmatrix}$ with $c_{j}=\cosh(2r_j)$ and $s_j=\sinh(2r_j)$, for a real squeezing parameter $r_j$, is the pure QCM of a two-mode squeezed vacuum state of modes $A_j$ and $B_j$, and $\mathds{1}_{B_k}$ is the pure QCM of the single-mode vacuum state of mode $B_k$. In particular, with respect to the block form~\eqref{V explicit}, for any pure QCM $V_{AB}$ the marginal QCMs $V_A$ and $V_B$ have matching symplectic spectra, given by $\vec{\nu}_A=(c_1,\ldots,c_m)$ and $\vec{\nu}_B=(c_1,\ldots,c_m,\underbrace{1,\ldots,1}_{n-m})$. \end{thm}
Leaving aside its far-reaching applications, in the context of the present paper this result is mainly instrumental for assessing the separability of so-called {\it isotropic} multimode Gaussian states. The QCM of any such state of $m+n$ modes is characterised by the property of having a completely degenerate symplectic spectrum, i.e.~formed of only one distinct symplectic eigenvalue $\nu \geq 1$ (repeated $m+n$ times). This means that the QCM $V_{AB}$ of any isotropic state is proportional by a factor $\nu$ to a pure QCM. Hence, Theorem~\ref{mode-wise thm} tells us that $V_{AB}$ can be brought into a direct sum of two-mode QCMs via a local symplectic congruence (local with respect to any partition into groups of modes $A$ and $B$), as first observed in~\cite{holwer}. Thanks to Theorem~\ref{PPT thm}, this guarantees the following.
\begin{thm} \label{iso thm} The PPT criterion is necessary and sufficient for separability of all isotropic Gaussian states of an arbitrary number of modes. \end{thm}
However, notwithstanding the importance of Theorem~\ref{mode-wise thm} per se, one could strive to seek a more direct way to obtain Theorem~\ref{iso thm}. Our purpose in this Section is in fact to provide an alternative proof of this result, which does not appeal to the mode-wise decomposition theorem at all, and directly uses Lemma~\ref{sep} instead, leveraging matrix analysis tools such as the notions of matrix means introduced in Section~\ref{secMM}.
Note that, in almost all the remainder of this Section, for a single system of $n$ modes, we will find it more convenient to reorder the vector of canonical operators as $r \coloneqq (x_1,\ldots,x_n,p_1,\ldots,p_n)$, corresponding to a position-momentum block structure. The symplectic form $\Omega$ appearing in~\eqref{CCR} is accordingly rewritten as \begin{equation}\label{CCR2} \Omega \coloneqq
\begin{pmatrix} 0 & \mathds{1} \\ -\mathds{1} & 0 \end{pmatrix}\,.
\end{equation}
We will then write any QCM $V$, as well as any symplectic operation $S$ acting on it, with respect to this alternative block structure, unless explicitly stated otherwise.
Let us start with a preliminary result, equivalent to Proposition 12 of~\cite{manuceau} or Lemma 13 of~\cite{Lami16}. We include a proof for the sake of completeness.
\begin{lemma} \label{QCM geom lemma} Let $V>0$ be a positive matrix. Then $V$ is a pure QCM if and only if $V=\Omega V^{-1}\Omega^{T}$, and it obeys~\eqref{Heisenberg} if and only if \begin{equation}\label{WalterWhite} V\, \geq\, V\# (\Omega V^{-1}\Omega^T)\, . \end{equation} \end{lemma}
\begin{proof} Let the Williamson form of $V$ be given by~\eqref{Williamson}, where (in the convention of this Section) $N=\Lambda \oplus \Lambda$, with $\Lambda\coloneqq(\nu_1,\ldots,\nu_n)$. Then we can write \begin{equation*} \Omega V^{-1} \Omega^{T}\, =\, \Omega S^{- T} (\Lambda^{-1} \oplus \Lambda^{-1}) S^{-1}\Omega^{T}\, =\, S \Omega (\Lambda^{-1} \oplus \Lambda^{-1})\Omega^{T} S^{T}\, =\, S (\Lambda^{-1} \oplus \Lambda^{-1}) \Omega \Omega^{T} S^{T}\, =\, S (\Lambda^{-1} \oplus \Lambda^{-1}) S^{T}\, , \end{equation*} where we used in order: (i) the identities $\Omega S^{- T} = S\Omega$, $S^{-1}\Omega^{T} = \Omega^{T} S^{T}$, all consequences of the defining symplectic identity $S\Omega S^{T}=\Omega$; (ii) the fact that $\Omega$ commutes with $\Lambda^{-1} \oplus \Lambda^{-1}$; and (iii) the orthogonality relation $\Omega\Omega^T=\mathds{1}$. Now the first claim becomes obvious, since $\Lambda=\Lambda^{-1}=\mathds{1}$ if and only if $V$ is a pure QCM. In general, as can be seen from the above expression, $V$ and $\Omega V^{-1} \Omega^{T}$ are brought into Williamson form by simultaneous congruences with the same symplectic matrix (i.e. $S^{-1}$, in the convention of~\eqref{Williamson}). Hence, the covariance of the geometric mean under congruence ensures that \begin{equation*} V\# (\Omega V^{-1}\Omega^{T})\, =\, \left( S \begin{pmatrix} \Lambda & 0 \\ 0 & \Lambda \end{pmatrix} S^{T} \right) \# \left( S \begin{pmatrix} \Lambda^{-1} & 0 \\ 0 & \Lambda^{-1} \end{pmatrix} S^{T} \right)\, =\, S \left( \begin{pmatrix} \Lambda & 0 \\ 0 & \Lambda \end{pmatrix} \# \begin{pmatrix} \Lambda^{-1} & 0 \\ 0 & \Lambda^{-1} \end{pmatrix}\right) S^{T}\, =\, SS^{T}\, , \end{equation*} where the last passage is an easy consequence of the fact that $A\# A^{-1}=\mathds{1}$ for all $A>0$. 
By comparison with~\eqref{Williamson}, we see that the Heisenberg uncertainty relation $\Lambda\geq \mathds{1}$ can be rephrased as $V=S (\Lambda \oplus \Lambda) S^{T}\geq SS^{T}=V\# (\Omega V^{-1}\Omega^{T})$, which reproduces~\eqref{WalterWhite}, proving the second claim. \end{proof}
\begin{rem} From the above proof it is also apparent how, for any positive $A>0$, the matrix $A\# (\Omega A^{-1}\Omega^{T})$ is a pure QCM (independently of the nature of $A$). \end{rem}
Now we are ready to explain our direct argument to show separability of PPT isotropic Gaussian states, alternative to the use of the mode-wise decomposition.
\begin{proof}[Proof of Theorem~\ref{iso thm}] We start by rewriting the PPT condition~\eqref{PPT} for a QCM $V_{AB}$ as
\begin{equation*} \Theta_B V \Theta_B\, \geq\, i\Omega\, , \end{equation*} where in the convention of this Section $\Theta_B={\mathds{1}}_A \oplus \left( \begin{smallmatrix} \mathds{1} & 0 \\ 0 & -\mathds{1} \end{smallmatrix}\right)_B$. Thanks to Lemma~\ref{QCM geom lemma}, this becomes in turn \begin{equation*} \Theta_B V \Theta_B\, \geq\, (\Theta_B V \Theta_B) \# (\Omega \Theta_B V^{-1} \Theta_B \Omega^{T}) \end{equation*} and finally \begin{equation*} V\, \geq\, V \# (\Theta_B \Omega \Theta_B V^{-1} \Theta_B \Omega^{T}\Theta_B)\, =\, (g V) \# (Z \Omega\, (g V)^{-1}\, \Omega^{T} Z) \end{equation*} after conjugating by $\Theta_B$, applying once more the covariance of the geometric mean under congruences, introducing a real parameter $g>0$ (to be fixed later), and defining $Z\coloneqq \mathds{1}_{A}\oplus (-\mathds{1}_{B})=\Theta_B \Omega \Theta_B \Omega^T$. Now, we apply Lemma~\ref{lemma ha=g} to the above expression, obtaining \begin{equation*} V\, \geq\, \left(\frac{g V+Z\Omega\, (gV)^{-1}\, \Omega^{T} Z}{2}\right) \# \left( (gV)\, ! \left( Z\Omega\, (gV)^{-1}\, \Omega^{T} Z \right) \right)\, . \end{equation*} Although it is not yet transparent, we are done, as the right-hand side of the above inequality is exactly of the form $\gamma_{A}\oplus\gamma_{B}$ when $V$ is the QCM of an isotropic Gaussian state. In fact, let $g>0$ be such that $gV$ is a pure QCM, satisfying $gV=\Omega\, (gV)^{-1}\, \Omega^{T}=\left( \begin{smallmatrix} P & Y \\ Y^{T} & Q \end{smallmatrix} \right)$, where we have now reverted to a block decomposition with respect to the $A$ vs $B$ splitting. Then on the one hand since $Z=\mathds{1}_{A}\oplus (-\mathds{1}_{B})$ we find \begin{equation*} \frac{g V+Z\Omega\, (gV)^{-1}\, \Omega^{T} Z}{2}\, =\, \begin{pmatrix} P & 0 \\ 0 & Q \end{pmatrix} , \end{equation*} while on the other hand \begin{align*} (gV)\, ! 
\left( Z\Omega\, (gV)^{-1}\, \Omega^{T} Z \right)\, &=\, 2 \left( (gV)^{-1} + Z\Omega\, (gV)\, \Omega^{T} Z \right)^{-1}\, =\, 2 \Omega \left( \Omega (gV)^{-1} \Omega^{T} + Z (gV) Z \right)^{-1} \Omega^{T}\, =\\ &=\, 2 \Omega \left( gV + Z (gV) Z \right)^{-1} \Omega^{T}\, =\, \begin{pmatrix} \Omega P^{-1}\Omega^{T} & 0 \\ 0 & \Omega Q^{-1} \Omega^{T}\end{pmatrix} \, , \end{align*} where we used the definition~\eqref{harmonic} of harmonic mean and the fact that $[Z,\Omega]=0$. Putting all together, we find \begin{equation*} V\, \geq\, \begin{pmatrix} P & 0 \\ 0 & Q \end{pmatrix}\#\begin{pmatrix} \Omega P^{-1}\Omega^{T} & 0 \\ 0 & \Omega Q^{-1} \Omega^{T}\end{pmatrix}\, =\, \begin{pmatrix} P \# \Omega P^{-1} \Omega^{T} & 0 \\ 0 & Q \# \Omega Q^{-1} \Omega^{T} \end{pmatrix}\, =\, \gamma_{A}\oplus \gamma_{B}\, . \end{equation*} Since we already observed that $P \# \Omega P^{-1} \Omega^{T}$ is a QCM for any $P>0$ (and analogously for $Q$), a direct invocation of Lemma~\ref{sep} allows us to conclude the proof. \end{proof} }
\section{Entangling Gaussian states via passive optical operations}\label{pass}
Throughout this Section, we finally complete the solution of a problem posed in~\cite{passive} and there addressed under some additional constraints. Let us start by recalling that symplectic operations can be divided into two main categories, namely those such as squeezers that require an exchange of energy between the system and the apparatus, called \emph{active}, and those that can be implemented using only beam splitters and phase plates, called \emph{passive}. A symplectic matrix $K$ represents a passive transformation if and only if it is also \emph{orthogonal}, meaning that $KK^{T}={\mathds{1}}$ (it may be worth adding that symplectic orthogonal transformations form the maximal compact subgroup of the symplectic group). As it turns out, symplectic orthogonal matrices can be represented in an especially simple form if we resort to a position-momentum block decomposition. Namely, one has the parametrisation~\cite{bucco} \begin{equation} K = W^\dag \begin{pmatrix} U & \\ & U^* \end{pmatrix} W\, , \end{equation} where \begin{equation*} W \coloneqq \frac{1}{\sqrt{2}} \begin{pmatrix} {\mathds{1}} & i{\mathds{1}} \\ {\mathds{1}} & -i{\mathds{1}} \end{pmatrix} \end{equation*} and $U$ is a generic, $n\times n$ unitary matrix, with $U^*$ denoting its complex conjugate.
Since the implementation of passive operations is so inexpensive in quantum optics and entangled states so useful for quantum technologies, the question first posed in~\cite{passive} was a natural one: ``What bipartite Gaussian states are such that they can be entangled via a global, passive operation?'' However, in this full generality the problem was left unanswered in~\cite{passive}. Instead, another related question was investigated and answered there, namely whether \emph{distillable} Gaussian entanglement can be produced in the same fashion. For Gaussian states, as mentioned in the Introduction, distillability is well known to be equivalent to non-positivity of the partial transpose~\cite{Giedke01,GiedkeQIC}, so the authors of~\cite{passive} proceeded to identify the class of Gaussian states that can be made to violate the PPT condition with a passive transformation. However, it is important to realise that since PPT and separability are not the same for general multimode Gaussian states, the two questions are a priori different. Here we show that the answer to the original question above turns out to be yet another situation where the PPT condition is necessary and sufficient to ensure separability of Gaussian states. In other words, we will prove that a bipartite Gaussian state that can not be made distillable (i.e.~non-PPT) via passive operations is necessarily separable, and thus it stays separable under the application of said passive operations. Let us start with a technical lemma that we deduce from recent results obtained in~\cite{bhatia15}.
\begin{lemma} \label{lemma eig vs sp eig} Let $A>0$ be a strictly positive $2n\times 2n$ matrix. Let $\nu_{i} (A)$ and $\lambda_i (A)$ denote its symplectic and ordinary (orthogonal) eigenvalues, respectively, arranged in nondecreasing order. Then \begin{equation} \nu_{1}(A)^2\geq \lambda_1(A) \lambda_2(A)\, . \label{ineq sympl} \end{equation} In particular, every positive matrix whose two smallest eigenvalues satisfy $\lambda_1 \lambda_2\geq 1$ is automatically a legitimate QCM. \end{lemma}
\begin{proof} From~\cite[Equation (71)]{bhatia15} we deduce $\prod_{j=1}^{k} \nu_{n-j+1}(A)^2\leq \prod_{j=1}^{2k} \lambda_{2n-j+1}(A)$ for all $k=1,\ldots,n$, with equality for $k=n$, when both terms equal the determinant of $A$. We can use this observation to deduce that $\prod_{j=1}^k \nu_j(A)^2\geq \prod_{j=1}^{2k} \lambda_j(A)$ for all $k=1,\ldots,n$. The special case $k=1$ yields the claim. \end{proof}
Now, we are ready to present our strengthening of~\cite[Proposition 1]{passive}.
\begin{thm} \label{abs sep Gauss} Let $V$ be a bipartite QCM of an $n$-mode system. Then the following are equivalent: \begin{enumerate}[(i)] \item $KVK^T$ is separable for all Gaussian passive transformations $K$; \item $KVK^T$ is PPT for all Gaussian passive transformations $K$; and \item the two smallest eigenvalues of $V$ satisfy $\lambda_1(V)\lambda_2(V)\geq 1$. \end{enumerate} \end{thm}
\begin{proof}
The implication $(i)\Rightarrow (ii)$ is obvious, while $(iii)\Rightarrow (ii)$ already follows from Lemma~\ref{lemma eig vs sp eig} together with the fact that the partial transpose at the level of QCMs is a congruence by orthogonal transformation and thus does not change the ordinary spectrum. One of the main contributions of~\cite{passive} is the proof that $(ii)$ and $(iii)$ are in fact equivalent. In view of this discussion, we have just to show that $(iii)\Rightarrow (i)$. To this end, we will assume that $V$ satisfies $\lambda_1(V)\lambda_2(V)\geq 1$ and construct two local QCMs $\gamma_A, \gamma_B$ that satisfy the hypothesis of the original separability criterion given by Lemma~\ref{sep}. Call $\lambda_1(V)= k$ and observe that if $k\geq 1$ then $V\geq {\mathds{1}}={\mathds{1}}_A\oplus {\mathds{1}}_B$ and we are done. Otherwise, assume $k<1$ and denote by $\ket{x}$ the normalised eigenvector corresponding to the minimal eigenvalue of $V$, i.e.~$V\ket{x}= k \ket{x}$ and $\braket{x|x}=1$. Since $\lambda_2(V)\geq \frac1k$ and a fortiori $\lambda_i(V)\geq \frac1k$ for all $i\geq 2$, we can write \begin{equation*} V\geq k \ket{x}\!\!\bra{x} + \frac1k \left( {\mathds{1}} - \ket{x}\!\!\bra{x} \right) . \end{equation*}
Now, decompose the vector $\ket{x}$ into its $A$ and $B$ components as $\ket{x}=\left(\begin{smallmatrix} \sqrt{p} \ket{y}_A \\ \sqrt{1-p} \ket{z}_B \end{smallmatrix}\right)$, where $0\leq p\leq 1$ and $\braket{y|y}=1=\braket{z|z}$. Then, Lemma~\ref{lemma eig vs sp eig} guarantees that the matrices \begin{align*} \gamma_A &\coloneqq k \ket{y}\!\!\bra{y} + \frac1k \left( {\mathds{1}} - \ket{y}\!\!\bra{y} \right) \\ \gamma_B &\coloneqq k \ket{z}\!\!\bra{z} + \frac1k \left( {\mathds{1}} - \ket{z}\!\!\bra{z} \right) \end{align*} are legitimate QCMs. Then showing $V_{AB} - \gamma_A\oplus \gamma_B\geq 0$ would complete our proof. By direct computation, we find \begin{align*} V_{AB} - \gamma_A\oplus \gamma_B &\geq -\left( \frac1k - k\right) \begin{pmatrix} p \ket{y}\!\!\bra{y} & \sqrt{p(1-p)} \ket{y}\!\!\bra{z} \\ \sqrt{p(1-p)} \ket{z}\!\!\bra{y} & (1-p) \ket{z}\!\!\bra{z} \end{pmatrix} + \frac1k \begin{pmatrix} {\mathds{1}} & 0 \\ 0 & {\mathds{1}} \end{pmatrix} \\ &\quad - \begin{pmatrix} -\left( \frac1k - k\right) \ket{y}\!\!\bra{y} + \frac1k {\mathds{1}} & 0 \\ 0 & -\left( \frac1k - k\right) \ket{z}\!\!\bra{z} + \frac1k {\mathds{1}} \end{pmatrix} \\ &= \left( \frac1k - k\right) \begin{pmatrix} (1-p) \ket{y}\!\!\bra{y} & - \sqrt{p(1-p)} \ket{y}\!\!\bra{z} \\ -\sqrt{p(1-p)} \ket{z}\!\!\bra{y} & p \ket{z}\!\!\bra{z} \end{pmatrix} \\ &= \left( \frac1k - k\right) \begin{pmatrix} \sqrt{1-p} \ket{y} & -\sqrt{p} \ket{z} \end{pmatrix}^T \begin{pmatrix} \sqrt{1-p} \ket{y} & -\sqrt{p} \ket{z} \end{pmatrix} \\ &\geq 0\, . \end{align*} \end{proof}
\begin{rem} In some sense, one can think of the question posed in~\cite{passive} and answered here in Theorem~\ref{abs sep Gauss} as a continuous variable analogue of the \emph{absolute separability} problem in finite-dimensional quantum information, which asks for the characterisation of those spectra $\sigma=(\lambda_1,\ldots,\lambda_{d d'})$ such that every bipartite quantum state on $\mathds{C}^d\otimes \mathds{C}^{d'}$ with spectrum $\sigma$ is separable~\cite{kus01}. For a recent review of the state of the art, we refer the reader to~\cite{abs sep review}. A suggestive argument concerning this analogy goes as follows. An arbitrary unitary transformation $\rho\mapsto U\rho U^\dag$ corresponds to an internal time evolution according to some unknown Hamiltonian. Then, the absolutely separable states are exactly those bipartite states whose correlations are so weak that they can not be made entangled by any internal evolution. In the case of continuous variable quantum systems, one may hold the free-field Hamiltonian $\mathcal{H}=\frac12 r^Tr$ as the privileged one, so that it makes sense to restrict oneself to those unitary evolutions that preserve this particular Hamiltonian. If the original state is Gaussian and the unitaries are generated by quadratic Hamiltonians, so that they are represented by symplectic matrices, preserving the free-field Hamiltonian is the defining feature of passive transformations, and one obtains exactly the problem we solved here.
As is often the case, the technical details and the nature of the solution are simpler in the Gaussian realm. We found that the condition for being `absolutely separable' in the Gaussian sense is expressed by a simple inequality involving only the two smallest ordinary eigenvalues of the QCM, and that there are no `absolutely PPT' states that are not `absolutely separable' too. This latter equivalence has been conjectured to hold for the original problem in discrete-variable systems as well, but so far only partial answers are available. Namely, the conditions for absolute PPT-ness can be written explicitly~\cite{hildebrand07}, but whether or not they imply absolute separability is in general unknown. However, the answer to this latter question has been shown to be affirmative for the case of two qubits~\cite{verstraete01} and more recently for qubit-qudit systems~\cite{johnston13}. \end{rem}
\section{Summary and outlook}\label{outro}
In this work we advanced the mathematical and physical study of separability and entanglement distillability in Gaussian states of continuous variable quantum systems. Based on the properties of Schur complements and other matrix analysis tools, we obtained a simplified necessary and sufficient condition for the separability of all multimode Gaussian states, requiring optimisation over the set of local covariance matrices of one subsystem only. Exploiting this result, we presented a compact proof of the equivalence between PPT and separability for $1$ vs $n$-mode Gaussian states, a seminal result in continuous variable quantum information theory~\cite{Simon00,Werner01}, as well as extended the criterion to multimode classes of so-called mono-symmetric and isotropic Gaussian states, through novel derivations. Furthermore, we completed the investigation of entanglement generation under passive operations by extending seminal results~\cite{passive} to consider the generation of any, possibly PPT, Gaussian entangled state: in this context we showed that, if passive operations can not turn an initial Gaussian state into a non-PPT one, then no PPT entanglement can be generated through them either. This can be interpreted as establishing the equivalence between absolute separability and absolute PPT-ness in the Gaussian world. Side results of our analysis include a novel proof that Gaussian states invariant under partial transposition are separable, as well as an independent proof of the equivalence between Gaussian separability and complete extendability with Gaussian extensions~\cite{Bhat16}.
In the context of this paper, and with the methods illustrated in this study, it would be interesting to research more general combinations of symmetries and conditions on the symplectic spectra of quantum covariance matrices whereby the sufficiency of the PPT separability criterion might be further extended. For instance, is it possible to obtain a Gaussian analogue of the results in~\cite{chen08}, whereby bound entangled Gaussian states can only exist given some simple condition on their symplectic rank? In our studies, both for mono-symmetric and isotropic states, large degeneracies in their symplectic spectra (for the marginal covariance matrix of one subsystem, and for the global covariance matrix of the bipartite system, respectively) played a key role in proving the sufficiency of PPT for separability. It would be desirable to provide a full systematic characterisation of such requirements, possibly drawing inspiration from and/or shedding new insight on the Gaussian quantum marginal problem~\cite{tyc}.
Finally, let us stress how matrix analysis tools such as those heavily exploited in this paper have already been proven useful for qualitative and quantitative analysis of entanglement and other correlations, including Einstein-Podolsky-Rosen steering, in general states of continuous variable systems~\cite{Giedke01,giedkemode,nogo1,nogo2,nogo3,eisemi,wise,Adesso12,gian,Simon16,Lami16,anders}. Aside from the fact that very powerful analytical results can be proven with relative simplicity using these tools, it is important to remark once more that the characterisations we provided of the separability problem, as well as the variational characterisation of the Schur complement and related problems, can be straightforwardly recast as {\em semidefinite programs}~\cite{eisemi}, thus leading to efficient numerical methods to witness inseparability and entanglement distillability in general multimode Gaussian or non-Gaussian states based on covariance matrices.
We will explore these and other applications in further studies.
\begin{acknowledgments} GA warmly acknowledges highly stimulating interactions with organisers, lecturers, and participants at the 2nd IMSc School on Quantum Information (Chennai, India, December 2016), during which this work was completed, and in particular very fruitful discussions with R.~Simon, M.~Banik, R.~Sengupta, and A.~Nayak on topics related to this paper. We acknowledge financial support from the European Union under the European Research Council (StG GQCOP No.~637352 and AdG IRQUAT No.~267386) and the European Commission (STREP RAQUEL No.~FP7-ICT-2013-C-323970), the Foundational Questions Institute (fqxi.org) Physics of the Observer Programme (Grant No.~FQXi-RFP-1601), the Spanish MINECO (Project no. FIS2013-40627-P and no. FIS2016-86681-P), and the Generalitat de Catalunya (CIRIT Project No. 2014 SGR 966). AS acknowledges financial support from EPSRC through grant EP/K026267/1. \end{acknowledgments}
\providecommand \doibase [0]{http://dx.doi.org/} \providecommand \dois[2]{\href{\doibase#1}{#2}}
\end{document} | arXiv | {
"id": "1612.05215.tex",
"language_detection_score": 0.7855172753334045,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Orbits of tori extended by finite groups and their polynomial hulls: the case of connected complex orbits}
\author{V.M.~Gichev}
\address{Omsk Branch of Sobolev Institute of Mathematics, Pevtsova 13, 644099, Omsk, Russia}
\email{gichev@ofim.oscsbras.ru} \thanks{The author was partially supported by RFBR Grants 06-08-01403 and 06-07-89051.}
\subjclass{Primary 32E20;
Secondary 32M15, 32M05. } \date{}
\keywords{Polynomial hulls, bounded symmetric domains}
\begin{abstract} Let $V$ be a complex linear space, $G\subset\mathop{\mbox{\rm GL}}\nolimits(V)$ be a compact group. We consider the problem of description of polynomial hulls $\wh{Gv}$ for orbits $Gv$, $v\in V$, assuming that the identity component of $G$ is a torus $T$. The paper contains a universal construction for orbits which satisfy the inclusion $Gv\subset T^{\mathord{\mathbb C}} v$ and a characterization of pairs $(G,V)$ such that it is true for a generic $v\in V$. The hull of a finite union of $T$-orbits in $T^{\mathord{\mathbb C}} v$ can be distinguished in $\mathop{\mbox{\rm clos}} T^{\mathord{\mathbb C}} v$ by a finite collection of inequalities of the type $\abs{z_1}^{s_1}\dots\abs{z_n}^{s_n}\leq c$. In particular, this is true for $Gv$. If powers in the monomials are independent of $v$, $Gv\subset T^{\mathord{\mathbb C}} v$ for a generic $v$, and either the center of $G$ is finite or $T^{\mathord{\mathbb C}}$ has an open orbit, then the space $V$ and the group $G$ are products of standard ones; the latter means that $G=S_nT$, where $S_n$ is the group of all permutations of coordinates and $T$ is either ${\mathord{\mathbb T}}^n$ or $\mathop{\mathrm{SU}}\nolimits(n)\cap{\mathord{\mathbb T}}^n$, where ${\mathord{\mathbb T}}^n$ is the torus of all diagonal matrices in $\mathop{\mathrm{U}}\nolimits(n)$. The paper also contains a description of polynomial hulls for orbits of isotropy groups of bounded symmetric domains. This result is already known, but we formulate it in a different form and supply with a shorter proof. \end{abstract}
\maketitle \section*{Introduction} Let $V$ be a finite-dimensional complex linear space and $G\subset\mathop{\mbox{\rm GL}}\nolimits(V)$ be a compact subgroup of $\mathop{\mbox{\rm GL}}\nolimits(V)$. We consider the problem of description of polynomially convex hulls for orbits $O_v=Gv$, $v\in V$. The {\it polynomially convex hull} (or {\it polynomial hull}) $\wh Q$ of a compact set $Q\subset V$ is defined as \begin{eqnarray}\label{defph}
\wh Q=\{z\in V:\,\abs{p(z)}\leq\sup_{{\mathord{\zeta}}\in Q}|p({\mathord{\zeta}})|\quad\mbox{for all}\quad p\in\mathord{\mathcal{P}}(V)\}, \end{eqnarray} where $\mathord{\mathcal{P}}(V)$ is the algebra of all holomorphic polynomials on $V$. It is usually difficult to find $\wh Q$. For $Q=Gv$, the answer is known if $G$ is an isotropy group of a bounded symmetric domain in ${\mathord{\mathbb C}}^n$. Paper \cite{Ka} contains a description of $G$-invariant polynomially convex compact sets, including hulls of orbits ($Q\subset V$ is {\it polynomially convex} if $\wh Q=Q$); it continues paper \cite{KZ} and uses results of \cite{FB}. On the other hand, it is known that an orbit of a compact linear group is polynomially convex if and only if the complex orbit $G^{\mathord{\mathbb C}} v$ is closed and $Gv$ is its real form (\cite{GL}). The cases $G=\mathop{\mathrm{U}}\nolimits(2),\mathop{\mathrm{SU}}\nolimits(2)$ were considered in \cite{An}, \cite{DG}. The problem of determination of polynomial hulls of orbits admits the following natural generalization: given a homogeneous space $M$ of a compact group $G$, describe maximal ideal spaces $\mathord{\mathcal{M}}_A$ of $G$-invariant closed subalgebras $A$ of $C(M)$, where $C(M)$ is the Banach algebra of all continuous complex-valued functions on $M$ with the sup-norm. If $A$ is generated by a finite-dimensional invariant subspace, then $\mathord{\mathcal{M}}_A$ can be realized as the polynomial hull of an orbit. Paper \cite{Gi} contains a description of $\mathord{\mathcal{M}}_A$ for bi-invariant algebras on compact groups and partial results on spherical homogeneous spaces. Maximal ideal spaces for $\mathop{\mathrm{U}}\nolimits(n)$-invariant algebras on spheres in ${\mathord{\mathbb C}}^n$ are described in \cite{Kan}.
In this paper we consider orbits $Gv$ of groups $G=FT$, where $F\subseteq G$ is a finite subgroup and $T$ is a torus, such that $G^{\mathord{\mathbb C}} v=T^{\mathord{\mathbb C}} v$. Let ${\mathord{\mathfrak{t}}}\subseteq{\mathord{\mathfrak{g}}}\mathord{\mathfrak{l}}(V)$ be the Lie algebra of $T$ and set ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}=i{\mathord{\mathfrak{t}}}$, $T^{\mathord{\mathbb R}}=\exp({\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}})$. Suppose that $v\in V$ has a trivial stable subgroup in $T$ and let $X\subset T^{\mathord{\mathbb R}} v$ be finite. The hull of $Y=TX$ admits a simple description. If $X=\{v\}$, then $\wh{Y}=\wh{Tv}$ is the closure of $T\exp(C_T)v$, where $C_T$ is a cone in ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$. If $T^{\mathord{\mathbb C}}$ is closed, then $\wh Y=T\exp(Q_X)v$, where $Q_X\subseteq{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$ is a convex polytope (the convex hull of the inverse image of $X$ for the mapping $\xi\to\exp(\xi)v$, $\xi\in{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$). Any segment in $Q_X$ corresponds to an analytic strip or an annulus in $\wh Y$. In general, $\wh Y$ is the union of $\wh {Tu}$, where $u$ runs over $\exp(Q_X)v$. Also, $\wh Y$ is distinguished in $\mathop{\mbox{\rm clos}} T^{\mathord{\mathbb C}} v$ by a finite family of monomial inequalities of the type \begin{eqnarray}\label{moine} \abs{z_1}^{s_1}\dots\abs{z_n}^{s_n}\leq c, \end{eqnarray} where $c\geq0$ and $s=(s_1,\dots,s_n)\in{\mathord{\mathbb R}}^n$ depend on $v$ and $X$. Vectors $s$ correspond to normals of faces of $C_T+Q_X$.
Thus, the problem of determination of $\wh{Gv}$ is not difficult if $Gv\subset T^{\mathord{\mathbb C}} v$. The latter is equivalent to the assumption that the complex orbit $G^{\mathord{\mathbb C}} v$ is connected. In Example~\ref{maico}, we give a construction for orbits which satisfy this condition; here is a sketch. The group $G=FT$ acts on the space $V=C(K)$, where $K$ is a finite $F$-invariant subset of ${\mathord{\mathfrak{t}}}^*$: $F$ acts naturally on $C(K)$, ${\mathord{\mathfrak{t}}}={\mathord{\mathfrak{t}}}^{**}$ is naturally embedded into $C(K)$, and $T=\exp({\mathord{\mathfrak{t}}})$ acts on $C(K)$ by multiplication. If $v\in C(K)$ is an $F$-invariant function, then $Gv\subset T^{\mathord{\mathbb C}} v$. According to Theorem~\ref{isase}, each connected complex orbit can be realized in this way. Further, we describe pairs $(V,G)$ such that \begin{eqnarray}\label{genev} Gv\subset T^{\mathord{\mathbb C}} v\quad\mbox{for a generic}\quad v\in V. \end{eqnarray} By Theorem~\ref{charf}, under the additional assumption that the complex linear span of $T^{\mathord{\mathbb C}} v$ coincides with $V$, this happens if and only if the group $G^{\mathord{\mathbb C}} Z$, where $Z$ is the centralizer of $G$ in $\mathop{\mbox{\rm GL}}\nolimits(V)$, has an open orbit in $V$. There are two extreme cases: (A) $Z\subseteq G^{\mathord{\mathbb C}}$; (B) $G$ has a finite center. An example for (A) is the group $G=S_n{\mathord{\mathbb T}}^n$ acting in ${\mathord{\mathbb C}}^n$, where ${\mathord{\mathbb T}}^n$ is the torus of all diagonal matrices in $\mathop{\mathrm{U}}\nolimits(n)$ and $S_n$ is the group of all permutations of coordinates. Replacing ${\mathord{\mathbb T}}^n$ with $\mathop{\mathrm{SU}}\nolimits(n)\cap{\mathord{\mathbb T}}^n$, we get an example for (B). Example~\ref{maict} contains a construction for pairs $(V,G)$ that satisfy (\ref{genev}). Theorem~\ref{asexa} states that the construction is a universal one. 
In Theorem~\ref{huini}, we determine pairs which satisfy (\ref{genev}) and the following condition:
\begin{eqnarray}\label{coxet} \mbox{vectors}\ s\ \mbox{in}\ (\ref{moine})\ \mbox{are independent of}\ v. \end{eqnarray}
The paper also contains a description of hulls $\wh{Gv}$ for $G=\mathop{\mathrm{Aut}}\nolimits_0(D)$, where $D$ is a bounded symmetric domain in the canonical realization and $\mathop{\mathrm{Aut}}\nolimits_0(D)$ is the stable subgroup of zero, which coincides with the group of all linear automorphisms of $D$. These hulls have already been described: the final step was done in paper \cite{Ka}, which essentially used \cite{KZ}, partial results appear in \cite{Sa} and \cite{FB}. Most of them use the technique of Jordan triples and Jordan algebras. We use Lie theory, in particular, an explicit construction of paper \cite{Wo} for a maximal abelian subspace ${\mathord{\mathfrak{a}}}$. A compact group acting in a Euclidean space is called {\it polar} if there exists a subspace (a {\it Cartan subspace}) such that each orbit meets it orthogonally. The group $G$ is polar in the ambient linear space ${\mathord{\mathfrak{d}}}$, and ${\mathord{\mathfrak{a}}}$ is the Cartan subspace for $G$. Real polar representations are classified in paper \cite{Da}; they are orbit equivalent (i.e., have the same orbits) to isotropy representations of Riemannian symmetric spaces. If $D$ is a polydisc ${\mathord{\mathbb D}}^n\subset{\mathord{\mathbb C}}^n$, where ${\mathord{\mathbb D}}$ is the unit disc in ${\mathord{\mathbb C}}$, then $G=S_n{\mathord{\mathbb T}}^n$; the polynomial hulls $\wh{Gv}$ are determined by the inequalities \begin{eqnarray}\label{mukin} \mu_k(z)\leq\mu_k(v), \end{eqnarray} where $k=1,\dots,n$ and $\mu_k$ are defined by \begin{eqnarray}\label{mukde} \mu_k(z)=\max\{\abs{{z_{{\mathord{\sigma}}(1)}}\dots{z_{{\mathord{\sigma}}(k)}}}:\,{\mathord{\sigma}}\in S_n\}. \end{eqnarray} The general case can be reduced to this one in the following way. 
Any bounded symmetric domain $D\subset{\mathord{\mathfrak{d}}}$ of rank $n$ admits an equivariant embedding of ${\mathord{\mathbb C}}^n$ to ${\mathord{\mathfrak{d}}}$, which induces an embedding of ${\mathord{\mathbb D}}^n$ to $D$, such that ${\mathord{\mathbb R}}^n\subset{\mathord{\mathbb C}}^n$ is the maximal abelian subspace ${\mathord{\mathfrak{a}}}$, and, for any $v\in{\mathord{\mathfrak{a}}}\,$, the hull of $\mathop{\mathrm{Aut}}\nolimits_0(D)v$ is the orbit of the hull of $\mathop{\mathrm{Aut}}\nolimits_0({\mathord{\mathbb D}}^n)v$. Each $\mu_k(z)$ has a unique continuation to a $K$-invariant function on ${\mathord{\mathfrak{d}}}$. The extended functions determine hulls by the same inequalities. Moreover, they are plurisubharmonic and can be treated as products of singular values of $z\in{\mathord{\mathfrak{d}}}$ or as norms of exterior powers of adjoint operators in suitable spaces. The subsystem of long roots of the restricted root system (i.e., the root system for ${\mathord{\mathfrak{a}}}$) has type $nA_1$; this defines the above embedding ${\mathord{\mathbb C}}^n\to{\mathord{\mathfrak{d}}}$. Furthermore, this makes it possible to determine hulls in terms of the adjoint representation (Theorem~\ref{last}). Thus, there is no need to consider different types of domains separately.
The reduction to the case of a torus extended by a finite group, which is described above, is contained in Section~\ref{isohe} (in papers \cite{Ka}, \cite{Sa}, the problem is also reduced to this case by another method). It does not use essentially the results of the previous sections (only Proposition~\ref{autdn}, in proof of Theorem~\ref{last}). These extensions satisfy conditions (\ref{genev}) and (\ref{coxet}); in addition, they possess the property that the complexified groups have open orbits. According to Theorem~\ref{huini}, any group with these properties is the product of groups $S_n{\mathord{\mathbb T}}^n$ acting in ${\mathord{\mathbb C}}^n$; it admits a natural realization as a group of automorphisms of a bounded symmetric domain (Corollary~\ref{conop}).
The following simple examples illustrate the case $Gv\not\subseteq T^{\mathord{\mathbb C}} v$ and show that condition~(\ref{genev}) is essential. Let $G=S_n{\mathord{\mathbb T}}^n$, and let ${\mathord{\epsilon}}_1,\dots,{\mathord{\epsilon}}_n$ be the standard base in ${\mathord{\mathbb C}}^n$. Then $\wh{G{\mathord{\epsilon}}_1}$ is the closure of the union of discs ${\mathord{\mathbb D}}{\mathord{\epsilon}}_k$, $k=1,\dots,n$. Set $H=S_n{\mathord{\mathbb T}}$, where ${\mathord{\mathbb T}}$ acts by $z\to e^{it}z$, $t\in{\mathord{\mathbb R}}$, $z\in{\mathord{\mathbb C}}^n$. Then $\wh{H{\mathord{\epsilon}}_1}=\wh{G{\mathord{\epsilon}}_1}$. For $v={\mathord{\epsilon}}_1+{\mathord{\epsilon}}_2$, $\wh{Gv}$ is the closure of the union of $n\choose2$ bidiscs but ${\mathord{\mathbb T}}^n$ contains no proper torus $T$ such that $\wh{Gv}=\wh{Hv}$ for $H=S_nT$. However, for any subgroup $F\subseteq S_n$ which acts transitively on 2-sets and $H=F{\mathord{\mathbb T}}^n$ we have $\wh{Gv}=\wh{Hv}$.
\section{Preliminaries} We keep the notation of Introduction, in particular, (\ref{defph}) and (\ref{mukde}). Linear spaces are supposed to be finite dimensional and complex unless the contrary is explicitly stated. "Generic" means "in some open dense subset". Throughout the paper, we use the following notation: \begin{itemize} \item[] ${\mathord{\mathbb D}}$ and ${\mathord{\mathbb T}}$ are the open unit disc and the unit circle in ${\mathord{\mathbb C}}$, respectively; \item[] $V$ denotes a complex linear space (except for Section~\ref{isohe}); \item[] if $V$ is equipped with a linear base identifying it with ${\mathord{\mathbb C}}^n$, then ${\mathord{\mathbb T}}^n$ is the group of all diagonal unitary transformations; \item[] ${\mathord{\mathbb Z}}_2^n$ consists of all transformations in ${\mathord{\mathbb T}}^n$ with eigenvalues $\pm1$; \item[] ${\mathord{\epsilon}}_1,\dots,{\mathord{\epsilon}}_n$ is the standard base in ${\mathord{\mathbb C}}^n$ and ${\mathord{\mathbb R}}^n$; \item[] ${\mathord{\mathbb R}}^n_+$ is the set of vectors in ${\mathord{\mathbb R}}^n$ with positive entries; \item[] $S_K$ denotes the group of all permutations of a finite set $K$; if $K=\{1,\dots,n\}$, then $S_K=S_n$; \item[] $C(K)$ is the algebra of all complex-valued functions on $K$; \item[] ${\mathord{\bf 1}}$ is the identity of $C(K)$; \item[] $G\subset\mathop{\mbox{\rm GL}}\nolimits(V)$ is a compact group whose identity component is a torus $T$ (except for Section~\ref{isohe}); \item[] ${\mathord{\mathfrak{t}}}\subset{\mathord{\mathfrak{g}}}\mathord{\mathfrak{l}}(V)$ is the Lie algebra of $T$, ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}=i{\mathord{\mathfrak{t}}}$, ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}}={\mathord{\mathfrak{t}}}+{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$; \item[] $T^{\mathord{\mathbb R}}=\exp({\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}})$, $T^{\mathord{\mathbb C}}=\exp({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}})$; \item[] ${\mathord{\mathbb 
C}}^*={\mathord{\mathbb T}}^{\mathord{\mathbb C}}={\mathord{\mathbb C}}\setminus \{0\}$; \item[] $\check T=\mathop{\mbox{\rm{Hom}}}(T,{\mathord{\mathbb T}})$ is the dual group to $T$; \item[] $\mathop{\mathrm{Aut}}\nolimits(D)$ is the group of all holomorphic automorphisms of a domain $D\subset V$, $\mathop{\mathrm{Aut}}\nolimits_0(D)=\mathop{\mathrm{Aut}}\nolimits(D)\cap\mathop{\mbox{\rm GL}}\nolimits(V)$; \item[] $\mathop{\mathrm{cone}}\nolimits X$ denotes the least convex cone which contains the set $X$;\item[] $\mathop{\mathrm{conv}}\nolimits X$ is the convex hull of $X$; \item[] $\mathop{\mbox{\rm clos}} X$ is the closure of $X$; \item[] $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb F}} X$ is the linear span of $X$ over the field ${\mathord{\mathbb F}}={\mathord{\mathbb C}},{\mathord{\mathbb R}},{\mathord{\mathbb Q}}$.
\end{itemize} Clearly, $\exp$ is bijective on ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$ and $T^{\mathord{\mathbb R}}\cong T^{\mathord{\mathbb C}}/T$. The differentiating at the identity $e$ defines an embedding of $\check T$ into the dual space ${\mathord{\mathfrak{t}}}^*$: $\chi\to-i d_e\chi$, where $\chi\in\check T$. This is a lattice in the vector group ${\mathord{\mathfrak{t}}}^*$, moreover, $T\cong{\mathord{\mathfrak{t}}}/L$, where $L$ is the dual lattice to $\check T$ in ${\mathord{\mathfrak{t}}}$. For $\chi\in\check T$, let $$V_\chi=\{v\in V:\,gv=\chi(g)v\ \mbox{\rm for all}\ g\in T\}$$ be the corresponding isotypical component of $V$. Then \begin{eqnarray}\label{decv} V=\sum\nolimits_{\chi\in\check T}\oplus\, V_\chi. \end{eqnarray} We assume that $V$ is equipped with a $G$-invariant inner product $\scal{\ }{\ }$. Then decomposition (\ref{decv}) is orthogonal. Let $\mathop{\mathrm{spec}}\nolimits(v)$ denote the spectrum of $v\in V$ (the set of $\chi\in\check T$ such that the $\chi$-component of $v$ is nonzero); for $X\subseteq V$, $$\mathop{\mathrm{spec}}\nolimits(X)=\cup_{x\in X}\mathop{\mathrm{spec}}\nolimits(x).$$ We say that $T$ has a {\it simple spectrum} if \begin{eqnarray}\label{sispe} \dim V_\chi\leq1 \end{eqnarray} for all $\chi\in\check T$. If (\ref{sispe}) is true, then there exists a unique (up to scaling factors) orthogonal base in $V$ which agree with (\ref{decv}) and a unique maximal torus ${\mathord{\mathbb T}}^n$ in $\mathop{\mbox{\rm GL}}\nolimits(V)$ which contains $T$. In what follows, we assume that (\ref{sispe}) holds; we shall see in the next section that such assumption is not restrictive. Thus, we may fix an identification \begin{eqnarray}\label{vcnck} V={\mathord{\mathbb C}}^n=C(K), \end{eqnarray} where $K=\{1,\dots,n\}$. If $F$ is a subgroup of $S_K$, then $C(K)^F$ denotes the set of all $F$-invariant functions on $K$; clearly, ${\mathord{\bf 1}}\in C(K)^F$. 
Further, ${(\bbC^*)^n}$ is the multiplicative group of all invertible functions in $C(K)$, ${\mathord{\mathbb T}}^n$ consists of functions with values in ${\mathord{\mathbb T}}$, and $({\mathord{\mathbb T}}^n)^{\mathord{\mathbb C}}={(\bbC^*)^n}$. The Lie algebra of ${\mathord{\mathbb T}}^n$ is realized as $i{\mathord{\mathbb R}}^n\subset{\mathord{\mathbb C}}^n$. The embedding $T\to{\mathord{\mathbb T}}^n$ induces embeddings of the Lie algebra and the fundamental group: ${\mathord{\mathfrak{t}}}\to i{\mathord{\mathbb R}}^n$, $\pi_1(T)\to i{\mathord{\mathbb Z}}^n\subset i{\mathord{\mathbb R}}^n$, respectively. Let ${\mathord{\Gamma}}$ be the image of $\pi_1(T)$. Then $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}}{\mathord{\Gamma}}={\mathord{\mathfrak{t}}}$; moreover, ${\mathord{\mathfrak{t}}}\cap i{\mathord{\mathbb Z}}^n={\mathord{\Gamma}}$ and ${\mathord{\mathfrak{t}}}/{\mathord{\Gamma}}=T$. The dual mapping $\check{\mathord{\mathbb T}}^n\to\check T$, which is defined by the restriction of characters $e^{-i\scal{x}{y}}$, where $x\in i{\mathord{\mathbb Z}}^n$, to ${\mathord{\mathfrak{t}}}$, is the orthogonal projection $\pi_{\mathord{\mathfrak{t}}}:\,i{\mathord{\mathbb Z}}^n\to{\mathord{\mathfrak{t}}}$. Thus, ${\mathord{\Gamma}}$ is a subgroup of finite index in $\check T=\pi_{\mathord{\mathfrak{t}}} i{\mathord{\mathbb Z}}^n$. Vectors in $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb Q}}\check T$ are called {\it rational}. The image of ${\mathord{\mathfrak{t}}}$ in $i{\mathord{\mathbb R}}^n$ can be distinguished by linear equations with integer coefficients. Hence, $\mathop{\mbox{\rm clos}}(T^{\mathord{\mathbb C}} v)$, for a generic $v\in V$, is the set of all solutions to a finite number of equalities with holomorphic monomials. Thus,
$Y\subset T^{\mathord{\mathbb C}} v$ implies
$\wh{Y}\subset\mathop{\mbox{\rm clos}}(T^{\mathord{\mathbb C}} v)$.
Set \begin{eqnarray}\label{defct} C_T={\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}\cap\mathop{\mbox{\rm clos}}(-{\mathord{\mathbb R}}^n_+), \end{eqnarray} The cone $iC_T$ is dual to $\mathop{\mathrm{cone}}\nolimits(\mathop{\mathrm{spec}}\nolimits V)\subseteq{\mathord{\mathfrak{t}}}^*\subseteq i{\mathord{\mathbb R}}^n$. If $-\xi\in\mathop{\mbox{\rm clos}}({\mathord{\mathbb R}}^n_+)$, then $\iota=\lim_{t\to+\infty}\exp(t\xi)$ is an idempotent in $C(K)$ such that the multiplication by the complementary idempotent ${\mathord{\bf 1}}-\iota$ is a projection onto $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}}(\mathop{\mathrm{spec}}\nolimits(\xi))$. Set \begin{eqnarray}\label{defit} I_T=\{\lim\nolimits_{t\to+\infty}\exp(t\xi):\,\xi\in C_T\}. \end{eqnarray} Clearly, $I_T$ is finite and contains ${\mathord{\bf 1}}$.
\begin{lemma}\label{cloct} The closure of $\exp(C_T)$ is equal to $I_T\exp(C_T)$. \end{lemma} \begin{proof} Due to the evident inclusion $\mathop{\mbox{\rm clos}}(\exp(C_T))\supseteq I_T\exp(C_T)$, it is sufficient to prove that the set $S_T=I_T\exp(C_T)$ is closed. Clearly, $S_T$ is an abelian semigroup. The cone $C_T$ is polyhedral; hence, it is finitely generated: \begin{eqnarray*} C_T=\mathop{\mathrm{cone}}\nolimits\{\xi_1,\dots,\xi_m\}, \end{eqnarray*} where ${\mathord{\mathbb R}}^+\xi_k$ are the extreme rays of $C_T$, $k=1,\dots,m$. Obviously, $I_T$ is a finite semigroup, which is generated by the idempotents $\lim_{t\to+\infty}\exp(t\xi_k)$. Thus, the correspondence $(e^{-t_1},\dots,e^{-t_m})\to\exp(t_1\xi_1+\dots+t_m\xi_m)$ defines a mapping of $(0,1]^m$ onto $\exp(C_T)$, which continuously extends to $[0,1]^m$. It follows that its image is closed and coincides with $S_T$. \end{proof} Note that there is a natural one-to-one correspondence between $I_T$ and the set of faces of $C_T$.
\section{Hulls of finite unions of $T$-orbits in a $T^{\mathord{\mathbb C}}$-orbit} Let $v\in T^{\mathord{\mathbb C}}$. If $v=\sum_{\chi\in\check T}v_\chi$, where $v_\chi\in V_\chi$, $g\in T^{\mathord{\mathbb C}}$, and $u=gv$, then $u=\sum_{\chi\in\check T}\chi(g)v_\chi$. Since $\chi(g)\neq0$ for all $g\in T^{\mathord{\mathbb C}}$ and $\chi\in\check T$, we get \begin{eqnarray} &u\in T^{\mathord{\mathbb C}} v\quad\Longrightarrow\quad\mathop{\mathrm{spec}}\nolimits(u)=\mathop{\mathrm{spec}}\nolimits(v);\label{speceq}\\ &\dim\left(V_\chi\cap\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}} Tv\right)\leq1\quad\mbox{\rm for all}\quad v\in V\quad\mbox{\rm and}\quad\chi\in\check T.\label{simsp} \end{eqnarray} Thus, the assumption that $T$ has a simple spectrum in $V$ is not restrictive in the problem of description of polynomial hulls of orbits $Gv$ such that $Gv\subset T^{\mathord{\mathbb C}} v$. Clearly, ${\mathord{\mathbb D}}^n=\wh{{\mathord{\mathbb T}}^n}$ in $\mathop{\mbox{\rm L}}\nolimits(V)$. For each $x\in C_T$ and any polynomial $p$ on $\mathop{\mbox{\rm L}}\nolimits(V)$, the holomorphic function $f({\mathord{\zeta}})=p(\exp({\mathord{\zeta}} x))$ is bounded in the halfplane $\Pi:\,\mathop{\mathrm{Re}}\nolimits {\mathord{\zeta}}\geq0$. Hence, $\exp(\Pi x)$ is contained in $\wh T$. On the other hand, if $z\in {\mathord{\mathbb D}}^n\cap T^{\mathord{\mathbb C}}$, then $z=t\exp(x)$ for some $t\in T$ and $x\in C_T$ (the polar decomposition). By Lemma~\ref{cloct}, \begin{eqnarray*} \wh T=\mathop{\mbox{\rm clos}}({\mathord{\mathbb D}}^n\cap T^{\mathord{\mathbb C}})=T\mathop{\mbox{\rm clos}}(\exp (C_T))=TI_T\exp(C_T). \end{eqnarray*} If $v\in{(\bbC^*)^n}$, then ${(\bbC^*)^n} v={(\bbC^*)^n}$, and the mapping $z\to zv$ is a linear nondegenerate transformation of ${\mathord{\mathbb C}}^n$. Therefore, \begin{eqnarray}\label{hltgv} v\in {(\bbC^*)^n}\quad\Longrightarrow\quad\wh{Tv}=\wh Tv=TI_T\exp (C_T)v. 
\end{eqnarray} For an arbitrary $v\in V={\mathord{\mathbb C}}^n$, set \begin{eqnarray*} C_T^v=\{\xi\in{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}:\,\xi_k\leq0\ \mbox{if}\ v_k\neq0,\ k=1,\dots,n\}. \end{eqnarray*} Applying (\ref{hltgv}) to $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}}(\mathop{\mathrm{spec}}\nolimits(v))={\mathord{\mathbb C}}^n v$, we get \begin{eqnarray}\label{hultv} \wh{Tv}=T\mathop{\mbox{\rm clos}}(\exp(C_T^v)v). \end{eqnarray} Clearly, $C_T^v$ depends only on $\mathop{\mathrm{spec}}\nolimits(v)$. For $s\in{\mathord{\mathbb R}}^n$ and $z\in({\mathord{\mathbb C}}^*)^n$, set \begin{eqnarray*}
\nu_s(z)=\prod\nolimits_{k=1}^n|z_k|^{s_k}. \end{eqnarray*} If $s_k\geq0$, then the $k$-th factor in $({\mathord{\mathbb C}}^*)^n$ can be replaced with ${\mathord{\mathbb C}}$ (i.e., $\nu_s$ continuously extends to this product).
It is well known that for any holomorphically convex $T$-invariant set $U\subseteq T^{\mathord{\mathbb C}}$, the set $\log(U\cap T^{\mathord{\mathbb R}})\subseteq{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$ is convex. In particular, this is true for sets of $g\in T^{\mathord{\mathbb C}}$ such that $gv\in\wh{TX}$, where $X\subset T^{\mathord{\mathbb C}} v$, $v\in V$. Nevertheless, it is convenient to have an explicit construction of an analytic strip (or an annulus, if it is periodic) in a $T^{\mathord{\mathbb C}}$-orbit, which corresponds to a segment that joins two points in ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$; it is contained in the following lemma. Set \begin{eqnarray*} S=\{z\in{\mathord{\mathbb C}}:\,0\leq\mathop{\mathrm{Re}}\nolimits z\leq 1\}. \end{eqnarray*}
\begin{lemma}\label{strih} Let $v\in{\mathord{\mathbb C}}^n$ and $u\in T^{\mathord{\mathbb R}} v$. Then, there exists $\xi\in{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$ such that \begin{eqnarray}\label{anstdi} {\mathord{\lambda}}(z)=\exp(z\xi)v \end{eqnarray} is a holomorphic mapping ${\mathord{\lambda}}:\,S\to T^{\mathord{\mathbb C}} v$ which satisfies conditions \begin{eqnarray*} {\mathord{\lambda}}(\partial S)\subseteq Tv\cup Tu,\\ {\mathord{\lambda}}(0)=v,\quad{\mathord{\lambda}}(1)=u. \end{eqnarray*} If the stable subgroup of $v$ in $T^{\mathord{\mathbb R}}$ is trivial, then $\xi$ is unique. \end{lemma} \begin{proof} These properties hold for $\xi\in{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$ such that $\exp(\xi)v=u$; such a $\xi$ exists, since $\exp$ is a bijection ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}\to T^{\mathord{\mathbb R}}$. The last assertion is clear. \end{proof}
If $\xi\in C_T$, then (\ref{anstdi}) defines an analytic halfplane in $\wh{Tv}$; for ${\mathord{\Gamma}}$-rational $\xi$, ${\mathord{\lambda}}$ is periodic and defines an analytic disc in $\wh {Tv}$. Together with Lemma~\ref{strih} this gives a characterization of hulls for finite unions of $T$-orbits in $T^{\mathord{\mathbb C}}$. Suppose that $X\subset T^{\mathord{\mathbb R}} v$ is finite and the stable subgroup of $v$ in $T$ is trivial. Then, the inverse to the mapping $x\to\exp(x)v$ is well defined. Let us denote it by $\log_v$ and set \begin{eqnarray} &Q_X=\mathop{\mathrm{conv}}\nolimits(\log\nolimits_v X),\label{defqx}\\ &P_X=Q_X+C_T.\label{defpx} \end{eqnarray} The set $P_X$ is a convex polyhedron, which is unbounded if $C_T\neq0$. Hence, there exists a finite set ${\mathord{\mathfrak{N}}}_X\subset{\mathord{\mathbb R}}^n$ and, for each $s\in{\mathord{\mathfrak{N}}}_X$, real numbers $c_s$ such that \begin{eqnarray}\label{depx} P_X=\{x\in{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}:\,\scal{x}{s}\leq c_s\ \mbox{\rm for all}\ s\in{\mathord{\mathfrak{N}}}_X\}. \end{eqnarray} The set ${\mathord{\mathfrak{N}}}_X$ consists of vectors orthogonal to faces of $P_X$, whose projections into $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}} P_X$ look outside of it; clearly, it is not unique in general. \begin{proposition}\label{hulfi} Let $v\in {(\bbC^*)^n}$. Suppose that $Y\subset T^{\mathord{\mathbb C}} v$ is a finite union of $T$-orbits (including $Tv$), and set $X=T^{\mathord{\mathbb R}} v\cap Y$. Then $X$ is finite and \begin{eqnarray} \wh{Y}&=&\mathop{\mbox{\rm clos}}\left(T\exp(P_X)v\right)\label{grofu}\\ &=& \mathop{\mbox{\rm clos}}\left\{z\in{(\bbC^*)^n}:\,\nu_s(z)\leq e^{c_s}\nu_s(v), \ s\in{\mathord{\mathfrak{N}}}_X\right\}\label{inehu}\\ &=& \bigcup\nolimits_{u\in\exp(Q_X)v}\wh{Tu}\label{cuphu}\\ &=&T\exp(P_X)I_Tv,\label{addfa} \end{eqnarray} where $Q_X,P_X,{\mathord{\mathfrak{N}}}_X$ are as above and $I_T$ is defined in {\rm(\ref{defit})}. 
\end{proposition} \begin{proof} Due to the polar decomposition, the set $Tu\cap T^{\mathord{\mathbb R}} v$, for each $u\in T^{\mathord{\mathbb C}} v$, is nonvoid and consists of a single point. Hence, $X$ is finite and $Y=TX$. The inclusion $(\exp Q_X)v\subseteq\wh{Y}$ follows from Lemma~\ref{strih} and the Phragm\'en--Lindel\"of Principle. The inclusion $\exp(C_T)u\subseteq\wh{Tu}$ is true for any $u\in {\mathord{\mathbb C}}^n$. Since it holds for all $u\in T\exp(Q_X)v$, the left-hand side of (\ref{grofu}) includes the right-hand side. If $z=\exp(\xi)v$, where $\xi\in{\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}}$, then $z_k=e^{\xi_k}v_k$, $k=1,\dots,n$; due to (\ref{depx}), this implies that the right-hand side of (\ref{grofu}) coincides with (\ref{inehu}). According to (\ref{hltgv}), the right-hand side of (\ref{grofu}) and (\ref{cuphu}) intersect $T^{\mathord{\mathbb C}} v$ by the set \begin{eqnarray*} T\exp(P_X)v=\exp(Q_X)T\exp(C_T)v; \end{eqnarray*} clearly, it is dense in (\ref{cuphu}). Since $Q_X$ is compact, the set (\ref{cuphu}) is closed. The compactness of $Q_X$, the above equality, and Lemma~\ref{cloct} imply that (\ref{addfa}) is closed; hence, it is the same as the right-hand side of (\ref{grofu}).
Each of the sets (\ref{grofu})--(\ref{addfa}) includes $Y$. Thus, it remains to prove that (\ref{addfa}) is polynomially convex. If $x\in{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}\setminus P_X$, then there exists $s\in{\mathord{\mathbb R}}^n$ such that \begin{eqnarray}\label{sepa} \sup\{\scal{y}{s}:\,y\in P_X\}<\scal{x}{s}. \end{eqnarray} Since $Q_X$ is compact, the linear functional on ${\mathord{\mathfrak{t}}}$ in the right-hand side of (\ref{sepa}) must be nonpositive on $C_T$. According to (\ref{defct}), we may assume that $s\in\mathop{\mbox{\rm clos}}{\mathord{\mathbb R}}^n_+$. It follows that (\ref{sepa}) holds in a neighborhood of $s$ in $\mathop{\mbox{\rm clos}}{\mathord{\mathbb R}}^n_+$. Thus, $s$ can be assumed rational (hence, integer) with strictly positive entries. Then,
$p(z)=z_1^{s_1}\dots z_n^{s_n}$ is a holomorphic polynomial such that $|p\,|$ separates $\exp(x)v$ and $T\mathop{\mbox{\rm clos}}(\exp(P_X)v)$. Therefore, \begin{eqnarray*} \wh Y\cap T^{\mathord{\mathbb C}} v=T\exp(P_X)v. \end{eqnarray*} For any $\iota \in I_T$, the projection $z\to \iota z$ commutes with $T$. This makes it possible to apply the above arguments to the vector $\iota v$, the set $\iota X$, and to the restriction of $T$ to $\iota {\mathord{\mathbb C}}^n$. Consequently, \begin{eqnarray}\label{iotor} \wh{\iota Y}\cap T^{\mathord{\mathbb C}} \iota v=T\exp(P_{\iota X})\iota v = \iota T\exp(P_{X})v \end{eqnarray} (clearly, $\iota\exp(P_{X}) v=\exp(P_{\iota X})\iota v$). By (\ref{defit}), $\iota Y\subseteq\wh Y$, hence, $\wh{\iota Y}\subseteq\wh Y$; on the other hand, $\iota \wh Y\subseteq\wh{\iota Y}$ since $p\circ\iota$ is a polynomial on ${\mathord{\mathbb C}}^n$ for any polynomial $p\,$ on $\iota{\mathord{\mathbb C}}^n$. Thus, $\iota \wh Y=\wh{\iota Y}=\wh Y\cap\iota{\mathord{\mathbb C}}^n$. Together with (\ref{iotor}), this implies the polynomial convexity of (\ref{addfa}). \end{proof} If $T={\mathord{\mathbb T}}^n$, then Proposition~\ref{hulfi} follows from the well-known characterization of polynomially convex Reinhardt domains. \begin{corollary}\label{noclo} For any $v\in{(\bbC^*)^n}$, the orbit $T^{\mathord{\mathbb C}} v$ is closed in ${\mathord{\mathbb C}}^n$ if and only if $\,Tv$ is polynomially convex, and this is equivalent to $C_T=0$. Then, $\wh{Y}=T\exp(Q_X)v$ for all $Y,X$ as above. \end{corollary} \begin{proof} The orbit $T^{\mathord{\mathbb C}} v$ is closed if and only if the convex hull of $\mathop{\mathrm{spec}}\nolimits(v)=\mathop{\mathrm{spec}}\nolimits({\mathord{\mathbb C}}^n)$ contains $0$ in its relative interior (see, for example, \cite[Proposition~6.15]{VP}). 
Since $T\subseteq\mathop{\mbox{\rm GL}}\nolimits(n,{\mathord{\mathbb C}})$, the set $\mathop{\mathrm{spec}}\nolimits({\mathord{\mathbb C}}^n)$ is generating in ${\mathord{\mathfrak{t}}}^*$. Hence, $T^{\mathord{\mathbb C}} v$ is closed if and only if $C_T=0$; by (\ref{hultv}), this is equivalent to $\wh{Tv}=Tv$. Then, $\wh{Y}=T\exp(Q_X)v$ by (\ref{grofu}) and (\ref{speceq}). \end{proof} There is a version of the first assertion for an arbitrary compact linear group $G$: a $G^{\mathord{\mathbb C}}$-orbit is closed if and only if it contains a polynomially convex $G$-orbit (\cite[Theorem~1 and Theorem~5]{GL}). For a torus $T$, all $T$-orbits in $T^{\mathord{\mathbb C}} v$ are simultaneously polynomially convex or non-convex, but this is not true if $G$ is not abelian.
\section{Finite extensions of $T$ that keep a $T^{\mathord{\mathbb C}}$-orbit} In this section, we consider the case where the set $X$ defined in the previous section is an orbit of a finite group $F$ which normalizes $T$ and keeps the $T^{\mathord{\mathbb C}}$-orbit. We assume that $T\subseteq G$, $T$ is a torus, $G$ is a subgroup of $\mathop{\mbox{\rm GL}}\nolimits(V)$, $F$ is a finite subgroup of $G$, and \begin{eqnarray} &G=FT=TF,\quad F\cong G/T,\label{injec}\\ &Gv\subseteq T^{\mathord{\mathbb C}} v,\label{incor}\\ &v\in{(\bbC^*)^n}\subset{\mathord{\mathbb C}}^n=V.\label{vgene} \end{eqnarray} By (\ref{injec}), $T$ is normal in $G$. Clearly, (\ref{incor}) is equivalent to $Fv\subseteq T^{\mathord{\mathbb C}} v$ and to the connectedness of $G^{\mathord{\mathbb C}} v$. Here is an illustrative example. \begin{example}\label{autd2} Let $G=\mathop{\mathrm{Aut}}\nolimits_0({\mathord{\mathbb D}}^2)$ be the group of linear automorphisms of the bidisc ${\mathord{\mathbb D}}^2\subset{\mathord{\mathbb C}}^2$. Clearly, $G=FT$, where $F=S_2$ is generated by the transposition $\tau$ of the coordinates, $T={\mathord{\mathbb T}}^2$, $T^{\mathord{\mathbb C}}=({\mathord{\mathbb C}}^*)^2$, and $T^{\mathord{\mathbb C}} v=({\mathord{\mathbb C}}^*)^2$ for any $v$ that lies outside the coordinate lines. Thus, (\ref{incor}) holds for all $v\in({\mathord{\mathbb C}}^*)^2$ (however, (\ref{incor}) fails for any $v\neq0$ in ${\mathord{\mathbb C}}^2\setminus ({\mathord{\mathbb C}}^*)^2$). The hull $\wh{Gv}$ can be distinguished by the inequalities \begin{eqnarray}
&\max\{|z_1|,|z_2|\}\leq \max\{|v_1|,|v_2|\},\label{ined2}\\
&|z_1z_2|\leq|v_1v_2|.\label{ined2a} \end{eqnarray} Clearly, (\ref{ined2}) and (\ref{ined2a}) define a polynomially convex set. Let $z_1,z_2>0$ (a generic $T$-orbit evidently contains such a point $z$). Then, $z$ and $\tau z$ can be joined by an analytic strip with the boundary in $Tz\cup T\tau z$: \begin{eqnarray*} {\mathord{\lambda}}_z(s)=(z_1^{1-s}z_2^{s},z_1^{s}z_2^{1-s}), \quad s\in S. \end{eqnarray*} Set $q=\ln\frac{z_1}{z_2}$ and let $z_1>z_2$. Then, the strip can be written in the form \begin{eqnarray*} {\mathord{\lambda}}_z(s)=(e^{-s}z_1,e^{s}z_2),\quad 0\leq\mathop{\mathrm{Re}}\nolimits s\leq q. \end{eqnarray*} It is periodic with the period $2\pi i$ and defines a $\tau$-invariant annulus in $\wh{Gv}$ with $\tau$-fixed points $(\sqrt{z_1z_2},\sqrt{z_1z_2})$ and $(-\sqrt{z_1z_2},-\sqrt{z_1z_2})$. As $z_2\to0$, the annulus tends to a couple of discs: $(e^{-s}z_1,0)$ and $(0,e^{-s}z_1)$, where $\mathop{\mathrm{Re}}\nolimits s>0$, $0\leq\mathop{\mathrm{Im}}\nolimits s\leq2\pi$ (the circle $\mathop{\mathrm{Re}}\nolimits s=\frac{q}{2}$, $0\leq\mathop{\mathrm{Im}}\nolimits s\leq 2\pi$ collapses to zero). Let $z\in \wh{Gv}\cap{\mathord{\mathbb R}}^2$. Then $\wh{Gv}$ contains a bidisc ${\mathord{\mathbb D}}^2z$. It intersects ${\mathord{\mathbb R}}^2$ by a rectangle, which is symmetric with respect to the coordinate axes. If $z$ lies on an axis, then the rectangle degenerates into a segment. Let $v_1> v_2>0$. The union of these rectangles with vertices in the set $Q$ of real points of the annulus, which joins $v$ and $\tau v$, is a curvilinear octagon. It degenerates into a pair of segments if $v_2=0$ and into a square if $v_1=v_2$ (see \cite[Fig. 2]{KZ} for the 3-dimensional case). In the logarithmic coordinates in the first quadrant, $Q$ is a segment. 
Also, note that all nontrivial $T^{\mathord{\mathbb C}}$-orbits are not closed.\qed \end{example} In \cite{Bj}, Bj\"ork found a typical situation where analytic annuli appear in the maximal ideal space $\mathord{\mathcal{M}}_A$ of a commutative Banach algebra $A$ which admits a nontrivial action of ${\mathord{\mathbb T}}$ by automorphisms: this happens if $T$-invariant functions on $\mathord{\mathcal{M}}_A$ do not separate distinct ${\mathord{\mathbb T}}$-orbits. In \cite{GL}, it was noted that analytic strips and/or annuli appear in $\wh{Gv}$ if the stable subgroup of $v$ in $G^{\mathord{\mathbb C}}$ does not coincide with the complexification of the stable subgroup of $v$ in $G$. \begin{proposition}\label{autdn} The hulls $\wh{Gv}$ for orbits of $G=\mathop{\mathrm{Aut}}\nolimits_0({\mathord{\mathbb D}}^n)=S_n{\mathord{\mathbb T}}^n$ are distinguished by inequalities {\rm(\ref{mukin})}, where $\mu_k$ are defined by {\rm(\ref{mukde})}. \end{proposition} \begin{proof} The approximation by decreasing sequences of hulls makes it possible to reduce the proposition to the case of a generic $v$ in {\rm(\ref{mukin})}. Then, applying to $v=(v_1,\dots,v_n)$ a suitable transformation in ${\mathord{\mathbb T}}^n$, we may assume that \begin{eqnarray}\label{gewey} v_1> v_2> \dots> v_n> 0. \end{eqnarray} Moreover, we may use Proposition~\ref{hulfi} with $X=S_n v$, $C_T=-\mathop{\mbox{\rm clos}}{\mathord{\mathbb R}}^n_+$ (we keep the notation of Proposition~\ref{hulfi}). Since $X$, $Q_X$, $C_T$, $P_X$, and $\mu_k$ are $S_n$-invariant, $S_n$ is transitive on $X$, by (\ref{mukde}), (\ref{mukin}), and (\ref{inehu}), it is sufficient to prove that the vectors $\xi_k=\sum_{r=1}^k{\mathord{\epsilon}}_r$, $k=1,\dots,n$, correspond to the faces of $P_X$ that meet at $v$, are orthogonal to them, and look outside of $P_X$.
Set $\eta_1={\mathord{\epsilon}}_2-{\mathord{\epsilon}}_1, \dots,\eta_{n-1}={\mathord{\epsilon}}_{n}-{\mathord{\epsilon}}_{n-1},\eta_n=-{\mathord{\epsilon}}_n$. Then $\{-\eta_k\}_{k=1}^n$ is a base in ${\mathord{\mathbb R}}^n$, which is dual to the base $\{\xi_k\}_{k=1}^n$. We claim that the cone of the polyhedron $P_X$ at the vertex $v$ is generated by $\{\eta_k\}_{k=1}^n$. This implies the assertion above (note that both cones are simplicial). If $\tau\in S_n$ is a transposition $(k,j)$, then $v-\tau v=(v_k-v_j)({\mathord{\epsilon}}_k-{\mathord{\epsilon}}_j)$. If ${\mathord{\sigma}},{\mathord{\kappa}}\in S_n$ then $v-{\mathord{\sigma}}{\mathord{\kappa}} v=(v-{\mathord{\kappa}} v)+({\mathord{\kappa}} v-{\mathord{\sigma}}{\mathord{\kappa}} v)$. Furthermore, $S_n$ is generated by transpositions $(k,k+1)$, and $v_k-v_{k+1}>0$ by (\ref{gewey}), where $k=1,\dots,n-1$. Therefore, the vectors $\eta_1,\dots,\eta_{n-1}$ generate the cone of $Q_X$ at $v$. Since $-{\mathord{\epsilon}}_k=\sum_{r=0}^{k-1}\eta_{n-r}$ and $C_T$ is generated by $-{\mathord{\epsilon}}_k$, $k=1,\dots,n$, this proves the proposition. \end{proof}
Property (\ref{incor}) implies $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}} Gv=\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}} T^{\mathord{\mathbb C}} v$. Hence, we may assume that (\ref{sispe}) is valid. Then, a generic $\xi\in{\mathord{\mathfrak{t}}}$ has a simple spectrum. Any $f\in F$ permutes eigenvalues and eigenspaces. Thus, assuming (\ref{sispe}) and identifying $V$ with $C(K)$ in accordance with (\ref{vcnck}), we get that each element of $F$ is a composition of a permutation of $K$ and a multiplication by a function on $K$. Further, (\ref{vgene}) implies that the stable subgroup of $v$ in $T^{\mathord{\mathbb C}}$ is trivial. Hence, \begin{eqnarray*} T^{\mathord{\mathbb R}} v\cong T^{\mathord{\mathbb C}}/T\cong {\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}, \end{eqnarray*} where the identification of $T^{\mathord{\mathbb R}} v$ and ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$ is realized by $\xi\to\exp(\xi)v$, $\xi\in{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$. \begin{lemma}\label{fixtc} Let $G\subset\mathop{\mbox{\rm GL}}\nolimits(V)$, a subgroup $F\subseteq G$, and $v\in V$ satisfy (\ref{injec})--(\ref{vgene}). Then $T^{\mathord{\mathbb C}} v$ contains a $G$-invariant $T$-orbit. Moreover, there exists a mapping $f\to t_f$, $F\to T$, such that $\wt F=\{t_f f:\,f\in F\}$ is a subgroup of $G$ which has a fixed point in $T^{\mathord{\mathbb C}}$ and satisfies (\ref{injec})--(\ref{vgene}). \end{lemma} \begin{proof} The group $F$ naturally acts on $T^{\mathord{\mathbb R}} v\cong T^{\mathord{\mathbb C}}/T\cong{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$. Any $g\in F$ is a composition of ${\mathord{\sigma}}\in S_K$ and a multiplication by a function in $C(K)$. 
Since ${\mathord{\mathfrak{t}}}$ acts on $C(K)$ by multiplication by linear functions and ${\mathord{\sigma}}$ induces a linear transformation in ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}}$, the induced action of $F$ on ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$ is affine. Since $F$ is finite, it has a fixed point in ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$. Hence, $T^{\mathord{\mathbb C}} v$ contains a $G$-invariant $T$-orbit. Let us fix a point $u$ in it and define $t_f$ by $t_ffu=u$; the choice is unique due to (\ref{vgene}). Taken together with (\ref{injec}), this implies that $\wt F$ is a group, which obviously satisfies the lemma. \end{proof} According to Lemma~\ref{fixtc}, we may assume without loss of generality that \begin{eqnarray}\label{fixev} fv=v\quad\mbox{for all}\quad f\in F. \end{eqnarray} In the following example we give a construction (associated with a given finite group $F$) for orbits with property (\ref{incor}).
\begin{example}\label{maico} Let ${\mathord{\mathfrak{t}}}$ be a real linear space, ${\mathord{\mathfrak{t}}}^*$ be the dual space to ${\mathord{\mathfrak{t}}}$, $L$ be a lattice in ${\mathord{\mathfrak{t}}}$, and $L^*\subset{\mathord{\mathfrak{t}}}^*$ be the dual lattice to $L$. Set \begin{eqnarray*} {\mathord{\lambda}}_x(y)=y(x),\quad\mbox{where}\ x\in{\mathord{\mathfrak{t}}},\ y\in{\mathord{\mathfrak{t}}}^*. \end{eqnarray*} Let $K$ be a finite subset of $L^*$ that generates $L^*$ as a subgroup of the vector group ${\mathord{\mathfrak{t}}}^*$. Then \begin{eqnarray} &{\mathord{\mathfrak{t}}}^*=\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}} K,\label{spank}\\ &L=\{x\in{\mathord{\mathfrak{t}}}:\,{\mathord{\lambda}}_x(K)\subset{\mathord{\mathbb Z}}\}.\label{defz} \end{eqnarray} Further, let $F$ be a finite subgroup of $\mathop{\mbox{\rm GL}}\nolimits({\mathord{\mathfrak{t}}})$ which keeps $K$. Set $V=C(K)$. The mapping \begin{eqnarray}\label{defla}
{\mathord{\lambda}}:\,x\to i{\mathord{\lambda}}_x\big|_K \end{eqnarray} is an embedding ${\mathord{\mathfrak{t}}}\to V$, which has a natural extension to ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}}$. Set \begin{eqnarray}\label{defex} \exp(x)=e^{2\pi i{\mathord{\lambda}}_x}. \end{eqnarray} Clearly, $L=\ker\exp$. Hence, $\exp$ defines an embedding of $T={\mathord{\mathfrak{t}}}/L$ and $T^{\mathord{\mathbb C}}$ into the group ${(\bbC^*)^n}$: \begin{eqnarray*} T^{\mathord{\mathbb C}}=\exp({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}})\subseteq {(\bbC^*)^n}. \end{eqnarray*} The group $T^{\mathord{\mathbb C}}$ acts on $C(K)$ by multiplication. The inclusion $v\in C(K)^F$ is the same as (\ref{fixev}); it implies (\ref{incor}). Furthermore, if $v\in {(\bbC^*)^n}$, then \begin{eqnarray}\label{spatc} \mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}} Tv=V. \end{eqnarray} Indeed, the space $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}} T$ is a subalgebra of $C(K)$, which separates points of the finite set $K$. Hence, it coincides with $C(K)$. \qed \end{example} \begin{theorem}\label{isase} Let a group $G\subset\mathop{\mbox{\rm GL}}\nolimits(V)$, a finite subgroup $F\subseteq G$, a torus $T$, and a vector $v\in V$ satisfy (\ref{injec})--(\ref{vgene}), (\ref{fixev}), and (\ref{spatc}). Then $V,G,F,T,v$ can be realized as in Example~\ref{maico}, where \begin{eqnarray}\label{vgenf} v\in {(\bbC^*)^n}\cap C(K)^F. \end{eqnarray} Conversely, if $V,G,F,T,v$ are as in Example~\ref{maico} and $v$ satisfies (\ref{vgenf}), then (\ref{injec})--(\ref{vgene}), (\ref{fixev}), and (\ref{spatc}) are true. \end{theorem} \begin{proof} The group $F$ acts in ${\mathord{\mathfrak{t}}}$ and ${\mathord{\mathfrak{t}}}^*$ by the adjoint action. Let $K\subset{\mathord{\mathfrak{t}}}^*$ be the collection of all weights for the representation of $T$ in $V$; clearly, $K$ is $F$-invariant. It follows from (\ref{spatc}) and (\ref{incor}) that the weights are multiplicity free. 
This defines an equivariant linear isomorphism between $V$ and $C(K)$, where the group $T$ acts by multiplication. Thus, ${\mathord{\lambda}}$ and $\exp$ are well defined by (\ref{defla}) and (\ref{defex}). According to (\ref{fixev}) and (\ref{vgene}), (\ref{vgenf}) is true; (\ref{spank}) holds since $T\subset\mathop{\mbox{\rm GL}}\nolimits(V)$ is compact and acts effectively on $V$ (note that the annihilator of $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}} K$ in ${\mathord{\mathfrak{t}}}$ acts trivially due to (\ref{defex}) and (\ref{vgenf})). Let us define $L$ by (\ref{defz}). Then $L=\ker\exp$ by (\ref{defex}). Hence, $L$ is a lattice in ${\mathord{\mathfrak{t}}}$ and the group $L^*$ generated by $K$ is the dual lattice in ${\mathord{\mathfrak{t}}}^*$.
The converse was proved in Example~\ref{maico}. \end{proof}
\section{Finite extensions of $T$ which keep generic $T^{\mathord{\mathbb C}}$-orbits}
In what follows, we use the setting of Example~\ref{maico}. Let $Z$ denote the centralizer of $G$ in $\mathop{\mbox{\rm GL}}\nolimits(V)$. We assume that ${(\bbC^*)^n}$ acts in $V=C(K)$ by multiplication. \begin{lemma}\label{centr} $Z=C(K)^F\cap {(\bbC^*)^n}$. \end{lemma} \begin{proof} Since ${\mathord{\lambda}}({\mathord{\mathfrak{t}}})$ separates points of $K$, $Z\subseteq{(\bbC^*)^n}$. The multiplication by $u\in C(K)$ commutes with $F$ if and only if $u$ is $F$-invariant. \end{proof}
In general, condition (\ref{incor}) does not hold for a generic vector $v$. Hence, there is a natural problem: {\sl describe $V$ and $G$ such that generic orbits satisfy (\ref{incor})}. The following proposition contains a simple criterion. \begin{proposition}\label{sicri} Let $V,G$ be as in Example~\ref{maico}. Then $G$ satisfies (\ref{incor}) for a generic $v\in V$ if and only if \begin{eqnarray}\label{opfix} C(K)={\mathord{\lambda}}({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}})+C(K)^F. \end{eqnarray} In this case, each $T^{\mathord{\mathbb C}}$-orbit in ${(\bbC^*)^n}$ intersects $C(K)^F$. \end{proposition} \begin{proof} It follows from Lemma~\ref{centr} that the right-hand side of (\ref{opfix}) is the tangent space at ${\mathord{\bf 1}}$ to the set $T^{\mathord{\mathbb C}}\,Z$. Clearly, $\wt G^{\mathord{\mathbb C}}=ZG^{\mathord{\mathbb C}}$ is a group, $T^{\mathord{\mathbb C}} Z$ is the identity component of $\wt G^{\mathord{\mathbb C}}$, and the right-hand side of (\ref{opfix}) is the tangent space to $\wt G^{\mathord{\mathbb C}}{\mathord{\bf 1}}$. Hence, (\ref{opfix}) holds if and only if $\wt G^{\mathord{\mathbb C}}{\mathord{\bf 1}}$ is open. Moreover, this is equivalent to the equality
$T^{\mathord{\mathbb C}} Z=\exp({\mathord{\lambda}}({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}})+C(K)^F)={(\bbC^*)^n}$.
Therefore, each $T^{\mathord{\mathbb C}}$-orbit in ${(\bbC^*)^n}$ intersects $C(K)^F$, i.e., contains an $F$-fixed point. Thus, (\ref{opfix}) implies (\ref{incor}) for $v\in {(\bbC^*)^n}$.
Let (\ref{incor}) hold and let $W$ be an $F$-invariant neighborhood of ${\mathord{\bf 1}}$. If $W$ is sufficiently small, then the condition $\log{\mathord{\bf 1}}=0$ defines a branch of $\log$ in $W$. We may assume that $\log W$ is convex and symmetric.
This makes it possible to define roots in $W$: $w^{\frac1r}=\exp\left(\frac1r\log w\right)$.
For $v\in W^{\frac12}$ and $f\in F$, set $g_f=\left(\frac {fv}{v}\right)^{\frac1r}$, where $r=\mathop{\mbox{\rm card}} F$, and
$g=\prod\nolimits_{f\in F}g_f$.
Then $g v$ is $F$-fixed. If (\ref{incor}) holds for $v$, then $g_f\in T^{\mathord{\mathbb C}}$ for all $f\in F$; hence, $g v\in T^{\mathord{\mathbb C}} v$. Consequently, for all $v\in W$, $T^{\mathord{\mathbb C}} v$ intersects $C(K)^F$. Since $Z$ keeps this property of orbits, it follows that $T^{\mathord{\mathbb C}} Z$ has a nonempty interior. This implies (\ref{opfix}). \end{proof} \begin{theorem}\label{charf} Let $G\subset\mathop{\mbox{\rm GL}}\nolimits(V)$ be a semidirect product of a torus $T$ and a finite subgroup $F$, and let $Z$ be the centralizer of $G$ in $\mathop{\mbox{\rm GL}}\nolimits(V)$. Suppose that $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}} Tv=V$ for some $v\in V$. Then the following conditions are equivalent: \begin{itemize} \item[(\romannumeral1)] $Gv\subset T^{\mathord{\mathbb C}} v$ for a generic $v\in V$; \item[(\romannumeral2)] $G^{\mathord{\mathbb C}} Zv$ is open in $V$ for a generic $v\in V$. \end{itemize}
\end{theorem} \begin{proof} By Theorem~\ref{isase}, we may use the construction of Example~\ref{maico}. According to Lemma~\ref{centr}, (\romannumeral2) is equivalent to
(\ref{opfix}), and the assertion follows from Proposition~\ref{sicri}. \end{proof} We shall give a constructive description of these spaces and groups. Set \begin{eqnarray*} C_0(K)=\left\{u\in C(K):\,\ \sum\nolimits_{q\in K}u(q)=0\right\}. \end{eqnarray*} Sometimes, we identify points in $K$ with their characteristic functions.
\begin{example}\label{maict} Let $V={\mathord{\mathbb C}}^n=C(K)$, where $K=\{1,\dots,n\}$, let $F$ be a subgroup of $S_n$, and \begin{eqnarray}\label{deck} K=K_1\cup\dots\cup K_p \end{eqnarray} be the partition of $K$ into $F$-orbits. For $k\in\{1,\dots,p\}$, set $V_k=C(K_k)$. Then $V=V_1\oplus\dots\oplus V_p$. Set
\begin{eqnarray*} &{\mathord{\mathfrak{t}}}_k^{0}=C_0(K_k)\cap i{\mathord{\mathbb R}}^n,\\ &T_k^0=\exp({\mathord{\mathfrak{t}}}_k^0)\subset C(K_k), \end{eqnarray*} where $\exp$ is defined by (\ref{defex}). Set ${\mathord{\mathfrak{t}}}^0={\mathord{\mathfrak{t}}}_1^0\oplus\dots\oplus {\mathord{\mathfrak{t}}}_p^0,$ \begin{eqnarray*} T^0=\exp({\mathord{\mathfrak{t}}}^0)=T_1^0\times\dots\times T_p^0. \end{eqnarray*} Let $T$ be an $F$-invariant torus such that \begin{eqnarray}\label{inct} T^0\subseteq T\subseteq{\mathord{\mathbb T}}^n \end{eqnarray} and set $G=FT$. Then generic $G^{\mathord{\mathbb C}}$-orbits satisfy (\ref{incor}). The group $G$ is irreducible if and only if $F$ is transitive on $K$; in general, $F$-orbits in $K$ define $G$-irreducible components of $V$. There are two extreme cases in (\ref{inct}). \begin{itemize} \item[(A)]\label{exama} If $T={\mathord{\mathbb T}}^n$, then there is one open orbit ${(\bbC^*)^n}$ of the group $G^{\mathord{\mathbb C}}=FT^{\mathord{\mathbb C}}$, which evidently satisfies (\ref{incor}). If $F$ is nontrivial, then there exist degenerate orbits that do not satisfy (\ref{incor}); moreover, if $F$ is transitive on $K$, then all non-open $G^{\mathord{\mathbb C}}$-orbits, except for zero, are nontrivial finite unions of $T^{\mathord{\mathbb C}}$-orbits. \item[(B)]\label{examb} If $T=T^0$, then generic orbits are closed. They have codimension $p$ and are distinguished by equations \begin{eqnarray*} \prod\nolimits_{r\in K_k}z_r=c_k, \end{eqnarray*} where $c_k\in{\mathord{\mathbb C}}^*$, $k=1,\dots,p$. \end{itemize} \end{example} Note that (A) and (B) are invariant under the Cartesian product (the group $F$ need not be the product of groups $F_k$ of irreducible components but must have the same orbits in $K$ as $F_1\times\dots\times F_p$). 
In terms of Example~\ref{maico}: in (A), ${\mathord{\mathfrak{t}}}={\mathord{\mathbb R}}^n$, the mapping ${\mathord{\lambda}}:\,{\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}}\to C(K)$ is surjective, $K=\{{\mathord{\epsilon}}_1,\dots,{\mathord{\epsilon}}_n\}$; in (B), ${\mathord{\mathfrak{t}}}=i{\mathord{\mathbb R}}^n\cap C_0(K)$, ${\mathord{\lambda}}({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}})=C_0(K)$, and the set $K$ is the projection of $\{{\mathord{\epsilon}}_1,\dots,{\mathord{\epsilon}}_n\}$ into ${\mathord{\mathfrak{t}}}^*={\mathord{\mathfrak{t}}}$. In both cases, $K$ is the set of all vertices of a regular simplex. \qed \begin{theorem}\label{asexa} Let $V,G$ be as in Theorem~\ref{charf} and let {\rm{(\romannumeral1)}} hold. Then $V,G$ can be realized as in Example~\ref{maict}. Furthermore, \begin{itemize} \item[\rm(1)] $V,G$ are of type {\rm(A)} if and only if $G^{\mathord{\mathbb C}}$ has an open orbit, \item[\rm(2)] {\rm(B)} is equivalent to the assumption that the center of $G$ is finite, \item[\rm(3)] if $G$ is irreducible, then either {\rm(A)} or {\rm(B)} holds. \end{itemize} \end{theorem} Let $C(K)^F_+$ be the cone of all nonnegative functions in $C(K)^F$. \begin{lemma}\label{crifi} Let $G$ and $V$ be as in Example~\ref{maico}. Then, the orbit $G^{\mathord{\mathbb C}} v$ is closed for a generic $v\in V$ if and only if \begin{eqnarray}\label{close} {\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}\cap C(K)^F_+=0. \end{eqnarray} \end{lemma} \begin{proof} Clearly, $G^{\mathord{\mathbb C}} v$ is closed if and only if $T^{\mathord{\mathbb C}} v$ is closed. Let $v\in{(\bbC^*)^n}$. By Proposition~\ref{hulfi} and Corollary~\ref{noclo}, $T^{\mathord{\mathbb C}} v$ is not closed if and only if $C_T\neq0$. Since $C_T$ is $F$-invariant by (\ref{defct}), it contains $\sum_{f\in F} fu$ for each $u\in C_T$. Thus, $C_T=0$ is equivalent to (\ref{close}). 
\end{proof} \begin{proof}[Proof of Theorem~\ref{asexa}] Suppose that $G$ is irreducible or, equivalently, $F$ is transitive. Then $Z={\mathord{\mathbb C}}^*\,{\mathord{\bf 1}}$ according to Lemma~\ref{centr}. If ${\mathord{\bf 1}}\in{\mathord{\lambda}}({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}})$, then $T^{\mathord{\mathbb C}}\supseteq Z$ and $T^{\mathord{\mathbb C}} v$ is open for a generic $v\in V$ by Theorem~\ref{charf}. If ${\mathord{\bf 1}}\notin{\mathord{\lambda}}({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}})$, then (\ref{close}) is true; by Lemma~\ref{crifi}, $T^{\mathord{\mathbb C}} v$ is closed for a generic $v\in V$. By Proposition~\ref{sicri}, a generic $T^{\mathord{\mathbb C}}$-orbit intersects ${\mathord{\mathbb C}}^*{\mathord{\bf 1}}$. Consequently, we have \begin{eqnarray}\label{codim} \mathop{\mathrm{codim}}\nolimits G^{\mathord{\mathbb C}} v=1. \end{eqnarray} Let ${\mathord{\bf 1}}\in T^{\mathord{\mathbb C}}\cap Z$. The orthogonal projection of ${\mathord{\bf 1}}$ into the tangent space ${\mathrm T}_{{\mathord{\bf 1}}}T^{\mathord{\mathbb C}}{\mathord{\bf 1}}$ is $F$-fixed. Hence, it is proportional to ${\mathord{\bf 1}}$; since ${\mathord{\bf 1}}\notin{\mathord{\lambda}}({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}})$, this implies ${\mathord{\bf 1}}\perp{\mathord{\lambda}}({\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}}){\mathord{\bf 1}}$. Therefore, ${\mathrm T}_{\mathord{\bf 1}} T^{\mathord{\mathbb C}}{\mathord{\bf 1}}$ coincides with the tangent space to the hypersurface $z_1\dots z_n=1$ at ${\mathord{\bf 1}}$; since the monomial on the left is an eigenfunction of $T^{\mathord{\mathbb C}}$, this group keeps it. Due to (\ref{codim}), $T^{\mathord{\mathbb C}}{\mathord{\bf 1}}$ coincides with this hypersurface. Then, $T={\mathord{\mathbb T}}^n\cap\mathop{\mathrm{SU}}\nolimits(n)$, and any $T^{\mathord{\mathbb C}}$-orbit that intersects $Z$ is a hypersurface $z_1\dots z_n=c$, for some $c\in{\mathord{\mathbb C}}^*$. 
This implies ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb C}}=C_0(K)$ and $T=T^0$.
Thus, the theorem is proved for all irreducible $G$. The projection onto each irreducible component keeps the property (\ref{incor}) for generic orbits since it commutes with $G$. Hence, (\romannumeral1) holds for all irreducible components. They correspond to $F$-orbits $K_k$ in the partition (\ref{deck}).
Let ${\mathord{\mathfrak{t}}}_k^0$, $k=1,\dots,p\,$, be defined as in Example~\ref{maict}. According to the arguments above,
${\mathord{\lambda}}\left({\mathord{\mathfrak{t}}}|_{K_k}\right)\supseteq{\mathord{\lambda}}({\mathord{\mathfrak{t}}}_k^0)$ for all $k$. If $x\in{\mathord{\mathfrak{t}}}$, then the averaging \begin{eqnarray*} &Ax=\frac1r\sum\nolimits_{f\in F}fx,\quad r=\mathop{\mbox{\rm card}} F, \end{eqnarray*} distinguishes the $F$-fixed component of $x$ (i.e., $Ax\in C(K)^F\cap{\mathord{\mathfrak{t}}}$ and $x-Ax\in{\mathord{\mathfrak{t}}}^0$); since ${\mathord{\mathfrak{t}}}$ is $F$-invariant, it contains both components. By Lemma~\ref{centr}, if $G$ has a finite center, then
${\mathord{\lambda}}\left({\mathord{\mathfrak{t}}}|_{K_k}\right)={\mathord{\lambda}}({\mathord{\mathfrak{t}}}_k^0)$ for all $k$. It follows that $${\mathord{\mathfrak{t}}}\subseteq{\mathord{\mathfrak{t}}}^0={\mathord{\mathfrak{t}}}_1^0\oplus\dots\oplus {\mathord{\mathfrak{t}}}_p^0.$$ On the other hand, (\romannumeral2) and Lemma~\ref{centr} imply $\mathop{\mathrm{codim}}\nolimits {\mathord{\mathfrak{t}}}\leq\dim C(K)^F=p$. Hence, the inclusion above is in fact an equality. Thus, we get (B) assuming that $G$ has a finite center. The converse is true since ${\mathord{\mathfrak{t}}}^0$ does not contain a nontrivial $F$-fixed element. The same arguments show that any $F$-invariant torus $T$ includes $T^0$ if (\romannumeral1) is true. This proves that $V,G$ admit the realization of Example~\ref{maict}; (1) and (2) are clear. \end{proof} \begin{corollary} Let $G$ be as in Theorems~\ref{asexa} and~\ref{charf}. Then $G$ contains a closed subgroup $G^0$ such that \begin{itemize} \item[(1)] each connected component of $G$ contains a connected component of $G^0$, \item[(2)] $G^0$ has a finite center, \item[(3)] generic orbits of $(G^0)^{\mathord{\mathbb C}}$ are closed, \item[(4)] $Gv\cap T^{\mathord{\mathbb R}} v=G^0v\cap(T^0)^{\mathord{\mathbb R}} v$ for a generic $v\in V$. \end{itemize} \end{corollary} \begin{proof} By Theorem~\ref{asexa} and (\ref{inct}), $G\supseteq T^0$, where $T^0$ is as in (B). Clearly, $F$ normalizes $T^0$. Hence, $G^0=FT^0$ is a group, which satisfies the corollary. \end{proof} Proposition~\ref{hulfi} makes it possible to find $\wh{Gv}$ for $G$ as above. If $T={\mathord{\mathbb T}}^n$, then $T\supset{\mathord{\mathbb Z}}_2^n$ and generic $T$-orbits intersect ${\mathord{\mathbb R}}^n_+$; hence, we may assume $v\in{\mathord{\mathbb R}}^n_+$. Then $\wh{Tv}\cap{\mathord{\mathbb R}}^n$ is a parallelepiped $\Pi_v=\mathop{\mathrm{conv}}\nolimits\{(\pm v_1,\dots,\pm v_n)\}$. 
Clearly, $\Pi_v={\mathord{\mathbb Z}}^n_2\Pi_v^+$, where $\Pi_v^+=\Pi_v\cap\mathop{\mbox{\rm clos}}{\mathord{\mathbb R}}^n_+$. Since ${\mathord{\mathbb R}}^n_+=T^{\mathord{\mathbb R}} v$, we may use Proposition~\ref{hulfi} with $X=Fv$, $C_T=-\mathop{\mbox{\rm clos}}{\mathord{\mathbb R}}^n_+$, $P_X=\mathop{\mathrm{conv}}\nolimits(Fv)-{\mathord{\mathbb R}}^n_+$: \begin{eqnarray*} \wh{Gv}=\cup_{u\in\exp(Q_v)}{\mathord{\mathbb D}}^n u=T\cup_{u\in\exp(Q_v)}\Pi_{u}= T\cup_{u\in\exp(Q_v)}\Pi_{u}^+, \end{eqnarray*} where $Q_v=\mathop{\mathrm{conv}}\nolimits{Fv}$. For the description in the form (\ref{inehu}), one has to know normal vectors to faces of $\mathop{\mathrm{conv}}\nolimits Fv$. Since $F$ may be an arbitrary subgroup of $S_n$, they need not be proportional to rational vectors (for example, this is true for the cyclic subgroup of order 3 in $S_3$). We shall describe the situation where they are locally independent of $v$; since they depend on $v$ continuously, this is equivalent to the condition that they are rational. Note that the vector which joins two points in ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$ as in Lemma~\ref{strih} is rational if and only if the strip reduces to an annulus. In Example~\ref{maict}, $F$ need not be the product of groups corresponding to the irreducible components; we shall see that $F$ possesses this property in the case under consideration.
Let $U$ be a real vector space and $F\subset\mathop{\mbox{\rm GL}}\nolimits(U)$ be a finite group. Set \begin{eqnarray*} C_u=\mathop{\mathrm{cone}}\nolimits(u-Fu); \end{eqnarray*} this is the cone at the vertex $u$ of the polytope $\mathop{\mathrm{conv}}\nolimits(Fu)$ (which may be degenerate). We say that {\it $C_u$ is locally independent of $u$} if, for a generic $u\in U$, $C_u=C_w$ for all $w$ that are sufficiently close to $u$. \begin{lemma}\label{coxno} Let $U$ be a real vector space and $F$ be a finite subgroup of $\mathop{\mbox{\rm GL}}\nolimits(U)$. Suppose that $C_u$ is locally independent of $u$. Then $F$ is generated by reflections in hyperplanes in $U$. \end{lemma} \begin{proof} We may assume without loss of generality that $U$ is equipped with an inner product and that $F\subseteq\mathop{\mbox{\rm O}}\nolimits(U)$. Let ${\mathord{\mathbb R}}_+(u-fu)$, $f\in F$, be an extreme ray of $C_u$. The equality $C_u=C_w$ for $w$ in a neighborhood of $u$ implies that this ray does not change near $u$. Hence, $\dim({\mathord{\bf 1}}-f)U=1$. Since $f$ is orthogonal and nontrivial, it is a reflection in a hyperplane. The stable subgroup of a generic $u\in U$ is trivial (hence, $F$ acts freely on a generic orbit) and each vertex of $\mathop{\mathrm{conv}}\nolimits(Fu)$ can be joined with $u$ by a chain of edges. Applying the above arguments repeatedly to $u,fu$, etc., we get that $F$ is generated by reflections in hyperplanes. \end{proof} For any $g\in{\mathord{\mathbb Z}}_2^nS_n$ and $k=1,\dots,n$, $g{\mathord{\epsilon}}_k=\pm\,{\mathord{\epsilon}}_{{\mathord{\sigma}}(k)}$ for some ${\mathord{\sigma}}\in S_n$. The mapping $g\to{\mathord{\sigma}}$ is a natural homomorphism ${\mathord{\mathbb Z}}_2^nS_n\to S_n$, which we denote by $\phi$. 
\begin{lemma}\label{perre} Let $F$ be a transitive subgroup of $S_n$ acting in ${\mathord{\mathbb R}}^n$ by permutations of coordinates and let a group $H\subseteq {\mathord{\mathbb Z}}_2^nS_n$ be generated by reflections in hyperplanes in ${\mathord{\mathbb R}}^n$. If $\phi(H)=F$, then $F=S_n$. \end{lemma} \begin{proof} Let $\rho$ be a reflection in a hyperplane in ${\mathord{\mathbb R}}^n$. If $\rho\in{\mathord{\mathbb Z}}_2^nS_n=BC_n$, then it is conjugate to a reflection in a wall of the Weyl chamber that is distinguished by the inequalities $x_1>\dots>x_n>0$. Hence, $\phi(\rho)$ is a transposition if it is nontrivial. Since $F=\phi(H)$, $F$ is generated by transpositions. It remains to note that any subgroup of $S_n$, which is generated by transpositions, coincides with $S_n$ if it is transitive on $\{1,\dots,n\}$ (consider the graph with the vertices $\{1,\dots,n\}$ and edges corresponding to transpositions and note that inclusions $(k,l)\in F$, $(l,m)\in F$ imply $(k,m)\in F$; this makes it possible to use the induction). \end{proof} We say that a pair $(V,G)$ is {\it standard} if it is isomorphic to (A) or (B) in Example~\ref{maict} with $F=S_K$. The {\it product} of pairs $(V_k,G_k)$, $k=1,\dots,m$, is the pair $(\sum_{k=1}^m V_k, \prod_{k=1}^m G_k)$. \begin{theorem}\label{huini} Let $G=FT$ be a compact subgroup of $\mathop{\mbox{\rm GL}}\nolimits(n,{\mathord{\mathbb C}})$, where $T\subseteq{\mathord{\mathbb T}}^n$ is a torus and $F$ is a subgroup of $S_n$. Suppose that $Gv\subset T^{\mathord{\mathbb C}} v$ for a generic $v\in V$ and \begin{itemize} \item[\rm(1)] either $T={\mathord{\mathbb T}}^n$ or the center of $G$ is finite, \item[\rm(2)] for a generic $v\in{\mathord{\mathbb C}}^n$, $\wh{Gv}$ can be distinguished in $\mathop{\mbox{\rm clos}} T^{\mathord{\mathbb C}} v$ by a family of inequalities \begin{eqnarray*}
|z_1|^{s_1}\dots |z_n|^{s_n}\leq \rho_s(v), \end{eqnarray*} where $\rho_s(v)\geq0$ and vector $s=(s_1,\dots,s_n)$ runs over a certain finite subset of ${\mathord{\mathbb R}}^n$ which is independent of $v$.
\end{itemize} Then $(V,G)$ is isomorphic to the product of standard pairs. Moreover, if $G$ is irreducible, then $(V,G)$ is standard. \end{theorem} \begin{proof} Let $G$ be irreducible. Then $F$ is transitive and $(V,G)$ are as in (A) or as in (B) by Theorem~\ref{asexa}. Suppose that (B) is the case. It follows from (2) and Proposition~\ref{hulfi} that the polytope $Q_X\subset{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$, where $X=Gv\cap T^{\mathord{\mathbb R}} v$, for a generic $v$, satisfies the assumption of Lemma~\ref{coxno}. Therefore, $F$ is generated by reflections (we may assume that $F\subset\mathop{\mbox{\rm O}}\nolimits({\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}})$). They extend to reflections in hyperplanes in ${\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}+{\mathord{\mathbb R}}\,{\mathord{\bf 1}}={\mathord{\mathbb R}}^n$ if we assume that they fix ${\mathord{\bf 1}}$. Then, Lemma~\ref{perre} implies $F=S_n$. The case (A) can be reduced to (B): it is sufficient to replace ${\mathord{\mathbb T}}^n$ with $T=\mathop{\mathrm{SU}}\nolimits(n)\cap{\mathord{\mathbb T}}^n$ since $F$ evidently keeps $T$ and to note that (2) remains true due to Proposition~\ref{hulfi}. Thus, $(V,G)$ is standard.
Let the center of $G$ be finite. According to Theorem~\ref{asexa}, $T$ may be identified with the group $T^0$ in Example~\ref{maict}. In particular, $G^{\mathord{\mathbb C}} v$ is closed for a generic $v$ and $C_T=0$ due to Proposition~\ref{hulfi}. By Proposition~\ref{sicri}, generic orbits contain $F$-fixed points. Applying the arguments above (which did not use the assumption that $G$ is irreducible), we get that the cones at the vertices of the convex polytope $Q_X$, $X=Gv\cap T^{\mathord{\mathbb R}} v\subset{\mathord{\mathfrak{t}}}^{\mathord{\mathbb R}}$, are locally independent of $v$. Clearly, the same is true for its projection into each space ${\mathord{\mathfrak{t}}}^0_k$ corresponding to an irreducible component $V_k$ of $V={\mathord{\mathbb C}}^n$. This implies that all irreducible components are standard. Thus, $F_k=S(K_k)$, where $k=1,\dots,p$ and $K=K_1\cup\dots\cup K_p$ is the partition of $K$ into $F$-orbits. Due to Theorem~\ref{asexa}, it is sufficient to prove that \begin{eqnarray}\label{produ} F=F_1\times\dots\times F_p. \end{eqnarray}
By Lemma~\ref{coxno}, $F|_{{\mathord{\mathfrak{t}}}^0}$ is generated by reflections in hyperplanes in ${\mathord{\mathfrak{t}}}^0$; the condition that they keep real $F$-invariant functions on $K$ uniquely defines their extension to ${\mathord{\mathbb R}}^n$. Hence, $F$ is generated by reflections in ${\mathord{\mathbb R}}^n$. A permutation which induces a reflection in a hyperplane in ${\mathord{\mathbb R}}^n$ is a transposition of a pair of coordinates; this pair is necessarily contained in only one of the sets $K_k$, $k=1,\dots,p$. This proves (\ref{produ}).
If $T={\mathord{\mathbb T}}^n$, then $T$ is a product of tori in irreducible components. Thus, the case $T={\mathord{\mathbb T}}^n$ follows from the above case, since the assumptions of the theorem hold true for the group $T^0$ if they hold for $T$ in (\ref{inct}) in Example~\ref{maict}. \end{proof}
\section{Hulls of isotropy orbits of bounded symmetric domains\label{isohe}}
We start with some preliminary material on hermitian symmetric spaces following \cite{Wo} but adapting the exposition to our purpose in order to be as self-contained as possible. For a subset $X$ of a Lie algebra ${\mathord{\mathfrak{g}}}$, ${\mathord{\mathfrak{z}}}(X)=\{z\in{\mathord{\mathfrak{g}}}:\,[z,X]=0\}$ is the centralizer of $X$. Let $G$ be a simple real noncompact Lie group with a finite center, $K$ be its maximal compact subgroup, and ${\mathord{\mathfrak{g}}},{\mathord{\mathfrak{k}}}$ be their Lie algebras, respectively. If the center ${\mathord{\mathfrak{z}}}={\mathord{\mathfrak{z}}}({\mathord{\mathfrak{k}}})$ of ${\mathord{\mathfrak{k}}}$ is nontrivial, then ${\mathord{\mathfrak{g}}}$ is called {\it hermitian}. Then ${\mathord{\mathfrak{k}}}={\mathord{\mathfrak{z}}}({\mathord{\mathfrak{z}}})$ and $\dim{\mathord{\mathfrak{z}}}=1$ (note that $K$ is irreducible in ${\mathord{\mathfrak{g}}}/{\mathord{\mathfrak{k}}}$). Let ${\mathord{\mathfrak{c}}}$ be a Cartan subalgebra of ${\mathord{\mathfrak{k}}}$. Then ${\mathord{\mathfrak{c}}}$ is also a Cartan subalgebra of ${\mathord{\mathfrak{g}}}$ and ${\mathord{\mathfrak{z}}}\subseteq{\mathord{\mathfrak{c}}}$. There exists ${\mathord{\mathsf{k}}}\in{\mathord{\mathfrak{z}}}$ such that $\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}})$ has eigenvalues $0,\pm i$ (it is unique up to a sign; $\ker\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}})={\mathord{\mathfrak{k}}}$). Then ${\mathord{\kappa}}=e^{\pi\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}})}$ is the Cartan involution which defines the Cartan decomposition \begin{eqnarray}\label{carta} {\mathord{\mathfrak{g}}}={\mathord{\mathfrak{k}}}\oplus{\mathord{\mathfrak{d}}}, \end{eqnarray} where ${\mathord{\mathfrak{k}}},{\mathord{\mathfrak{d}}}$ are eigenspaces for $1,-1$, respectively. Furthermore, $j=\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}})$ is a complex structure in ${\mathord{\mathfrak{d}}}$. 
This defines the structure of a hermitian symmetric space of noncompact type in $D=G/K$. These spaces can be realized as bounded symmetric domains in ${\mathord{\mathbb C}}^n$ with $K=\mathop{\mathrm{Aut}}\nolimits_0(D)$. Any irreducible bounded symmetric domain admits such a realization. Let ${\mathord{\Delta}}\subseteq i{\mathord{\mathfrak{c}}}^*$ be the root system of ${\mathord{\mathfrak{g}}}^{\mathord{\mathbb C}}$. Each ${\mathord{\alpha}}\in{\mathord{\Delta}}$ corresponds to an $\mathop{\mathrm{sl}}\nolimits_2$-triple ${\mathord{\mathsf{h}}}_{\mathord{\alpha}},{\mathord{\mathsf{e}}}_{\mathord{\alpha}},{\mathord{\mathsf{f}}}_{\mathord{\alpha}}$ such that $i{\mathord{\mathsf{h}}}_{\mathord{\alpha}}\in{\mathord{\mathfrak{c}}}$. Thus, ${\mathord{\alpha}}({\mathord{\mathsf{h}}}_{\mathord{\alpha}})=2$, $[{\mathord{\mathsf{e}}}_{\mathord{\alpha}},{\mathord{\mathsf{f}}}_{\mathord{\alpha}}]={\mathord{\mathsf{h}}}_{\mathord{\alpha}}$, and \begin{eqnarray}\label{roots} [h,{\mathord{\mathsf{e}}}_{\mathord{\alpha}}]={\mathord{\alpha}}(h){\mathord{\mathsf{e}}}_{\mathord{\alpha}},\quad [h,{\mathord{\mathsf{f}}}_{\mathord{\alpha}}]=-{\mathord{\alpha}}(h){\mathord{\mathsf{f}}}_{\mathord{\alpha}} \end{eqnarray} for all $h\in{\mathord{\mathfrak{c}}}^{\mathord{\mathbb C}}$. We identify ${\mathord{\mathfrak{c}}}^{\mathord{\mathbb C}}$ and $({\mathord{\mathfrak{c}}}^*)^{\mathord{\mathbb C}}$ equipping ${\mathord{\mathfrak{g}}}$ with an $\mathop{\mathrm{Ad}}\nolimits(K)$-invariant sesquilinear inner product and normalize it by the condition \begin{eqnarray}\label{longn}
\max\{|{\mathord{\alpha}}|:\,{\mathord{\alpha}}\in{\mathord{\Delta}}\}=\sqrt2. \end{eqnarray} Then short roots must have length $1$ (note that $G_2$ has no real hermitian form). The set ${\mathord{\Delta}}^\vee=\{{\mathord{\mathsf{h}}}_{\mathord{\alpha}}:\,{\mathord{\alpha}}\in{\mathord{\Delta}}\}$ is the dual root system. The above normalization implies $h_{\mathord{\alpha}}={\mathord{\alpha}}$ for long roots and $h_{\mathord{\alpha}}=2{\mathord{\alpha}}$ for short ones. Since $\mathop{\mathrm{ad}}\nolimits(h)$, $h\in{\mathord{\mathfrak{c}}}$, has eigenvalues $0$ and ${\mathord{\alpha}}(h)$, where ${\mathord{\alpha}}\in{\mathord{\Delta}}$, we get ${\mathord{\alpha}}(i{\mathord{\mathsf{k}}})=0,\pm1$, i.e., $i{\mathord{\mathsf{k}}}$ is a {\it microweight} (of ${\mathord{\Delta}}^\vee$). For $s=0,\pm1$, set \begin{eqnarray}\label{defds} {\mathord{\Delta}}_s=\{{\mathord{\alpha}}\in{\mathord{\Delta}}:\,{\mathord{\alpha}}(i{\mathord{\mathsf{k}}})=s\}. \end{eqnarray} Since ${\mathord{\mathfrak{k}}}\oplus i{\mathord{\mathfrak{d}}}$ is a compact real form of ${\mathord{\mathfrak{g}}}^{\mathord{\mathbb C}}$ and $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}}\{i{\mathord{\mathsf{h}}}_{\mathord{\alpha}},{\mathord{\mathsf{e}}}_{\mathord{\alpha}}-{\mathord{\mathsf{f}}}_{\mathord{\alpha}}, i({\mathord{\mathsf{e}}}_{\mathord{\alpha}}+{\mathord{\mathsf{f}}}_{\mathord{\alpha}})\}$ is the $\mathop{\mathrm{su}}\nolimits(2)$-subalgebra corresponding to a root ${\mathord{\alpha}}\in{\mathord{\Delta}}$, we have \begin{eqnarray}\label{spand} {\mathord{\mathfrak{d}}}=\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}}\{{\mathord{\mathsf{e}}}_{\mathord{\alpha}}+{\mathord{\mathsf{f}}}_{\mathord{\alpha}},\ i({\mathord{\mathsf{e}}}_{\mathord{\alpha}}-{\mathord{\mathsf{f}}}_{\mathord{\alpha}}):\,{\mathord{\alpha}}\in{\mathord{\Delta}}_1\}. 
\end{eqnarray} Set ${\mathord{\mathfrak{s}}}_{\mathord{\alpha}}=\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}}\{i{\mathord{\mathsf{h}}}_{\mathord{\alpha}},{\mathord{\mathsf{e}}}_{\mathord{\alpha}}+{\mathord{\mathsf{f}}}_{\mathord{\alpha}},i({\mathord{\mathsf{e}}}_{\mathord{\alpha}}-{\mathord{\mathsf{f}}}_{\mathord{\alpha}})\}$. Then ${\mathord{\mathfrak{s}}}_{\mathord{\alpha}}$ is an $\mathop{\mathrm{sl}}\nolimits(2,{\mathord{\mathbb R}})$-subalgebra of ${\mathord{\mathfrak{g}}}^{\mathord{\mathbb C}}$ and \begin{eqnarray}\label{singe} {\mathord{\alpha}}\in{\mathord{\Delta}}_{\pm1}\quad\Longleftrightarrow\quad{\mathord{\mathfrak{s}}}_{\mathord{\alpha}}\subseteq{\mathord{\mathfrak{g}}}. \end{eqnarray} Let $E$ be a maximal subset of pairwise orthogonal long roots in ${\mathord{\Delta}}_1$. Set \begin{eqnarray*} &{\mathord{\mathsf{h}}}=\sum\nolimits_{{\mathord{\alpha}}\in E}{\mathord{\mathsf{h}}}_{\mathord{\alpha}},\quad {\mathord{\mathsf{e}}}=\sum\nolimits_{{\mathord{\alpha}}\in E}{\mathord{\mathsf{e}}}_{\mathord{\alpha}},\quad {\mathord{\mathsf{f}}}=\sum\nolimits_{{\mathord{\alpha}}\in E}{\mathord{\mathsf{f}}}_{\mathord{\alpha}};\\ &{\mathord{\mathfrak{s}}}=\sum\nolimits_{{\mathord{\alpha}}\in E}\oplus\,{\mathord{\mathfrak{s}}}_{\mathord{\alpha}}. \end{eqnarray*} Let ${\mathord{\alpha}},{\mathord{\beta}}\in E$, ${\mathord{\alpha}}\neq{\mathord{\beta}}$. Since ${\mathord{\alpha}},{\mathord{\beta}}$ are long and orthogonal, $\pm{\mathord{\alpha}}\pm{\mathord{\beta}}\notin{\mathord{\Delta}}$. Hence, \begin{eqnarray}\label{comms} {\mathord{\alpha}},{\mathord{\beta}}\in E,\ {\mathord{\alpha}}\neq{\mathord{\beta}}\quad\Longrightarrow\quad[{\mathord{\mathfrak{s}}}_{\mathord{\alpha}},{\mathord{\mathfrak{s}}}_{\mathord{\beta}}]=0. \end{eqnarray} It follows that ${\mathord{\mathsf{h}}},{\mathord{\mathsf{e}}},{\mathord{\mathsf{f}}}$ is an $\mathop{\mathrm{sl}}\nolimits_2$-triple and ${\mathord{\mathfrak{s}}}$ is a subalgebra of ${\mathord{\mathfrak{g}}}$. 
Set \begin{eqnarray} {\mathord{\theta}}=e^{\frac14\pi\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{e}}}-{\mathord{\mathsf{f}}})},\label{defth}\\ {\mathord{\mathfrak{a}}}=\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}}{\mathord{\theta}} E.\label{defa} \end{eqnarray} Here is the standard realization of root systems $B_n$ and $C_n$: \begin{eqnarray*} &B_n=\{\pm{\mathord{\epsilon}}_k\pm{\mathord{\epsilon}}_l,\ \pm{\mathord{\epsilon}}_m:\,\ k,l,m=1,\dots,n,\ k<l\};\\ &C_n=\{\pm{\mathord{\epsilon}}_k\pm{\mathord{\epsilon}}_l,\ \pm2{\mathord{\epsilon}}_m:\,\ k,l,m=1,\dots,n,\ k<l\}. \end{eqnarray*} Then $C_n=B_n^\vee$, but $C_n$ does not satisfy (\ref{longn}). These systems have microweights; up to the action of the Weyl group, they are: \begin{eqnarray*} B_n:&{\mathord{\epsilon}}_1;\\ C_n:&\frac{i}2({\mathord{\epsilon}}_1+\dots+{\mathord{\epsilon}}_n). \end{eqnarray*} There are no other irreducible root systems which have microweights and contain roots of different lengths. Also, $B_n$ and $C_n$ have the same Weyl group $BC_n={\mathord{\mathbb Z}}^n_2S_n$.
\begin{lemma}\label{maxab} The space ${\mathord{\mathfrak{a}}}$ is a maximal abelian subspace of ${\mathord{\mathfrak{d}}}$. \end{lemma} \begin{proof} A straightforward calculation with 2-matrices shows that ${\mathord{\theta}}{\mathord{\mathsf{h}}}={\mathord{\mathsf{e}}}+{\mathord{\mathsf{f}}}$. By (\ref{comms}), \begin{eqnarray}\label{thhef} {\mathord{\theta}}{\mathord{\mathsf{h}}}_{\mathord{\alpha}}={\mathord{\mathsf{e}}}_{\mathord{\alpha}}+{\mathord{\mathsf{f}}}_{\mathord{\alpha}}\quad\mbox{\rm for all}\ {\mathord{\alpha}}\in E. \end{eqnarray} It follows from (\ref{spand}) that ${\mathord{\mathfrak{a}}}\subseteq{\mathord{\mathfrak{d}}}$. Moreover, ${\mathord{\mathfrak{a}}}$ is abelian due to (\ref{comms}). Set $\Xi={\mathord{\Delta}}\cap E^\bot$. We claim that \begin{eqnarray}\label{orcen} \Xi\subseteq{\mathord{\Delta}}_0. \end{eqnarray} Indeed, a root in ${\mathord{\Delta}}_1\cap\Xi$ must be short. This may happen only in $B_n$ or $C_n$, since $G_2$ and $F_4$ have no microweights and other irreducible root systems have no roots of different lengths. In $B_n$, ${\mathord{\mathsf{k}}}$ is a short root and all other short roots are orthogonal to ${\mathord{\mathsf{k}}}$. Hence, they do not belong to ${\mathord{\Delta}}_1$. In $C_n$, $E=\{2{\mathord{\epsilon}}_1,\dots2{\mathord{\epsilon}}_n\}$; then $\Xi=\emptyset$. Since ${\mathord{\Delta}}_{-1}=-{\mathord{\Delta}}_1$, this proves (\ref{orcen}).
Set ${\mathord{\mathfrak{b}}}=E^\bot\cap{\mathord{\mathfrak{c}}}$ and ${\mathord{\mathfrak{m}}}=\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb C}}\{{\mathord{\mathsf{e}}}_{\mathord{\alpha}},{\mathord{\mathsf{f}}}_{\mathord{\alpha}}:\,{\mathord{\alpha}}\in\Xi\}$. It follows from (\ref{orcen}) that ${\mathord{\mathfrak{m}}}\subseteq{\mathord{\mathfrak{k}}}^{\mathord{\mathbb C}}$. Clearly, ${\mathord{\mathfrak{z}}}(E)={\mathord{\mathfrak{c}}}^{\mathord{\mathbb C}}\oplus{\mathord{\mathfrak{m}}}$. The space ${\mathord{\mathfrak{m}}}$ is ${\mathord{\theta}}$-invariant, because ${\mathord{\theta}}$ fixes roots in $\Xi$. Due to (\ref{defa}), we get \begin{eqnarray*} {\mathord{\mathfrak{z}}}({\mathord{\mathfrak{a}}})={\mathord{\theta}}{\mathord{\mathfrak{z}}}(E)={\mathord{\mathfrak{b}}}^{\mathord{\mathbb C}}\oplus{\mathord{\mathfrak{a}}}^{\mathord{\mathbb C}}\oplus{\mathord{\mathfrak{m}}} \end{eqnarray*} Since ${\mathord{\mathfrak{b}}}^{\mathord{\mathbb C}}\oplus{\mathord{\mathfrak{m}}}\subseteq{\mathord{\mathfrak{k}}}^{\mathord{\mathbb C}}$, this implies ${\mathord{\mathfrak{z}}}({\mathord{\mathfrak{a}}})\cap{\mathord{\mathfrak{d}}}={\mathord{\mathfrak{a}}}$. \end{proof}
The projection of ${\mathord{\theta}}{\mathord{\Delta}}$ into ${\mathord{\mathfrak{a}}}$ is the {\it restricted root system} ${\mathord{\Delta}}_{\mathord{\mathfrak{a}}}$ (it is also the set of roots for $\mathop{\mathrm{ad}}\nolimits({\mathord{\mathfrak{a}}})$ in ${\mathord{\mathfrak{g}}}$). The group \begin{eqnarray*}
W=\{\mathop{\mathrm{Ad}}\nolimits(g):\,g\in K,\ \mathop{\mathrm{Ad}}\nolimits(g){\mathord{\mathfrak{a}}}={\mathord{\mathfrak{a}}}\}|_{\mathord{\mathfrak{a}}}, \end{eqnarray*} acting in ${\mathord{\mathfrak{a}}}$, is the Weyl group of ${\mathord{\mathfrak{a}}}$.
In what follows, we denote by ${\mathord{\mathfrak{v}}}$ the complexification of ${\mathord{\mathfrak{a}}}$ with respect to the complex structure $j$ (thus, ${\mathord{\mathfrak{v}}}\subset{\mathord{\mathfrak{d}}}$). The set ${\mathord{\theta}} E$ is a base in ${\mathord{\mathfrak{v}}}$; enumerating it, we identify ${\mathord{\mathfrak{v}}}$ with ${\mathord{\mathbb C}}^n$. Set ${\mathord{\mathfrak{t}}}=\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}} iE$, $T=\exp{\mathord{\mathfrak{t}}}$, $H=WT$. The torus $T={\mathord{\mathbb T}}^n$ is a maximal compact subgroup in the group $\exp{\mathord{\mathfrak{s}}}\subseteq G$.
\begin{proposition}\label{pasta} The following assertions hold: \begin{enumerate} \item ${\mathord{\Delta}}_{\mathord{\mathfrak{a}}}$ is a root system of type $BC_n$ or $C_n$; \item the pair $({\mathord{\mathfrak{v}}},H)$ is standard with $T={\mathord{\mathbb T}}^n$. \end{enumerate} \end{proposition} \begin{proof} (1). Let ${\mathord{\Delta}}_{\mathord{\mathfrak{a}}}\setminus {\mathord{\theta}} E$ contain a long root ${\mathord{\alpha}}$. Then ${\mathord{\alpha}}=\frac12({\mathord{\alpha}}_1+{\mathord{\alpha}}_2+{\mathord{\alpha}}_3+{\mathord{\alpha}}_4)$ for some
${\mathord{\alpha}}_1,\dots,{\mathord{\alpha}}_4\in {\mathord{\theta}} E$, since $|{\mathord{\alpha}}|^2=2$ and $\scal{{\mathord{\alpha}}}{{\mathord{\beta}}}=0,\pm1$ for all ${\mathord{\beta}}\in E$ due to the normalization (\ref{longn}) (note that ${\mathord{\alpha}},{\mathord{\beta}}$ generate $A_2$ if $\scal{{\mathord{\alpha}}}{{\mathord{\beta}}}\neq0$). Roots ${\mathord{\alpha}},{\mathord{\alpha}}_1,\dots,{\mathord{\alpha}}_4$ generate $D_4$, since only $A_4$ and $D_4$ among irreducible systems of rank 4 consist of roots of equal length, but $A_4$ does not contain an orthogonal base. Since $\scal{i{\mathord{\mathsf{k}}}}{{\mathord{\beta}}}=1$ for all ${\mathord{\beta}}\in E$, the projection of $i{\mathord{\theta}}{\mathord{\mathsf{k}}}$ into $\mathop{\mathrm{span}}\nolimits_{\mathord{\mathbb R}} D_4$ is a microweight ${\mathord{\omega}}$ such that $\scal{{\mathord{\omega}}}{{\mathord{\alpha}}_k}=1$, $k=1,\dots,4$, but $D_4$ has no microweight with this property (in the realization above, $D_4=B_4\cap C_4$ and the microweights are either $\pm{\mathord{\epsilon}}_k$ or $\frac12(\pm{\mathord{\epsilon}}_1\pm{\mathord{\epsilon}}_2\pm{\mathord{\epsilon}}_3\pm{\mathord{\epsilon}}_4$)). Thus, $E\cup (-E)$ is the set of all long roots in ${\mathord{\Delta}}_{\mathord{\mathfrak{a}}}$. According to the classification of irreducible root systems, only $C_n$ and $BC_n=B_n\cup C_n$ has the property that linearly independent long roots are mutually orthogonal.
(2). The maximal compact subgroup of the group corresponding to ${\mathord{\mathfrak{s}}}$ is ${\mathord{\mathbb T}}^n$. Hence, $T={\mathord{\mathbb T}}^n\supset{\mathord{\mathbb Z}}_2^n$. Systems $C_n$ and $BC_n$ have the same Weyl group $W=BC_n$. Therefore, $H=WT=S_n{\mathord{\mathbb T}}^n$. \end{proof} Let $D$ be a bounded symmetric domain in a complex linear space ${\mathord{\mathfrak{d}}}$ (possibly reducible) and ${\mathord{\mathfrak{v}}}\subseteq {\mathord{\mathfrak{d}}}$ be the complex linear span of a maximal abelian subspace in ${\mathord{\mathfrak{d}}}$ (thus, we identify ${\mathord{\mathfrak{d}}}$ with the corresponding space in the Cartan decomposition (\ref{carta}), which is induced by the Cartan involutions in irreducible components). Let $\mathop{\mathrm{Aut}}\nolimits_{00}({\mathord{\mathfrak{v}}},D)$ denote the subgroup of all linear transformations in $\mathop{\mathrm{Aut}}\nolimits(D)$ which keep ${\mathord{\mathfrak{v}}}$ and each irreducible component of $D$. \begin{corollary}\label{conop} Let $F$ be a subgroup of $S_n$, $G=F{\mathord{\mathbb T}}^n\subset\mathop{\mbox{\rm GL}}\nolimits(n,{\mathord{\mathbb C}})$. Then $G$ satisfies condition (2) of Theorem~\ref{huini} if and only if $(V,G)$ is isomorphic to a pair $\left({\mathord{\mathfrak{v}}},\mathop{\mathrm{Aut}}\nolimits_{00}({\mathord{\mathfrak{v}}},D)\right)$ for a bounded symmetric domain $D$. \end{corollary} \begin{proof} All pairs $({\mathord{\mathbb C}}^n, S_n{\mathord{\mathbb T}}^n)$ appear as $\left({\mathord{\mathfrak{v}}},\mathop{\mathrm{Aut}}\nolimits_{00}({\mathord{\mathfrak{v}}},D)\right)$ for matrix balls $D$. It remains to combine Theorem~\ref{huini} and Proposition~\ref{pasta}. \end{proof} It is possible now to describe hulls of $K$-orbits in ${\mathord{\mathfrak{d}}}$ (with respect to the complex structure $j$) in terms of Proposition~\ref{autdn}. 
The key point is that $K$ is polar in ${\mathord{\mathfrak{d}}}$: each $K$-orbit meets ${\mathord{\mathfrak{a}}}$ orthogonally (i.e., ${\mathord{\mathfrak{a}}}$ is a {\it Cartan subspace}). This is true, since all maximal abelian subspaces are conjugate in ${\mathord{\mathfrak{d}}}$ by $K$, $\mathop{\mathrm{ad}}\nolimits(a)$ is symmetric if $a\in{\mathord{\mathfrak{d}}}$ and, for a generic $a\in{\mathord{\mathfrak{a}}}$, $\ker\mathop{\mathrm{ad}}\nolimits(a)={\mathord{\mathfrak{a}}}$; hence, \begin{eqnarray}\label{polar} [a,{\mathord{\mathfrak{g}}}]={\mathord{\mathfrak{a}}}^\bot. \end{eqnarray} We may include the linear base in ${\mathord{\mathfrak{v}}}$ into a base in ${\mathord{\mathfrak{d}}}$ as the first $n$ vectors of the latter. Then $z_1,\dots,z_n$ are coordinates in ${\mathord{\mathfrak{v}}}$ and linear functions in ${\mathord{\mathfrak{d}}}$. The functions $\mu_k$ in (\ref{mukde}) admit a $K$-invariant extension to ${\mathord{\mathfrak{d}}}$: \begin{eqnarray}\label{extmu}
\mu_k(z)=\sup\{|(gz)_{1}\dots (gz)_{k}|:\,g\in K\}, \end{eqnarray} where $k=1,\dots,n$. The following lemma shows that (\ref{extmu}) is an extension indeed. \begin{lemma}\label{coinc} For $z\in{\mathord{\mathfrak{v}}}$, {\rm(\ref{mukde})} and {\rm(\ref{extmu})} coincide. \end{lemma} \begin{proof} It follows from (\ref{polar}) that any critical point of the linear function $\mathop{\mathrm{Re}}\nolimits z_1$ on the orbit $Kz$ belongs to ${\mathord{\mathfrak{a}}}$. If the lemma is not true, then there exist $z\in{\mathord{\mathfrak{v}}}$ and
$k\in\{1,\dots,n\}$ such that $|(gz)_k|>|z_k|$. Transformations in $S_n$ and $T$ reduce the problem to the case $z_1>\dots>z_n>0$ and $k=1$, but then the assumption implies that $\mathop{\mathrm{Re}}\nolimits z_1$ attains its maximal value on $Kz$ outside of ${\mathord{\mathfrak{a}}}$. \end{proof} \begin{proposition}\label{hulfe} For any $v\in{\mathord{\mathfrak{d}}}$, $\wh{Kv}=\{z\in{\mathord{\mathfrak{d}}}:\,\mu_k(z)\leq\mu_k(v),\ k=1,\dots,n\}$. \end{proposition} \begin{proof} Due to (\ref{extmu}), each $\mu_k$ is a supremum of absolute values of holomorphic polynomials. Hence, the right-hand side is polynomially convex. Thus, it includes $\wh{Kv}$. The inverse inclusion holds, since each $K$-orbit intersects ${\mathord{\mathfrak{v}}}$ by an $H$-orbit and hulls of $H$-orbits are distinguished in ${\mathord{\mathfrak{v}}}$ by the same inequalities according to Proposition~\ref{autdn} and Lemma~\ref{coinc}. \end{proof} The functions $\mu_k$ can be written in more invariant terms. To do it, note that the Weyl group of ${\mathord{\Delta}}_{\mathord{\mathfrak{a}}}$ has the form ${\mathord{\mathbb Z}}_2^n S_n$ in the base ${\mathord{\theta}} E$ by (\ref{defa}); thus, $z_k={\mathord{\alpha}}_k(z)$, $k=1,\dots,n$, where ${\mathord{\alpha}}_k\in{\mathord{\theta}} E$ and $z\in{\mathord{\mathfrak{a}}}$. Therefore, $z_k$ are eigenvalues of $\mathop{\mathrm{ad}}\nolimits(z)$ in the subspace generated by the corresponding root vectors. The problem is to distinguish this subspace (in fact, we use a slightly different version). After that, functions $\mu_k$ can be defined as norms of some operators according to the following lemma (this observation was used in \cite{Ko} in another context). \begin{lemma}\label{normk} Let $V$ be a Euclidean space and $A$ be a symmetric nonnegative operator in $V$ with eigenvalues ${\mathord{\lambda}}_1\geq{\mathord{\lambda}}_2\geq\dots\geq{\mathord{\lambda}}_m\geq0$, where $m=\dim V$. 
Let $A^{\wedge k}$ be its natural extension to the $k$-th exterior power $V^{\wedge k}=\bigwedge^k V$. Then \begin{eqnarray*}
\|A^{\wedge k}\|_{V^{\wedge k}}={\mathord{\lambda}}_1\dots{\mathord{\lambda}}_k, \end{eqnarray*}
where $\|\ \|_k$ is the operator norm with respect to the inner product in $V^{\wedge k}$. \end{lemma} \begin{proof} The norm of a nonnegative symmetric operator is equal to its maximal eigenvalue. \end{proof} Let $v\in{\mathord{\mathfrak{g}}}$ be semisimple and $\pi(v)$ denote the projection onto $\ker\mathop{\mathrm{ad}}\nolimits(v)$ along other eigenspaces of $\mathop{\mathrm{ad}}\nolimits(v)$ (note that $\pi(v)$ is a function of $\mathop{\mathrm{ad}}\nolimits(v)$, since it is the residue at zero of the resolvent of $\mathop{\mathrm{ad}}\nolimits(v)$). Set \begin{eqnarray*} &a(v)=\mathop{\mathrm{ad}}\nolimits([v,[v,{\mathord{\mathsf{k}}}]])\pi(v)\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}}),\label{defop}\\
&p_k(v)=\|a(v)^{\wedge k}\|_{{\mathord{\mathfrak{g}}}^{\wedge k}},\quad k=1,\dots,n.\label{defno} \end{eqnarray*} The space ${\mathord{\mathfrak{d}}}$ is $a(v)$-invariant and $\ker a(v)\supseteq{\mathord{\mathfrak{k}}}$. We assume that ${\mathord{\mathfrak{g}}}$ is equipped with some $K$-invariant inner product, which extends the inner product in ${\mathord{\mathfrak{d}}}$. It follows from the calculation below that $a(v)$ is symmetric and has range $\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}}){\mathord{\mathfrak{a}}}$. Let $n=\dim{\mathord{\mathfrak{a}}}$ be the rank of the symmetric space $D$. It is equal to the codimension of a generic $K$-orbit in ${\mathord{\mathfrak{d}}}$. \begin{theorem}\label{last} For any $v\in{\mathord{\mathfrak{d}}}$, \begin{eqnarray*} \wh{Kv}=\{z\in{\mathord{\mathfrak{d}}}:\,p_k(z)\leq p_k(v),\ k=1,\dots,n\}. \end{eqnarray*} \end{theorem} \begin{proof} It is sufficient to prove the assertion for a generic $v\in{\mathord{\mathfrak{d}}}$. Clearly, $p_k$ are $K$-invariant. Hence, we may assume $v\in{\mathord{\mathfrak{a}}}$. Then, by (\ref{defa}) and (\ref{thhef}), \begin{eqnarray*} v=\sum\nolimits_{{\mathord{\alpha}}\in E} v_{\mathord{\alpha}}({\mathord{\mathsf{e}}}_{\mathord{\alpha}}+{\mathord{\mathsf{f}}}_{\mathord{\alpha}}), \end{eqnarray*} where $ v_{\mathord{\alpha}}\in{\mathord{\mathbb R}}$. According to (\ref{roots}) and (\ref{defds}), $[{\mathord{\mathsf{k}}},v]=\sum\nolimits_{{\mathord{\alpha}}\in E} iv_{\mathord{\alpha}}({\mathord{\mathsf{e}}}_{\mathord{\alpha}}-{\mathord{\mathsf{f}}}_{\mathord{\alpha}})$. Thus, \begin{eqnarray*} [v,[v,{\mathord{\mathsf{k}}}]]=\sum\nolimits_{{\mathord{\alpha}}\in E} 2iv_{\mathord{\alpha}}^2{\mathord{\mathsf{h}}}_{\mathord{\alpha}} \end{eqnarray*} due to (\ref{comms}). Also, (\ref{comms}) implies that $\mathop{\mathrm{ad}}\nolimits(i{\mathord{\mathsf{h}}}_{\mathord{\alpha}})$ keeps ${\mathord{\mathfrak{v}}}$ and has eigenvalues $0,\pm2i$ in it for each ${\mathord{\alpha}}\in E$. 
Therefore, ${\mathord{\mathfrak{v}}}$ is $\mathop{\mathrm{ad}}\nolimits([v,[v,{\mathord{\mathsf{k}}}]])$-invariant and its eigenvalues are $\pm4v_{\mathord{\alpha}}^2i$, ${\mathord{\alpha}}\in E$. Since $\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}}){\mathord{\mathfrak{g}}}={\mathord{\mathfrak{d}}}$ and $\pi(v){\mathord{\mathfrak{d}}}={\mathord{\mathfrak{a}}}$ for a generic $v\in{\mathord{\mathfrak{a}}}$, the space ${\mathord{\mathfrak{v}}}$ is $a(v)$-invariant; moreover, $a(v){\mathord{\mathfrak{g}}}=\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}}){\mathord{\mathfrak{a}}}\subseteq{\mathord{\mathfrak{v}}}$. Thus, $a(v)$ has eigenvalues $0,\pm4v_{\mathord{\alpha}}^2$ in ${\mathord{\mathfrak{g}}}$. According to Lemma~\ref{normk} and (\ref{mukde}), \begin{eqnarray}\label{pkmuk} p_k(v)=4^k\mu_k^2(v) \end{eqnarray} for $v\in{\mathord{\mathfrak{v}}}$ and $k=1,\dots,n$. Since $p_k$ and $\mu_k$ are $K$-invariant, (\ref{pkmuk}) holds for all $v\in{\mathord{\mathfrak{d}}}$. The theorem follows from Proposition~\ref{hulfe}. \end{proof} \begin{corollary} Functions $p_k$, $k=1,\dots,n$, are plurisubharmonic in ${\mathord{\mathfrak{d}}}$ with respect to the complex structure $j=\mathop{\mathrm{ad}}\nolimits({\mathord{\mathsf{k}}})$. \end{corollary} \begin{proof} By (\ref{pkmuk}) and (\ref{extmu}), \begin{eqnarray*}
p_k(z)=4^k\sup\{|(gz)_{1}^2\dots (gz)_{k}^2|:\,g\in K\}. \end{eqnarray*} The right-hand side is plurisubharmonic, since the functions $z_k^2$ are $j$-holomorphic and $j$ is $K$-invariant. \end{proof} One can get the same functions $p_k$ by replacing ${\mathord{\mathfrak{g}}}$ with ${\mathord{\mathfrak{d}}}$, endowed with the complex structure $j$, and $a(v)$ with $\mathop{\mathrm{ad}}\nolimits([v,jv])(\pi(v)+\pi(jv))$.
\end{document} | arXiv | {
"id": "0704.1095.tex",
"language_detection_score": 0.5585522651672363,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\titlerunning{Properties of almost complete local revivals} \authorrunning{Ermakov}
\title{Generalized Almost Complete Revivals in quantum spin chains}
\author{\firstname{I.V.}~\surname{Ermakov}} \email[E-mail: ]{ermakov1054@yandex.ru} \affiliation{Skolkovo Institute of Science and Technology, Skolkovo Innovation Center 3, Moscow 143026, Russia.} \affiliation{Department of Mathematical Methods for Quantum Technologies, Steklov Mathematical Institute of Russian Academy of Sciences, 8 Gubkina St., Moscow 119991, Russia.} \affiliation{Laboratory for the Physics of Complex Quantum Systems, Moscow Institute of Physics and Technology, Institutsky per. 9, Dolgoprudny, Moscow region, 141700, Russia}
\received{May 03, 2022}
\begin{abstract} The concept of almost complete revivals has been introduced recently. In a quantum many-body system a local observable may exhibit an almost complete revival to its maximal value at a predetermined moment of time. In this paper we extend the original procedure such that the revival may be from an arbitrary point on the Bloch sphere to another arbitrary point. Furthermore, in the proposed procedure the reviving and collapsing sites are not necessarily the same. We also demonstrate that for spins $S$ higher than $1/2$ almost complete revivals are suppressed as $1/S$. \end{abstract} \subclass{81Q80} \keywords{Thermalization, revivals, delayed disclosure of a secret, spin chains}
\maketitle
\section{\label{s1_intro} Introduction }
Local observables of non-integrable quantum many-body systems are expected to quickly reach their equilibrium values. This process of reaching thermal equilibrium is also known as thermalization. It is also usually implied that once some observable has reached thermal equilibrium it stays there practically forever, exhibiting only meaningless fluctuations about its equilibrium value. There is, however, a number of physical mechanisms by which thermalization is either slowed down or not present at all. Among such mechanisms are many-body localization \cite{nandkishore2015many}, spin-glasses \cite{rademaker2020slow,stein2013spin}, systems with long-range interactions \cite{gong2013prethermalization,neyenhuis2017observation} or systems with constraints \cite{turner2018weak,bernien2017probing,lin2019exact}. All these mechanisms, however, are associated with the properties of the system under consideration; an alternative approach is to consider specially designed initial states without putting significant restrictions on the system itself.
In our recent work \cite{ermakov2021almost} we proposed a mechanism of constructing special initial states such that the selected local observable after relaxation exhibits almost complete revival (ACR) to its initial value at the predetermined moment of time. In a closed many-body system $C=A\cup B$ for a small subsystem $A$ in a pure state the rest of the system $B$ usually serves as a thermal reservoir, which leads to the equilibration of $A$. However, the reservoir $B$ can be finely tuned such that at some predetermined revival time $\tau$, subsystem $A$ will be out of equilibrium. So in fact the reservoir can work both ways: thermalize the subsystem or, on the contrary, push it out of equilibrium. The latter, however, is an exponentially rare event, yet if one has access to all the degrees of freedom of $B$, then $B$ can be tuned such that this event will take place at any desired time $\tau$.
In \cite{ermakov2021almost} we also suggested several applications of ACR to benchmarking of quantum simulators. In particular, successful preparation of an ACR state can be proved with only one local measurement. We also proposed to utilize ACR for entanglement assisted sensing and delayed disclosure of a secret. The latter is a scheme which allows one to encode a piece of information into the quantum system such that this information will be accessible only in the short vicinity of the revival moment $\tau$, see Figure \ref{clocks}. Any attempt to extract the information earlier will lead to its permanent destruction. In this regard a system of 5--15 qubits prepared in the ACR state represents some sort of ``quantum time capsule'' which cannot be opened before some specified time.
\begin{figure}
\caption{Schematic illustration of delayed disclosure of a secret. The information is available only at two moments of time $t=0$ and $t=\tau$. Any attempt to extract the information before the $\tau$ leads to its destruction. }
\label{clocks}
\end{figure}
In \cite{ermakov2021almost} we considered only revivals of spins polarized along the $z$-axis to the same value. In the present paper we provide a general prescription on how to construct an ACR from an arbitrary point of the Bloch sphere to another arbitrary point. Furthermore, the collapsing and reviving spins need not coincide. In this regard, it is not a revival of some local observable as such, but rather a recovery of purity in a local subsystem.
We also consider spin chains with spin $S>1/2$, and show that in this case ACR are suppressed by a factor of $1/S$. This result is in agreement with the classical picture, where ACR are impossible due to the fact that classical spin chains are, in general, chaotic.
\section{General revival scheme in chains of spin 1/2}
In this section we generalize the prescription offered in \cite{ermakov2021almost} such that the initial and revived values of selected local spin are both arbitrary points on a Bloch Sphere. Furthermore, we show that the revival may occur on a site different from initial one.
Let us consider a lattice of $L$ interacting spins $1/2$. Each spin is described by the operators $\{S^\alpha_i\}$, where $i$ is the lattice index and $\alpha=x,y,z$ is the spin projection index. As the collapsing observable we pick the spin $\vec{S}_q$, and $\vec{S}'_p$ as the reviving one. Notice that while the values of $q$ and $p$, as well as $\vec{S}_q$ and $\vec{S}'_p$, are different in general, there is no restriction preventing them from coinciding.
The one-spin Hilbert space of site $i$ is spanned by the basis states $|1_i\rangle$ and $|0_i\rangle$, such that $\langle 1_i|S^z_i| 1_i\rangle=1/2$ and $\langle 0_i|S^z_i| 0_i\rangle=-1/2$. Let us introduce the basis $\mathcal{B}=\{|\varphi_j\rangle\}^{2^L}_{j=1}$, where $|\varphi_j\rangle$ is a many-body basis vector. Let us use the following ordering for the basis $\mathcal{B}$:
\begin{align}\label{basis05}
\mathcal{B}=\{|1_1 \ 1_2 \dots 1_L\rangle, |1_1 \ 1_2 \dots 0_L\rangle, \ \dots \ , |0_1 \ 0_2 \dots 1_L\rangle,|0_1 \ 0_2 \dots 0_L\rangle\}. \end{align}
The choice of basis determines the further construction of ACR states. In our case it is convenient to quantize the basis along the $z$-axis and order it such that each many-body vector $|\varphi_j\rangle$ corresponds to the base-$2$ form of an integer $j$.
Let us use the following parametrization of the ACR state:
\begin{align}\label{ap1}
|\Phi_\text{ACR}(0)\rangle=\sum\limits^{2^{q-1}}_{k=1}\sum\limits^{2^{L-q}}_{n=1}\left(A_{s(k,n)}|\varphi_{s(k,n)}\rangle+\alpha A_{s(k,n)}|\varphi_{s(k,n)+2^{L-q}}\rangle\right), \end{align} here $q$ is the collapsing site, $\alpha$ is a complex parameter defining the state of the collapsing site on the Bloch sphere, and the function $s(k,n)$ is defined as:
\begin{align}\label{sknf} s(k,n)=2^{L-q+1}(k-1)+n. \end{align}
The ansatz (\ref{ap1}) for the initial wavefunction guarantees that it has a form of tensor product $|\Phi_\text{ACR}(0)\rangle=|l_q\rangle\otimes|\Psi_\text{res}\rangle$, where $|l_q\rangle$ is the wavefunction of the $q$-th site which has the form:
\begin{align}\label{ap2}
|l_q\rangle=\frac{|0_q\rangle+\alpha|1_q\rangle}{\sqrt{1+|\alpha|^2}}, \end{align}
$|\Psi_\text{res}\rangle$ describes the rest of the system; we refer to $|\Psi_\text{res}\rangle$ as a ``reservoir''. The parameter $\alpha$ is a complex number which determines the position of the $q$-th spin on the Bloch sphere as below:
\begin{align}
\langle S^x_q\rangle=\frac{\text{Re}\alpha}{1+|\alpha|^2}, \qquad \langle S^y_q\rangle=\frac{\text{Im}\alpha}{1+|\alpha|^2}, \qquad \langle S^z_q\rangle=-\frac{1}{2}\cdot\frac{1-|\alpha|^2}{1+|\alpha|^2}.\nonumber \end{align}
Let us take a closer look at the expression (\ref{ap1}). In general, a wavefunction in the form of a tensor product $|l_q\rangle\otimes|\Psi_\text{res}\rangle$ has $2^{L-1}$ independent parameters $\mathcal{A}=\{\{A_{s(k,n)}\}^{2^{L-q}}_{n=1}\}^{2^{q-1}}_{k=1}$. The function (\ref{sknf}) gives us pairs of basis vectors $|\varphi_{s(k,n)}\rangle$ and $|\varphi_{s(k,n)+2^{L-q}}\rangle$ such that these two vectors are identical on all but the $q$-th site. For example, if we set $q=L$, then for $k=1$ we will have $|\varphi_{s(1,1)}\rangle=|1_1 \ 1_2 \dots 1_L\rangle$ and $|\varphi_{s(1,1)+1}\rangle=|1_1 \ 1_2 \dots 0_L\rangle$; taking all the $k=\overline{1,2^{q-1}}$ we will end up with $2^{L-1}$ basis vectors which are different only on the $L$-th site. Now, to have the $q$-th spin in the pure state (\ref{ap2}) we need to demand that $A_{s(k,n)+2^{L-q}}=\alpha A_{s(k,n)}$. If this condition is satisfied, then the initial wavefunction always has the form of a tensor product $|\Phi_\text{ACR}(0)\rangle=|l_q\rangle\otimes|\Psi_\text{res}\rangle$ for any set of parameters $\mathcal{A}$. In Figure \ref{scheme} (a) we schematically illustrate the ansatz (\ref{ap1}).
\begin{figure}
\caption{(a) Schematic representation of the ansatz (\ref{ap1}) in the many-body Hilbert space. (b) Example of the revival conditions (\ref{cond2}) in the case $q=p=1$ and $\vec{S}_1=\vec{S}'_1=(0,0,1/2)$. The matrix $\hat{V}$ coincides with the bottom-left submatrix $u$ of size $2^{L-1}$; the states (\ref{ap1}) and (\ref{ap3}) have a simple structure in the basis (\ref{basis05}). }
\label{scheme}
\end{figure}
The set of parameters $\mathcal{A}$ defines the state of the reservoir $|\Psi_\text{res}\rangle$. If all the parameters are chosen randomly, then the $q$-th spin will quickly entangle with the reservoir and will remain entangled virtually forever; the same applies to all other spins. Our goal is to choose a set $\mathcal{A}$ such that at the specified ``revival'' time $\tau$ the wavefunction splits into a tensor product again, $|\Phi_\text{ACR}(\tau)\rangle=|\bar{l}_p\rangle\otimes|\bar{\Psi}_\text{res}\rangle$.
Let us demand:
\begin{align}\label{ap3}
|\Phi_\text{ACR}(\tau)\rangle&=e^{-iH\tau}|\Phi_\text{ACR}(0)\rangle=\nonumber\\
&\sum\limits^{2^{p-1}}_{k=1}\sum\limits^{2^{L-p}}_{n=1}\left(C_{\bar{s}(k,n)}|\varphi_{\bar{s}(k,n)}\rangle+\beta C_{\bar{s}(k,n)}|\varphi_{\bar{s}(k,n)+2^{L-p}}\rangle\right), \end{align}
this wavefunction has a form similar to (\ref{ap1}), with the difference that now it is the $p$-th spin that is in a pure state. The state $|\bar{l}_p\rangle$ is parametrized by the complex number $\beta$ similarly to (\ref{ap2}):
\begin{align}
|l_q\rangle=\frac{|0_q\rangle+\beta|1_q\rangle}{\sqrt{1+|\beta|^2}},\nonumber \end{align} we also modified $s(k,n)$ to $\bar{s}(k,n)=2^{L-p+1}(k-1)+n$, to have a revival on $p$-th site.
To find such set $\mathcal{A}$ that (\ref{ap3}) is satisfied we need to know the full form of the evolution operator at the revival moment $u\equiv e^{-iH\tau}$. Let us take a look at the full form of the condition (\ref{ap3}):
\begin{align}\label{apsys} &u_{1,1}A_1+\cdots+u_{1,2^L}A_{2^L}=C_1\nonumber\\ &u_{2,1}A_1+\cdots+u_{2,2^L}A_{2^L}=C_2\nonumber\\ &\vdots \\ &u_{2^L,1}A_1+\cdots+u_{2^L,2^L}A_{2^L}=C_{2^L}.\nonumber \end{align}
By substituting $A_{s(k,n)+2^{L-q}}=\alpha A_{s(k,n)}$ into (\ref{apsys}), for $n=\overline{1,2^{L-q}}$ and $k=\overline{1,2^{q-1}}$, we eliminate $2^{L-1}$ variables from the system (\ref{apsys}). We can also eliminate $2^{L-1}$ equations from (\ref{apsys}) by using the fact that $C_{\bar{s}(k,n)+2^{L-p}}=\alpha C_{\bar{s}(k,n)}$. Thus we obtain a set of conditions:
\begin{align}\label{cond1} \hat{V}\mathcal{A}=0, \end{align} where the matrix $\hat{V}$ is given by:
\begin{align}\label{ap4m} V_{ki}=u_{d[k],\bar{d}[i]}-\beta^{-1} u_{d[k]+2^{L-p},\bar{d}[i]}+\alpha u_{d[k],\bar{d}[i]+2^{L-q}}-\alpha\beta^{-1}u_{d[k]+2^{L-p},\bar{d}[i]+2^{L-q}}, \end{align}
where the indices $k,i=\overline{1,2^{L-1}}$, and the sets of indices $d$ and $\bar{d}$ are ordered sets:
\begin{align}\label{setsindex} &d=\{\{s(k,n)\}^{2^{q-1}}_{k=1}\}^{2^{L-q}}_{n=1},\nonumber\\ &\bar{d}=\{\{\bar{s}(k,n)\}^{2^{p-1}}_{k=1}\}^{2^{L-p}}_{n=1}. \end{align}
If the matrix (\ref{ap4m}) is degenerate, then (\ref{cond1}) has a nontrivial solution and therefore (\ref{ap3}) is satisfied exactly. However, we argued in \cite{ermakov2021almost} that in the case of an interacting non-integrable Hamiltonian the matrix (\ref{ap4m}) must always be non-degenerate. In this case the only solution is $\mathcal{A}=0$, which is irrelevant. Let us therefore allow one equation from (\ref{cond1}) to have a non-zero right-hand side
\begin{align}\label{cond2} \hat{V}\mathcal{A}=(\delta,0,\dots,0)^T, \end{align}
here $\delta$ is a parameter which will be determined from normalization condition on (\ref{ap2}).
Let us assume that a typical matrix element of $\hat{V}$ has absolute value $|V_{ki}|\sim 1/\sqrt{2^L}$ and a largely random phase. In this case we can estimate the typical values of $\mathcal{A}$ and $\mathcal{C}$ as $|A_0|\sim 1/\sqrt{2^{L-1}}$ and $|C_0|\sim 1/\sqrt{2^{L-1}}$. Now if we substitute these typical values into the left-hand side of the first equation in (\ref{cond2}) we obtain $\delta\sim 1/\sqrt{2^{L-1}}$. This is a key assumption for the existence of ACR; we will discuss it further in the paper.
Let us formulate the procedure of ACR construction as step by step algorithm. In order to construct ACR one needs to:
\begin{enumerate}
\item Pick a collapsing $q$ and reviving $p$ sites.
\item Choose a position of collapsing $\vec{S}_q$ and reviving $\vec{S}'_p$ spins on a Bloch Sphere. Determine corresponding parameters $\alpha$ and $\beta$.
\item Compute sets of indexes $d$ and $\bar{d}$ (\ref{setsindex}).
\item Compute matrix (\ref{ap4m}).
\item Solve set of equations (\ref{cond2}). In practice one can set $\delta=1$ to obtain a non-normalized solution.
\item When set of parameters $\mathcal{A}$ is determined, construct wavefunction (\ref{ap1}) and normalize it. \end{enumerate}
In Figure \ref{scheme} (b) we illustrate a particular example of obtaining the conditions (\ref{cond2}). In this example we imply that $p=q$ and that $\vec{S}_q=\vec{S}'_p=(0,0,1/2)$, which corresponds to $\alpha,\beta\rightarrow\infty$ or simply $|l_q\rangle=|1_q\rangle$. In this case the matrix (\ref{ap4m}) is simply the bottom-left block of size $2^{L-1}$ and the wavefunctions (\ref{ap1}) and (\ref{ap3}) are easy to construct. This example corresponds to the one considered in \cite{ermakov2021almost}.
\subsection{Example of ACR}
Let us now construct ACR for the Hamiltonian:
\begin{align}\label{hamhuse}
H_1= & \sum_{j=1}^L\left(g\sigma^x_j+h\sigma^z_j+J\sigma^z_j\sigma^z_{j+1} \right), \end{align} here the parameters $(g,h,J)=(0.9045,0.8090,1)$ are used. Periodic boundary conditions $\sigma^\alpha_i=\sigma^\alpha_{i+L}$ are imposed. For the system sizes where exact diagonalization is available, it was tested thoroughly in \cite{kim2013ballistic,kim2014testing} that this Hamiltonian is in great agreement with the Eigenstate Thermalization Hypothesis (ETH) \cite{deutsch1991quantum,srednicki1994chaos}.
Consider the initial state $|\Phi_\text{ACR}(0)\rangle=|l_1\rangle\otimes|\Psi_\text{res}\rangle$, here $\alpha=i$ and therefore $\vec{S}_1=(0,1/2,0)$. Let us set the revival time as $\tau=10$, and pick $p=5$ and $\beta=-\sqrt{2/9}-1/3i$. By solving the system of equations (\ref{cond2}) for the system of $L=12$ spins we find such $|\Phi_\text{ACR}(0)\rangle$ that $\vec{S}_5=(-0.353611,-0.249662,0.249849)$, see Fig. \ref{ap_huse_fig}. The norm $|\vec{S}_5|=0.499797$ is close to $1/2$, therefore the $5$-th spin is almost in a pure state at the revival moment. We showed in \cite{ermakov2021almost} that the discrepancy between a perfect revival and ACR vanishes exponentially with the system size.
\begin{figure}
\caption{Time evolution of local observables $S^\alpha_m$ for the Hamiltonian (\ref{hamhuse}). The dynamics on the 1st site $m=1$ is shown in (a) and of the 5th $m=5$ in (b). Revival time $\tau=10.0$, system size $L=12$, $\alpha=i$, $\beta=-\sqrt{2/9}-1/3i$. }
\label{ap_huse_fig}
\end{figure}
\section{Higher spins}~
\begin{figure}
\caption{Time evolution of the local observables $\langle \bar{S}^\alpha_1\rangle=\langle S^\alpha_1\rangle/S$, for the Hamiltonian (\ref{ap_hams}) for different quantum spins $S=\frac{1}{2},1,\frac{3}{2},2$. Green line corresponds to the $z$ projection, dotted blue and orange lines to $x$ and $y$ correspondingly. Revival time $\tau=5$. }
\label{difs}
\end{figure}
In this section we apply the mechanism of ACR construction for the case of quantum spins $S$ higher than $\frac{1}{2}$. Let us consider the case when collapsing and reviving sites coincide $q=p=1$ and when $|l_1\rangle=|1_1\rangle$. Let us consider the Hamiltonian:
\begin{align}\label{ap_hams}
H_2= & \sum_{j=1}^L\left(J_x\, S_j^x S_{j+1}^x +J_y\,S_j^y S_{j+1}^y \right)\nonumber\\
& +\sum_{j=1}^L\left(h_x\, S_j^x+h_y\, S_j^y \right), \end{align}
Periodic boundary conditions are imposed and the parameters $(J_x,J_y,h_x,h_y)=(-2.0,-4.0,2.2,2.2)$ are used. Since this Hamiltonian acts in the XY plane, the equilibrium values of $\langle S^z_j\rangle$ are zero. The Hamiltonian (\ref{ap_hams}) is far from integrability, as evidenced by energy-level-spacing statistics \cite{atas2013distribution}.
For spins $S$ it is convenient to order the basis $\mathcal{B}_S=\{|\varphi^S_j\rangle\}^{g^L}_{j=1}$ as $\mathcal{B}_S=\{g^L_g,...,2_g,1_g,0_g\}$ here $g=2S+1$ and $j_g$ is a base-$g$ form of an integer $j$. The initial state $|\Phi_\text{ACR}(0)\rangle$ has the form: \begin{align}\label{psiinS}
|\Phi_\text{ACR}(0)\rangle=\sum\limits^{g^{L-1}}_{n=1}A_n|\varphi^S_n\rangle. \end{align} The conditions for the observation of ACR in the basis $\mathcal{B}_S$ change to:
\begin{align}\label{sysS} &\sum\limits^{g^{L-1}}_{n=1}u_{g^{L-1}(g-1)+1,n}A_n=\delta\nonumber \\ &\sum\limits^{g^{L-1}}_{n=1}u_{g^{L-1}(g-1)+2,n}A_n=0\nonumber \\ &\cdots \\ &\sum\limits^{g^{L-1}}_{n=1}u_{g^L,n}A_n=0.\nonumber \end{align}
The system (\ref{sysS}) has $g^{L-1}$ variables and equations, with $\delta\neq 0$ to be determined from the normalization condition. The dimensionality of the Hilbert space is $\mathcal{D}=g^L$, and the system (\ref{sysS}) allows us to set to zero at the moment $\tau$ only $g^{L-1}-1$ of the coefficients. There are also $g^{L-1}(g-1)+1$ non-zero coefficients left in $e^{-iHt}|\Phi_\text{ACR}(0)\rangle$, so $\langle S^z_1(\tau)\rangle$ can not exhibit an almost complete revival for spins $S>\frac{1}{2}$. With increasing system size its revival value converges to:
\begin{align} \langle S^z_1(\tau)\rangle\simeq\frac{1}{2S}.\nonumber \end{align}
In Fig.~\ref{difs} we plot the time evolution of $\langle \bar{S}^\alpha_1\rangle=\langle S^\alpha_1\rangle/S$ for different values of the spin $S$. The value of $\langle\bar{S}^z_1(\tau)\rangle$ decreases with $S$, which is in agreement with the classical picture in which one cannot predict the trajectory for an arbitrary time if the system is chaotic.
\section{Discussion}~ We proposed a general scheme which allows one to construct a many-body state which is out of equilibrium on the level of local observables at two moments of time: the initial one $t=0$ and the revival one $t=\tau$.
The key assumption which we made in the paper is that the unitary which drives the system is similar to a random rotation in the many-body Hilbert space. In particular, we assumed that the absolute values $|u_{mn}|\sim 1/\sqrt{2^L}$ are all of the same order and have random phases which are uncorrelated. We believe that this assumption holds for any Hamiltonian which satisfies the ETH. We do not have a rigorous proof of this assumption. Nevertheless, if we assume that it is correct, then we can suggest a practically useful criterion of ACR reachability. Let us introduce the matrix participation ratio $\text{MPR}$ as
\begin{align}
\text{MPR}=\left(\sum\limits_{ij}|u_{ij}|^2\right)^2\Biggm/\sum\limits_{ij}|u_{ij}|^4,\nonumber \end{align}
if for a given unitary $u$ the value of $\text{MPR}\rightarrow 1$, then the ACR is reachable.
The unitary $u$ does not necessarily have to describe quantum evolution or be associated with any physical Hamiltonian. An interesting direction is to study ACR for more experimentally relevant unitaries. It is particularly interesting to look at unitaries which can be implemented on existing quantum computers.
\begin{acknowledgments} The work is supported by Basis Foundation (Grant No. 18-1-5-19-1). \end{acknowledgments}
\end{document}
\begin{document}
\title{
Position measurement and the Huygens-Fresnel principle:
A quantum model of Fraunhofer diffraction for polarized pure states
}
\author{Bernard Fabbro} \email[ ]{bernard.fabbro@cea.fr}
\affiliation
{IRFU,CEA, Universit\'e Paris-Saclay, 91191 Gif-sur-Yvette, France\\}
\begin{abstract}
In most theories of diffraction by a diaphragm, the amplitude of the diffracted
wave, and hence the position wave function of the associated particle, is
calculated directly without prior calculation of the quantum state.
Few models express the state of the particle to then deduce the position and
momentum wave functions related to the diffracted wave.
We present a model of this type for Fraunhofer diffraction.
The diaphragm is assumed to be a device for measuring
the three spatial coordinates of the particles passing through the aperture.
A matrix similar to the S-matrix of the scattering theory describes the process
which turns out to be more complex than a simple position measurement.
Some predictions can be tested.
The wavelet emission involved in the Huygens-Fresnel principle
occurs from several neighboring wavefronts instead of just one,
causing typical damping of the diffracted wave intensity.
An angular factor plausibly accounts for
the decrease in intensity at large diffraction angles,
unlike the obliquity factors of the wave optics theories.
The position measurement modifies the polarization states and for an incident
photon in an elliptically polarized pure state, the ellipse axes
can undergo a rotation which depends on the diffraction angles. \\ \\
{\bf Keywords:} {position measurement, Huygens-Fresnel principle,
Fraunhofer diffraction, S-matrix, large diffraction angles,
diffracted light polarization} \end{abstract}
\maketitle
\section{Introduction} {
Quantum mechanics is involved in many studies on diffraction.
Since the first quantum theory of Fraunhofer diffraction
by a grating \cite{EpsEhr}, several models have emerged,
using the formalism of path integrals \cite{PathInt,BaBas,Beau},
the calculation of trajectories in the framework of
hidden variable theories \cite{PhDH,SBMA}
or the resolution of the wave equation combined
with the use of the Kirchhoff integral \cite{Wub}.
In more recent studies, various topics are discussed such as the effects
of diffraction on the transmission of information
in quantum optical systems \cite{Lupo},
the role of the quantum behavior of the diaphragm electrons
in diffraction of light by a small hole \cite{JuKelb},
the interactions between the quantum states of different modes
in diffracted Gaussian beams \cite{XiLan},
the connection between orbital angular momentum transfer and helicity
in the diffraction of light \cite{Deepa}.
However, one question does not seem to have received much attention:
the possibility of starting from the postulates of quantum mechanics to treat
diffraction by a diaphragm as a consequence of a measurement of the position
of the particle associated with the wave as it passes through the aperture.
The first model based on this approach relates to the measurement
of one transverse coordinate and provides the same predictions as those
of wave optics for the case of Fraunhofer diffraction with slits \cite{Marcella}.
Afterward, several aspects of this model were discussed \cite{RotBou}.
More recently, quantum trajectories have been used to describe the motion
of the particle after the measurement of one transverse coordinate in a model
giving predictions for Fraunhofer and Fresnel diffractions by a slit \cite{JoMat}.
There does not seem to have been any other publications on this issue so far.
In the model presented below, we start from the observation
that the detection of a particle in the far field region beyond a diaphragm
provides a measurement of its momentum.
Then, we assume that the distribution of this momentum results
from a measurement of the three spatial coordinates of the particle
during its passage through the aperture and that this position measurement
has an effect on the polarization if the particle has spin.
The change in momentum and polarization is described
by a "diffraction matrix" similar to the S-matrix
of the scattering theory \cite{LLrel}.
Although this model only applies to the far field,
it nevertheless provides specific predictions
about the Huygens-Fresnel principle, the diffraction at large angles
and, in the case of light, the polarization of the photons
detected beyond the diaphragm.
We present the model in Sec. $\!\!\!$~\ref{sec:etcontex}.
Next, some predictions regarding intensity and polarization measurements
are described in Sec. $\!\!\!$~\ref{sec:predreli}.
Finally, we summarize in Sec. $\!\!\!$~\ref{sec:conclu}.
}
\section{Quantum model of Fraunhofer diffraction by an aperture} { \label{sec:etcontex}
\subsection{Measurement of quantities related to the detected particles.} { \label{sub:aspexp}
\subsubsection{Experimental setup and first assumptions.} {
The model applies for an experimental setup
with the following characteristics (Fig. \!\!~\ref{fig:dispexa}).
The diaphragm is a plane assumed to be of zero thickness and perfectly opaque.
The aperture, of finite area, can be of any shape
and possibly formed of several parts.
The origin of the laboratory frame of reference $(O;x,y,z)$ is located at
the aperture and the $(\Ox,\Oy)$ plane is that of the diaphragm.
The source is located on the $\cz$ axis.
Detectors placed beyond the diaphragm measure the local counting rate
and possibly the polarization.
The position of a detection point is denoted
by its radius-vector $\ensuremath{\mathbf{ d}}$.
\begin{figure}
\caption{Experimental setup and laboratory frame of reference
(right-handed coordinate system).}
\label{fig:dispexa}
\end{figure}
It is assumed that there is neither creation nor annihilation of particles
during the passage of the wave through the aperture. It is also assumed
that the particles are free when they move between the source and the diaphragm
and between the diaphragm and the detectors.
Moreover, we consider the case of a low intensity source
emitting either non-relativistic particles or photons.
We can then individually assign a quantum state to each non-relativistic
particle or a one-photon state of the electromagnetic field to each photon,
both for the incident wave and for the diffracted wave.
Finally, we suppose that the source-diaphragm and diaphragm-detector distances
are large enough for the aperture to be viewed as a point from the detectors
and for the incident wave to be close to a plane wave when it arrives
at the aperture.
For simplicity, this plane wave is supposed to be monochromatic
with the wave vector $\vkondini$ in the direction of the $\cz$ axis.
}
\subsubsection{Measurement of the momentum of the detected particles.} {
From the above assumptions and conditions, we can assign the momentum
$\hbar\vkondini$ to the incident particle and a momentum $\hbar\vkond$ such that:
\begin{eqnarray}\vkond \;=\;\frac{\mkond}{\distdipo}\;\ensuremath{\mathbf{ d}} \label{eq:detadn} \end{eqnarray}
to the particle detected at point of radius-vector $\ensuremath{\mathbf{ d}}$,
provided that the modulus $\mkond$ is measured.
However, no significant difference between the wavelength
of the diffracted wave and that of the incident wave
is observed in diffraction experiments with a diaphragm. Hence:
\begin{eqnarray}\mkond\,\simeq\,\mkondini, \label{eq:contrax} \end{eqnarray}
which is in accordance with kinematics because the particle transfers
a very small part of its energy to the diaphragm.
So it is not required to determine $\mkond$ by a special measurement.
Furthermore, the part of the diffracted wave returning from the aperture
to the region where the source is located is very weak.
For simplicity, we assume that the momentum of the particle associated
with the diffracted wave is always such that:
\begin{eqnarray} \kz\,>\,0. \label{eq:contray} \end{eqnarray}
The relations (\!\!~\ref{eq:detadn}), (\!\!~\ref{eq:contrax}) and
(\!\!~\ref{eq:contray}) imply that it is possible to measure
the momentum probability density function (PDF)
of the particle after its passage through the aperture
in the case of diffraction at infinity.
The measurement can be performed, for example,
by arranging detectors on a hemisphere of center $O$
and radius $\distdipo$ in the half-space $\cz>0$.
The radius must be such that $\ensuremath{\Delta}\ll\distdipo$,
where $\ensuremath{\Delta}$ is the size of the aperture,
otherwise (\!\!~\ref{eq:detadn}) cannot be used.
The Fraunhofer diffraction criterion, that is:
$\ensuremath{\Delta}^2/(\lgond\distdipo)\,\ll\,1$ \cite{BoWo,Soma,LLangdif},
is then satisfied if $\distdipo$ is large enough,
whatever the value of $\lgond/\ensuremath{\Delta}$.
}
\subsubsection{Measurement of the polarization of the detected particles.} {
The polarization measuring device (analyzer for photons, Stern and Gerlach
apparatus for atoms, etc...) is placed in front of the detector
which is located, given (\!\!~\ref{eq:detadn}), in the direction
of the momentum $\hbar\vkond$ of the detected particle.
The measurement therefore gives the probabilities of the eigenvalues
of the spin component on a quantization axis $\cZ[\vkond]$
which must be chosen with respect to a coordinate system
$\{\cx[\vkond],\cy[\vkond],\cz[\vkond]\}$ attached to the detected particle.
Finally, it is possible to measure, on a particle of spin $\modspin$,
the probability of finding the result $\spinz$ for its spin component
on a $\cZ[\vkond]$ axis {\it if} the measurement of its momentum gives
the result $\hbar\vkond$. It is therefore a {\it conditional} probability.
By convention, the coordinate system attached to the incident particle
is the laboratory frame of reference
(Fig. \!\!~\ref{fig:dispexa}) whose $\cz\equiv\cz[\vkondini]$ axis
is in the direction of the momentum $\hbar\vkondini$.
For the detected particle, we choose the coordinate system obtained
from the laboratory frame of reference by the rotation
$\oprotos(\eulapre,\eulanuy,0)$ where the Euler angles are defined according to
the $z-y-z$ convention, so that $\eulapre$ and $\eulanuy$ are
the azimuth and the polar angle, respectively, of $\vkond$. Hence:
\begin{eqnarray} \ilinp[\vkond]=\oprotos(\eulapre,\eulanuy,0)\,
\ilinp[\vkondini]\,,\hspace{0.25cm}\ilinp=\cx,\cy,\cz;
\hspace{0.5cm}\cz[\vkond]\parallel\vkond. \hspace{0.75cm} \label{eq:rotkok} \end{eqnarray}
The zero value of the third Euler angle defines a choice of the directions
of the $\cx[\vkond]$ and $\cy[\vkond]$ axes in the transverse plane to $\vkond$
such that the coordinate system attached to the detected particle
in the case $\eulapre=\eulanuy=0$ is coincident
with the laboratory frame of reference.
Two very different cases arise concerning the quantization axis. For a
non-relativistic particle, this axis can be chosen in any direction. There is
then an infinite number of possible $\cZ[\vkond]$ axes for each vector $\vkond$.
On the other hand, for a relativistic particle, the quantization axis
must be in the direction of the momentum because the only spin component
eigenstates are the helicity states \cite{LLrel}.
There is then only one possibility which is $\cZ[\vkond]=\cz[\vkond]$,
according to the above convention.
}
}
\subsection{Diffraction operator.} { \label{sub:diffrop}
\subsubsection{Measurement of the position of the incident particles.} {
Since it is possible to measure the momentum PDF and the polarization
of the particles associated with the diffracted wave at infinity,
we can consider the construction of a quantum model
whose purpose is to provide the expressions of these quantities.
The model proposed here is based on the assumption that each incident particle
undergoes a position measurement as it passes through the aperture.
The detection of a particle beyond the diaphragm can indeed be considered
as proof that it effectively passed through the aperture and was therefore
localized at this place during a short period of time with a precision
of the order of the size of the aperture \cite{Heisen}.
For simplicity, we consider that the localization occurs instantaneously.
We then assume that the source emits a particle at time $\tpre$,
that this particle passes through the aperture at time $\tmes$
and that it is detected at time $\tecr$.
The time $\tmes$ can then be interpreted as the time when the state of the
particle changes because of the position measurement performed by the diaphragm
and the purpose of the model is to build a {\it diffraction operator}
which describes this change of state.
}
\subsubsection{Using S-matrix theory formalism.} {
The quantum state of the particle of spin $\modspin$ at time $\temps$ is
assumed to be a pure state denoted $\debket\etat^{(\modspin)}(\temps)\finket$.
Since the incident wave is close to a monochromatic plane wave with wave vector
$\vkondini$ and given (\!\!~\ref{eq:contrax}), the incident particle and the
particle associated with the diffracted wave are
in an energy state close to the eigenstate of eigenvalue $\hbar\freqo$,
where $\freqo=\vlum\,(\,\hbar^{-2}\masse^2\vlum^2+{\mkondini}^2\,)^{1/2}$.
The initial and final states are therefore close to stationary states of the form:
\begin{eqnarray}\!\!\begin{array}{rlcl}\displaystyle
\debket\etat_{\textup{in}}^{(\modspin)}(\temps)\!\finket\!\!&=&
\exp(-\icmp\freqo\temps)\,\debket\etatm^{(\modspin)}_{\textup{in}}\finket\!\!,&
\;\,\tpre<\temps<\tmes,
\\ \displaystyle
\debket\etat_{\textup{out}}^{(\modspin)}(\temps)\!\finket\!\!&=&
\exp(-\icmp\freqo\temps)\,\debket\etatm^{(\modspin)}_{\textup{out}}\finket\!\!,&
\;\,\tmes<\temps<\tecr. \end{array} \hspace{0.55cm} \label{eq:etasymp} \end{eqnarray}
where $\debket\etatm^{(\modspin)}_{\textup{in}}\finket$ and
$\debket\etatm^{(\modspin)}_{\textup{out}}\finket$ are time-independent states.
Since time dependence only appears in global phase factors,
knowing the exact values of $\tpre$, $\tmes$ and $\tecr$ is not essential
and, as in the S-matrix theory, we consider a diffraction operator
$\opedif^{(\modspin)}$ which projects the initial time-independent state
on the final time-independent state (called "initial state" and "final state"
in the following). The change of state is expressed by:
\begin{eqnarray} \debket\etatm^{(\modspin)}_{\textup{out}}\finket=
\left[\,\norm^{(\modspin)}\,\right]^{-1/2}\; \opedif^{(\modspin)}
\debket\etatm^{(\modspin)}_{\textup{in}}\finket, \label{eq:fgenera} \end{eqnarray}
where $\norm^{(\modspin)}$ is the normalization factor:
\begin{eqnarray}\norm^{(\modspin)}\equiv
\debvalop\etatm^{(\modspin)}_{\textup{in}}
\midvalopa\opedif^{(\modspin)}\adjoint\,
\opedif^{(\modspin)}\midvalopb\etatm^{(\modspin)}_{\textup{in}}\finvalop. \label{eq:fgenerb} \end{eqnarray}
All the information on the "particle-diaphragm interaction"
is contained in the matrix elements of the diffraction operator
from which we can get the transition amplitudes between the initial state
and the final momentum and spin component eigenstates.
Since we only consider one-particle states, these eigenstates
are represented by the state vectors:
\begin{eqnarray}\begin{array}{rlll} \opcrea(\vkond)\ketvide
&\!\!=\!\!& \debket\vkond\finket,
\hspace{0.25cm} & \modspin = 0,
\\
\opcrea\!\left(\vkond,[\spinz]_{\cZ[\vkond]}\right)\ketvide
&\!\!=\!\!& \debket\vkond\finket\!\!\ptens\!\debket\spinz\finket\irkeoq,
\hspace{0.25cm} & \modspin\neq 0.
\end{array}\hspace{0.75cm} \label{eq:onepsa} \end{eqnarray}
where $\ketvide$ is the vacuum state,
$\opcrea\!\left(\vkond,[\spinz]_{\cZ[\vkond]}\right)$
is the creation operator of a particle of momentum $\hbar\vkond$
and spin component $\spinz$ on the quantization axis $\cZ[\vkond]$
and $\debket\spinz\finket\irkeoq$ is the eigenstate
of spin component $\spinz$ on $\cZ[\vkond]$.
The initial state is given by:
\begin{eqnarray}\debket\etatm^{(\modspin)}_{\textup{in}}\finket= \left\{ \begin{array}{lll}\displaystyle \debket\vkondini\finket
&\hspace{0.5cm}\mbox{if}& \modspin=0
\\ \displaystyle \debket\vkondini\finket\!\ptens\!
\debket\kisin\finket
&\hspace{0.5cm}\mbox{if}& \modspin\neq 0,
\end{array}\right.\hspace{0.75cm} \label{eq:onepsi} \end{eqnarray}
where $\debket\kisin\finket$ is the initial state of spin polarization
prepared with the amplitudes $\ilkoeoq\debpscal\spinz\midpscal\kisin\finpscal$.
}
\subsubsection{Structure of the diffraction operator.} {
From (\!\!~\ref{eq:fgenera}) and (\!\!~\ref{eq:onepsi}),
the non-normalized final state for a particle without spin is expressed by:
\begin{eqnarray} \opedif^{(0)}\!\debket\etatm^{(0)}_{\textup{in}}\finket
\!\!= \opedif^{(0)}\!\debket\vkondini\finket\!\!=\!\!\inttrp\!\differ^3\mkond
\,\debket\vkond\finket\!\!\debvalop\!\vkond\!\midvalopa\opedif^{(0)}
\midvalopb\!\vkondini\!\finvalop\!\!.\hspace{0.75cm} \label{eq:onepsf} \end{eqnarray}
To generalize this expression to the case of a particle of non-zero spin,
we rely on the following observation.
For the photon, the quantization axis is in the direction
of the momentum and the eigenvalue zero of the spin component
is impossible \cite{LLrel}.
Therefore, the change in the direction of the momentum of the photon
due to diffraction causes a modification of its spin polarization
so that this impossibility of the eigenvalue zero is preserved.
More generally, we assume that for any particle, the momentum exchange
with the diaphragm causes a specific change in spin polarization.
The change in polarization corresponds to a rearrangement of the spin
component wave functions and therefore results from the action of a
unitary rotation operator.
So we are led to assume that if the measurement of the momentum
of the detected particle gives the result $\hbar\vkond$ then
the probabilities of the results of a simultaneous measurement of the
spin component correspond to a polarization state which depends
on $\vkond$ in the form:
\begin{eqnarray} \debket\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\finket
=\oprotsp^{(\modspin)}[\,\eulera(\vkond),\eulerb(\vkond),\eulerc(\vkond)\,]
\,\debket\kisin\finket\!,\hspace{0.75cm} \label{eq:rotinid} \end{eqnarray}
where $\oprotsp^{(\modspin)}[\,\eulera(\vkond),\eulerb(\vkond),
\eulerc(\vkond)\,]$ is the {\it operator of the spin rotation
associated with the momentum transfer $\hbar\vkondini\rightarrow\hbar\vkond$}.
The state $\debket\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\finket$ is in some way the "conditional state"
of polarization associated with the momentum eigenstate $\debket\vkond\finket$.
The Euler angles $\eulea_{\entj}(\vkond)$ are defined with respect to
the quantization axis $\cZ[\vkondini]$ and are three parameters of the model.
They are functions of $\vkond$, not known a priori.
They also depend on $\vkondini$ and possibly on other parameters
as for example the spin of the particle: $\eulea_{\entj}(\vkond)\equiv
\eulea_{\entj}^{\vkondini,\modspin,...}(\vkond)$.
An additional assumption is needed to generalize (\!\!~\ref{eq:onepsf}).
For a spinless particle, the position and momentum wave functions are Fourier
transforms of each other. In the case of diffraction with a diaphragm,
the shape of the final momentum distribution is therefore determined
by the shape of the aperture.
We assume that this determination is the same if the particle has spin,
so that the final momentum distribution of a particle with spin
is the same as that of a spinless particle that would have the same energy.
There do not seem to be any experimental facts invalidating this assumption.
The easiest way to generalize (\!\!~\ref{eq:onepsf}) taking into account
(\!\!~\ref{eq:onepsi}), (\!\!~\ref{eq:rotinid}) and the additional assumption
above is to express the action of $\opedif^{(\modspin)}$ on the initial state
in the following form
(we use the notation $\oprotsp^{(\modspin)}(\vkond)$ instead of
$\oprotsp^{(\modspin)}[\,\eulera(\vkond),\eulerb(\vkond),\eulerc(\vkond)\,]$
for simplicity and we insert the identity operator
$\sum_{\spinz} \debket\spinz\finket\irkeoq\;\ilkeoq\debbra\spinz\finbra$):
\begin{eqnarray}\begin{array}{l}\displaystyle
\opedif^{(\modspin)}\debket\etatm^{(\modspin)}_{\textup{in}}\finket
=\;\opedif^{(\modspin)}\left(\,\debket\vkondini\finket\!\! \ptens\!
\debket\kisin\finket\right)
\\ \displaystyle \hspace{0.35cm}
=\!\inttrp\!\differ^3\mkond\debket\vkond\finket\!\debvalop\vkond\midvalopa
\opedif^{(0)}\midvalopb\vkondini\finvalop\!\ptens\!\debket\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\finket
\\ \displaystyle \hspace{0.35cm}
=\!\inttrp\!\differ^3\mkond\debket\vkond\finket\!
\debvalop\vkond\midvalopa\opedif^{(0)}\midvalopb\vkondini\finvalop \\ \displaystyle \hspace{0.75cm}
\ptens \sum_{\spinz}\debket\spinz\finket\irkeoq\;\ilkeoq\debvalop\spinz
\midvalopa\oprotsp^{(\modspin)}(\vkond)\midvalopb\kisin\!\finvalop\!\!,
\;\;\;\;\modspin \neq 0. \end{array}\hspace{0.75cm} \label{eq:onepsg} \end{eqnarray}
From (\!\!~\ref{eq:fgenera}), (\!\!~\ref{eq:onepsi}), (\!\!~\ref{eq:onepsf})
and (\!\!~\ref{eq:onepsg}), the final state is a linear combination of the
momentum and spin component eigenstates given by (\!\!~\ref{eq:onepsa}) and
the diffraction operator is:
\begin{eqnarray}\!\opedif^{(\modspin)}=\left\{\begin{array}{cl}\displaystyle
\opedif^{(0)}&\;\mbox{ if }\modspin=0
\\ \displaystyle
\inttrp\!\differ^3\mkond\,\debket\vkond\finket\!\!\debbra\vkond\finbra
\opedif^{(0)}\!\!\ptens\!\oprotsp^{(\modspin)}(\vkond)&\;
\mbox{ if }\modspin\neq 0.
\end{array}\right.\hspace{0.75cm} \label{eq:onepsp} \end{eqnarray}
The operator $\opedif^{(0)}$ will be called {\it "momentum part"}
of the diffraction operator $\opedif^{(\modspin)}$.
}
\subsubsection{General expressions of the final amplitudes and probabilities.} {
From (\!\!~\ref{eq:rotinid}) and since $\oprotsp^{(\modspin)}(\vkond)$
is unitary:
\begin{eqnarray} \debpscal\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\midpscal\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\finpscal
=\debpscal\kisin\midpscal\kisin\finpscal=1. \hspace{0.5cm} \label{eq:rotuncn} \end{eqnarray}
From (\!\!~\ref{eq:fgenerb}) into which we substitute
(\!\!~\ref{eq:onepsf}) (if $\modspin=0$) or (\!\!~\ref{eq:onepsg})
(if $\modspin\neq 0$) and given (\!\!~\ref{eq:rotuncn}),
we find that the normalization factor is independent of the spin:
\begin{eqnarray} \norm^{(\modspin)}\equiv\norm
=\!\int\!\differ^3\mkond \,\left|\debvalop\vkond\midvalopa\opedif^{(0)}
\midvalopb\vkondini\finvalop\right|^2\hspace{0.25cm}\forall\modspin. \hspace{0.75cm} \label{eq:fgenerw} \end{eqnarray}
If $\modspin=0$, the probability amplitude to detect the particle
with momentum $\hbar\vkond$ is obtained by substituting (\!\!~\ref{eq:onepsf})
into (\!\!~\ref{eq:fgenera}). Given (\!\!~\ref{eq:onepsi}) and
(\!\!~\ref{eq:fgenerw}), this leads to:
\begin{eqnarray} \debpscal\vkond\midpscal\etatm^{(0)}_{\textup{out}}\finpscal=
\norm^{-1/2}\debvalop\vkond\midvalopa\opedif^{(0)}\midvalopb\vkondini
\finvalop\!. \hspace{0.75cm} \label{eq:finstc} \end{eqnarray}
The PDF to detect the particle with momentum $\hbar\vkond$ is:
\begin{eqnarray} \pdfp^{(0)}_{\vavkond}(\vkond)=\left|\debpscal\!\vkond\!
\midpscal\etatm^{(0)}_{\textup{out}}\finpscal\right|^{2}. \label{eq:fgeners} \end{eqnarray}
If $\modspin\neq 0$, the probability amplitude to detect the particle
with momentum $\hbar\vkond$ and spin component $\spinz$
on the $\cZ[\vkond]$ axis is obtained by substituting (\!\!~\ref{eq:onepsg})
into (\!\!~\ref{eq:fgenera}). Given (\!\!~\ref{eq:fgenerw}) and
(\!\!~\ref{eq:finstc}), this leads to:
\begin{eqnarray}\begin{array}{l}\displaystyle
\left(\!\debbra\vkond\finbra\!\!\ptens\!\!\ilkeoq\debbra\spinz\finbra\!\left)
\debket\etatm^{(\modspin)}_{\textup{out}}\finket\!\!\right.\right.
\\ \displaystyle \hspace{2.5cm}
\!=\!\debpscal\!\vkond\!\midpscal\etatm^{(0)}_{\textup{out}}\finpscal\!
\ilkeoq\debpscal\!\spinz\!\midpscal\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\finpscal\!\!. \end{array} \hspace{0.75cm} \label{eq:fgenert} \end{eqnarray}
The joint probability function to detect the particle
with momentum $\hbar\vkond$ and spin component $\spinz$ on the
$\cZ[\vkond]$ axis is expressed, according to the definition of the conditional
probability and from (\!\!~\ref{eq:fgenert}), by:
\begin{eqnarray}\!\begin{array}{l}\displaystyle
\pdfpj^{(\modspin)}_{\vavkond,\vaspinz}
\!\left(\vkond,[\spinz]_{\cZ[\vkond]}\right)
\!=\!\pdfp^{(\modspin)}_{\vavkond}(\vkond)\,
\probcon_{\vaspinzc}^{(\modspin)}\left([\spinz]_{\cZ[\vkond]}\right)
\\ \displaystyle \hspace{2cm}
=\left|\!\debpscal\!\vkond\!\midpscal
\etatm^{(0)}_{\textup{out}}\finpscal\!\right|^{2}
\,\left|\ilkeoq\debpscal\spinz\midpscal\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\finpscal\right|^2\!\!,
\end{array} \hspace{0.7cm} \label{eq:fgenerm} \end{eqnarray}
where $\pdfp^{(\modspin)}_{\vavkond}(\vkond)$ is the PDF to detect,
without polarization measurement, the particle with momentum $\hbar\vkond$
and $\probcon_{\vaspinzc}^{(\modspin)}\left([\spinz]_{\cZ[\vkond]}\right)$
is the conditional probability to detect the particle with spin component
$\spinz$ on the $\cZ[\vkond]$ axis if its momentum is $\hbar\vkond$.
If $\modspin\neq 0$, $\pdfp^{(\modspin)}_{\vavkond}(\vkond)$ is
the marginal PDF obtained by summing (\!\!~\ref{eq:fgenerm}) over $\spinz$.
Given (\!\!~\ref{eq:rotuncn}) and (\!\!~\ref{eq:fgeners}), this leads to
$\pdfp^{(\modspin)}_{\vavkond}(\vkond)=\pdfp^{(0)}_{\vavkond}(\vkond)$.
Hence, given (\!\!~\ref{eq:finstc}) and (\!\!~\ref{eq:fgeners}):
\begin{eqnarray}
\pdfp^{(\modspin)}_{\vavkond}(\vkond)\equiv\pdfp^{}_{\vavkond}(\vkond)
=\norm^{-1}\!\left|\debvalop\vkond\midvalopa
\opedif^{(0)}\!\midvalopb\vkondini\finvalop\right|^{2}
\hspace{0.25cm}\forall\modspin,\hspace{0.75cm} \label{eq:fgenerq} \end{eqnarray}
which expresses that the momentum PDF of the detected particle
without polarization measurement is independent of its spin
and initial polarization.
Moreover, substituting (\!\!~\ref{eq:finstc}) into (\!\!~\ref{eq:fgenerm})
and given (\!\!~\ref{eq:fgenerq}), we get:
\begin{eqnarray} \probcon_{\vaspinzc}^{(\modspin)}
\left([\spinz]_{\cZ[\vkond]}\right)
=\left|\ilkeoq\debpscal\spinz\midpscal\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\finpscal\right|^2. \hspace{0.75cm} \label{eq:fgenerr} \end{eqnarray}
The experimentally accessible quantities are those given by
(\!\!~\ref{eq:fgenerq}) and (\!\!~\ref{eq:fgenerr}).
To calculate them, we therefore need to express the matrix elements
$\debvalop\vkond\midvalopa\opedif^{(0)}\midvalopb \vkondini\finvalop$
and the amplitudes $\ilkeoq\debpscal\spinz\midpscal\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\finpscal$.
This is the subject of the next two subsections.
}
}
\subsection{Momentum part of the diffraction operator} { \label{sub:projini}
In this subsection, we first deal with the case of non-relativistic particles.
We will then show that the developed formalism
can be transposed to the case of photons.
\subsubsection{Position measurement and the Huygens-Fresnel principle.} {
At the time $\tmes$ of the position measurement, the position wave function
of the particle undergoes a localization at the aperture
of the diaphragm (postulate of wave function reduction).
During this temporary localization, the transverse coordinates of the particle
correspond to the aperture and the longitudinal coordinate is equal or close to
$\cz=0$ since the particle then crosses the plane of the diaphragm.
The position measurement is therefore a measurement
of the {\it three} spatial coordinates. \\
The measurement of the transverse coordinates is associated with the projector:
\begin{eqnarray}
\Projtran^{\ouvas}\equiv\intdbl_{\ouvas}\differ\cx\differ\cy\,
\debket\cx\cy\finket\!\!\debbra\cx\cy\finbra, \label{eq:proxy}\end{eqnarray}
where $\ouvas$ is the aperture.
Then, the easiest way to describe the measurement of $\cz$ is
to use a projector of the form:
\begin{eqnarray} \Projlong^{\Delta\cz}\;\equiv\;
\int_{-\Delta\cz/2}^{+\Delta\cz/2}\differ\cz\,
\debket\cz\finket\!\!\debbra\cz\finbra, \label{eq:proz}\end{eqnarray}
where the width $\Delta\cz$ of the interval $[-\Delta\cz/2,+\Delta\cz/2]$
is a parameter of the model whose value is not known a priori.
Finally, the measurement of the three coordinates $(\cx,\cy,\cz)$ is assumed
to be associated with the projector:
\begin{eqnarray}
\Proj^{\ouvas,\Delta\cz}\;\equiv\;
\Projtran^{\ouvas}\otimes\Projlong^{\Delta\cz}. \label{eq:proxyz}\end{eqnarray}
Since the aperture $\ouvas$ is a 2D surface, we should have in principle:
$\Delta\cz=0$, but the integral on the right-hand side of (\!\!~\ref{eq:proz})
is zero in this case. Suppose then that $\Delta\cz\neq 0$.
From (\!\!~\ref{eq:proxyz}), we have:
$\Proj^{\ouvas,\Delta\cz}\!\debket\vkondini\finket\!=\Projtran^{\ouvas}
\debket\kox\,\koy\finket\!\ptens\Projlong^{\Delta\cz}\debket\koz\finket$.
Therefore, from (\!\!~\ref{eq:proz}), the PDF corresponding to
the probability of finding a result within the interval $[\cz,\cz+\differ\cz]$
when measuring the longitudinal coordinate is proportional to:
\begin{eqnarray}\!\!\begin{array}{l}\displaystyle
\left|\!\debvalop\!\cz\!\midvalopa\Projlong^{\Delta\cz}\midvalopb
\!\koz\!\finvalop\!\right|^2
\\ \displaystyle \hspace{2cm}
= \left\{\!\begin{array}{cll}
(2\pi)^{-1}&\;\mbox{if}&\!\cz\in[\,-\Delta\cz/2,+\Delta\cz/2\,]
\\
0 &\;\mbox{if}&\!\cz\notin[\,-\Delta\cz/2,+\Delta\cz/2\,].
\end{array} \right. \end{array} \hspace{0.5cm} \label{eq:probpjr} \end{eqnarray}
If $\Delta\cz$ is small, the action of $\Proj^{\ouvas,\Delta\cz}$ localizes
the probability of presence of the particle in a narrow region around
the wavefront at the aperture and consequently its longitudinal coordinate
is $\cz=0$ with excellent accuracy.
This localization of the probability of presence occurs at time $\tmes$.
Therefore, at any time $\temps>\tmes$, the diffracted wave has been emitted
from a volume including the wavefront at the aperture and its close vicinity.
We are then close to a situation consistent with the Huygens-Fresnel principle.
Perfect compatibility would therefore be obtained if $\Delta\cz = 0$;
however, in this case, it is not possible to obtain a PDF from the function
expressed by (\!\!~\ref{eq:probpjr}) because it is zero everywhere except
at $\cz=0$ where its value is finite.
However, if the value at $\cz=0$ were infinite,
we would obtain a PDF equal to the Dirac distribution $\delta(\cz)$.
Thus, given the good agreement between the measurements performed so far
and the predictions of the classical theories based on the Huygens-Fresnel
principle, it is worth looking for a way to treat this limiting case.
Fortunately, it turns out that this is possible provided, however,
that the notion of projector is generalized.
}
\subsubsection{Position filtering operator: Multi-wavefront Huygens-Fresnel principle.} { \label{par:posfilt}
If $\Delta\cz=0$, a PDF equal to $\delta(\cz)$ can be obtained
by using, instead of the projector (\!\!~\ref{eq:proz}),
a {\it filtering operator} $\opredulong^{\Delta\cz}$ defined as:
\begin{eqnarray} \opredulong^{\Delta\cz}\equiv
\int\differ\cs\;\sqrt{\fredDzl(\cs)}\;
\debket\cs\finket\!\!\debbra\cs\finbra,\hspace{0.5cm} \label{eq:projods} \end{eqnarray}
where $\fredDzl(\cs)$ is a positive function normalized to 1,
such that its integral outside the interval
$[\,-\Delta\cz/2,+\Delta\cz/2\,]$ is negligible, and such that:
\begin{equation} \lim_{\Delta\cz\rightarrow 0}\fredDzl(\cz)=\delta(\cz). \label{eq:projodt} \end{equation}
From (\!\!~\ref{eq:projods}):
\begin{eqnarray}\begin{array}{rcl}\displaystyle
\left|\!\debvalop\!\cz\!\midvalopa\opredulong^{\Delta\cz}\midvalopb
\!\koz\!\finvalop\!\right|^2\!\!
&=&\left|\!\debpscal\cz\midpscal\etatiz\finpscal\!\right|^2\,\fredDzl(\cz)
\\ \displaystyle \hspace{2.75cm}
&=&(2\pi)^{-1}\,\fredDzl(\cz). \end{array} \hspace{0.75cm} \label{eq:projodw} \end{eqnarray}
Therefore, given (\!\!~\ref{eq:projodt}), if $\Delta\cz=0$,
$\left|\!\debvalop\!\cz\!\midvalopa
\opredulong^{\Delta\cz}\midvalopb\!\koz\!\finvalop\!\right|^2$ is defined
and proportional to $\delta(\cz)$.
This allows us to obtain a PDF equal to $\delta(\cz)$ after normalization.
However, the problem is not completely solved because,
from (\!\!~\ref{eq:projods}) and (\!\!~\ref{eq:projodt}),
$\opredulong^{\Delta\cz}$ is not defined if $\Delta\cz=0$
since the square root of $\delta(\cz)$ is not defined.
So we are in a way compelled to assume that $\Delta\cz$ is not zero
(but possibly close to zero, so that the PDF can then be expressed
with a good approximation by the Dirac distribution).
This implies reviewing the question of the connection between diffraction
and the Huygens-Fresnel principle.
The case $\Delta\cz=0$ corresponds to the Kirchhoff integral where
a single-wavefront Huygens-Fresnel principle is applied:
The wavelets contributing to the diffracted wave are emitted from
one wavefront located at the aperture.
The case $\Delta\cz>0$, suggested by the quantum approach,
would then correspond to a multi-wavefront Huygens-Fresnel principle
where several neighboring wavefronts contribute with different weights
whose distribution is the function $\fredDzl(\cz)$.
Moreover, from the first equality of (\!\!~\ref{eq:projodw}), $\fredDzl(\cz)$
can also be interpreted as the weight with which the filtering operator selects
the result $\cz$ from the value at $\cz$ of the position wave function
in the initial state $\debket\etatiz\finket$.
This weight, as a function of $\cz$, will be called the
{\it longitudinal position filtering function}.
For the transverse coordinates, the projector (\!\!~\ref{eq:proxy})
can be replaced by the filtering operator:
\begin{eqnarray} \opredutran^{\ouvas}\equiv\intdbl\differ\cx\differ\cy
\;\sqrt{\fredAt(\cx,\cy)}\;\debket\cx\cy\finket\!\!\debbra\cx\cy\finbra,
\hspace{0.75cm} \label{eq:redxy}\end{eqnarray}
where $\fredAt(\cx,\cy)$ is the {\it transverse position filtering function}.
It can be considered that the transmission of the incident wave is the same over
the entire area of the aperture so that this function corresponds to a
{\it uniform} filtering which truncates the wave function. Hence:
\begin{equation} \fredAt(\cx,\cy)\;=\;\surfouva^{-1}\times
\left\{ \begin{array}{cll}
1&\mbox{ if }& (\cx,\cy)\in\ouvas \\
0&\mbox{ if }& (\cx,\cy) \notin\ouvas, \end{array} \right. \label{eq:tronqfod} \end{equation}
where $\surfouva$ is the area of $\ouvas$. From (\!\!~\ref{eq:proxy}),
(\!\!~\ref{eq:redxy}) and (\!\!~\ref{eq:tronqfod}):
$\opredutran^{\ouvas}=\surfouva^{-1/2}\,\Projtran^{\ouvas}$,
so the action of the two operators leads to the same state
after normalization.
More generally, any projector is equivalent to a uniform filtering operator.
The filtering operator allows us to consider the case
of a non-uniform filtering. In particular, the longitudinal filtering
could be non-uniform, unlike the transverse filtering,
because the aperture is limited by a material edge in the transverse plane
whereas there are no edges along the longitudinal direction.
The longitudinal filtering function could then be a continuous function
forming a peak centered around $\cz=0$ and of width $\Delta\cz$.
The precise shape of the filtering function is part of the
assumptions of the model. This shape may matter
if $\Delta\cz$ is large but probably not if $\Delta\cz$
is close to zero because the PDF is then close to the Dirac distribution. \\
Finally, given ($\!\!$~\ref{eq:projods}) and ($\!\!$~\ref{eq:redxy}),
we replace the projector $\Proj^{\ouvas,\Delta\cz}$ defined in
($\!\!$~\ref{eq:proxyz}) by the filtering operator:
\begin{eqnarray}\begin{array}{c}\displaystyle
\opredu^{\ouvas,\Delta\cz}\equiv
\opredutran^{\ouvas}\ptens\opredulong^{\Delta\cz}=\!\int\!\differ^3\rvec\,
\sqrt{\fred^{\ouvas,\Delta\cz}(\rvec)}
\,\debket\rvec\finket\!\!\debbra\rvec\finbra,
\\ \displaystyle
\fred^{\ouvas,\Delta\cz}(\rvec)\;\equiv\;
\fredAt(\cx,\cy)\;\fredDzl(\cz). \end{array}\hspace{0.75cm} \label{eq:tronqfoc} \end{eqnarray}
The volume $\ouvas\times[-\Delta\cz/2,+\Delta\cz/2]$
of transverse section $\ouvas$ and length $\Delta\cz$,
centered at the origin O is called a {\it three-dimensional (3D) aperture}.
The 3D aperture can be defined as the region where the position wave function
of the particle is temporarily localized during the position measurement.
The aperture $\ouvas$ and the interval $[-\Delta\cz/2,+\Delta\cz/2]$
are called the {\it transverse 2D aperture} and
the {\it longitudinal 1D aperture}, respectively (Fig. $\!\!$~\ref{fig:ouvgenz}).
\begin{figure}
\caption{
Example of 3D aperture (section in the $(\Ox,\Oz)$ plane) with the
corresponding transverse and longitudinal position filtering functions. }
\label{fig:ouvgenz}
\end{figure}
In the case of a uniform filtering, the aperture is the region
where the filtering function is non-zero. In the case of a non-uniform
filtering, the filtering function can be non-zero everywhere
(for example if it is a Gaussian). We are then led to define more generally
the aperture as the region outside of which the integral
of the filtering function is negligible.
In ($\!\!$~\ref{eq:tronqfoc}), $\Delta\cz$ does not depend on $\cx$ and $\cy$,
which is an implicit assumption in the definition ($\!\!$~\ref{eq:projods}).
More generally, the {\it position filtering operator} is defined by:
\begin{eqnarray}\opredu^{\ouva}=\inttrp\!\differ^3\rmod\,
\sqrt{\fredA(\rvec)}\;\debket\rvec\finket\!\!\debbra\rvec\finbra,\hspace{0.75cm} \label{eq:tronqfob} \end{eqnarray}
where $\ouva$ is the 3D aperture whose shape can be assumed
to be more or less complicated and $\fredA(\rvec)$ is the
{\it position filtering function} whose expression can be assumed
to be different from a product of the form (\!\!~\ref{eq:tronqfoc}).
}
\subsubsection{The need to consider kinematics.} {
From (\!\!~\ref{eq:tronqfob}), $\left|\debvalop\rvec\midvalopa
\opredu^{\ouva}\midvalopb\vkondini\finvalop\right|^2$ is proportional to
$\fredA(\rvec)$. So the state $\opredu^{\ouva}\debket\vkondini\finket$
is associated with the momentum PDF of the particle just after its
localization at the aperture, when it is about to move away from the diaphragm.
Moreover, from (\!\!~\ref{eq:fgenerq}), the state
$\opedif^{(0)}\debket\vkondini\finket$ corresponds to the momentum PDF
$\pdfp_{\vavkond}(\vkond)$ of the particle detected beyond the diaphragm.
Since the particle is free after its passage through the aperture,
its momentum is conserved until its detection,
which suggests that $\opedif^{(0)}$ is nothing other than $\opredu^{\ouva}$.
However, this cannot be the case for the following reason.
From (\!\!~\ref{eq:tronqfob}), the momentum wave function of the state
$\opredu^{\ouva}\debket\vkondini\finket$ is expressed by:
\begin{eqnarray}\debvalop\vkond\midvalopa\opredu^{\ouva}\midvalopb
\vkondini\finvalop\!=(2\pi)^{-3/2}\;
\jfour^{\ouva}(\vkond-\vkondini),\hspace{0.75cm} \label{eq:elmafsq} \end{eqnarray}
where $\jfour^{\ouva}(\vkond-\vkondini)$ is the Fourier transform
of the square root of the position filtering function:
\begin{eqnarray}\begin{array}{l} \displaystyle
\jfour^{\ouva}(\vkond-\vkondini)\,\equiv\,(2\pi)^{-3/2}
\\ \displaystyle \hspace{1.75cm} \times
\!\!\inttrp\!\differ^3\rmod\;\sqrt{\fredA(\rvec)}\;
\exp\left[\,-\icmp(\vkond-\vkondini)\,\pscal\,\rvec\,\right].
\end{array} \hspace{0.75cm} \label{eq:nrspinb} \end{eqnarray}
If $\opedif^{(0)}$ is equal to $\opredu^{\ouva}$,
the PDF $\pdfp_{\vavkond}(\vkond)$ is obtained by
substituting (\!\!~\ref{eq:elmafsq}) into (\!\!~\ref{eq:fgenerq}).
Then, the widths $\Delta\kx$, $\Delta\ky$ and $\Delta\kz$
of this PDF are those of the distribution associated with the Fourier
transform $\jfour^{\ouva}(\vkond-\vkondini)$ and are therefore related to
the widths $\Delta\cx$, $\Delta\cy$ and $\Delta\cz$ of the 3D aperture
by the uncertainty relations.
However, if $\Delta\cx$ for example is small enough,
the relation $\Delta\cx\Delta\kx\gtrsim 1$ implies that
$\Delta\kx$ can be sufficiently large so that $|\kx|>\mkondini$
with non-zero probability and therefore
the relation (\!\!~\ref{eq:contrax}) is not satisfied in such a case.
However, this is not possible because (\!\!~\ref{eq:contrax}) results from
kinematics and is moreover confirmed by experiment.
This issue comes from the fact that the position wave function
of the state $\opredu^{\ouva}\debket\vkondini\finket$ is localized
in the 3D aperture $\ouva$ and that consequently its momentum wave function is
spread out, which results in a spreading of the distribution
of the momentum modulus and therefore of the energy.
For (\!\!~\ref{eq:contrax}) to be satisfied, we are led to assume that
$\opedif^{(0)}$ is not simply equal to $\opredu^{\ouva}$
but is rather of the form:
\begin{eqnarray}\opedif^{(0)}=\,\opredu^{\mkondini}\;\opredu^{\ouva}, \label{eq:opdifra} \end{eqnarray}
where $\opredu^{\mkondini}$ is an {\it energy-momentum filtering operator}
whose role is to act on the state $\opredu^{\ouva}\debket\vkondini\finket$,
which is then a {\it localized transitional state},
to obtain a final state with the same energy as that of the initial state.
}
\subsubsection{Energy-momentum filtering operator.} {
The filtering operator $\opredu^{\mkondini}$ must be associated
with the domain $\ouvcin$ of the momentum space that corresponds
to the vectors $\vkond$ compatible with kinematics. So we define,
using an expression similar to (\!\!~\ref{eq:tronqfob}):
\begin{equation}\opredu^{\mkondini}\equiv\!\inttrp\differ^3\mkond\;
\sqrt{\fredcin(\vkond)}\;\debket\vkond\finket\!\!\debbra\vkond\finbra, \label{eq:transfob} \end{equation}
where $\fredcin(\vkond)$ is an {\it energy-momentum filtering function}
which must represent the weight with which the filtering operator
selects the result $\vkond$ from the value at $\vkond$ of the
momentum wave function in the localized transitional state
$\opredu^{\ouva}\debket\vkondini\finket$.
From (\!\!~\ref{eq:contrax}) and (\!\!~\ref{eq:contray}),
we are led to assume that this function is of the form:
\begin{eqnarray}\fredcin(\vkond)\equiv\consn\;
\gaussmk\!\left(|\vkond|-\mkondini\right)\;\delta_{1\,\sgn[\kz]},\hspace{0.5cm} \label{eq:transfoc} \end{eqnarray}
where $\consn$ is a normalization constant that will be calculated below,
$\gaussmk\left(|\vkond|-\mkondini\right)$ is a function of the modulus
of $\vkond$ forming a peak centered at $|\vkond|=\mkondini$
and of width $\Delta\mkond$ close to zero
(in accordance with (\!\!~\ref{eq:contrax})) and
the Kronecker delta $\delta_{1\,\sgn[\kz]}$ ensures that
$\fredcin(\vkond)$ is zero if $\kz\leq 0$
(in accordance with (\!\!~\ref{eq:contray})).
From (\!\!~\ref{eq:transfoc}), using the spherical coordinates,
the normalization to 1 of $\fredcin(\vkond)$ is expressed by:
\begin{eqnarray}\!\!
1=\consn\!\!\int_{0}^{\infty}\!\!\!\!\!\!\differ\mkond\;\mkond^2\,
\gaussmk\!\left(\mkond\!-\!\mkondini\right)
\!\!\int_{0}^{\pi}\!\!\!\!\!\differ\theta\,\sin\!\theta\,
\delta_{1\,\sgn[\cos\theta]}
\!\int_{0}^{2\pi}\!\!\!\!\!\!\differ\phi.\hspace{0.75cm} \label{eq:transfod} \end{eqnarray}
Since $\Delta\mkond$ is close to zero, we can replace
$\gaussmk\left(\mkond-\mkondini\right)$ by $\delta(\mkond-\mkondini)$ in
the integral over $\mkond$ whose value is therefore close to ${\mkondini}^2$.
Then, (\!\!~\ref{eq:transfod}) implies:
$\consn\simeq{\mkondini}^{\!-2}\,(2\pi)^{-1}$.
Substituting (\!\!~\ref{eq:transfoc}) with this value of $\consn$
into (\!\!~\ref{eq:transfob}), we get:
\begin{eqnarray}\begin{array}{l}\displaystyle
\opredu^{\mkondini}\simeq\,(2\pi)^{-1/2}\,\mkondini^{-1}
\\ \displaystyle \hspace{1cm} \times\!
\!\inttrp\!\differ^3\mkond\;\sqrt{\gaussmk\!\left(|\vkond|-\mkondini\right)}
\;\delta_{1\,\sgn[\kz]}\;\debket\vkond\finket\!\!\debbra\vkond\finbra. \end{array} \hspace{0.75cm} \label{eq:transfon} \end{eqnarray}
We can interpret $\opredu^{\mkondini}$ as an operator which represents
an energy-momentum measurement including a measurement of the momentum modulus
(in other words of the energy) giving the result $\hbar\mkondini$
with near certainty and a measurement of the momentum longitudinal component
giving the result $\hbar\kz>0$.
}
\subsubsection{Matrix element of the momentum part of the diffraction operator.} {
Substituting (\!\!~\ref{eq:tronqfob}) --- in which we insert
the identity operator $\inttrp\differ^3\mkond\,\debket\vkond\finket
\!\debbra\vkond\finbra$ after $\debket\rvec\finket\!\debbra\rvec\finbra$ ---
and (\!\!~\ref{eq:transfon}) into (\!\!~\ref{eq:opdifra}), and given
(\!\!~\ref{eq:nrspinb}), we obtain:
\begin{eqnarray}\!\!\!\!\begin{array}{l} \displaystyle
\opedif^{(0)}\!\simeq\,(2\pi)^{-2}\,\mkondini^{-1}
\!\!\inttrp\!\differ^3\mkond\,\sqrt{\gaussmk\!\left(|\vkond|\!-\!\mkondini\right)}
\;\delta_{1\,\sgn[\kz]}
\\ \displaystyle \hspace{2.75cm} \times
\!\!\inttrp\!\differ^3\mkond'\;\jfour^{\ouva}(\vkond-\vkond')
\;\debket\vkond\finket\!\!\debbra\vkond'\finbra.
\end{array} \hspace{0.65cm} \label{eq:tronqfoe} \end{eqnarray}
Hence, instead of (\!\!~\ref{eq:elmafsq}):
\begin{eqnarray}\begin{array}{l} \displaystyle
\debvalop\vkond\midvalopa\opedif^{(0)}\midvalopb\vkondini\finvalop
\simeq(2\pi)^{-2}\,\mkondini^{-1}
\\ \displaystyle \hspace{1.5cm} \times
\sqrt{\gaussmk\!\left(|\vkond|-\mkondini\right)}\;\delta_{1\,\sgn[\kz]}
\;\jfour^{\ouva}(\vkond-\vkondini). \end{array} \hspace{0.75cm} \label{eq:opdispa} \end{eqnarray}
}
\subsubsection{Photons.} {
A position filtering operator of the form ($\!\!$~\ref{eq:tronqfob}),
where the projector $\debket\rvec\finket\!\!\debbra\rvec\finbra$ is involved,
cannot be used for the photon because the localized photon states are eigenstates
of a photon position operator different from the position observable of
the non-relativistic case.
Several problems were encountered and then finally resolved to construct
this photon position operator and, more generally, to elaborate
a true quantum mechanics of the photon
\cite{NeWi,BiBi,Sipe,Hacoc,HaBay,SmRay,Brod,HaDeb,BaMos,MaHaw}.
The localized photon states are biorthogonal \cite{Brod}
with a specific scalar product \cite{HaDeb} and it follows that the
appropriate operator to replace the projector
$\debket\rvec\finket\!\!\debbra\rvec\finbra$ in the photon case is
$\ensuremath{\chpotv^{(-)}}(\rvec,\temps)\ketvide\!\pscal\!\bravide\ensuremath{\chelec^{(+)}}(\rvec,\temps)$,
where $\oppotvpm(\rvec,\temps)$ and $\opchelpm(\rvec,\temps)$ are the
positive and negative frequency field operators of the transverse
vector potential and electric field.
These field operators are given by \cite{CTDRG}:
\begin{eqnarray}\begin{array}{l} \displaystyle
\ensuremath{\chpotv^{(-)}}(\rvec,\temps)=\left[\ensuremath{\chpotv^{(+)}}(\rvec,\temps)\right]^{\dagger}=
\sqrt{\frac{\hbar}{2\epso}}\;(2\pi)^{-3/2}
\\ \displaystyle \hspace{0.5cm}\times
\!\!\inttrp\!\frac{\differ^3\mkond}{\sqrt{\freq}}\sum_{\ilinp=x,y}\!
\!\exp\left[\,\icmp\left(\freq\temps-\vkond\pscal\rvec\right)\,\right]
\veclinp^{(\ilinp)}_{\vkond}\;\opcrea(\vkond,\ilinp[\vkond]),
\\ \displaystyle \hspace{0.1cm}
\ensuremath{\chelec^{(-)}}(\rvec,\temps)=\left[\ensuremath{\chelec^{(+)}}(\rvec,\temps)\right]^{\dagger}=
-\frac{\partial}{\partial\temps}\ensuremath{\chpotv^{(-)}}(\rvec,\temps),
\end{array} \hspace{0.75cm} \label{eq:locafa} \end{eqnarray}
where $\freq=\vlum\mkond$,
$\veclinp^{(\ilinp)}_{\vkond}$ is the unit vector of the $\ilinp[\vkond]$
axis of a coordinate system such that $\cz[\vkond]\parallel\vkond$
and $\opcrea(\vkond,\ilinp[\vkond])$ is the creation operator
of a photon of momentum $\hbar\vkond$ and linearly polarized in the direction
of the $\ilinp[\vkond]$ axis.
Similarly to (\!\!~\ref{eq:onepsa}), we have:
\begin{eqnarray}\opcrea(\vkond,\ilinp[\vkond])\ketvide\;=\;
\debket\vkond\finket\!\!\ptens\!\debket\ilinp\finket\irkeoz,\hspace{0.75cm} \label{eq:onepsb} \end{eqnarray}
where $\debket\ilinp\finket\irkeoz$ is the basis state
of linear polarization in the direction of the $\ilinp[\vkond]$ axis.
From (\!\!~\ref{eq:locafa}) and (\!\!~\ref{eq:onepsb}):
\begin{eqnarray}\!\!\begin{array}{l}\displaystyle
\ensuremath{\chpotv^{(-)}}(\rvec,\temps)\ketvide\!\pscal\!\bravide\ensuremath{\chelec^{(+)}}(\rvec,\temps)
\,=\,\frac{\icmp\hbar}{2\epso}\,(2\pi)^{-3}
\\ \displaystyle \hspace{0.3cm}\times
\!\!\inttrp\!\!\differ^3\mkond\!\!\inttrp\!\!\differ^3\mkond'
\sqrt{\mkond'\!/\mkond\,}
\,\exp\{\icmp\,[(\freq\!-\!\freq')\temps\!-\!(\vkond\!-\!\vkond')\pscal\rvec]\}
\\ \displaystyle \hspace{0.3cm}
\times\debket\vkond\finket\!\!\debbra\vkond'\finbra\ptens\!\!
\sum_{\ilinp=\cx,\cy}\sum_{\ilinp'=\cx,\cy}{\veclinp^{(\ilinp)}_{\vkond}}
\!\pscal\,\veclinp^{(\ilinp')}_{\vkond'}\debket\ilinp\finket\irkeoz\;
\ilkeozp\debbra\ilinp'\finbra. \end{array} \hspace{0.75cm} \label{eq:locafd} \end{eqnarray}
The photon has spin 1 and this implies that its spin projection eigenstates
are equivalent to vectors of complex components in the basis
$\{\veclinp^{(\cx)}_{\vkond}, \veclinp^{(\cy)}_{\vkond},
\veclinp^{(\cz)}_{\vkond}\}$ \cite{LLQM}.
Moreover, the basis states $\debket\ilinp\finket\irkeoz$
are specific linear combinations of the spin projection eigenstates
\cite{CTDRG,Mess} such that $\debket\ilinp\finket\irkeoz$ is equivalent to
the real vector ${\veclinp^{(\ilinp)}_{\vkond}}$. Therefore:
${\veclinp^{(\ilinp)}_{\vkond}} \pscal\,\veclinp^{(\ilinp')}_{\vkond'} =\,
{\veclinp^{(\ilinp)}_{\vkond}}^{*}\!\pscal\,\veclinp^{(\ilinp')}_{\vkond'}
=\ilkeoz\debpscal\ilinp\midpscal\ilinp'\finpscal\irkeozp$.
So the double sum over $\ilinp$ and $\ilinp'$ in ($\!\!$~\ref{eq:locafd})
is the product of the identity operator by itself,
successively expressed by the closure relations of the bases
$\{\debket\xpol\finket\irkeoz,\debket\ypol\finket\irkeoz\}$ and
$\{\debket\xpol\finket\irkeozp,\debket\ypol\finket\irkeozp\}$.
The action of the operator $\ensuremath{\chpotv^{(-)}}(\rvec,\temps)\ketvide\!\pscal\!
\bravide\ensuremath{\chelec^{(+)}}(\rvec,\temps)$ therefore has no effect
on the polarization states so we can just consider
its restriction to the subspace of the momentum states.
So, replacing in (\!\!~\ref{eq:tronqfob})
$\debket\rvec\finket\!\debbra\rvec\finbra$ by the right-hand side
of ($\!\!$~\ref{eq:locafd}) without the double sum over $\ilinp$ and $\ilinp'$
and multiplying by the factor $-2\icmp\epso/\hbar$
to obtain the same dimension as that of $\opredu^{\ouva}$
(length to the power $-3/2$), we are led to assume
that the position filtering operator for the photon is:
\begin{eqnarray}\begin{array}{l}\displaystyle
\opredu^{\ouva}_{\textup{phot}}(\temps)=(2\pi)^{-3}
\!\!\inttrp\!\differ^3\rmod\;\sqrt{\fredA(\rvec)}\;
\!\inttrp\!\!\differ^3\mkond\!\!\inttrp\!\!\differ^3\mkond'\,
\\ \displaystyle \hspace{0.25cm}\times
\,\sqrt{\mkond'\!/\mkond\,}
\,\exp\{\icmp\,[(\freq\!-\!\freq')\temps\!-\!(\vkond\!-\!\vkond')\pscal\rvec]\}
\debket\vkond\finket\!\!\debbra\vkond'\finbra. \end{array} \hspace{0.75cm} \label{eq:locafc} \end{eqnarray}
Furthermore, we express the momentum part of the diffraction operator
in a form similar to ($\!\!$~\ref{eq:opdifra}):
\begin{eqnarray}\opedif^{(0)}_{\textup{phot}}(\temps)=\opredu^{\mkondini}\;
\opredu^{\ouva}_{\textup{phot}}(\temps). \hspace{0.75cm} \label{eq:locaff} \end{eqnarray}
Then, substituting ($\!\!$~\ref{eq:transfon}) and ($\!\!$~\ref{eq:locafc})
into ($\!\!$~\ref{eq:locaff}) --- and given ($\!\!$~\ref{eq:nrspinb}) ---
we finally obtain:
\begin{eqnarray}\hspace{-0.12cm}\begin{array}{l}\displaystyle
\opedif^{(0)}_{\textup{phot}}(\temps)\simeq(2\pi)^{-2}\mkondini^{-1}
\!\!\!\inttrp\!\!\differ^3\mkond\,
\sqrt{\gaussmk\!\left(|\vkond|\!-\!\mkondini\right)}\;\delta_{1\,\sgn[\kz]}
\\ \displaystyle \hspace{0.1cm}\times
\!\!\inttrp\!\!\differ^3\mkond'\sqrt{\mkond'\!/\mkond\,}\,
\exp[\,\icmp(\freq\!-\!\freq')\temps\,]\;
\jfour^{\ouva}(\vkond\!-\!\vkond')\debket\vkond\finket\!\!\debbra\vkond'\finbra\!.
\end{array}\hspace{0.75cm} \label{eq:locafb} \end{eqnarray}
By calculating the matrix element $\debvalop\vkond\midvalopa
\opedif^{(0)}_{\textup{phot}}(\temps)\midvalopb\vkondini\finvalop$
from ($\!\!$~\ref{eq:locafb}), we get an expression
with the factor $\exp[\icmp(\freq-\freqo)\temps]$. Now, from
($\!\!$~\ref{eq:contrax}), $\mkond\!\simeq\!\mkondini$,
and so $\freq\!\simeq\!\freqo$. Therefore, the matrix element in question
does not actually depend on time and we find that its expression
is nothing other than ($\!\!$~\ref{eq:opdispa}).
This relation can therefore be used both for non-relativistic
particles and for photons.
}
\subsubsection{Characteristics of the measurement process.} {
From (\!\!~\ref{eq:opdifra}) and (\!\!~\ref{eq:locaff}),
we see that the momentum part $\opedif^{(0)}$ of the diffraction operator
depends on the momentum modulus $\mkondini$ of the incident particle.
Therefore, the initial state is changed by the action of an operator
which depends on this initial state itself.
This reflects the fact that the diaphragm and the particle form
an inseparable system during the measurement,
in accordance with the Copenhagen interpretation of quantum mechanics.
Moreover, using (\!\!~\ref{eq:tronqfob}), (\!\!~\ref{eq:transfon})
and (\!\!~\ref{eq:locafc}), we can verify that
the product of operators on the right-hand sides of
(\!\!~\ref{eq:opdifra}) and (\!\!~\ref{eq:locaff}) is not commutative.
This non-commutativity imposes the order in which the operators act to create
the final state from the initial state.
This order is related to the temporal unfolding of an irreversible process
whose sequence is as follows: initial state $\rightarrow$ position measurement
($\opredu^{\ouva}$) $\rightarrow$ localized transitional state $\rightarrow$
energy-momentum measurement ($\opredu^{\mkondini}$) $\rightarrow$ final state
$\rightarrow$ measurement of momentum and polarization (detectors).
The first two measurements ($\opedif^{(0)}$) are not equivalent
to one measurement to which the uncertainty relations apply.
These relations are satisfied for each of the two measurements.
Let $\Delta\cx$, $\Delta\kx$ be the uncertainties of the first measurement
which creates the localized transitional state and $\Delta'\cx$, $\Delta'\kx$
those of the second measurement which creates the final state.
So $\Delta\cx$ is the width of the aperture and $\Delta'\kx$
is the width of the distribution of $\kx$ in the final state.
In this state, we have: $-\mkond\leq\kx\leq +\mkond$, so
$\Delta'\kx\lesssim 2\mkond$. Hence, because of kinematics
[Eq. (\!\!~\ref{eq:contrax})]: $\Delta'\kx\lesssim 2\mkondini$ which is finite.
Therefore, if $\Delta\cx$ is small enough, we then have:
$\Delta\cx\Delta'\kx\lesssim 1$ but this is not a problem because $\Delta\cx$
is associated with the first measurement while $\Delta'\kx$ is associated with
the second measurement. On the other hand, we have: $\Delta\cx\Delta\kx\gtrsim 1$
and $\Delta'\cx\Delta'\kx\gtrsim 1$, where $\Delta'\cx$ corresponds to the extent
of the diffracted wave.
We also have the relations: $\Delta\temps\Delta\freq\gtrsim 1$ and
$\Delta'\temps\Delta'\freq\gtrsim 1$ between the lifetimes and the widths
in energy of the transitional state and of the final state.
We can assume that $\Delta\temps\simeq\Delta\cz/\mvit$
where $\mvit$ is the speed of the particle.
Because of the Huygens-Fresnel principle, it is expected that
$\Delta\cz\simeq 0$ (Sec.~\ref{par:posfilt}). So $\Delta\temps\simeq 0$.
Moreover, given (\!\!~\ref{eq:contrax}), we have $\Delta'\freq\simeq 0$.
Hence: $\Delta\temps\Delta'\freq \lesssim 1$.
}
}
\subsection{Polarization amplitudes of the detected particles} { \label{sub:polarp}
\subsubsection{Non-relativistic particles.} {
The quantization axis $\cZ[\vkond]$ belongs to a coordinate system
$\{\cX[\vkond],\cY[\vkond],\cZ[\vkond]\}$ defined by
$\ilinq[\vkond]=\oprotos(\Eulapre,\Eulanuy,\Eularop)\,\ilinp[\vkond]$
($\ilinq=\cX,\cY,\cZ$; $\ilinp=\cx,\cy,\cz$), where
$\oprotos(\Eulapre,\Eulanuy,\Eularop)$ is a rotation whose Euler angles
can be chosen arbitrarily and $\{\cx[\vkond],\cy[\vkond],\cz[\vkond]\}$
is the coordinate system attached to the particle.
Moreover, according to (\!\!~\ref{eq:rotkok}):
$\ilinp[\vkond]=\oprotos(\eulapre,\eulanuy,0)\,\ilinp[\vkondini]$.
The rotation of the eigenstates has the same Euler angle as the rotation
of the axes because a physical system in a given eigenstate must rotate
with the coordinate system associated with the quantization axis
to remain in this eigenstate. Therefore:
\begin{eqnarray}\begin{array}{ccl}
\debket\spinz\finket\irkeoqe &=&\oprotsp^{(\modspin)}
(\Eulapre,\Eulanuy,\Eularop)\debket\spinz\finket\irkeoqz,
\\
\debket\spinz\finket\irkeoqz&=&\oprotsp^{(\modspin)}
(\,\eulapre,\eulanuy,0\,)\debket\spinz\finket\ensuremath{^{}_{\!\!\cz[\vkondini]}}\,. \end{array}\hspace{0.75cm} \label{eq:fgenerv} \end{eqnarray}
In the present case, where the directions of the $\ilinq[\vkond]$ axes
are defined by the rotation $\oprotos(\Eulapre,\Eulanuy,\Eularop)$,
the angle $\Eularop$ must be mentioned in the notation
$\debket\spinz\finket\irkeoqe$ because $\cZ[\vkond]$ only depends on
$\Eulapre,\Eulanuy$ whereas the rotation operator $\oprotsp^{(\modspin)}
(\Eulapre,\Eulanuy,\Eularop)$ (so a priori the resulting state)
also depends on $\Eularop$.
To express the final polarization amplitudes (quantization axis $\cZ[\vkond]$)
as a function of the initial amplitudes (quantization axis $\cZ[\vkondini]$),
we multiply the relation (\!\!~\ref{eq:rotinid}) on the left by
$\ilkeoqe\debbra\spinz\finbra$ and we insert the identity operator
$\sum_{\spinz'}\!\debket\!\spinz'\!\finket\!\ensuremath{^{}_{\!\!\cZ[\vkondini],\Eularop_{0}}}\;
\ensuremath{^{}_{\cZ[\vkondini],\Eularop_{0}\!\!\!}}\debbra\!\spinz'\!\finbra$ before the ket $\debket\kisin\finket$.
We then use (\!\!~\ref{eq:fgenerv}) and the relation:
$\oprotsp^{(\modspin)}(\eulea,\euleb,\eulec)^{\dagger}=\oprotsp^{(\modspin)}
(\eulea,\euleb,\eulec)^{-1}=\oprotsp^{(\modspin)}(-\eulec,-\euleb,-\eulea)$
which results from the unitarity of the rotation operators. We get
\begin{eqnarray}\hspace{-0.15cm} \begin{array}{l}\displaystyle
\ilkeoqe\debpscal\!\spinz\midpscal\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\!\finpscal\!=\sum_{\spinz'}
\\ \displaystyle
\ensuremath{^{}_{\;\;\;\,\cz[\vkondini]}\!}\!\debvalop\!\spinz\!\midvalopa
\oprotsp^{(\modspin)}(0,-\eulanuy,-\eulapre)\,
\oprotsp^{(\modspin)}(-\Eularop,-\Eulanuy,-\Eulapre)
\right.\right.\right.
\\ \displaystyle \hspace{1.25cm} \times
\left.\left.\left.\!\!
\oprotsp^{(\modspin)}(\eulera,\eulerb,\eulerc)\,
\oprotsp^{(\modspin)}(\Eulapre_{0},\Eulanuy_{0},\Eularop_{0})
\midvalopb\!\spinz'\!\finvalop\ensuremath{^{}_{\!\!\cz[\vkondini]}}\;
\\ \displaystyle \hspace{3.75cm} \times\,
\ensuremath{^{}_{\cZ[\vkondini],\Eularop_{0}\!\!\!}}\debpscal\!\spinz'\!\midpscal\!\kisin\!\finpscal\!,
\end{array}\hspace{0.5cm} \label{eq:rotinia} \end{eqnarray}
where $\cZ[\vkond]=\cZ[\vkond;\Eulapre,\Eulanuy]$,
$\cZ[\vkondini]=\cZ[\vkondini;\Eulapre_{0},\Eulanuy_{0}]$,
$\eulea_{\entj}\equiv\eulea_{\entj}(\vkond)$ and
$\vkond\equiv\vkond(\mkond,\eulanuy,\eulapre)$.
The matrix element of the product of the four rotation operators
can be calculated from the standard formula
\begin{eqnarray} \debvalop\!\spinz\!\midvalopa\oprotsp^{(\modspin)}
(\eulea,\euleb,\eulec)\midvalopb\!\spinz'\finvalop\!=
\exp[-\icmp(\spinz\eulea\!+\!\spinz'\eulec)]
\;\mrotd_{\spinz\spinz'}^{\,(\modspin)}(\euleb)\,,
\hspace{0.75cm} \label{eq:polpoda} \end{eqnarray}
where $(\mrotd_{\spinz\spinz'}^{\,(\modspin)}(\euleb))$
is a $(2\modspin+1)\!\times\!(2\modspin+1)$ matrix
whose expression is known \cite{Mess}.
}
\subsubsection{Relativistic particles.} {
The quantization axis $\cZ[\vkond]$
must have the same direction as that of the momentum $\vkond$.
Since $\cz[\vkond]\parallel\vkond$ [Eq. (\!\!~\ref{eq:rotkok})],
this implies $\cZ[\vkond]=\cz[\vkond]$.
We then have $\Eulanuy=0$ and $\oprotos(\Eulapre,0,\Eularop)=
\oprotos(\Eulapre+\Eularop,0,0)=\oprotos(0,0,\Eulapre+\Eularop)$ which is
a rotation of arbitrary angle $\Eulapre+\Eularop$ around $\cz[\vkond]$.
To simplify, we choose $\Eulapre=0$, so that the rotation reduces to $\oprotos(0,0,\Eularop)$.
We then apply the first relation of (\!\!~\ref{eq:fgenerv}) to the rotation
$\oprotsp^{(\modspin)}(0,0,\Eularop)$.
Using (\!\!~\ref{eq:polpoda}) and the property
$\mrotd_{\spinz\spinz'}^{\,(\modspin)}(0)=\delta_{\spinz\spinz'}$ \cite{Mess},
this leads to: $\debket\spinz\finket\irkeoqe=
\exp(-\icmp\spinz\Eularop)\debket\spinz\finket\irkeoqz$.
Then, since $\cZ[\vkond]=\cz[\vkond]$ and $\cz[\vkond]\parallel\vkond$,
we will use the notation $\debket\spinz\finket\irkeuz$ for simplicity. Finally,
\begin{eqnarray}\debket\spinz\finket\irkeuz
\equiv\oprotsp^{(\modspin)}(0,0,\Eularop)\debket\spinz\finket\irkeoz
=\exp\,(-\icmp\spinz\Eularop)\debket\spinz\finket\irkeoz.\hspace{0.75cm} \label{eq:pocirc} \end{eqnarray}
Substituting into (\!\!~\ref{eq:rotinia}), we get:
\begin{eqnarray}\begin{array}{l}\displaystyle
\ilkeoz\debpscal\!\spinz\midpscal\ensuremath{\etcospin^{(\modspin)}_{\textup{out}}(\vkond)}\!\finpscal
=\sum_{\spinz'}
\\ \displaystyle \hspace{0.25cm} \times
\,\ilkoeoz\debvalop\!\spinz\!\midvalopa
\oprotsp^{(\modspin)}(0,-\eulanuy,-\eulapre)\,
\oprotsp^{(\modspin)}(\eulera,\eulerb,\eulerc)
\midvalopb\!\spinz'\!\finvalop\irkoeoz\;
\\ \displaystyle \hspace{3cm} \times
\ilkoeoz\debpscal\!\spinz'\!\midpscal\!\kisin\!\finpscal\!.
\end{array}\hspace{0.75cm} \label{eq:fgenero} \end{eqnarray}
In the rest of this subsection, we apply the model to the case of the photon.
}
\subsubsection{Helicity amplitudes of the detected photons.} {
Since the photon is relativistic and has spin 1, its spin component eigenstates
are the helicity states $\debket +1\finket\irkeoz$, $\debket 0\finket\irkeoz$,
and $\debket -1\finket\irkeoz$.
However, the photon is also massless, so its helicity can only have the values
$\pm 1$ \cite{LLrel}; the value zero is impossible, whatever the momentum. Hence:
\begin{eqnarray}\ilkeoz\debpscal 0 \midpscal\ensuremath{\etcospin^{(\spinun)}_{\textup{out}}(\vkond)}\finpscal
=\,\ilkoeoz\debpscal 0 \midpscal\kiuin\finpscal\,=\;0.\hspace{0.75cm} \label{eq:fapendd} \end{eqnarray}
This relation determines the functions
$\eulera[\vkond(\mkond,\eulanuy,\eulapre)]$ and
$\eulerb[\vkond(\mkond,\eulanuy,\eulapre)]$.
Indeed, substituting it into (\!\!~\ref{eq:fgenero})
applied to $\modspin=1$ and $\spinz=0$, we obtain:
\begin{eqnarray}\begin{array}{l}\displaystyle
0\;=\sum_{\spinz'=\pm 1}
\ilkoeoz\debvalop \!0\! \midvalopa
\oprotsp^{(\spinun)}(0,-\eulanuy,-\eulapre)
\right. \right. \right.
\\ \displaystyle \hspace{1.5cm}
\left. \left. \left. \times\,
\oprotsp^{(\spinun)}(\eulera,\eulerb,\eulerc)
\midvalopb\!\spinz'\!\finvalop\irkoeoz\,\ilkoeoz\debpscal\spinz'\midpscal
\kiuin\finpscal\!, \end{array} \hspace{0.75cm} \label{eq:fapende} \end{eqnarray}
which must be satisfied whatever the initial state. Hence:
\begin{eqnarray}\ilkoeoz\debvalop\!0\!\midvalopa
\oprotsp^{(\spinun)}(0,-\eulanuy,-\eulapre)\,
\oprotsp^{(\spinun)}(\eulera,\eulerb,\eulerc)
\midvalopb\!\pm 1\!\finvalop\irkoeoz\!=0.\;\hspace{0.75cm} \label{eq:fapendf} \end{eqnarray}
We then express the left-hand side by using (\!\!~\ref{eq:polpoda})
applied to $\modspin\!=\!1$ and where the matrix
$(\mrotd_{\spinz\spinz'}^{\,(\spinun)}(\euleb))$ is given by \cite{Mess}:
\begin{eqnarray}
\!\!\left(\mrotd_{\spinz\spinz'}^{\,(\spinun)}(\euleb)\right)\!=\!\frac{1}{2}
\!\left( \begin{array}{ccc}
\displaystyle 1\!+\!\cos\euleb&
\displaystyle -\sqrt{2}\sin\euleb&
\displaystyle 1\!-\!\cos\euleb
\\
\displaystyle \sqrt{2}\sin\euleb&
\displaystyle 2\cos\euleb&
\displaystyle -\sqrt{2}\sin\euleb
\\
\displaystyle 1\!-\!\cos\euleb&
\displaystyle \sqrt{2}\sin\euleb&
\displaystyle 1\!+\!\cos\euleb
\end{array} \right)\!.\hspace{0.75cm} \label{eq:djjp} \end{eqnarray}
(Note that the order of the values of $\spinz$ and $\spinz'$ is: $+1,0,-1$).
This leads to the equations:
\begin{eqnarray}\begin{array}{c}
\sin\eulanuy\sin(\eulapre-\eulera)=0,
\\
\sin\eulanuy\cos\eulerb\cos(\eulapre-\eulera)-\cos\eulanuy\sin\eulerb=0. \end{array} \hspace{0.75cm} \label{eq:fapendg} \end{eqnarray}
The first equation implies $\eulera(\vkond)=\eulapre+\entn\pi$, $\entn=0,1$.
Substituting into the second equation, we get:
$\eulerb(\vkond)=(-1)^{\entn}\eulanuy+\entn'\pi$, $\entn'=0,1$.
If $\eulapre=\eulanuy=0$, we then have: $\vkond=\vkondini$, which implies
$\eulera(\vkondini)=\entn\pi$ and $\eulerb(\vkondini)=\entn'\pi$.
However, if $\vkond=\vkondini$, there is no reason for the spin polarization
state to change. Hence,
from (\!\!~\ref{eq:rotinid}), $\oprotsp^{(\spinun)}
\!\left[\eulera(\vkondini),\eulerb(\vkondini),\eulerc(\vkondini)\right]$
is equal to the identity operator, which implies:
$\eulera(\vkondini)=\eulerb(\vkondini)=\eulerc(\vkondini)=0$.
Therefore: $\entn=\entn'=0$ and we get
\begin{eqnarray}\eulera(\vkond)=\eulapre,
\hspace{0.75cm}\eulerb(\vkond)=\eulanuy, \hspace{0.75cm} \label{eq:fapendh} \end{eqnarray}
\begin{eqnarray}\;\eulerc(\vkondini)=0. \hspace{0.75cm} \label{eq:fapendi} \end{eqnarray}
From (\!\!~\ref{eq:polpoda}), (\!\!~\ref{eq:djjp})
and (\!\!~\ref{eq:fapendh}), the matrix whose elements appear
in the right-hand side of (\!\!~\ref{eq:fgenero}) is given by:
\begin{eqnarray}\begin{array}{l}\displaystyle
\!\left(\ilkoeoz\debvalop\spinz\midvalopa
\oprotsp^{(1)}(0,-\eulanuy,-\eulapre)\,
\oprotsp^{(1)}[\eulapre,\eulanuy,\eulerc(\vkond)]\midvalopb
\spinz'\finvalop\irkoeoz\,\right)
\\ \displaystyle \hspace{1.5cm}\;
= \left( \begin{array}{ccc} \displaystyle
\exp\left[-\icmp\eulerc(\vkond)\right]&\;0\;&0
\\
0 &\;1\;& 0
\\ 0 &\;0\;& \displaystyle
\exp\left[\icmp\eulerc(\vkond)\right]
\end{array} \,\right)\!.\hspace{0.75cm}
\end{array} \label{eq:chpopha} \end{eqnarray}
Finally, from (\!\!~\ref{eq:fgenero}), (\!\!~\ref{eq:fapendh})
and (\!\!~\ref{eq:chpopha}):
\begin{eqnarray} \ilkeoz\debpscal\!\spinz\midpscal\ensuremath{\etcospin^{(\spinun)}_{\textup{out}}(\vkond)}\!\finpscal\!
=\exp\left[-\icmp\spinz\eulerc(\vkond)\,\right]\,
\ilkoeoz\debpscal\!\spinz\midpscal\kiuin\!\finpscal. \hspace{0.75cm} \label{eq:chpophb} \end{eqnarray}
Diffraction causes a phase shift of $2\eulerc(\vkond)$ between
the amplitudes of the helicity states $\debket\pm 1\finket$
and conserves the modulus of each of these amplitudes.
}
\subsubsection{Linear polarization amplitudes of the detected photons.} { \label{par:amplin}
It is useful to express the amplitudes of linear polarization for any
direction of the maximum transmission axis of the analyzer.
We associate with the analyzer the coordinate system
$\{\cX[\vkond],\cY[\vkond],\cz[\vkond]\}$ associated with the quantization axis
$\cz[\vkond]$ and we assume by convention that the axis
$\cX[\vkond]=\oprotos(0,0,\Eularop)\,\cx[\vkond]\equiv\cx[\vkond,\Eularop]$
is the maximum transmission axis whose direction is therefore defined by
the choice of the value of $\Eularop$. \\
The helicity states and the basis states of linear polarization
in the directions of the $\ilinp[\vkond,\Eularop]$ axes ($\ilinp=\cx,\cy$)
are related by \cite{Mess,CTDRG}:
\begin{eqnarray}\debket\helic\finket\irkeuz = \displaystyle
\frac{-\helic}{\sqrt{2}} \left(\;\debket\xpol\finket\irkeuz
+\icmp\,\helic\debket\ypol\finket\irkeuz\;\right), \hspace{0.75cm} \label{eq:polinb} \end{eqnarray}
where $\helic=\pm 1$ is the helicity.
According to (\!\!~\ref{eq:pocirc}) applied to the helicity states
$\debket\helic\finket\irkeuz$ and $\debket\helic\finket\irkeoz$
expressed from (\!\!~\ref{eq:polinb}), the basis states
$\debket\ilinp\finket\irkeoz$ transform like the real unitary vectors
$\veclinp^{(\ilinp)}_{\vkond}$ of the $\ilinp[\vkond]$ axes:
\begin{eqnarray}\begin{array}{lll}
\debket\xpol\finket\irkeuz
&=&\;\,\cos\Eularop\debket\xpol\finket\irkeoz
+\sin\Eularop\debket\ypol\finket\irkeoz,
\\
\debket\ypol\finket\irkeuz
&=&\!\!-\sin\Eularop\debket\xpol\finket\irkeoz
+\cos\Eularop\debket\ypol\finket\irkeoz, \end{array}\hspace{0.75cm} \label{eq:inelpob} \end{eqnarray}
which implies in particular:
\begin{eqnarray}\debket\ypol\finket\irkeuz=
\debket\xpol\finket^{}_{\!\!\vkond,\Eularop+\frac{\pi}{2}}.\hspace{0.75cm} \label{eq:inelpof} \end{eqnarray}
Finally, from (\!\!~\ref{eq:chpophb}), (\!\!~\ref{eq:polinb}) and
(\!\!~\ref{eq:inelpob}), we get:
\begin{eqnarray}\begin{array}{l} \displaystyle
\ilkeuz\debpscal\xpol\midpscal\ensuremath{\etcospin^{(\spinun)}_{\textup{out}}(\vkond)}\finpscal
=\cos\left[\,\eulerc(\vkond)\!-\!\Eularop\,\right]\,
\ilkoeoz\debpscal\!\xpol\midpscal\kiuin\finpscal
\\ \displaystyle \hspace{2.5cm}\,
\;\,-\sin\left[\,\eulerc(\vkond)\!-\!\Eularop\,\right]\,
\ilkoeoz\debpscal\!\ypol\midpscal\kiuin\finpscal, \end{array}\hspace{0.75cm} \label{eq:polinc} \end{eqnarray}
from which we deduce $\,\ilkeuz\debpscal\ypol\midpscal\ensuremath{\etcospin^{(\spinun)}_{\textup{out}}(\vkond)}\finpscal$
by using (\!\!~\ref{eq:inelpof}).
}
\subsubsection{Case of an elliptically polarized initial state (photons).} {
By generalizing (\!\!~\ref{eq:polinb}), we can express
any elliptically polarized initial state in the form:
\begin{eqnarray}
\debket\etaelli^{(\spinun)}_{\textup{in}}\!\finket\!\!
\equiv -\helicin\!\left(\,\cos\ellipin \debket\xpol\finket\irkoeiz
\!+\icmp\,\helicin\sin\ellipin \debket\ypol\finket\irkoeiz\,\right)\!,
\hspace{0.75cm} \label{eq:inelpoa} \end{eqnarray}
where $\azaxeli$, $\ellipin$ and $\helicin$ represent
the major axis azimuth, the ellipticity angle, and the handedness, respectively
\footnote{
We use the following definitions:
$\azaxeli\equiv\azaxel(\vkondini)$ is the angle between the $\cx[\vkondini]$ axis
and the major axis of the ellipse in the transverse plane to $\vkondini$,
$0\leq\azaxeli<\pi$;
$\ellipin=\arctan$[\,(length of the minor axis)/(length of the major axis)\,],
$0\leq\ellipin\leq\pi/4$;
and $\helicin=\pm 1$ represents the direction of rotation of the
electric field vector (provided that $\ellipin\neq 0$).
The value $\helicin=+1$ corresponds to a counterclockwise
rotation if the rotation axis and the momentum
of the photon are directed toward the receiver.
If $\ellipin=0$, the polarization is linear along the direction defined by
the angle $\azaxeli$.
If $\ellipin=\pi/4$, the polarization is circular and $\helicin$ is equal
to the helicity because (\!\!~\ref{eq:inelpoa}) becomes identical to
(\!\!~\ref{eq:polinb}) applied to $\helic=\helicin$,
$\vkond=\vkondini$ and $\Eularop=\azaxeli$.
}.
The final state resulting from the initial state
$\debket\etaelli^{(\spinun)}_{\textup{in}}\finket$
is also an elliptically polarized state which we denote
$\debket\etaelli^{(\spinun)}_{\textup{out}}(\vkond)\finket$.
Indeed, by applying (\!\!~\ref{eq:polinc}) to
$\debket\etaelli^{(\spinun)}_{\textup{in}}\finket$ defined by
(\!\!~\ref{eq:inelpoa}) and using (\!\!~\ref{eq:inelpob}), we obtain:
\begin{eqnarray}\begin{array}{rcl} \displaystyle
\ilkeuz\debpscal\xpol\midpscal
\etaelli^{(\spinun)}_{\textup{out}}(\vkond)\finpscal
&\!\!=\!\!&-\helicin\cos\ellipin
\cos\left[\,\azaxeli\!+\!\eulerc(\vkond)\!-\!\Eularop\,\right]\;
\\
&&\;\;+\;\icmp\,\sin\ellipin
\sin\left[\,\azaxeli\!+\!\eulerc(\vkond)\!-\!\Eularop\,\right]. \end{array} \hspace{0.75cm} \label{eq:inelpoc} \end{eqnarray}
Then, by making the identity operator
$\sum_{\ilinp=\cx,\cy}\debket\ilinp\finket\irkeoz\,
\ilkeoz\debbra\ilinp\finbra$ act on the state
$\debket\etaelli^{(\spinun)}_{\textup{out}}(\vkond)\finket$
and using successively (\!\!~\ref{eq:inelpoc})
(applied with $\Eularop=0$), (\!\!~\ref{eq:inelpof})
and (\!\!~\ref{eq:inelpob}), we get:
\begin{eqnarray}\begin{array}{l}\displaystyle
\debket\etaelli^{(\spinun)}_{\textup{out}}(\vkond)\finket
=-\helicin\left[\, \cos\ellipin
\debket\xpol\finket^{}_{\!\!\vkond,\azaxeli+\eulerc(\vkond)} \right.
\\ \left.\displaystyle \hspace{3cm}
+\,\icmp\,\helicin\,\sin\ellipin
\debket\ypol\finket^{}_{\!\!\vkond,\azaxeli+\eulerc(\vkond)}
\,\right]. \end{array} \hspace{0.75cm} \label{eq:inelpog} \end{eqnarray}
Comparing with (\!\!~\ref{eq:inelpoa}), we see that the ellipticity
and the handedness are conserved and that the ellipse axes undergo
a rotation of angle $\eulerc(\vkond)$.
The major axis azimuth in the transverse plane $\{\cx[\vkond],\cy[\vkond]\}$
is: $\azaxel(\vkond)=\azaxeli+\eulerc(\vkond)$.
}
}
}
\section{Some predictions of the model} { \label{sec:predreli}
\subsection{Relative intensity (polarization not measured)} { \label{sub:densprob}
\subsubsection{Angular distribution of the final momentum.} {
From (\!\!~\ref{eq:fgenerq}) and (\!\!~\ref{eq:opdispa}), the PDF of
the final momentum if the polarization is not measured is expressed by:
\begin{eqnarray} \begin{array}{l}\displaystyle
\pdfp^{}_{\vavkond}(\vkond)\,\simeq\,\norm^{-1}\,(2\pi)^{-4}\mkondini^{-2}
\\ \displaystyle \hspace{1.5cm} \times\,
\gaussmk\!\left(|\vkond|\!-\!\mkondini\right)\delta_{1\,\sgn[\kz]}
\left|\jfour^{\ouva}(\vkond\!-\!\vkondini)\right|^2\!\!. \end{array} \hspace{0.75cm} \label{eq:dpkcart} \end{eqnarray}
Since the experimental setup directly measures the direction of $\vkond$,
it is useful to replace the Cartesian components
by the modulus and two angles giving the direction.
This change of variables must be done by a one-to-one transformation which must
moreover be defined in the half-space $\kz>0$ because of (\!\!~\ref{eq:contray}).
The spherical coordinates $\mkond,\angdev,\eulapre$ cannot be used because
the associated transformation is not one-to-one (if $\angdev=0$,
$\eulapre$ is undetermined and the Jacobian is zero).
On the other hand, we can use the {\it diffraction angles} $\angdif_{\cx}$
and $\angdif_{\cy}$ \cite{LLangdif} which are the projections
of the polar angle $\angdev$ on the planes
$(\cx,\cz)$ and $(\cy,\cz)$ (Fig. \!\!~\ref{fig:dirang}).
\begin{figure}
\caption{
Diffraction angles $\angdif_{\cx}$ and $\angdif_{\cy}$.}
\label{fig:dirang}
\end{figure}
The new variables
$(\mkond,\angdif_{\cx},\angdif_{\cy})$ are such that: $\mkond>0$,
$-\pi/2<\angdif_{\cx}<+\pi/2$, $-\pi/2<\angdif_{\cy}<+\pi/2$ and
the required transformation $(\kx,\ky,\kz)\leftrightarrow(\mkond,\angdif_{\cx},
\angdif_{\cy})$ is:
\begin{eqnarray}\begin{array}{c}
\hspace{0.75cm}\vkond\left(\mkond,\angdif_{\cx},\angdif_{\cy}\right)
\;=\;\mkond\,\cos\angdev\,
\left(\begin{array}{c}
\tan\angdif_{\cx}
\\ \tan\angdif_{\cy}
\\ 1 \end{array}\right),
\\
\cos\angdev=\left(1\!+\!\tan^2\angdif_{\cx}\!+\!\tan^2\angdif_{\cy}
\right)^{-1/2}\!,\hspace{0.25cm} 0\leq\angdev<\pi/2.
\end{array}\hspace{0.75cm} \label{eq:ouvrb} \end{eqnarray}
The change of PDF due to the change of variables is expressed by:
\begin{eqnarray}\!\pdfp^{}_{\vamkond,\vadifx,\vadify}
(\mkond,\angdif_{\cx},\angdif_{\cy})
=\left|\ensuremath{\mathrm{J}}(\mkond,\angdif_{\cx},\angdif_{\cy})\right|
\pdfp^{}_{\vavkond}[\,\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})\,],
\hspace{0.75cm} \label{eq:ouvrf} \end{eqnarray}
where $\ensuremath{\mathrm{J}}(\mkond,\angdif_{\cx},\angdif_{\cy})$ is the determinant
of the Jacobian of the transformation (\!\!~\ref{eq:ouvrb}) which is finite
and non-zero and whose calculation leads to the {\it angular factor:}
\begin{eqnarray}\ensuremath{\Gamma}\left(\angdif_{\cx},\angdif_{\cy}\right)
\equiv\mkond^{-2}\left|\ensuremath{\mathrm{J}}(\mkond,\angdif_{\cx},\angdif_{\cy})\right|=
\frac{\cos\angdev}{1-\sin^2\!\angdif_{\cx}\,\sin^2\!\angdif_{\cy}}.
\hspace{0.75cm} \label{eq:fganga} \end{eqnarray}
Expressing $\pdfp^{}_{\vavkond}\left[\,
\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})\,\right]$
from (\!\!~\ref{eq:dpkcart}) and substituting
into (\!\!~\ref{eq:ouvrf}), given (\!\!~\ref{eq:fganga}), we get:
\begin{eqnarray}\!\!\begin{array}{l}\displaystyle
\pdfp^{}_{\vamkond,\vadifx,\vadify}\!\left(\mkond,\angdif_{\cx},
\angdif_{\cy}\right)\simeq\norm^{-1}(2\pi)^{-4}\mkondini^{-2}
\mkond^2\,\gaussmk\!\left(\mkond\!-\!\mkondini\right)
\\ \displaystyle \hspace{1.25cm}\times\,
\ensuremath{\Gamma}(\angdif_{\cx},\angdif_{\cy})\left|\,\jfour^{\ouva}
\!\left[\,\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})\!-\!
\vkond(\mkondini,0,0)\,\right]\,\right|^2\!\!.
\end{array}\hspace{0.75cm} \label{eq:mcfoh} \end{eqnarray}
From (\!\!~\ref{eq:contrax}), $\Delta\mkond$ is close to zero.
We can therefore replace the function $\gaussmk\left(\mkond-\mkondini\right)$
by the Dirac distribution $\delta(\mkond-\mkondini)$
and express the angular distribution of the final momentum by:
\begin{eqnarray}\!\!\begin{array}{l}\displaystyle
\pdfp^{}_{\vadifx,\vadify}\!\left(\angdif_{\cx},\angdif_{\cy}\right)
\equiv\!\int_{0}^{\infty}\!\!\!\differ\mkond'\,
\pdfp^{}_{\vamkond,\vadifx,\vadify}\!\left(\mkond',\angdif_{\cx},
\angdif_{\cy}\right) \\ \\ \displaystyle \hspace{0.75cm}
\simeq\,\frac{\ensuremath{\Gamma}(\angdif_{\cx},\angdif_{\cy})}{(2\pi)^{4}\norm}\,
\left|\,\jfour^{\ouva}\!\left[\,\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})
-\vkond(\mkond,0,0)\,\right]\,\right|^2, \end{array}\hspace{0.75cm} \label{eq:mcfou} \end{eqnarray}
where we now consider for simplicity that $\mkond$ represents both
the modulus of $\vkondini$ and that of $\vkond$.
The normalization factor $\norm$ can be expressed by substituting
(\!\!~\ref{eq:opdispa}) into (\!\!~\ref{eq:fgenerw}).
Using the change of variables (\!\!~\ref{eq:ouvrb})
and given (\!\!~\ref{eq:contrax}), we get:
\begin{eqnarray}\begin{array}{l}\displaystyle
\norm\simeq(2\pi)^{-4}\!\!
\int_{-\pi/2}^{+\pi/2}\!\!\differ\angdif_{\cx}
\int_{-\pi/2}^{+\pi/2}\!\!\differ\angdif_{\cy}
\\ \displaystyle \hspace{1cm} \times\,
\ensuremath{\Gamma}(\angdif_{\cx},\angdif_{\cy})\left|\,\jfour^{\ouva}
\!\left[\,\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})
-\vkond(\mkond,0,0)\,\right]\,\right|^2\!\!\!.
\end{array}\hspace{0.75cm} \label{eq:mcfon} \end{eqnarray}
}
\subsubsection{Quantum formula of the relative intensity in Fraunhofer scalar diffraction.} {
To avoid calculating the integral (\!\!~\ref{eq:mcfon}), we consider
the ratio of the values of the angular distribution between the direction
$(\angdif_{\cx},\angdif_{\cy})$ and the forward direction $(0,0)$.
This ratio is nothing other than the relative intensity between
the directions of $\vkond$ and $\vkondini$.
Thus, in the quantum model (QM), the expression of the relative intensity is:
\begin{eqnarray} \left[\,\frac{\intens(\angdif_{\cx},\angdif_{\cy})}
{\intens(0,0)}\,\right]^{\ouva}_{\textup{QM}}\;=\;
\frac{\pdfp^{}_{\vadifx,\vadify}(\angdif_{\cx},\angdif_{\cy})}
{\pdfp^{}_{\vadifx,\vadify}(0,0)}. \label{eq:intrela} \end{eqnarray}
From (\!\!~\ref{eq:mcfou}) and since $\ensuremath{\Gamma}(0,0)=1$, this leads to:
\begin{eqnarray}\begin{array}{l}\displaystyle \!\!\left[\,\frac{\intens(\angdif_{\cx},\angdif_{\cy})}
{\intens(0,0)}\,\right]^{\ouva}_{\mathrm{QM}}
\simeq\;\ensuremath{\Gamma}(\angdif_{\cx},\angdif_{\cy}) \\ \displaystyle \hspace{2cm}\times\;
\frac{\displaystyle\left|\,
\jfour^{\ouva}[\,\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})\!-\!
\vkond(\mkond,0,0)\,]
\,\right|^2}{\displaystyle \left|\,\jfour^{\ouva}\left(0\right)\right|^2}. \end{array}\hspace{0.75cm} \label{eq:omonoch} \end{eqnarray}
For an aperture of the form $\ouva\equiv\ouvas\times[-\Delta\cz/2,+\Delta\cz/2]$,
where $\Delta\cz$ is independent of $(\cx,\cy)$, the position filtering function
$\fredA(\rvec)$ is equal to $\fred^{\ouvas,\Delta\cz}(\rvec)$ given by
(\!\!~\ref{eq:tronqfoc}). From this and (\!\!~\ref{eq:tronqfod}), the relation
(\!\!~\ref{eq:nrspinb}) leads to:
\begin{eqnarray}
\jfour^{\ouva}(\vkond-\vkondini)\;=\;
\jfour^{\ouvas}_{\indtran}(\kx,\ky)\;\jfour^{\Delta\cz}_{\indlong}(\kz-\mkond), \label{eq:prpobf} \end{eqnarray}
where:
\begin{eqnarray}\begin{array}{l} \displaystyle
\jfour^{\ouvas}_{\indtran}(\kx,\ky)\,\equiv\,(2\pi)^{-1}\,\surfouva^{-1/2}
\\ \displaystyle \hspace{2.25cm}\times
\!\!\intdbl_{\ouvas}\!\!\differ\cx\differ\cy\,\;
\exp\left[\,-\icmp(\kx\cx+\ky\cy)\,\right], \end{array}\hspace{0.75cm} \label{eq:prpobc} \end{eqnarray}
\begin{eqnarray}\begin{array}{l} \displaystyle
\jfour^{\Delta\cz}_{\indlong}(\kz-\mkond)\,\equiv\, (2\pi)^{-1/2}
\\ \displaystyle \hspace{2cm}\times
\!\!\int\!\differ\cz\;
\sqrt{\fredDzl(\cz)} \;\exp\left[\,-\icmp(\kz-\mkond)\cz\,\right]. \end{array}\hspace{0.75cm} \label{eq:prpobd} \end{eqnarray}
Substituting (\!\!~\ref{eq:prpobf}) into (\!\!~\ref{eq:omonoch})
and expressing $\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})$ from
($\!\!$~\ref{eq:ouvrb}), we obtain:
\begin{eqnarray}
\hspace{-0.25cm}\left[\frac{\intens(\angdif_{\cx},\angdif_{\cy})}
{\intens(0,0)}\right]^{\ouva}_{\mathrm{QM}}
\!\!\simeq\ensuremath{\Gamma}(\angdif_{\cx},\angdif_{\cy})\,
\ensuremath{T}^{\ouvas}(\mkond,\angdif_{\cx},\angdif_{\cy})\,
\ensuremath{L}^{\Delta\cz}(\mkond,\angdev),\hspace{0.5cm}
\label{eq:intrelm} \end{eqnarray}
where $\angdev$ and $\ensuremath{\Gamma}(\angdif_{\cx},\angdif_{\cy})$
are given by (\!\!~\ref{eq:ouvrb}) and (\!\!~\ref{eq:fganga}), respectively,
$\ensuremath{T}^{\ouvas}(\mkond,\angdif_{\cx},\angdif_{\cy})$
is the {\it transverse diffraction term}:
\begin{eqnarray}\hspace{-0.5cm}\begin{array}{l}
\ensuremath{T}^{\ouvas}(\mkond,\angdif_{\cx},\angdif_{\cy})
\equiv\frac{\displaystyle\,\left|\,
\jfour^{\ouvas}_{\indtran}(\mkond\cos\angdev\,\tan\angdif_{\cx}\,,\,
\mkond\cos\angdev\,\tan\angdif_{\cy})\,\right|^2}
{\displaystyle\left|\,\jfour^{\ouvas}_{\indtran}(0,0)\,\right|^2} \end{array}\hspace{0.25cm} \label{eq:intrelt} \end{eqnarray}
and $\ensuremath{L}^{\Delta\cz}(\mkond,\angdev)$
is the {\it longitudinal diffraction term}:
\begin{eqnarray}\ensuremath{L}^{\Delta\cz}(\mkond,\angdev)
\;\equiv\;\frac{\displaystyle
\left|\,\jfour^{\Delta\cz}_{\indlong}[\,\mkond\,(\cos\angdev-1)\,]\,\right|^2}
{\displaystyle\left|\,\jfour^{\Delta\cz}_{\indlong}(0)\,\right|^2}. \label{eq:intreln} \end{eqnarray}
}
\subsubsection{Test of the Huygens-Fresnel principle.} {
The relative intensity expressed by the quantum formula
(\!\!~\ref{eq:intrelm}) depends on the width $\Delta\cz$
of the longitudinal 1D aperture (Fig. \!\!~\ref{fig:ouvgenz}).
The value of $\Delta\cz$ can therefore be fitted to data obtained
from the measurement of the intensity as a function of the diffraction angle.
As previously mentioned (\S \!\!~\ref{par:posfilt}),
$\Delta\cz$ is the width of the distribution of the wavefronts
emitting the wavelets which contribute to the diffracted wave.
An experimental study directly concerning the Huygens-Fresnel principle
can therefore be considered.
}
\subsubsection{Comparison with the predictions of the scalar theories of wave optics.} {
In wave optics (WO), there are several versions of the scalar theory of
diffraction which differ by their assumed boundary conditions.
The best known are the theories of
Fresnel-Kirchhoff (FK) and Rayleigh-Sommerfeld (RS1 and RS2).
In Fraunhofer diffraction, for an initial monochromatic plane wave
in normal incidence,
the amplitude predicted by these theories at a point of radius vector
$\ensuremath{\mathbf{ d}}$ beyond the diaphragm can be expressed,
given (\!\!~\ref{eq:detadn}), in the form \cite{BoWo,Soma}:
\begin{eqnarray}\begin{array}{l}\displaystyle
\amplik^{\ouvas}(\ensuremath{\mathbf{ d}})\equiv
\amplik^{\ouvas,\mkond}\!\left(\distdipo,\frac{\vkond}{\mkond}\right)\!
\simeq-\,\vargc_{0}\frac{\icmp\mkond}{2\pi}
\frac{\exp\left[\icmp\mkond
\!\left(\distsodi\!+\!\distdipo\right)\right]}{\distsodi\,\distdipo}
\\ \displaystyle \hspace{0.5cm}
\times\,\ensuremath{\Omega}[(\vkondini,\vkond)]
\!\intdbl_{\ouvas}\differ\cx\differ\cy\;
\exp\!\left[-\icmp\mkond\left(\frac{\kx}{\mkond}\cx
+\frac{\ky}{\mkond}\cy\right)\right],
\end{array} \hspace{0.75cm} \label{eq:cfrkirl} \end{eqnarray}
where $\vargc_{0}$ is a constant, $\distsodi$ is the distance source-aperture
and $\ensuremath{\Omega}[(\vkondini,\vkond)]$ is the {\it obliquity factor}.
The latter depends on the {\it deflection angle} $(\vkondini,\vkond)$
which is also the polar angle $\angdev$ (Fig. \!\!~\ref{fig:dirang}).
Its value is specific to the theory:
\begin{eqnarray}\ensuremath{\Omega}(\angdev)\;=\;
\left\{ \begin{array}{cc}\displaystyle
(1+\cos\angdev)/2 \hspace{1cm}&\mbox{(FK)}
\\ \displaystyle\cos\angdev \hspace{1cm}&\mbox{(RS1)}
\\ \displaystyle 1 \hspace{1cm}&\;\mbox{(RS2)}. \end{array}\right.\hspace{0.75cm} \label{eq:cfrkirfi} \end{eqnarray}
From (\!\!~\ref{eq:detadn}), the intensity at a point of radius vector
$\ensuremath{\mathbf{ d}}$ is proportional to the intensity in the direction of
$\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})$. Hence:
\begin{eqnarray}
\left[\frac{\intens(\angdif_{\cx},\angdif_{\cy})}
{\intens(0,0)}\right]^{\ouvas}_{\mathrm{WO}}=\;
\frac{\raisebox{0.2cm}{$\left|\,\amplik^{\ouvas,\mkond}
\left[\,\distdipo\,,\,\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})/\mkond
\,\right]\,\right|^2$}}{\raisebox{-0.225cm}{$\left|\,\amplik^{\ouvas,\mkond}
\left[\,\distdipo\,,\,\vkond(\mkond,0,0)/\mkond\,\right]\,\right|^2$}}.
\hspace{0.75cm} \label{eq:intrelka} \end{eqnarray}
Expressing $\vkond(\mkond,\angdif_{\cx},\angdif_{\cy})$
and $\vkond(\mkond,0,0)$ from ($\!\!$~\ref{eq:ouvrb}) and
substituting into ($\!\!$~\ref{eq:cfrkirl}) then into
($\!\!$~\ref{eq:intrelka}), we see that $\distdipo$ is eliminated.
Then, since $\ensuremath{\Omega}(0)=1$ and given ($\!\!$~\ref{eq:prpobc}) and
($\!\!$~\ref{eq:intrelt}):
\begin{eqnarray}\left[
\frac{\intens(\angdif_{\cx},\angdif_{\cy})}
{\intens(0,0)}\right]^{\ouvas}_{\mathrm{WO}}
\simeq\;\ensuremath{\Omega}(\angdev)^2\;\,
\ensuremath{T}^{\ouvas}(\mkond,\angdif_{\cx},\angdif_{\cy}). \label{eq:intrell} \end{eqnarray}
The comparison of the formulas (\!\!~\ref{eq:intrelm}) and (\!\!~\ref{eq:intrell})
shows that the transverse diffraction term
$\ensuremath{T}^{\ouvas}(\mkond,\angdif_{\cx},\angdif_{\cy})$ is the same
in the two cases.
This is because the integrals in (\!\!~\ref{eq:prpobc}) and
(\!\!~\ref{eq:cfrkirl}) are the same.
The differences come from the angular factors
$\ensuremath{\Gamma}(\angdif_{\cx},\angdif_{\cy})$ and $\ensuremath{\Omega}(\angdev)^2$ and from
the presence of the longitudinal diffraction term
$\ensuremath{L}^{\Delta\cz}(\mkond,\angdev)$ in the quantum formula.
If the angles are small, the angular factors and the
longitudinal diffraction term are all close to 1 so that the quantum model
gives the same result as that of wave optics.
On the other hand, if the angles increase,
discrepancies appear between the different predictions.
}
\subsubsection{Example of comparison.} {
Let us consider the intensity variation in the horizontal plane $(Ox,Oz)$
for which we have: $\angdif_{\cy}=0$, $\angdif_{\cx}=\angdev$
if $\angdif_{\cx}\geq 0$, $\angdif_{\cx}=-\angdev$ if $\angdif_{\cx}\leq 0$.
In this case, it is convenient to make the notation change:
$(\angdif_{\cx},\angdev)\rightarrow(\angdif,|\angdev|)$, where
$-\pi/2<\angdif<+\pi/2$ (diffraction angle) and
$0\leq|\angdev|<+\pi/2$ (polar angle in the half-space $\cz>0$).
Since $\cos|\angdev|=\cos\angdev$, the relations
(\!\!~\ref{eq:fganga}) and (\!\!~\ref{eq:cfrkirfi}) then lead to:
\begin{eqnarray}\ensuremath{\Gamma}(\angdif,0)=\cos\angdev,\hspace{0.75cm}
\ensuremath{\Omega}(|\angdev|)=\ensuremath{\Omega}(\angdev).\hspace{0.75cm} \label{eq:difrect} \end{eqnarray}
We now consider the case of a rectangular slit $\fente$ of width $2\larg$
and of height $2\haut$ centered at $(\cx,\cy)=(0,0)$.
The expression (\!\!~\ref{eq:prpobc}) leads to:
\begin{eqnarray}\jfour^{\fente}_{\indtran}(\kx,\ky)\,=\,
\frac{\sqrt{\larg\haut}}{\pi}\;\frac{\sin\larg\kx}{\larg\kx}
\;\frac{\sin\haut\ky}{\haut\ky}. \hspace{0.75cm} \label{eq:fourtr} \end{eqnarray}
Given the notation change introduced above, the relation (\!\!~\ref{eq:ouvrb})
implies: $\kx=\mkond\cos|\angdev|\tan\angdif=\mkond\sin\angdev$ and $\ky=0$.
Applying (\!\!~\ref{eq:fourtr}) to these values
and substituting into (\!\!~\ref{eq:intrelt}), we get the well-known result:
\begin{eqnarray}\ensuremath{T}^{\fente}(\mkond,\angdif,0)\;=\;
\left[\,\frac{\sin(\larg\mkond\sin\angdif)}
{\larg\mkond\sin\angdif}\,\right]^2.\hspace{0.75cm} \label{eq:ttransr} \end{eqnarray}
Then, we suppose that the longitudinal filtering function is for example
a Gaussian. In this case, the width of the longitudinal aperture depends
on the standard deviation and on a threshold under which the integral of the
Gaussian outside the interval $[-\Delta\cz(\sigz)/2,+\Delta\cz(\sigz)/2]$
is considered as negligible (for example, with a threshold of $10^{-2}$,
we have: $\Delta\cz(\sigz)\simeq 5.16\,\sigz$ \cite{pdgGauss}).
Assuming that $\fred_{\indlong}^{\Delta\cz(\sigz)}(\cz)$
is a Gaussian centered at $\cz=0$ and of standard deviation $\sigz$,
the expression (\!\!~\ref{eq:prpobd}) leads to \cite{GradRyz}:
\begin{eqnarray}
\jfour^{\Delta\cz(\sigz)}_{\indlong}(\kz\!-\!\mkond)=
\!\left(\frac{2}{\pi}\right)^{\!\!1/4}\!\!\!\!\sqrt{\sigz}\,
\exp\!\left[-{\sigz}^2(\kz\!-\!\mkond)^2\right]\!.\hspace{0.75cm} \label{eq:poiga} \end{eqnarray}
Substituting into (\!\!~\ref{eq:intreln}), we get:
\begin{equation}\ensuremath{L}^{\Delta\cz(\sigz)}(\mkond,|\angdev|)\;=\;
\exp\left[\,-8\,{\sigz}^2\mkond^2\,\sin^4(\angdif/2)\,\right]. \label{eq:tlongig} \end{equation}
Curves obtained from formulae (\!\!~\ref{eq:intrelm})
and (\!\!~\ref{eq:intrell}) (applied with
(\!\!~\ref{eq:cfrkirfi}), (\!\!~\ref{eq:difrect}),
(\!\!~\ref{eq:ttransr}) and (\!\!~\ref{eq:tlongig}))
are shown in Fig. \!\!\!\!~\ref{fig:diffraca}
for a case of photon diffraction. \\
If $\sigz=0$, the longitudinal diffraction term is equal to 1.
This corresponds to the largest values predicted by the quantum model.
It is with the FK theory that the quantum model (QM1)
is in better agreement.
However, at $90^{\circ}$, the FK theory predicts
values that are generally non-zero,
which does not seem plausible (same for the RS2 theory).
The angular factors $\ensuremath{\Gamma}(\angdif,0)=\cos\angdev$ of the quantum model
and $\ensuremath{\Omega}(\angdev)^2=\cos^{2}\angdif$ of the RS1 theory
are the only ones which account for
the decrease in intensity towards zero at 90$^{\circ}$.
However, the factor $\cos\angdif$ seems more likely because
it is the same as that obtained by applying the exact calculation
of the diffraction by a wedge \cite{Somb} to the case of two wedges
of zero angle placed opposite one another to form a slit \cite{LLSom}. \\
If $\sigz>0$, the longitudinal diffraction term is strictly less than 1.
The values of the quantum model, maximum for $\sigz=0$, undergo a damping
which increases with $|\angdif|$ and $\sigz$.
As $\sigz$ increases from zero, the QM curve deviates more and more
from the QM1 curve and then goes below the RS1 curve.
Coincidentally, the curves QM and RS1 can be very close
but not for all values of $\angdif$ since the angular factors are different.
If $\sigz$ is large enough, the QM curve globally decreases much more rapidly
than the WO and QM1 curves and the gap becomes significant
at not too large angles (QM2).
Such a result obtained experimentally would be a signal of the need
to use a ``multi-wavefronts'' Huygens-Fresnel principle
to describe the diffraction by an aperture. \\
\begin{figure}
\caption{
Comparison between different theoretical predictions
of the relative intensity in Fraunhofer diffraction
as a function of the diffraction angle in the horizontal plane
for a rectangular slit of width $2\larg=10\;\mu$m and
an incident monochromatic plane wave corresponding to photons
of wavelength $\lgond=632.8$ nm (helium-neon laser).
Five predictions are presented: three predictions of wave optics (WO)
corresponding to the scalar theories of Fresnel-Kirchhoff (FK) and
Rayleigh-Sommerfeld (RS1 and RS2) and two predictions of the quantum model
(QM) corresponding to two values of the standard deviation $\sigz$
associated with a Gaussian longitudinal filtering (GLF) of the incident wave:
$\sigz=0$ (QM1) and $\sigz=\larg/10$ (QM2).
The values of the five intensities are distributed according to the decreasing
order: RS2, FK, QM1, RS1, QM2, whatever $\angdif$ is
over the entire range 0$^\circ$--90$^\circ$.
These predictions correspond to the case where the polarization is not measured. }
\label{fig:diffraca}
\end{figure}
}
\subsubsection{Large diffraction angles.} {
From the above analysis, it turns out
that the relative gaps between the predictions of the different models
considered here are significant at large angles.
Moreover, from a survey of the literature, it seems that
no accurate experimental study of the diffraction in this region
has been carried out so far.
Since the time when the FK and RS1-2 theories were formulated
(late 19th century),
technologies in optics have made tremendous progress
due in particular
to accurate measurements of intensity by charge-coupled
devices which make it possible to achieve
a sufficiently expanded dynamic range.
An experimental study of this still little explored region
is therefore probably feasible at the present time.
}
}
\subsection{Polarization probabilities (photons)} { \label{sub:propolar}
From (\!\!~\ref{eq:fgenerr}) and (\!\!~\ref{eq:chpophb}), the conditional
probability to detect a photon of helicity $\helic$ if its momentum is
$\hbar\vkond$ is:
\begin{eqnarray}
\!\!\probcon_{\vaspinzh}^{(\spinun)}\!\left([\helic]_{\vkond}\right)
=\left|\!\ilkeoz\debpscal\!\helic\midpscal
\ensuremath{\etcospin^{(\spinun)}_{\textup{out}}(\vkond)}\!\finpscal\!\right|^2\!\!
=\left|\!\ilkoeoz\debpscal\!\helic\midpscal\!\kiuin
\finpscal\!\right|^2\!\!.
\hspace{0.75cm} \label{eq:chpophc} \end{eqnarray}
So the probabilities of the helicity states and consequently of the circular
polarizations are conserved.
Note that
for an aperture of sub-wavelength size, circular polarization probabilities
are not conserved for all diffraction angles because the aperture limits the
transmission of circularly polarized light \cite{Shin}.
This effect is not taken into account in assumption (\!\!~\ref{eq:rotinid})
and consequently the polarization predicted by the model does not match the
experiment in this specific case. \\
For an elliptically polarized initial state
$\debket\etaelli^{(\spinun)}_{\textup{in}}\finket$,
with major axis azimuth $\azaxeli$, ellipticity angle $\ellipin$ and
handedness $\helicin$ (Eq. (\!\!~\ref{eq:inelpoa})),
the conditional probabilities of linear polarization
in the direction defined by the angle $\Eularop$
with respect to the $\cx[\vkond]$ axis
are expressed, from (\!\!~\ref{eq:inelpoc}), by:
\begin{eqnarray}\!\!\begin{array}{l}\displaystyle
\probcon_{\vaxpxizh}^{(\spinun)}\left([\xpol]_{\vkond,\Eularop}\right)
=\left|\;\ilkeuz\debpscal\xpol\midpscal
\etaelli^{(\spinun)}_{\textup{out}}(\vkond)\finpscal\right|^2\!\!
\\ \displaystyle \hspace{1.25cm}
=\frac{1}{2}\left\{\,1+\cos2\ellipin
\cos2[\,\azaxeli+\eulerc(\vkond)-\Eularop\,]\,\right\}, \end{array}\hspace{0.75cm} \label{eq:pislpb} \end{eqnarray}
whatever $\helicin$, where $\eulerc(\vkond)$ is the rotation angle
of the ellipse axes due to diffraction.
From (\!\!~\ref{eq:pislpb}), we have:
\begin{eqnarray} \eulerc(\vkond)=\Eularop\!-\!\azaxeli
+\frac{1}{2}\arccos\frac{\,2\left|\,\ilkeuz\debpscal\!\xpol
\midpscal\etaelli^{(\spinun)}_{\textup{out}}(\vkond)\!\finpscal
\right|^2\!\!-1\,}{\cos 2\ellipin}, \hspace{0.75cm} \label{eq:pislpc} \end{eqnarray}
where $\vkond=\vkond(\mkond,\eulanuy,\eulapre)$.
Therefore, the measurement of the probability $\left|\,\ilkeuz\debpscal\xpol
\midpscal\etaelli^{(\spinun)}_{\textup{out}}(\vkond)\finpscal\right|^2$
as a function of $\mkond,\eulanuy$, and $\eulapre$ makes it possible to fit the
function $\eulerc[\vkond(\mkond,\eulanuy,\eulapre)]$ to the experimental data
(provided that $\ellipin\neq\pi/4$).
From (\!\!~\ref{eq:rotkok}) and (\!\!~\ref{eq:fapendi}),
its expected value is zero for $\eulanuy=\eulapre=0$. \\
In the case of a linear polarization ($\ellipin=0$),
the final polarization is also linear in the direction defined by the angle
$\azaxeli+\eulerc(\vkond)$ [Eq. (\!\!~\ref{eq:inelpog})].
Assuming that the maximum transmission axis of the analyzer is the axis
$\oprotos(0,0,\Eularop)\,\cx[\vkond]$,
the device can be rotated around $\cz[\vkond]$ so as to find the angle
$\Eularop_{1}(\vkond)$ such that $\left|\,\,^{}_{\!\!\vkond,\Eularop_{1}(\vkond)
\!\!\!}\debpscal\!\xpol\midpscal\etaelli^{(\spinun)}_{\textup{out}}(\vkond)
\!\finpscal\right|^2=1$. Then, (\!\!~\ref{eq:pislpc}) leads to:
$\eulerc(\vkond)=\Eularop_{1}(\vkond)-\azaxeli$.
}
}
\section{Conclusion} { \label{sec:conclu}
It is possible to construct a model based exclusively on quantum mechanics
to describe the Fraunhofer diffraction by a diaphragm.
In the model presented here, the quantum concept of measurement was used,
within the framework of the S-matrix formalism, to describe the passage
of the particles through the aperture.
The notion of projector had to be generalized by that
of filtering operator in order to obtain a description of the measurement
compatible with the Huygens-Fresnel principle.
Then, because of kinematics,
it was necessary to assume that the passage of the particle through
the aperture is described by a double measurement starting with
the measurement of position (which creates a localized transitional state
of indeterminate energy) and ending with an energy-momentum measurement
(which creates the final state with the same energy as the initial state).
The model suggests that the wavelets involved in the Huygens-Fresnel principle
are emitted from several neighboring wavefronts distributed along
the longitudinal direction in the aperture region.
These wavefronts contribute with different weights to the
amplitude of the diffracted wave and the width of their distribution,
not known a priori, can be fitted to the data from measurement of the intensity
as a function of the diffraction angle.
If this width is large enough,
a significant damping of the intensity at large angles is predicted.
A direct experimental study of the Huygens-Fresnel principle
is therefore possible.
Moreover, the model provides predictions concerning the still little explored
region of large diffraction angles.
In particular, it predicts the decrease in intensity towards zero
at 90$^{\circ}$, contrary to most of the scalar theories of wave optics.
Finally, in the case of light in single-photon states and for an incident
monochromatic plane wave, the model predicts that the transfer of momentum
between the photon and the diaphragm conserves the probabilities of the
circular polarizations but can cause a phase shift between the amplitudes
of the associated helicity states.
For an elliptically polarized initial state,
the conservation of the ellipticity and of the handedness is predicted.
The phase shift between the amplitudes of the helicity states
corresponds to a rotation of the axes of the ellipse.
The angle of this rotation depends on the diffraction angles
and is not known a priori.
Its values can be fitted to the data from measurements of the polarization
of the photons detected beyond the diaphragm.
It would thus be possible to get information
on how diffraction modifies the polarization of light. \\
}
}
\end{document} | arXiv | {
"id": "1710.09758.tex",
"language_detection_score": 0.5903728604316711,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\pagestyle{myheadings} \markboth {
{\small\rm Y. B. Jun et al.}} {{\small\rm Characterizations of Fuzzy Fated Filters of $R_0$-algebras
Based on Fuzzy Points}
}
\title{Characterizations of Fuzzy Fated Filters of $R_0$-algebras
Based on Fuzzy Points}
\bth \author{Young Bae Jun}
\no{\smaller Department of Mathematics Education (and RINS), Gyeongsang National University,
Chinju 660-701, Korea.\\ Email: skywine@gmail.com}
\dzh
\author{J. Kavikumar \ and \ Muhammad Akram}
\no{\smaller Department of Mathematics and Statistics, Faculty of Science, Technology and Human Development, Universiti Tun Hussein Onn Malaysia,
86400 Parit Raja, Batu Pahat, Johor, Malaysia\\ Email: kaviphd@gmail.com; makram\_69@yahoo.com}
\dzh
\no{\smallerbf AMS Mathematics Subject Classification(2000):}{\smaller\ 06F35, 03G25, 08A72 } \flh \no{\smallerbf Abstract.}{\smaller\ A more general form of the notion of quasi-coincidence of a fuzzy point with a fuzzy subset is considered, and a generalization of fuzzy fated filters of $R_0$-algebras is discussed.
The notion of an $(\in, \in\! \vee \, {q}_k)$-fuzzy fated filter in
an $R_0$-algebra is introduced, and several properties are investigated. Characterizations of an $(\in, \in\! \vee \, {q}_k)$-fuzzy fated filter in an $R_0$-algebra are discussed.
Using a collection of fated filters, an $(\in, \in\! \vee \, {q}_k)$-fuzzy fated filter
is established.} \zyh \no{\smallerbf Keywords:}{\smaller\ (Fated) filter, Fuzzy (fated) filter,
$(\in,$ $\in\! \vee \, {q})$-fuzzy (fated) filter,
$(\in,$ $\in\! \vee \, {q}_k)$-fuzzy fated filter,
Strong $(\in,$ $\in\! \vee \, {q}_k)$-fuzzy fated filter.}
\defif and only if {if and only if } \defthe following assertions are equivalent: {the following assertions are equivalent: } \def\tfrac{1-k}{2}{\tfrac{1-k}{2}} \def\, {q}_k\, {\, {q}_k\, } \def\in\! \vee \, {q}\, {\in\! \vee \, {q}\, } \def\in\! \wedge \, {q}\, {\in\! \wedge \, {q}\, } \def\in\! \vee \, {q}_k\, {\in\! \vee \, {q}_k\, } \def\in\! \vee \, {q}_r\, {\in\! \vee \, {q}_r\, } \def\in\! \wedge \, {q}_k\, {\in\! \wedge \, {q}_k\, } \def \, \overline{\in} \vee \overline{{q}_k} \,{ \, \overline{\in} \vee \overline{{q}_k} \,} \def \, \overline{\in} \vee \overline{q} \,{ \, \overline{\in} \vee \overline{q} \,} \def \, \overline{\in} \, { \, \overline{\in} \, } \def \, \overline{{q}_k} \, { \, \overline{{q}_k} \, } \def\mu} \def\mbp{\nu} \def\mcp{\gamma{\mu} \def\mbp{\nu} \def\mcp{\gamma} \deffuzzy fated {fuzzy fated }
\zjq \normalsize \zhangjie{Introduction} \no One important task of artificial intelligence is to make the computers simulate human beings in dealing with certainty and uncertainty in information. Logic appears in a ``sacred'' (respectively, a ``profane'') form which is dominant in proof theory (respectively, model theory). The role of logic in mathematics and computer science is twofold -- as a tool for applications in both areas, and a technique for laying the foundations. Non-classical logic including many-valued logic, fuzzy logic, etc., takes the advantage of classical logic to handle information with various facets of uncertainty (see \cite{INS172-1} for generalized theory of uncertainty), such as fuzziness, randomness etc. Non-classical logic has become a formal and useful tool for computer science to deal with fuzzy information and uncertain information. Among all kinds of uncertainties, incomparability is an important one which can be encountered in our life.
The concept of $R_0$-algebras was first introduced by Wang in \cite{Wang00} by providing an algebraic proof of the completeness theorem of a formal deductive system \cite{INS117-47}. Obviously,
$R_0$-algebras are different from the BL-algebras. Further, Pei and Wang \cite{SCE32-56} proved $NM$-algebras are categorically isomorphic to $R_0$-algebras. Jun and Liu \cite{ID93249} studied (fated) filters of $R_0$-algebras. They mentioned that the theory of $R_0$-algebras becomes one of the theoretical applications to the development of the theory of $MTL$-algebras. Some concrete practical and theoretical applications of $R_0$-algebras can be found in \cite{SCE32-56, Wang00}. Pei \cite{PEI01} proposed a new kind of fuzzy algebraic structure with the purpose to extending the concept of $R_0$-algebras and BL-algebras using results of normal residuated lattices into fuzzy settings.
Liu and Li \cite{INS171-61} discussed the fuzzy set theory of filters in $R_0$-algebras.
The algebraic structure of set theories dealing with uncertainties has been studied by some authors. The most appropriate theory for dealing with uncertainties is the theory of fuzzy sets developed by Zadeh \cite{IC8-338}. Murali \cite{INS158-277} proposed a definition of a fuzzy point belonging to fuzzy subset under a natural equivalence on fuzzy subset. The idea of quasi-coincidence of a fuzzy point with a fuzzy set, which is mentioned in \cite{JMAA76-571}, played a vital role to generate some different types of fuzzy subsets. It is worth pointing out that Bhakat and Das \cite{FSS51-235, FSS80-359} initiated the concepts of $(\alpha,\beta)$-fuzzy subgroups by using the ``belongs to'' relation $(\in \, )$ and ``quasi-coincident with'' relation $({q})$ between a fuzzy point and a fuzzy subgroup, and introduced the concept of an $(\in, \in\! \vee \, {q}\, )$-fuzzy subgroup. In particular, an $(\in, \in\!\vee \, {q})$-fuzzy subgroup is an important and useful generalization of Rosenfeld's fuzzy subgroup. As a generalization of the notion of fuzzy filters in $R_0$-algebras, Ma et al. \cite{MLQ55-493} dealt with the notion of $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy filters in $R_0$-algebras.
In \cite{ID 980315}, Han et al. dealt with the fuzzy set theory of fated filters in $R_0$-algebras. They provided conditions for a fuzzy filter to be a fuzzy fated filter, and introduced the notion of $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filters. They established a relation between an $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy filter and an $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filter, and provided conditions for an $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy filter to be an $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filter. They also dealt with characterizations of an $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filter. It is now natural to investigate more general type of $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filters of an $R_0$-algebra. As a first step in this direction, we introduce the concept of an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter of an $R_0$-algebra, and discuss some fundamental aspects of $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filters. We deal with characterizations of $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filters.
Using a collection of fated filters, we establish an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter.
The important achievement of the study with an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter is that the notion of an $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filter is a special case of an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter, and thus so many results in the paper \cite{ID 980315} are corollaries of our results obtained in this paper. \zjq \zhangjie{Preliminaries}
\no Let $L$ be a bounded distributive lattice with order-reversing involution $\neg$ and a binary operation $\to.$ Then $(L, \wedge, \vee, \neg, \to)$ is called an {\it $R_0$-algebra} (see \cite{Wang00}) if it satisfies the following axioms: \begin{lieju}
\item[\rm (R1)] $x\to y=\neg y\to \neg x,$
\item[\rm (R2)] $1\to x=x,$
\item[\rm (R3)] $(y\to z)\wedge ((x\to y)\to (x\to z))=y\to z,$
\item[\rm (R4)] $x\to (y\to z)=y\to (x\to z),$
\item[\rm (R5)] $x\to (y\vee z)=(x\to y)\vee (x\to z),$
\item[\rm (R6)] $(x\to y)\vee ((x\to y)\to (\neg x\vee y))=1.$ \end{lieju}
Let $L$ be an $R_0$-algebra. For any $x,y\in L,$ we define $x\odot y=\neg (x\to \neg y)$ and $x\oplus y=\neg x\to y.$ It is proved that $\odot$ and $\oplus$ are commutative, associative and $x\oplus y=\neg(\neg x\odot \neg y),$ and $(L, \wedge, \vee, \odot, \to, 0,1)$ is a residuated lattice.
For any elements $x,$ $y$ and $z$ of an $R_0$-algebra $L,$ we have the following properties (see {\rm \cite{SCE32-56}}). \begin{lieju}
\item[\rm (a1)] $x\le y$ if and only if $x\to y=1,$
\item[\rm (a2)] $x\le y\to x,$
\item[\rm (a3)] $\neg x=x\to 0,$
\item[\rm (a4)] $(x\to y)\vee (y\to x)=1,$
\item[\rm (a5)] $x\le y$ implies $y\to z \le x\to z,$
\item[\rm (a6)] $x\le y$ implies $z\to x \le z\to y,$
\item[\rm (a7)] $((x\to y)\to y)\to y=x\to y,$
\item[\rm (a8)] $x\vee y=((x\to y)\to y)\wedge ((y\to x)\to x),$
\item[\rm (a9)] $x\odot \neg x=0$ and $x\oplus \neg x=1,$
\item[\rm (a10)] $x\odot y\le x\wedge y$ and $x\odot (x\to y)\le
x\wedge y,$
\item[\rm (a11)] $(x\odot y)\to z=x\to (y\to z),$
\item[\rm (a12)] $x\le y\to (x\odot y),$
\item[\rm (a13)] $x\odot y\le z$ if and only if $x\le y\to z,$
\item[\rm (a14)] $x\le y$ implies $x\odot z\le y\odot z,$
\item[\rm (a15)] $x\to y\le (y\to z)\to (x\to z),$
\item[\rm (a16)] $(x\to y)\odot (y\to z)\le x\to z.$ \end{lieju}
A non-empty subset $A$ of an $R_0$-algebra $L$ is called a {\it
filter} of $L$ if it satisfies the following two conditions: \begin{lieju}
\item[\rm (b1)] $1 \in A.$
\item[\rm (b2)] $(\forall x \in A)~(\forall y \in L)
~(x \to y \in A \Longrightarrow y \in A).$
\end{lieju}
It can be easily verified that a non-empty subset $A$ of an $R_0$-algebra $L$ is a filter of $L$ if and only if it satisfies the following conditions:
\begin{lieju}
\item[\rm (b3)] $(\forall x, y \in A)~(x\odot y \in A).$
\item[\rm (b4)] $(\forall y \in L)~(\forall x \in A)~(x \le y \Longrightarrow y \in A).$ \end{lieju}
\begin{definition}\label{{D091006}}
A fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of an $R_0$-algebra $L$ is called a {\it fuzzy
filter} of $L$ if it satisfies:
\begin{lieju}
\item[\rm (c1)] $(\forall x, y \in L)~(\mu} \def\mbp{\nu} \def\mcp{\gamma(x\odot y) \ge \min\{\mu} \def\mbp{\nu} \def\mcp{\gamma(x), \mu} \def\mbp{\nu} \def\mcp{\gamma(y)\}).$
\item[\rm (c2)] $\mu} \def\mbp{\nu} \def\mcp{\gamma$ is order-preserving, that is,
$(\forall x, y \in L)~(x \le y \Longrightarrow \mu} \def\mbp{\nu} \def\mcp{\gamma(x) \le \mu} \def\mbp{\nu} \def\mcp{\gamma(y)).$
\end{lieju}\end{definition} \fudl
\begin{thm}\label{{T091006}}
A fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of an $R_0$-algebra $L$ is a fuzzy filter of $L$
if and only if it satisfies: \begin{lieju}
\item[\rm (c3)] $(\forall x \in L)~(\mu} \def\mbp{\nu} \def\mcp{\gamma(1) \ge \mu} \def\mbp{\nu} \def\mcp{\gamma(x)),$
\item[\rm (c4)] $(\forall x, y \in L)~(\mu} \def\mbp{\nu} \def\mcp{\gamma(y) \ge \min\{\mu} \def\mbp{\nu} \def\mcp{\gamma(x \to y), \mu} \def\mbp{\nu} \def\mcp{\gamma(x)\}).$ \end{lieju}\end{thm} \fudl
For any fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of $L$ and $t \in (0, 1],$ the set
\[U(\mu} \def\mbp{\nu} \def\mcp{\gamma; t) = \{x \in L \mid \mu} \def\mbp{\nu} \def\mcp{\gamma(x) \ge t\}\] is called a
{\it level subset} of $L.$ It is well known that a fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of $L$ is a fuzzy filter of $L$ if and only if the non-empty level subset $U(\mu} \def\mbp{\nu} \def\mcp{\gamma;t),$ $t\in (0,1],$ of $\mu} \def\mbp{\nu} \def\mcp{\gamma$ is a filter of $L.$
A fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of a set $L$ of the form
\begin{equation*}
\mu} \def\mbp{\nu} \def\mcp{\gamma(y):=\left\{\begin{array}{ll}
t\in (0,1] &{\rm if}\;\, y=x, \\
0 &{\rm if}\;\, y\ne x,
\end{array}\right.
\end{equation*} is said to be a {\it fuzzy point} with support $x$ and value $t$ and is denoted by $x_t.$
For a fuzzy point $x_t$ and a fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of $L,$ Pu and Liu \cite{JMAA76-571} introduced the symbol $x_t\, {\alpha}\, \mu} \def\mbp{\nu} \def\mcp{\gamma,$ where $\alpha \in \{\in, {q}, \in\! \vee \, {q}\, \}.$
We say that \begin{lieju}
\item[\rm (i)] $x_t$ {\it belong to} $\mu} \def\mbp{\nu} \def\mcp{\gamma,$ denoted by $x_t\in \mu} \def\mbp{\nu} \def\mcp{\gamma,$ if $\mu} \def\mbp{\nu} \def\mcp{\gamma(x)\ge t,$
\item[\rm (ii)] $x_t$ is {\it quasi-coincident with} $\mu} \def\mbp{\nu} \def\mcp{\gamma,$ denoted by
$x_t\, {q}\, \mu} \def\mbp{\nu} \def\mcp{\gamma,$ if $\mu} \def\mbp{\nu} \def\mcp{\gamma(x)+t>1,$
\item[\rm (iii)] $x_t\in\! \vee \, {q}\, \mu} \def\mbp{\nu} \def\mcp{\gamma$ if $x_t\in \mu} \def\mbp{\nu} \def\mcp{\gamma$ or $x_t\, {q}\, \mu} \def\mbp{\nu} \def\mcp{\gamma.$ \end{lieju}
\zjq \zhangjie{Generalizations of fuzzy fated filters based on fuzzy points} \no
In what follows, $L$ is an $R_0$-algebra unless otherwise specified.
In \cite{ID93249}, the notion of a fated filter of $L$ is introduced as follows.
A non-empty subset $A$ of $L$ is called a {\it
fated filter} of $L$ (see \cite{ID93249}) if it satisfies (b1) and
\begin{equation}\begin{split}\label{{z091220}}
(\forall x,y \in L)~(\forall a \in A)
~(a\to ((x \to y)\to x) \in A \Longrightarrow x \in A).
\end{split}\end{equation} \fudl \begin{lem}[\cite{ID93249}]\label{{L091209}} A filter $F$ of $L$ is fated if and only if the following assertion is valid.
\begin{equation}\begin{split}\label{{z091209}}
(\forall x,y,z\in L)~ \bigl(x\to (y\to z)\in F, ~x\to y\in F
~\Rightarrow ~x\to z\in F\bigr).
\end{split}\end{equation} \end{lem} \fudl \begin{lem}[\cite{ID93249}]\label{{L091209-1}} A filter $F$ of $L$ is fated if and only if the following assertion is valid.
\begin{equation}\begin{split}\label{{z091209-1}}
(\forall x,y\in L)~ \bigl((x\to y)\to x\in F
~\Rightarrow ~x\in F\bigr).
\end{split}\end{equation} \end{lem} \fudl
Denote by $FF(L)$ the set of all fated filters of $L.$ Note that $FF(L)$ is a complete lattice under the set inclusion with the largest element $L$ and the least element $\{1\}.$
In what follows, let $k$ denote an arbitrary
element of $[0,1)$ unless otherwise specified.
To say that $x_t\, \, {q}_k\, \, \mu} \def\mbp{\nu} \def\mcp{\gamma,$ we mean $\mu} \def\mbp{\nu} \def\mcp{\gamma(x)+t+k>1.$
To say that $x_t\in\! \vee \, {q}_k\, \, \mu} \def\mbp{\nu} \def\mcp{\gamma,$ we mean $x_t\in \mu} \def\mbp{\nu} \def\mcp{\gamma$ or $x_t\, \, {q}_k\, \, \mu} \def\mbp{\nu} \def\mcp{\gamma.$
\begin{definition}\label{{D091115-33}} A fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of $L$ is said to be an {\it $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filter} of $L$ if it satisfies:
\begin{enumerate}
\item[\rm (1)] $x_t\in \mu} \def\mbp{\nu} \def\mcp{\gamma
~\Longrightarrow ~1_t\in\! \vee \, {q}_k\, \mu} \def\mbp{\nu} \def\mcp{\gamma,$
\item[\rm (2)] $(a\to ((x\to y)\to x))_t\in \mu} \def\mbp{\nu} \def\mcp{\gamma, ~a_s\in \mu} \def\mbp{\nu} \def\mcp{\gamma
~\Longrightarrow ~x_{\min\{t,s\}}\in\! \vee \, {q}_k\, \mu} \def\mbp{\nu} \def\mcp{\gamma$
\end{enumerate}
for all $x,a,y\in L$ and $t,s\in (0,1].$
\end{definition}
If a fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of $L$ satisfies (c3) and Definition \ref{{D091115-33}}(2), then we say that $\mu} \def\mbp{\nu} \def\mcp{\gamma$ is a {\it strong $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter} of $L.$
A (strong) $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L$ with
$k=0$ is called a (strong) $\left(\in, \in\! \vee \, {q}\, \right)$-fuzzy fated
filter of $L.$
\begin{example}\label{{E091003-33}}
Let $L=\{0,a,b,c,d,1\}$ be a set with the following Hasse diagram and Cayley
tables:
\begin{table}[h] \begin{center} \begin{picture}(40,30) \thinlines
\put(20,45){\circle*{3}}
\put(20,28){\circle*{3}}
\put(20,11){\circle*{3}}
\put(20,-6){\circle*{3}}
\put(20,-23){\circle*{3}}
\put(20,-40){\circle*{3}}
\put(25,-43){$0$}
\put(25,-26){$a$}
\put(25,-10){$b$}
\put(25,8){$c$}
\put(25,24){$d$}
\put(25,43){$1$}
\put(20,-40){\line(0,1){85}}
\end{picture} \hspace{3mm}
\begin{tabular}{c|c} \hline\noalign{
}
$x$ & ~$\neg x$ \\
\noalign{
}\hline\noalign{
}
$0$ & ~$1$ \\
$a$ & ~$d$ \\
$b$ & ~$c$ \\
$c$ & ~$b$ \\
$d$ & ~$a$ \\
$1$ & ~$0$ \\
\noalign{
}\hline
\end{tabular} \hspace{10mm}
\begin{tabular}{c|cccccc} \hline\noalign{
}
$\to$ & ~$0$ & ~$a$ & ~$b$ & ~$c$ & ~$d$ & ~$1$\\
\noalign{
}\hline\noalign{
}
$0$ & ~$1$ & ~$1$ & ~$1$ & ~$1$ & ~$1$ & ~$1$ \\
$a$ & ~$d$ & ~$1$ & ~$1$ & ~$1$ & ~$1$ & ~$1$ \\
$b$ & ~$c$ & ~$c$ & ~$1$ & ~$1$ & ~$1$ & ~$1$ \\
$c$ & ~$b$ & ~$b$ & ~$b$ & ~$1$ & ~$1$ & ~$1$ \\
$d$ & ~$a$ & ~$a$ & ~$b$ & ~$c$ & ~$1$ & ~$1$ \\
$1$ & ~$0$ & ~$a$ & ~$b$ & ~$c$ & ~$d$ & ~$1$ \\
\noalign{
}\hline
\end{tabular} \end{center} \end{table}
\noindent
Then $(L,\wedge,\vee,\neg,\to,0,1)$ is an $R_0$-algebra (see \cite{INS171-61}), where $x\wedge y=\min\{x,y\}$ and $x\vee y=\max\{x,y\}.$
Define a fuzzy subset $\mu} \def\mbp{\nu} \def\mcp{\gamma$ of $L$ by
\[\mu} \def\mbp{\nu} \def\mcp{\gamma :L\rightarrow [0,1],
~~x \mapsto \begin{cases}
0.7 & \mbox{\rm if } \, x=1, \\
0.6 & \mbox{\rm if } \, x=c, \\
0.4 & \mbox{\rm if } \, x=d, \\
0.2 & \mbox{\rm if } \, x\in \{0,a,b\}.
\end{cases} \] It is routine to verify that $\mu} \def\mbp{\nu} \def\mcp{\gamma$ is a strong
$(\in,$ $\in\! \vee {q}_{0.4})$-fuzzy fated filter of $L.$
A fuzzy subset $\mbp$ of $L$ defined by
\[\mbp :L\rightarrow [0,1],
~~x \mapsto \begin{cases}
0.8 & \mbox{\rm if } \, x\in \{c,d\}, \\
0.7 & \mbox{\rm if } \, x=1, \\
0.3 & \mbox{\rm if } \, x\in \{0,a,b\}.
\end{cases} \] is an $(\in,$ $\in\! \vee {q}_{0.2})$-fuzzy fated filter of $L,$ but it is not a strong $(\in,$ $\in\! \vee {q}_{0.2})$-fuzzy fated filter of $L.$
\end{example}
\begin{thm}\label{{T110902}}
Every $(\in, \in)$-fuzzy fated filter of $L$ is an $(\in,
\in\! \vee \, {q}_k\, )$-fuzzy fated filter.
\end{thm}
\begin{proof} Straightforward. \end{proof}
Taking $k=0$ in Theorem \ref{{T110902}}, we have the following corollary.
\begin{cor}\label{C110902}
Every $(\in, \in)$-fuzzy fated filter of $L$ is an $(\in,
\in\! \vee \, {q}\, )$-fuzzy fated filter.
\end{cor}
The converse of Theorem \ref{{T110902}} is not true as seen in the following example.
\begin{example}
The $(\in,$ $\in\! \vee {q}_{0.2})$-fuzzy fated filter $\mbp$ of $L$ in
Example \ref{{E091003-33}} is not an $(\in, \in)$-fuzzy fated filter of $L.$ \end{example}
Obviously, every strong $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filter is an $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filter, but the converse is not true, as seen in Example \ref{{E091003-33}}.
We provide characterizations of an $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filter.
\begin{thm}\label{{T091115-5}} A fuzzy subset $\mu$ of $L$ is an $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L$ if and only if it satisfies the following inequalities:
\begin{enumerate}
\item[\rm (1)] $(\forall x\in L)$ $(\mu(1)\ge \min\{\mu(x), \tfrac{1-k}{2}\}),$
\item[\rm (2)] $(\forall x,a,y\in L)$
$(\mu(x)\ge \min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\}).$
\end{enumerate} \end{thm}
\begin{proof} Let $\mu$ be an $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L.$ Assume that there exists $a\in L$ such that $\mu(1)<\min\{\mu(a), \tfrac{1-k}{2}\}.$ Then $\mu(1)<t\le \min\{\mu(a), \tfrac{1-k}{2}\}$ for some $t\in (0,\tfrac{1-k}{2}],$ and so $a_t\in \mu.$ It follows from Definition \ref{{D091115-33}}(1) that $1_t\in\! \vee \, {q}_k\, \mu,$ i.e., $1_t\in \mu$ or $1_t\, {q}_k\, \mu$ so that $\mu(1)\ge t$ or $\mu(1)+t+k>1.$ This is a contradiction. Hence
$\mu(1)\ge \min\{\mu(x), \tfrac{1-k}{2}\}$ for all $x\in L.$ Suppose that there exist $a,b,c\in L$ such that
\[\mu(b)<\min\left\{\mu(a\to ((b\to c)\to b)), \mu(a), \tfrac{1-k}{2}\right\}.\]
Then $\mu(b)<s\le \min\left\{\mu(a\to ((b\to c)\to b)), \mu(a), \tfrac{1-k}{2}\right\}$ for some $s\in \left(0,\tfrac{1-k}{2}\right].$ Thus
$(a\to ((b\to c)\to b))_s\in \mu$ and $a_s\in \mu.$ Using Definition \ref{{D091115-33}}(2), we have
$b_s=b_{\min\left\{s,s\right\}}\in\! \vee \, {q}_k\, \mu,$ which implies that
$\mu(b)\ge s$ or $\mu(b)+s+k>1.$ This is a contradiction, and
therefore
\[\mu(x)\ge \min\left\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\right\}\] for all $x,a,y\in L.$
Conversely, let $\mu$ be a fuzzy subset of $L$ that satisfies conditions (1) and (2). Let $x\in L$ and $t\in (0,1]$ be such that $x_t\in \mu.$ Then $\mu(x)\ge t,$ which implies from (1) that $\mu(1)\ge \min\{\mu(x), \tfrac{1-k}{2}\}\ge \min\{t, \tfrac{1-k}{2}\}.$ If $t\le \tfrac{1-k}{2},$ then $\mu(1)\ge t,$ i.e., $1_t\in \mu.$ If $t>\tfrac{1-k}{2},$ then $\mu(1)\ge \tfrac{1-k}{2}$ and so $\mu(1)+t+k>\tfrac{1-k}{2}+\tfrac{1-k}{2}+k=1,$ i.e., $1_t\, {q}_k\, \mu.$ Hence $1_t\in\! \vee \, {q}_k\, \mu.$ Let $x,a,y\in L$ and $t,s\in (0,1]$ be such that $(a\to ((x\to y)\to x))_t\in \mu$ and $a_s\in \mu.$ Then $\mu(a\to ((x\to y)\to x))\ge t$ and $\mu(a)\ge s.$ It follows from (2) that
\begin{equation*}\begin{split}
\mu(x) &\ge \min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\}\\
&\ge \min\{t,s,\tfrac{1-k}{2}\}.
\end{split}\end{equation*} If $\min\{t,s\}\le \tfrac{1-k}{2},$ then $\mu(x)\ge \min\{t,s\},$ which shows that $x_{\min\{t,s\}}\in \mu.$ If $\min\{t,s\}>\tfrac{1-k}{2},$ then $\mu(x)\ge \tfrac{1-k}{2},$ and thus $\mu(x)+\min\{t,s\}+k>1,$ i.e., $x_{\min\{t,s\}}\, {q}_k\, \mu.$ Hence $x_{\min\{t,s\}}\in\! \vee \, {q}_k\, \mu.$ Consequently, $\mu$ is an $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L.$ \end{proof} \fudl \begin{cor}
If $\mu$ is an $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L$ with
$\mu(1)<\tfrac{1-k}{2},$ then $\mu$ is an $(\in,\in)$-fuzzy fated filter of $L.$
\end{cor} \fudl \begin{cor}[\cite{ID 980315}]\label{{C110901}} A fuzzy subset $\mu$ of $L$ is an $(\in, \in\! \vee \, {q}\, )$-fuzzy fated filter of $L$ if and only if it satisfies the following inequalities:
\begin{enumerate}
\item[\rm (1)] $(\forall x\in L)$ $(\mu(1)\ge \min\{\mu(x), 0.5\}),$
\item[\rm (2)] $(\forall x,a,y\in L)$
$(\mu(x)\ge \min\{\mu(a\to ((x\to y)\to x)), \mu(a), 0.5\}).$
\end{enumerate} \end{cor}
\begin{proof} It follows from taking $k=0$ in Theorem \ref{{T091115-5}}. \end{proof} \fudl \begin{cor}\label{{C091123-3}} Every strong $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter $\mu$ of $L$ satisfies the following inequalities:
\begin{enumerate}
\item[\rm (1)] $(\forall x\in L)~\bigl(\mu(1)\ge \min\{\mu(x), \tfrac{1-k}{2}\}\bigr),$
\item[\rm (2)] $(\forall x,a,y\in L)
~\bigl(\mu(x)\ge \min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\}\bigr).$ \end{enumerate}\end{cor} \fudl \begin{thm}\label{{T091115-66}} A fuzzy subset $\mu$ of $L$ is an $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L$ if and only if it satisfies the following assertion:
\begin{eqnarray}\label{{z091115-6}}
\left(\forall t\in \left(0,\tfrac{1-k}{2}\right]\right)
~\left(U(\mu;t)\in FF(L)\cup \{\emptyset\}\right).
\end{eqnarray}\end{thm}
\begin{proof} Assume that $\mu$ is an $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L.$ Let $t\in \left(0,\tfrac{1-k}{2}\right]$ be such that $U(\mu;t)\ne \emptyset.$ Then there exists $x\in U(\mu;t),$ and so $\mu(x)\ge t.$ Using Theorem \ref{{T091115-5}}(1), we get
\[\mu(1)\ge \min\left\{\mu(x), \tfrac{1-k}{2}\right\}\ge \min\left\{t,\tfrac{1-k}{2}\right\}=t,\] i.e., $1\in U(\mu;t).$ Assume that $a\to ((x\to y)\to x)\in U(\mu;t)$ for all $x,y\in L$ and $a\in U(\mu;t).$ Then
$\mu(a\to ((x\to y)\to x))\ge t$ and $\mu(a)\ge t.$ It follows
from Theorem \ref{{T091115-5}}(2) that
\begin{equation*}\begin{split}
\mu(x) &\ge \min\left\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\right\}\\
&\ge \min\left\{t,\tfrac{1-k}{2}\right\}=t
\end{split}\end{equation*}
so that $x\in U(\mu;t).$ Therefore $U(\mu;t)$ is a fated filter
of $L.$
Conversely, let $\mu$ be a fuzzy subset of $L$ satisfying the assertion (\ref{{z091115-6}}). Assume that
$\mu(1)<\min\left\{\mu(a), \tfrac{1-k}{2}\right\}$ for some $a\in L.$ Putting
$t_a:=\min\left\{\mu(a), \tfrac{1-k}{2}\right\},$ we have $a\in U\left(\mu;t_a\right)$ and so
$U\left(\mu;t_a\right)\ne \emptyset.$ Hence $U\left(\mu;t_a\right)$ is a fated
filter of $L$ by (\ref{{z091115-6}}), which implies that $1\in U\left(\mu;t_a\right).$ Thus $\mu(1)\ge t_a,$ which is a contradiction. Therefore $\mu(1)\ge \min\left\{\mu(x), \tfrac{1-k}{2}\right\}$ for all $x\in L.$ Suppose that
\[\mu(x)<\min\left\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\right\}\] for some $x,a,y\in L.$ Taking
$t_x:=\min\left\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\right\},$ we get
$a\to ((x\to y)\to x)\in U\left(\mu;t_x\right)$ and $a\in U\left(\mu;t_x\right).$ Since $U\left(\mu;t_x\right)$ is a fated filter of $L$ by (\ref{{z091115-6}}), it follows that $x\in U\left(\mu;t_x\right),$ i.e., $\mu(x)\ge t_x.$ This is a contradiction. Hence
\[\mu(x)\ge \min\left\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\right\}\] for all $x,a,y\in L.$ Using Theorem \ref{{T091115-5}}, we conclude that $\mu$ is an $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L.$ \end{proof}
If we take $k=0$ in Theorem \ref{{T091115-66}}, then we have the following corollary.
\begin{cor}[\cite{ID 980315}] \label{{C091115-66}} A fuzzy subset $\mu$ of $L$ is an $(\in, \in\! \vee \, {q}\, )$-fuzzy fated filter of $L$ if and only if it satisfies the following assertion:
\begin{eqnarray}\label{{z091115-66}}
(\forall t\in (0,0.5])~\left(U(\mu;t)\in FF(L)\cup \{\emptyset\}\right).
\end{eqnarray}\end{cor}
\begin{thm}\label{{T110923}}
If $k<r$ in $[0,1),$ then every $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy
fated filter of $L$ is an $\left(\in, \in\! \vee \, {q}_r\, \right)$-fuzzy
fated filter.
\end{thm}
\begin{proof} Straightforward. \end{proof}
The converse of Theorem \ref{{T110923}} is not true as seen in
the following example.
\begin{example}
Consider the $R_0$-algebra $L=\{0,a,b,c,d,1\}$ which appeared in
Example \ref{{E091003-33}}.
Define a fuzzy subset $\mu$ of $L$ by
\[\mu :L\rightarrow [0,1],
~~x \mapsto \begin{cases}
0.9 & \mbox{\rm if } \, x=d, \\
0.7 & \mbox{\rm if } \, x=c, \\
0.3 & \mbox{\rm if } \, x=1, \\
0.1 & \mbox{\rm if } \, x\in \{0,a,b\}.
\end{cases} \] It is routine to verify that $\mu$ is an
$(\in,$ $\in\! \vee {q}_{0.4})$-fuzzy fated filter of $L.$
Since
\[U(\mu;t)=\begin{cases}
\{c,d\} & \mbox{\rm if } \, t\in (0.3, 0.35], \\
\{c,d,1\} & \mbox{\rm if } \, t\in (0.1, 0.3], \\
L & \mbox{\rm if } \, t\in (0, 0.1], \\
\end{cases} \] we know from Theorem \ref{{T091115-66}} that $\mu$ is not
an $(\in,$ $\in\! \vee {q}_{0.3})$-fuzzy fated filter of $L.$
\end{example}
\begin{prop}\label{{P091209-9}}
Every $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter $\mu$ of $L$ satisfies the
following inequalities.
\begin{enumerate}
\item[\rm (1)] $\mu(x\to z)\ge \min\left\{\mu(x\to (y\to z)),
\mu(x\to y), \tfrac{1-k}{2}\right\},$
\item[\rm (2)] $\mu(x)\ge \min\left\{\mu((x\to y)\to x), \tfrac{1-k}{2}\right\}$ \end{enumerate} for all $x,y,z\in L.$ \end{prop}
\begin{proof} (1) Suppose that there exist $a,b,c\in L$ such that
\[\mu(a\to c)<\min\left\{\mu(a\to (b\to c)), \mu(a\to b), \tfrac{1-k}{2}\right\}.\] Taking
$t:=\min\left\{\mu(a\to (b\to c)), \mu(a\to b),\tfrac{1-k}{2}\right\}$
implies that
$a\to (b\to c)\in U(\mu;t),$ $a\to b\in U(\mu;t)$ and $t\in \left(0,\tfrac{1-k}{2}\right].$ Since $U(\mu;t)\in FF(L)$ by Theorem \ref{{T091115-66}}, it follows from Lemma \ref{{L091209}} that $a\to c\in U(\mu;t),$ i.e., $\mu(a\to c)\ge t.$ This is a contradiction, and therefore $\mu$ satisfies (1).
(2) If $\mu$ is an $\left(\in,\in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L,$ then $U(\mu;t)\in FF(L)\cup \{\emptyset\}$ for all $t\in \left(0,\tfrac{1-k}{2}\right]$ by Theorem \ref{{T091115-66}}. Hence $U(\mu;t)\in F(L)\cup \{\emptyset\}$ for all $t\in \left(0,\tfrac{1-k}{2}\right].$ Suppose that
\[\mu(x)<t\le \min\left\{\mu((x\to y)\to x), \tfrac{1-k}{2}\right\}\] for some $x,y\in L$ and $t\in \left(0,\tfrac{1-k}{2}\right].$ Then $(x\to y)\to x\in U(\mu;t),$ which implies from Lemma \ref{{L091209-1}} that $x\in U(\mu;t),$ i.e., $\mu(x)\ge t.$ This is a contradiction. Hence
$\mu(x)\ge \min\left\{\mu((x\to y)\to x), \tfrac{1-k}{2}\right\}$ for all $x,y\in L.$ \end{proof} \fudl \begin{cor}[\cite{ID 980315}]\label{{C091209}}
Every $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filter $\mu$ of $L$ satisfies the
following inequalities.
\begin{enumerate}
\item[\rm (1)] $\mu(x\to z)\ge \min\{\mu(x\to (y\to z)),
\mu(x\to y), 0.5\},$
\item[\rm (2)] $\mu(x)\ge \min\{\mu((x\to y)\to x), 0.5\}$ \end{enumerate} for all $x,y,z\in L.$ \end{cor} \fudl \begin{thm}\label{{T091118-8}}
If $F$ is a fated filter of $L,$ then a fuzzy subset $\mu$ of
$L$ defined by
\[\mu:L\rightarrow [0,1], ~x\mapsto
\left\{\begin{array}{ll}
t_1 & \textrm{if \, $x\in F$}, \\
t_2 & \textrm{otherwise}
\end{array}\right.\] where $t_1\in \left[\tfrac{1-k}{2},1\right]$ and $t_2\in \left(0,\tfrac{1-k}{2}\right),$ is an $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L.$ \end{thm}
\begin{proof} Note that
\begin{equation*}
U(\mu;s)=\left\{\begin{array}{ll}
F & \textrm{if \, $s\in (t_2, \tfrac{1-k}{2}]$}, \\
L & \textrm{if \, $s\in (0,t_2]$}
\end{array}\right.
\end{equation*} which is a fated filter of $L.$ It follows from Theorem \ref{{T091115-66}} that $\mu$ is an $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L.$ \end{proof} \fudl \begin{cor}[\cite{ID 980315}]\label{{C091118}}
If $F$ is a fated filter of $L,$ then a fuzzy subset $\mu$ of
$L$ defined by
\[\mu:L\rightarrow [0,1], ~x\mapsto
\left\{\begin{array}{ll}
t_1 & \textrm{if \, $x\in F$}, \\
t_2 & \textrm{otherwise}
\end{array}\right.\] where $t_1\in [0.5,1]$ and $t_2\in (0,0.5),$ is an $(\in, \in\! \vee \, {q}\, )$-fuzzy fated filter of $L.$ \end{cor} For any fuzzy subset $\mu$ of $L$ and any $t\in (0,1],$ we consider two subsets:
\begin{equation*}\begin{split}
& Q(\mu;t):=\left\{x\in L\mid x_t\, {q}\, \mu\right\},
~~[\mu]_t:=\left\{x\in L\mid x_t\in\! \vee \, {q}\, \, \mu\right\}. \end{split} \end{equation*} It is clear that $[\mu]_t=U(\mu;t)\cup Q(\mu;t)$ (see \cite{ID 980315}). We also consider the following two sets:
\[
Q_k(\mu;t):=\left\{x\in L\mid x_t\, {q}_k\, \mu\right\},
~~[\mu]_t^k:=\left\{x\in L\mid x_t\in\! \vee \, {q}_k\, \, \mu\right\}.\] Obviously, $[\mu]_t^k=U(\mu;t)\cup Q_k(\mu;t)$ and if $k=0$ then $Q_k(\mu;t)=Q(\mu;t)$ and $[\mu]_t^k=[\mu]_t.$
\begin{thm}\label{{T091124-4}} If $\mu$ is an $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L,$ then
\[\left(\forall t\in (\tfrac{1-k}{2},1]\right)~\left(Q_k(\mu;t)\in FF(L)\cup \{\emptyset\}\right).\] \end{thm}
\begin{proof} Assume that $\mu$ is an $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L$ and let $t\in \left(\tfrac{1-k}{2},1\right]$ be such that
$Q_k(\mu;t)\ne \emptyset.$ Then there exists $x\in Q_k(\mu;t),$ and so $\mu(x)+t+k>1.$ Using Theorem \ref{{T091115-5}}(1), we have \begin{equation*}\begin{split}
\mu(1)&\ge \min\left\{\mu(x),\tfrac{1-k}{2}\right\} \\
& =\left\{\begin{array}{ll}
\tfrac{1-k}{2} & \textrm{if \, $\mu(x)\ge \tfrac{1-k}{2}$}, \\
\mu(x) & \textrm{if \, $\mu(x)<\tfrac{1-k}{2}$}
\end{array}\right.\\
&>1-t-k,
\end{split}\end{equation*} which implies that $1\in Q_k(\mu;t).$ Assume that
$a\to ((x\to y)\to x)\in Q_k(\mu;t)$ and $a\in Q_k(\mu;t)$ for all $x,a,y\in L.$
Then $(a\to ((x\to y)\to x))_t\, {q}_k\, \mu$ and $a_t\, {q}_k\, \mu,$ that is,
$\mu(a\to ((x\to y)\to x))>1-t-k$ and $\mu(a)>1-t-k.$
Using Theorem \ref{{T091115-5}}(2), we get \[\mu(x)\ge \min\left\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\right\}.\] Thus, if $\min\{\mu(a\to ((x\to y)\to x)), \mu(a)\}< \tfrac{1-k}{2},$ then
\[\mu(x)\ge \min\left\{\mu(a\to ((x\to y)\to x)), \mu(a)\right\}>1-t-k.\] If $\min\{\mu(a\to ((x\to y)\to x)), \mu(a)\}\ge \tfrac{1-k}{2},$ then $\mu(x)\ge \tfrac{1-k}{2}>1-t-k.$ It follows that $x_t\, {q}_k\, \mu$ so that $x\in Q_k(\mu;t).$ Therefore $Q_k(\mu;t)$ is a fated filter of $L.$ \end{proof} \fudl \begin{cor}[\cite{ID 980315}]\label{{C091124-4}} If $\mu$ is an $(\in, \in\! \vee \, {q}\, )$-fuzzy fated filter of $L,$ then
\[(\forall t\in (0.5,1])~\bigl(Q(\mu;t)\in FF(L)\cup \{\emptyset\}\bigr).\] \end{cor} \fudl \begin{cor}\label{{C091124-44}} If $\mu$ is a strong $\left(\in, \in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L,$ then
\[(\forall t\in (\tfrac{1-k}{2},1])~\left(Q_k(\mu;t)\in FF(L)\cup \{\emptyset\}\right).\] \end{cor} \fudl The converse of Corollary \ref{{C091124-44}} is not true as shown by the following example.
\begin{example}\label{{E091124-4}}
Consider the $(\in,$ $\in\! \vee {q}_{0.2})$-fuzzy fated filter $\nu$ of $L$
which is given in Example \ref{{E091003-33}}. Then
\begin{equation*}
Q_k(\nu;t)=\left\{\begin{array}{ll}
L & \textrm{if \, $t\in (0.5, 1]$}, \\
\{c,d,1\} & \textrm{if \, $t\in (0.4,0.5]$}
\end{array}\right.
\end{equation*} is a fated filter of $L.$ But $\nu$ is not a strong $(\in,$ $\in\! \vee {q}_{0.2})$-fuzzy fated filter of $L.$ \end{example}
\begin{thm}\label{{T091225-5}}
For a fuzzy subset $\mu$ of $L,$ the following assertions are
equivalent:
\begin{enumerate}
\item[\rm (1)] $\mu$ is an $\left(\in,\in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of $L.$
\item[\rm (2)] $(\forall t\in (0,1])~\left([\mu]_t^k\in FF(L)\cup
\{\emptyset\}\right).$
\end{enumerate}
We call $[\mu]_t^k$ an {\it $(\in\! \vee \, {q}_k\, )$-level fated filter} of $\mu.$ \end{thm}
\begin{proof} Assume that $\mu$ is an $\left(\in,\in\! \vee \, {q}_k\, \right)$-fuzzy fated filter of
$L$ and let $t\in (0,1]$ be such that $[\mu]_t^k\ne \emptyset.$ Then there exists $x\in [\mu]_t^k=U(\mu;t)\cup Q_k(\mu;t),$ and so $x\in U(\mu;t)$ or $x\in Q_k(\mu;t).$
If $x\in U(\mu;t),$ then $\mu(x)\ge t.$ It follows from Theorem
\ref{{T091115-5}}(1) that \begin{equation*}\begin{split}
\mu(1)&\ge \min\left\{\mu(x), \tfrac{1-k}{2}\right\}\ge \min\left\{t,\tfrac{1-k}{2}\right\} \\
& =\left\{\begin{array}{ll}
t & \textrm{if \, $t\le \tfrac{1-k}{2}$}, \\
\tfrac{1-k}{2}>1-t-k & \textrm{if \, $t>\tfrac{1-k}{2}$}
\end{array}\right.\\
\end{split}\end{equation*} so that $1\in U(\mu;t)\cup Q_k(\mu;t)=[\mu]_t^k.$ If $x\in Q_k(\mu;t),$ then $\mu(x)+t+k>1.$ Thus \begin{equation*}\begin{split}
\mu(1)&\ge \min\{\mu(x), \tfrac{1-k}{2}\}\ge \min\{1-t-k,\tfrac{1-k}{2}\} \\
& =\left\{\begin{array}{ll}
1-t-k & \textrm{if \, $t> \tfrac{1-k}{2}$}, \\
\tfrac{1-k}{2}\ge t & \textrm{if \, $t\le \tfrac{1-k}{2}$}
\end{array}\right.\\
\end{split}\end{equation*} and so $1\in Q_k(\mu;t)\cup U(\mu;t)=[\mu]_t^k.$
Let $x,a,y\in L$ be such that $a\in [\mu]_t^k$ and $a\to ((x\to y)\to x)\in [\mu]_t^k.$ Then
\[ \mu(a)\ge t ~\text{ or } ~\mu(a)+t+k>1,\]
and
\[\mu(a\to ((x\to y)\to x))\ge t ~\text{ or }
~\mu(a\to ((x\to y)\to x))+t+k>1.\]
We can consider four cases:
\begin{eqnarray}
&& \text{\rm $\mu(a)\ge t$ and $\mu(a\to ((x\to y)\to x))\ge t,$} \label{{z091224-1}}\\
&& \text{\rm $\mu(a)\ge t$ and $\mu(a\to ((x\to y)\to x))+t+k>1,$} \label{{z091224-2}}\\
&& \text{\rm $\mu(a)+t+k>1$ and $\mu(a\to ((x\to y)\to x))\ge t,$} \label{{z091224-3}}\\
&& \text{\rm $\mu(a)+t+k>1$ and $\mu(a\to ((x\to y)\to x))+t+k>1.$} \label{{z091224-4}} \end{eqnarray} For the first case, Theorem \ref{{T091115-5}}(2) implies that \begin{equation*}\begin{split}
\mu(x)&\ge \min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\}\\
&\ge \min\{t,\tfrac{1-k}{2}\}
=\left\{\begin{array}{ll}
\tfrac{1-k}{2} & \textrm{if \, $t> \tfrac{1-k}{2}$}, \\
t & \textrm{if \, $t\le \tfrac{1-k}{2}$}
\end{array}\right.\\
\end{split}\end{equation*} so that $x\in U(\mu;t)$ or $\mu(x)+t+k>\tfrac{1-k}{2}+\tfrac{1-k}{2}+k=1,$ i.e., $x\in Q_k(\mu;t).$ Hence $x\in [\mu]_t^k.$
Case (\ref{{z091224-2}}) implies that \begin{equation*}\begin{split}
\mu(x)&\ge \min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\}\\
&\ge \min\{1-t-k,t,\tfrac{1-k}{2}\}
=\left\{\begin{array}{ll}
1-t-k & \textrm{if \, $t> \tfrac{1-k}{2}$}, \\
t & \textrm{if \, $t\le \tfrac{1-k}{2}.$}
\end{array}\right.\\
\end{split}\end{equation*} Thus $x\in Q_k(\mu;t)\cup U(\mu;t)=[\mu]_t^k.$ Similarly, $x\in [\mu]_t^k$ for the case (\ref{{z091224-3}}). The final case implies that \begin{equation*}\begin{split}
\mu(x)&\ge \min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\}\\
&\ge \min\{1-t-k,\tfrac{1-k}{2}\}
=\left\{\begin{array}{ll}
1-t-k & \textrm{if \, $t> \tfrac{1-k}{2}$}, \\
\tfrac{1-k}{2} & \textrm{if \, $t\le \tfrac{1-k}{2}$}
\end{array}\right.\\
\end{split}\end{equation*} so that $x\in Q_k(\mu;t)\cup U(\mu;t)=[\mu]_t^k.$ Consequently $[\mu]_t^k$ is a fated filter of $L.$
Conversely, let $\mu$ be a fuzzy subset of $L$ such that $[\mu]_t^k$ is a fated filter of $L$ whenever it is nonempty for all $t\in (0,1].$ If there exists $a\in L$ such that $\mu(1)<\min\{\mu(a), \tfrac{1-k}{2}\},$ then
$\mu(1)<t_a\le \min\{\mu(a), \tfrac{1-k}{2}\}$ for some $t_a\in (0,\tfrac{1-k}{2}].$
It follows that $a\in U(\mu;t_a)$ but $1\notin U(\mu;t_a).$ Also, $\mu(1)+t_a+k<2t_a+k\le 1$ and so $1\notin Q_k(\mu;t_a).$ Hence $1\notin U(\mu;t_a)\cup Q_k(\mu;t_a)=[\mu]_{t_a}^k,$ which is a contradiction. Therefore $\mu(1)\ge \min\{\mu(x), \tfrac{1-k}{2}\}$ for all $x\in L.$ Suppose that
\begin{equation}\begin{split}\label{{z091224-5}}
\mu(x)<\min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\} \end{split}\end{equation} for some $x,a,y\in L.$ Taking
$t:=\min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\}$ implies
that $t\in (0,\tfrac{1-k}{2}],$ $a\in U(\mu;t)\subseteq [\mu]_t^k$ and $a\to ((x\to y)\to x)\in U(\mu;t)\subseteq [\mu]_t^k.$ Since $[\mu]_t^k\in FF(L),$ it follows that $x\in [\mu]_t^k=U(\mu;t)\cup Q_k(\mu;t).$ But (\ref{{z091224-5}}) induces $x\notin U(\mu;t)$ and $\mu(x)+t+k<2t+k\le 1,$ i.e., $x\notin Q_k(\mu;t).$ This is a contradiction, and thus
$\mu(x)\ge \min\{\mu(a\to ((x\to y)\to x)), \mu(a), \tfrac{1-k}{2}\}$ for all $x,a,y\in L.$ Using Theorem \ref{{T091115-5}}, we conclude that $\mu$ is an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L.$ \end{proof} \fudl \begin{cor}[\cite{ID 980315}]\label{{C091225-5}}
For a fuzzy subset $\mu$ of $L,$ the following assertions are
equivalent:
\begin{enumerate}
\item[\rm (1)] $\mu$ is an $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filter of $L.$
\item[\rm (2)] $(\forall t\in (0,1])~\left([\mu]_t\in FF(L)\cup
\{\emptyset\}\right).$
\end{enumerate}\end{cor}
\begin{proof} Taking $k=0$ in Theorem \ref{{T091225-5}} yields the desired result.
\end{proof} \fudl \begin{thm}\label{{T090503-11}} Given any chain of fated filters $F_0\subset F_1\subset \cdots \subset F_n=L$ of $L,$ there exists an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter $\mu$ of $L$ whose level fated filters are precisely the members of the chain with $U(\mu;\tfrac{1-k}{2})=F_0.$ \end{thm}
\begin{proof} Let $\{t_i\in (0,\tfrac{1-k}{2})\mid i=1,2,\cdots, n\}$ be such that $t_1>t_2>\cdots >t_n.$ Define a fuzzy subset $\mu$ of $L$ by
\[\mu : L\rightarrow [0,1],
~~ x\mapsto
\left\{\begin{array}{ll}
t_0 ~(\ge \tfrac{1-k}{2}) &{\rm if}\;\, x=1,\\
t ~(\ge t_0) &{\rm if}\;\, x\in F_0\setminus \{1\},\\
t_1 &{\rm if}\;\, x\in F_1 \setminus F_0,\\
t_2 &{\rm if}\;\, x\in F_2 \setminus F_1,\\
\cdots \\
t_n &{\rm if}\;\, x\in F_n \setminus F_{n-1}. \end{array}\right. \] Then \begin{equation*}
U(\mu;s)=\left\{\begin{array}{ll}
F_0 &{\rm if}\;\, s\in (t_1, \tfrac{1-k}{2}],\\
F_1 &{\rm if}\;\, s\in (t_2, t_1],\\
F_2 &{\rm if}\;\, s\in (t_3, t_2],\\
\cdots \\
F_n=L &{\rm if}\;\, s\in (0, t_n]. \end{array}\right. \end{equation*} Using Theorem \ref{{T091115-66}}, we know that $\mu$ is an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L.$ It follows from the construction of $\mu$ that $U(\mu;\tfrac{1-k}{2})=F_0$ and $U(\mu;t_i)=F_i$ for $i=1,2,\cdots, n.$ \end{proof} \fudl \begin{cor}\label{{C090503-11}} Given any chain of fated filters $F_0\subset F_1\subset \cdots \subset F_n=L$ of $L,$ there exists an $(\in,$ $\in\! \vee \, {q}\, )$-fuzzy fated filter $\mu$ of $L$ whose level fated filters are precisely the members of the chain with $U(\mu;0.5)=F_0.$ \end{cor}
Using a class of fated filters, we construct an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L.$
\begin{thm}\label{{T090503-22}}
Let $\{F_t\mid t\in \Lambda\},$ where $\Lambda \subseteq
\left(0,\tfrac{1-k}{2}\right],$ be a collection of fated filters of $L$ such that
\begin{enumerate}
\item[\rm (i)] $L=\bigcup\limits_{t\in \Lambda}F_t,$
\item[\rm (ii)] $(\forall s,t\in \Lambda)$ $(s<t \,
\Leftrightarrow \, F_t\subset F_s).$
\end{enumerate}
Then a fuzzy subset $\mu$ of $L$ defined by $\mu(x)=\sup\{t\in
\Lambda \mid x\in F_t\}$ for all $x\in L$ is an $(\in,$
$\in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L.$ \end{thm}
\begin{proof} According to Theorem 3.12\ref{{T091115-66}}, it is sufficient to show that $U(\mu} \def\mbp{\nu} \def\mcp{\gamma;t)\ne \emptyset$ is a fated filter of $L$ for all $t\in (0,\tfrac{1-k}{2}].$ We consider two cases:
\[\text{\rm (i)\, $t=\sup\{s\in \Lambda \mid s<t\},$ \, \,
(ii)\, $t\ne \sup\{s\in \Lambda \mid s<t\}.$}\] Case (i) implies that
\[x\in U(\mu;t) \, \Longleftrightarrow \, (x\in F_s, ~~\forall
s<t) \, \Longleftrightarrow \, x\in \bigcap\limits_{s<t}F_s,\] and so $U(\mu;t)=\bigcap\limits_{s<t}F_s$ which is a fated filter of $L.$ In the second case, we have $U(\mu;t)=\bigcup\limits_{s\ge t}F_s.$ Indeed, if $x\in \bigcup\limits_{s\ge t}F_s,$ then $x\in F_s$ for some $s\ge t.$ Thus $\mu(x)\ge s\ge t,$ i.e., $x\in U(\mu;t).$ This proves $\bigcup\limits_{s\ge t}F_s\subset U(\mu;t).$ To prove the reverse inclusion, let $x\notin \bigcup\limits_{s\ge t}F_s.$ Then $x\notin F_s$ for all $s\ge t.$ Since $t\ne \sup\{s\in \Lambda \mid s<t\},$ there exists $\varepsilon >0$ such that $(t-\varepsilon, t)\cap \Lambda =\emptyset.$ Hence $x\notin F_s$ for all $s>t-\varepsilon,$ which means that if $x\in F_s$ then $s\le t-\varepsilon.$ Thus $\mu(x)\le t-\varepsilon<t,$ and so $x\notin U(\mu;t).$ Therefore $U(\mu;t)=\bigcup\limits_{s\ge t}F_s$ which is also a fated filter of $L.$ Consequently, $\mu$ is an $(\in,$ $\in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L.$ \end{proof}
\begin{cor}\label{C090503-2}
Let $\{F_t\mid t\in \Lambda\},$ where $\Lambda \subseteq
(0,0.5],$ be a collection of fated filters of $L$ such that
\begin{enumerate}
\item[\rm (i)] $L=\bigcup\limits_{t\in \Lambda}F_t,$
\item[\rm (ii)] $(\forall s,t\in \Lambda)$ $(s<t \,
\Leftrightarrow \, F_t\subset F_s).$
\end{enumerate}
Then a fuzzy subset $\mu$ of $L$ defined by $\mu(x)=\sup\{t\in
\Lambda \mid x\in F_t\}$ for all $x\in L$ is an $(\in,$
$\in\! \vee \, {q}\, )$-fuzzy fated filter of $L.$ \end{cor}
A fuzzy subset $\mu$ of $L$ is said to be \emph{proper} if ${\rm Im}(\mu)$ has at least two elements. Two fuzzy subsets are said to be \emph{equivalent} if they have the same family of level subsets. Otherwise, they are said to be \emph{non-equivalent}.
\begin{thm}\label{T080727-7} Let $\mu$ be an $(\in,\in\! \vee \, {q}_k\, )$-fuzzy fated filter of $L$ such that $\#\{\mu(x)\mid \mu(x)<\tfrac{1-k}{2}\}\ge 2.$ Then there exist two proper non-equivalent
$(\in,\in\! \vee \, {q}_k\, )$-fuzzy fated filters of $L$ such that $\mu$ can be expressed as their union. \end{thm}
\begin{proof} Let $\{\mu(x)\mid \mu(x)<\tfrac{1-k}{2}\}=\{t_1, t_2, \cdots, t_r\},$ where $t_1>t_2>\cdots >t_r$ and $r\ge 2.$ Then the chain of $(\in\! \vee \, {q}_k\, )$-level fated filters of $\mu$ is
\[[\mu]_{\tfrac{1-k}{2}}^k\subseteq [\mu]_{t_1}^k\subseteq [\mu]_{t_2}^k\subseteq \cdots
\subseteq [\mu]_{t_r}^k=L.\] Let $\mbp$ and $\mcp$ be fuzzy subsets of $L$ defined by
\begin{equation}
\mbp(x)=\left\{\begin{array}{ll}
t_1 &{\rm if}\;\, x\in [\mu]_{t_1}^k,\\
t_2 &{\rm if}\;\, x\in [\mu]_{t_2}^k\setminus [\mu]_{t_1}^k,\\
\cdots &{\rm }\;\, \\
t_r &{\rm if}\;\, x\in [\mu]_{t_r}^k\setminus [\mu]_{t_{r-1}}^k,\\ \end{array}\right. \nonumber \end{equation} and
\begin{equation}
\mcp(x)=\left\{\begin{array}{ll}
\mu(x) &{\rm if}\;\, x\in [\mu]_{\tfrac{1-k}{2}}^k,\\
k &{\rm if}\;\, x\in [\mu]_{t_2}^k\setminus [\mu]_{\tfrac{1-k}{2}}^k,\\
t_3 &{\rm if}\;\, x\in [\mu]_{t_3}^k\setminus [\mu]_{t_2}^k,\\
\cdots &{\rm }\;\, \\
t_r &{\rm if}\;\, x\in [\mu]_{t_r}^k\setminus [\mu]_{t_{r-1}}^k,\\ \end{array}\right. \nonumber \end{equation} respectively, where $t_3<k<t_2.$ Then $\mbp$ and $\mcp$ are $(\in, \in\! \vee \, {q}_k\, )$-fuzzy fated filters of $L,$ and $\mbp, \mcp\le \mu.$ The chains of $(\in\! \vee \, {q}_k\, )$-level fated filters of $\mbp$ and $\mcp$ are, respectively, given by \[ [\mu]_{t_1}^k\subseteq [\mu]_{t_2}^k\subseteq \cdots
\subseteq [\mu]_{t_r}^k\] and
\[[\mu]_{\tfrac{1-k}{2}}^k\subseteq [\mu]_{t_2}^k\subseteq \cdots
\subseteq [\mu]_{t_r}^k.\] Therefore $\mbp$ and $\mcp$ are non-equivalent and clearly $\mu=\mbp\cup \mcp.$ This completes the proof. \end{proof}
\zhangjie{Conclusion} \no In this paper, using the ``belongs to'' relation ($\in$) and the quasi-coincidence relation ($q$) between a fuzzy point and a fuzzy set, we have introduced the notion of an $(\in, \in \vee \,{q_k})$-fuzzy fated filter in $R_0$-algebras and investigated some related properties. We have provided characterizations of $(\in, \in \vee \,{q_k})$-fuzzy fated filters in $R_0$-algebras, and have shown that the notion of an $(\in, \in \vee \,{q})$-fuzzy fated filter is a special case of that of an $(\in, \in \vee \,{q_k})$-fuzzy fated filter. Based on these results, in future work we shall focus on other types of fuzzy filters and the relationships among them, and also consider generalized rough fuzzy filters of $R_0$-algebras. \fudl
\end{document} | arXiv | {
"id": "1507.01448.tex",
"language_detection_score": 0.5872161388397217,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{frontmatter}
\title{Supersonic flutter analysis of flat composite panels by unified formulation}
\author[unsw]{S Natarajan \corref{cor1}\fnref{fn1}} \author[india]{Ganapathi Manickam} \author[portugal,dubai]{AJM Ferreira} \author[italy,dubai]{E Carrera}
\cortext[cor1]{Corresponding author}
\address[unsw]{School of Civil \& Environmental Engineering, The University of New South Wales, Sydney, Australia} \address[india]{Head, Stress \& DTA, IES-Aerospace, Mahindra Satyam Computers Services Ltd., Bangalore, India} \address[portugal]{Faculdade de Engenharia da Universidade do Porto, Porto, Portugal.} \address[italy]{Department of Aeronautics and Aerospace Engineering, Politecnico di Torino, Italy.} \address[dubai]{Department of Mathematics, Faculty of Science, King Abdulaziz University, P.O. Box 80203, Jeddah 21589, Saudi Arabia.} \fntext[fn1]{School of Civil \& Environmental Engineering, The University of New South Wales, Sydney, NSW 2052, Australia. Tel:+61 293855030, Email: s.natarajan@unsw.edu.au; sundararajan.natarajan@gmail.com}
\begin{abstract} In this paper, the linear flutter characteristics of laminated composite flat panels immersed in a supersonic flow are investigated using field consistent elements within the framework of unified formulation. The influence of the aerodynamic damping on the supersonic flutter characteristics of flat composite panels is also investigated. The aerodynamic force is evaluated using a two-dimensional static aerodynamic approximation for high supersonic flow. Numerical results are presented for laminated composites that bring out the influence of the flow angle, the boundary conditions, the plate thickness and the plate aspect ratio on the flutter characteristics. \end{abstract}
\begin{keyword}
Laminated plates \sep unified formulation \sep supersonic flutter analysis \sep field consistent element \sep shear locking \sep sinusoidal shear deformation theory \end{keyword}
\end{frontmatter}
\section{Introduction} Engineered materials, such as the laminated composites are extensively used in various engineering disciplines such as in the aerospace engineering, automotive engineering and civil engineering. The laminated composite structures are often made of several orthotropic layers with different materials stacked together to achieve desired properties. Such a construction provides flexibility in tailoring the properties of the structures by varying the stack up sequence or by changing the fiber orientation in the lamina. In practice, the use of these materials in aerospace industries has necessitated to understand the dynamic characteristics of laminated structures, especially when exposed to air flow. The phenomenon called `panel flutter' involves the interaction of panel inertia force, the elastic restoring force, the thermal force and the air stream which passes over one side of the panel. This study is important in aerospace structural design in evaluating the fatigue life and allowable stress of the structural component exposed to supersonic flow. This has attracted researchers~\cite{singhaganapathi2005, wang1997} to study the flutter characteristics of composite panels. Sawyer~\cite{sawyer1977} using classical lamination theory studied the flutter characteristics of laminated plates. Srinivasan and Babu~\cite{srinivasanbabu1987} examined the flutter of laminated quadrilateral plates. Liao and Sun~\cite{liaosun1993} investigated the supersonic flutter behaviour of stiffened composite skew plates using a degenerated shell element. Pidaparti and Chang~\cite{pidapartichang1998} investigated the flutter characteristics of skewed and cracked composite panels. However, all these investigations neglected the effect of damping. 
Singha and Ganapathi~\cite{singhaganapathi2005} studied the flutter characteristics of skew composite plates by considering damping and thermo-mechanical loads using a shear deformable high precision 4-noded quadrilateral element.
In describing the plate kinematics, several laminate plate theories have been applied for the analysis of composite plates, such as the classical laminated plate theory (CLPT)~\cite{bosereddy1998}, first order shear deformation theory (FSDT)~\cite{whitneypagano1970} and other higher-order shear deformation theories (HSDTs)~\cite{reddy1984,kantmanjunatha1988,pandyakant1988,ganapathipatel2004}. Recently Carrera~\cite{carrera2001} derived a series of axiomatic approaches, coined as Carrera Unified Formulation~\cite{carrerademasi2002} for the general description of two-dimensional formulations for multilayered plates and shells. With this unified formulation, it is possible to implement in a single software a series of hierarchical formulations, thus affording a systematic assessment of different theories ranging from simple equivalent single layer models up to higher order layerwise descriptions. The plate structures are analyzed by employing a numerical technique. One such powerful and versatile technique is the finite element method. The CUF has been used to develop discrete models such as the finite element method~\cite{carrerademasi2002,carrerademasi2002a} and, more recently, meshless methods based upon collocation with radial basis functions~\cite{ferreiraroque2011}.
\paragraph{Objective} In this paper, a $\mathcal{C}^o$ 4-noded quadrilateral shear flexible element is employed to study the free vibration and flutter characteristics of laminated composite plates immersed in a supersonic flow. The plate kinematics is based on Carrera Unified Formulation (CUF) and a hybrid displacement assumption is used for the in-plane and the transverse displacements. The shear locking is alleviated by employing a field consistent approach. The influence of the flow angle, the plate thickness, the plate aspect ratio, the boundary conditions and damping on the critical aerodynamic pressure is numerically studied.
\paragraph{Outline} The paper commences with a brief discussion on the unified formulation for plates and the finite element discretization. Section \ref{edesc} describes the element employed for this study. The efficiency of the present formulation, numerical results and parametric studies are presented in Section~\ref{numres}, followed by concluding remarks in the last section.
\section{Carrera Unified Formulation}\label{cuftheory} \subsection{Basis of CUF} Let us consider a laminated plate composed of perfectly bonded layers with coordinates $x,y$ along the in-plane directions and $z$ along the thickness direction of the whole plate, while $z_k$ is the thickness of the $k^{\rm th}$ layer. The CUF is a useful tool to implement a large number of two-dimensional models with the description at the layer level as the starting point. By following the axiomatic modelling approach, the displacements $\mathbf{u}(x,y,z) = ( u(x,y,z), v(x,y,z), w(x,y,z))$ are written according to the general expansion as: \begin{equation} \mathbf{u}(x,y,z) = \sum\limits_{\tau = 0}^N F_\tau(z) \mathbf{u}_\tau(x,y) \label{eqn:unifieddisp} \end{equation} where $F(z)$ are known functions to model the thickness distribution of the unknowns, $N$ is the order of the expansion assumed for the through-thickness behaviour. By varying the free parameter $N$, a \emph{hierarchical} series of two-dimensional models can be obtained. 
The strains are related to the displacement field via the geometrical relations: \begin{eqnarray} \boldsymbol{\varepsilon}_{pG} = \left[ \begin{array}{ccc} \varepsilon_{xx} & \varepsilon_{yy} & \gamma_{xy} \end{array} \right]^{\rm T} = \mathbf{D}_p \mathbf{u} \nonumber \\ \boldsymbol{\varepsilon}_{nG} = \left[ \begin{array}{ccc} \gamma_{xz} & \gamma_{yz} & \varepsilon_{zz} \end{array} \right]^{\rm T} = \left( \mathbf{D}_{np} + \mathbf{D}_{nz} \right) \mathbf{u} \label{eqn:strainDisp} \end{eqnarray} where the subscript $G$ indicate the geometrical equations, $\mathbf{D}_{p}, \mathbf{D}_{np}$ and $\mathbf{D}_{nz}$ are differential operators given by: \begin{eqnarray} \mathbf{D}_p = \left[ \begin{array}{ccc} \partial_x & 0 & 0 \\ 0 & \partial_y & 0 \\ \partial_y & \partial_x & 0 \end{array} \right], \hspace{0.5cm} \mathbf{D}_{np} = \left[ \begin{array}{ccc} 0 & 0 & \partial_x \\ 0 & 0 & \partial_y \\ 0 & 0 & 0 \end{array} \right], \nonumber \\ \mathbf{D}_{nz} = \left[ \begin{array}{ccc} \partial_z & 0 & 0 \\ 0 & \partial_z & 0 \\ 0 & 0 & \partial_z \end{array} \right]. 
\end{eqnarray} The 3D constitutive equations are given as: \begin{eqnarray} \boldsymbol{\sigma}_{pC} = \mathbf{C}_{pp} \boldsymbol{\varepsilon}_{pG} + \mathbf{C}_{pn} \boldsymbol{\varepsilon}_{nG} \nonumber \\ \boldsymbol{\sigma}_{nC} = \mathbf{C}_{np} \boldsymbol{\varepsilon}_{pG} + \mathbf{C}_{nn} \boldsymbol{\varepsilon}_{nG} \label{eqn:stressdef} \end{eqnarray} with \begin{eqnarray} \mathbf{C}_{pp} = \left[ \begin{array}{ccc} C_{11} & C_{12} & C_{16} \\ C_{12} & C_{22} & C_{26} \\ C_{16} & C_{26} & C_{66} \end{array} \right] \hspace{0.5cm} \mathbf{C}_{pn} = \left[ \begin{array}{ccc} 0 & 0 & C_{13} \\ 0 & 0 & C_{23} \\ 0 & 0 & C_{36} \end{array} \right] \nonumber \\ \mathbf{C}_{np} = \left[ \begin{array}{ccc} 0 & 0 & 0 \\ 0 & 0 & 0 \\ C_{13} & C_{23} & C_{36} \end{array} \right] \hspace{0.5cm} \mathbf{C}_{nn} = \left[ \begin{array}{ccc} C_{55} & C_{45} & 0 \\ C_{45} & C_{44} & 0 \\ 0 & 0 & C_{33} \end{array} \right] \end{eqnarray} where the subscript $C$ indicate the constitutive equations. The \textit{Principle of Virtual Displacements} (PVD) in case of multilayered plate subjected to mechanical loads is written as: \begin{equation} \sum\limits_{k=1}^{N_k} \int\limits_{\Omega_k}\int\limits_{A_k} \left\{ (\delta \boldsymbol{\varepsilon}_{pG}^k)^{\rm T} \boldsymbol{\sigma}_{pC}^k + (\delta \boldsymbol{\varepsilon}_{nG}^k)^{\rm T} \boldsymbol{\sigma}_{nC}^k \right\}~\mathrm{d}\Omega_k~\mathrm{d}z = \sum\limits_{k=1}^{N_k} \int\limits_{\Omega_k} \int\limits_{A_k} \rho^k \delta \mathbf{u}_s^{k^{\rm T}} \ddot{\mathbf{u}}^k~\mathrm{d}\Omega_k~\mathrm{d}z + \sum\limits_{k=1}^{N_k} \delta \mathbf{L}_e^k \end{equation} where $\rho^k$ is the mass density of the $k^{\rm th}$ layer, $\Omega_k$, $A_k$ are the integration domain in the $(x,y)$ and the $z$ direction, respectively. 
Upon substituting the geometric relations (\Eref{eqn:strainDisp}), the constitutive relations (\Eref{eqn:stressdef}) and the unified formulation into the PVD statement, we have: \begin{equation} \begin{split} \int\limits_{\Omega_k}\int\limits_{A_k} \left\{ \left(\mathbf{D}_p^k F_s \delta \mathbf{u}_s^k\right)^{\rm T} \left\{ \mathbf{C}_{pp}^k \mathbf{D}_p^k F_{\tau} \mathbf{u}_{\tau}^k + \mathbf{C}_{pn}^k (\mathbf{D}_{n\Omega}^k + \mathbf{D}_{nz}^k) F_{\tau}\mathbf{u}_\tau^k \right\} + \right. \\ \left. \left[ (\mathbf{D}_{n\Omega}^k + \mathbf{D}_{nz}^k) f_x \delta\mathbf{u}_s^k)^{\rm T} (\mathbf{C}_{np}^k \mathbf{D}_p^k F_\tau \mathbf{u}_\tau^k + \mathbf{C}_{nn}^k (\mathbf{D}_{n\Omega}^k + \mathbf{D}_{nz}^k) F_\tau \mathbf{u}_\tau^k) \right] \right\}~\mathrm{d}\Omega_k ~\mathrm{d}z = \\ \sum\limits_{k=1}^{N_k} \int\limits_{\Omega_k} \int\limits_{A_k} \rho^k \delta \mathbf{u}_s^{k^{\rm T}} \ddot{\mathbf{u}}^k~\mathrm{d}\Omega_k~\mathrm{d}z + \sum\limits_{k=1}^{N_k} \delta \mathbf{L}_e^k \end{split} \end{equation} After integration by parts, the governing equations for the plate are obtained: \begin{equation} \mathbf{K}_{uu}^{k\tau s} \mathbf{u}_{\tau}^k = \mathbf{P}_{u \tau}^k \end{equation} and in the case of free vibrations, we have: \begin{equation} \mathbf{K}_{uu}^{k\tau s} \mathbf{u}_{\tau}^k = \mathbf{M}^{k \tau s} \ddot{\mathbf{u}}_\tau^k \end{equation} where the fundamental nucleus $\mathbf{K}_{uu}^{k \tau s}$ is: \begin{equation} \mathbf{K}_{uu}^{k \tau s} = \left[ (-\mathbf{D}_p^k)^{\rm T} ( \mathbf{C}_{pp}^k \mathbf{D}_p^k + \mathbf{C}_{pn}^k (\mathbf{D}_{n\Omega}^k + \mathbf{D}_{nz}) + (-\mathbf{D}_{n\Omega}^k + \mathbf{D}_{nz}^k)^{\rm T} (\mathbf{C}_{np}^k \mathbf{D}_p^k + \mathbf{C}_{nn}^k (\mathbf{D}_{n\Omega}^k + \mathbf{D}_{nz}^k)) \right] F_\tau F_s \label{eqn:stifffundanuclei} \end{equation} and $\mathbf{M}^{k \tau s}$ is the fundamental nucleus for the inertial term given by: \begin{equation} M_{ij}^{k \tau s} = \left\{ \begin{array}{cc} \rho^k 
F_\tau F_s & \textup{if} \hspace{1cm} i = j \\ 0 & \textup{if} \hspace{1cm} i \neq j \end{array} \right. \label{eqn:massfundanuclei} \end{equation} where $\mathbf{P}_{u \tau}^k$ are variationally consistent loads with applied pressure. For more detailed derivation and for the explicit form of the fundamental nuclei, interested readers are referred to~\cite{carrerademasi2002,carrerademasi2002a}. The work done by the applied non-conservative loads is: \begin{equation} W(\boldsymbol{\delta}) = \int_{\Omega} \Delta p w ~\mathrm{d} \Omega \label{eqn:aerowork} \end{equation} where $\Delta p$ is the aerodynamic pressure. The static aerodynamic pressure based on first-order, high Mach number approximation to linear potential flow is given by: \begin{equation} \Delta p = \frac{\rho_a U_a^2}{\sqrt{M_\infty^2 - 1}} \left[ \frac{\partial w}{\partial x} \cos \theta^\prime + \frac{\partial w}{\partial y} \sin \theta^\prime \right] \label{eqn:aeropressure} \end{equation}
where $\rho_a, U_a, M_\infty$ and $\theta^\prime$ are the free stream air density, velocity of air, Mach number and flow angle, respectively. \begin{equation} \left[ \left( \mathbf{K} + \lambda \overline{\mathbf{A}}\right) - \overline{\kappa} \mathbf{M}\right] \boldsymbol{\delta} = \mathbf{0} \label{eqn:finaldiscre} \end{equation} where $\mathbf{K}$ is the stiffness matrix, $\overline{\mathbf{A}}$ is the aerodynamic matrix and $\mathbf{M}$ is the mass matrix. The eigenvalue $\overline{\kappa} = -\omega^2 - g_\tau \omega/(\rho h)$ includes the contribution of aerodynamic damping. \Eref{eqn:finaldiscre} is solved for eigenvalues for a given value of $\lambda$. In the absence of aerodynamic damping, i.e., when $\lambda = $ 0, the eigenvalue, $\omega$ is real and positive, since the stiffness matrix and mass matrix are symmetric and positive definite. However, the aerodynamic matrix $\overline{\mathbf{A}}$ is unsymmetric and hence complex eigenvalues $\omega$ are expected for $\lambda >$ 0. As $\lambda$ increases monotonically from zero, two of these eigenvalues will approach each other and become complex conjugates. In this study, $\lambda_{cr}$ is considered to be the value of $\lambda$ at which the first coalescence occurs. In the presence of aerodynamic damping, the eigenvalues $\overline{\kappa}$, in \Eref{eqn:finaldiscre} becomes complex with increasing value of $\lambda$. The corresponding frequency can be written as: \begin{equation} \overline{\kappa} = -\omega^2 - g_\tau \omega/(\rho h) = \overline{\kappa}_R - i \overline{\kappa}_I \end{equation} where the subscripts $R$ and $I$ refer to the real and the imaginary part of the eigenvalue. The flutter boundary is reached $(\lambda = \lambda_{cr})$, when the frequency $\omega$ becomes pure imaginary number, i.e., $\omega = i \sqrt{\overline{\kappa}_R}$ at $g_\tau = \overline{\kappa}_I/\sqrt{\overline{\kappa}_R}$. 
In practice, the value of $\lambda_{cr}$ is determined from a plot of $\omega_R$ vs $\lambda$ corresponding to $\omega_R = $ 0.
\section{Element description} \label{edesc} The plate element employed in this study is a $\mathcal{C}^0$ continuous element and according to the isoparametric description, the components of each displacement unknown $\mathbf{u}_\tau$ are expressed as: \begin{equation} \mathbf{u}_\tau = N_I \mathbf{q}_{\tau I}, \hspace{0.5cm} I = 1,2,\cdots,N_n \label{eqn:unifieddisp} \end{equation} where $N_I$ are the standard finite element shape functions. By introducing the unified formulation for the displacements, given by \Eref{eqn:unifieddisp} into the strain-displacement relations (see \Eref{eqn:strainDisp}), we have: \begin{align} \boldsymbol{\varepsilon}_{pG}^k &= \mathbf{D}_p^k (F_\tau \mathbf{u}_\tau^k) = \mathbf{D}_p^k (F_{\tau} N_I) \mathbf{q}_{\tau I}^k \nonumber \\ \boldsymbol{\varepsilon}_{nG}^k &= (\mathbf{D}_{n\Omega}^k + \mathbf{D}_{nz}^k) (F_\tau \mathbf{u}_\tau^k) = \mathbf{D}_{n\Omega}^k (F_\tau N_I) \mathbf{q}_{\tau I}^k + F_{\tau,z} N_I \mathbf{q}_{\tau I}^k \label{eqn:straindiscretize} \end{align} Upon substituting \Erefs{eqn:unifieddisp} and (\ref{eqn:straindiscretize}) into \Erefs{eqn:stifffundanuclei} and (\ref{eqn:massfundanuclei}), we can compute the stiffness matrix $\mathbf{K}$, the aerodynamic matrix $\overline{\mathbf{A}}$ and the mass matrix $\mathbf{M}$ of the system. The formulation is implemented in MATLAB\textsuperscript{\textregistered} and the solution to the \Eref{eqn:finaldiscre} is computed from a standard eigenvalue algorithm.
\paragraph{Shear locking} If the interpolation functions given for a QUAD-4 are used directly to interpolate the unknown displacement fields in deriving the shear strains $(\gamma_{xz}, \gamma_{yz})$ and the membrane strains $(\boldsymbol{\varepsilon}_{pG})$, the element will lock and show oscillations in the shear and the membrane stresses. The oscillations are due to the fact that the derivative functions of the out-of plane displacement do not match that of the rotations in the shear strain definition. To alleviate the locking phenomenon, the terms corresponding to the derivative of the out-of plane displacement must be consistent with the rotation terms. In this study, field redistributed shape functions are used to alleviate shear locking. The field consistency requires that the transverse shear strains and the membrane strains must be interpolated in a consistent manner. If the element has edges which are aligned with the coordinate system $(x,y)$, the terms in shear strains $(\gamma_{xz}, \gamma_{yz})$ are approximated by the following substitute shape functions~\cite{somashekarprathap1987}: \begin{eqnarray} \tilde{N}_{1}(\eta) = \frac{1}{4} \left[ \begin{array}{cccc} 1-\eta & 1-\eta & 1+\eta & 1+\eta \end{array} \right] \nonumber \\ \tilde{N}_{2}(\xi) = \frac{1}{4} \left[ \begin{array}{cccc} 1-\xi & 1 +\xi & 1+\xi & 1-\xi \end{array} \right]. \label{eqn:fieldredistribute} \end{eqnarray} Note that, no special integration rule is required for evaluating the shear terms. A numerical integration based on the 2 $\times$ 2 Gaussian rule is used to evaluate all the terms.
\section{Numerical Results} \label{numres} In this section, we present the critical aerodynamic pressure and the critical frequency of laminated composite plates immersed in a supersonic flow using 4-noded quadrilateral element and unified formulation. In this study, we use a hybrid displacement assumption, where the in-plane displacements $u$ and $v$ are expressed as sinusoidal expansion in the thickness direction, and the transverse displacement, $w$ is quadratic in the thickness direction. We refer to this theory as SINUS-W2. The displacements are expressed as: \begin{align} u(x,y,z,t) &= u_o(x,y,t) + zu_1(x,y,t) + \sin \left( \frac{\pi z}{h} \right) u_2(x,y,t) \nonumber \\ v(x,y,z,t) &= v_o(x,y,t) + zv_1(x,y,t) + \sin \left( \frac{\pi z}{h}\right) v_2(x,y,t) \nonumber \\ w(x,y,z,t) &= w_o(x,y,t) + zw_1(x,y,t) + z^2 w_2(x,y,t) \end{align} where $u_o, v_o$ and $w_o$ are translations of a point at the middle-surface of the plate, $w_2$ is higher order translation, and $u_1, v_1, u_3$ and $v_3$ denote rotations~\cite{touratier1991} and considers a quadratic variation of the transverse displacement $w$ allowing for through-the-thickness deformations. Both simply supported and clamped boundary conditions are considered in this study and the influence of the flow direction is also studied. In all cases, we present the non dimensionalized critical aerodynamic pressure, $\lambda_{cr}$ and critical frequency $\omega_{cr}$ as, unless specified otherwise: \begin{eqnarray} \omega^\ast_{cr} = \omega_{cr} a^2 \sqrt{ \frac{\rho h}{D}} \nonumber \\ \lambda^\ast_{cr} = \lambda_{cr} \frac{a^3}{D} \label{eqn:nondimfreq} \end{eqnarray} where $D = {E_2 h^3 \over 12(1-\nu^2)}$ is the bending rigidity of the plate, $E_2, \nu$ are the Young's modulus and Poisson's ratio and $\rho$ is the mass density.
Before proceeding with the detailed numerical study, the formulation developed herein is validated against available results pertaining to the critical aerodynamic pressure and the critical frequency for a 5-layered laminated square plate. Table \ref{tab:meshconvefreq} presents the convergence of the first three fundamental frequencies with mesh size. A structured mesh of 30$\times$30 is found to be adequate for this study. It can be seen that the results from the present formulation are in good agreement with those in the literature. Further numerical studies are performed with a structured quadrilateral mesh. Table \ref{tab:meshconveflutter} presents the convergence of the flutter bounds for a clamped square laminated plate with $a/h=$ 100. The flutter bounds are computed for both the cases: without damping and with damping. It is seen that the results agree well with those in the literature.
\begin{table}[htbp] \centering \renewcommand{1.2}{1.2} \caption{Convergence of the non-dimensional natural frequencies $\Omega = \omega \frac{a^2}{\pi^2 h} \sqrt{ \frac{\rho}{E_2}} $ of a 5-layered laminated square plate $[45^\circ/-45^\circ/45^\circ/-45^\circ/45^\circ]$ with $E_L/E_T=$ 40, $G_{LT}/E_{T}=$ 0.6, $G_{TT}/E_T=$ 0.5, $\nu_{LT}=$0.25. } \begin{tabular}{llrrr} \hline Mesh && \multicolumn{3}{c}{Modes} \\ \cline{3-5} && Mode 1 & Mode 2 & Mode 3 \\ \hline 5$\times$5 && 2.5593 & 5.7514 & 7.2116 \\ 10$\times$10 && 2.4571 & 5.1479 & 6.3702 \\ 14$\times$14 && 2.4413 & 5.0508 & 6.2475 \\ 20$\times$20 && 2.4316 & 5.0008 & 6.1809 \\ 30$\times$30 && 2.4254 & 4.9746 & 6.1435 \\ Ref.~\cite{singhaganapathi2005} && 2.4343 & 4.9854 & 6.1823 \\ Ref.~\cite{wang1997} && 2.4339 & 4.9865 & 6.1818 \\ \hline \end{tabular} \label{tab:meshconvefreq} \end{table}
\begin{table}[htbp] \centering \renewcommand{1.2}{1.2} \caption{Convergence of natural frequencies and flutter bounds for $[(0^\circ/90^\circ)_{2\rm{s}}]$ boron/epoxy clamped laminate with $a/b=$ 1.0 and $a/h=$ 100. } \begin{tabular}{lrrcrrrrr} \hline Mesh & \multicolumn{2}{c}{Modes} && \multicolumn{2}{c}{Without damping} && \multicolumn{2}{c}{With Damping}\\ \cline{2-3}\cline{5-6}\cline{8-9} & Mode 1 & Mode 2 && $\lambda^\ast_{cr}$ & $\omega^\ast_{cr}$ && $\lambda^\ast_{cr}$ & $\omega^\ast_{cr}$\\ \hline 5$\times$5 & 26.5697 & 59.0459 && 866.60 & 58.78 && 883.44 & 59.23 \\ 10$\times$10 & 24.1446 & 45.5384 && 513.48 & 48.37 && 530.31 & 49.02\\ 14$\times$14 & 23.7904 & 43.9233 && 479.88 & 47.24 && 496.72 & 47.93 \\ 20$\times$20 & 23.6064 & 43.1125 && 464.26 & 46.71 && 481.09 & 47.42 \\ 30$\times$30 & 23.5094 & 42.6922 && 455.66 & 46.41 && 472.50 & 47.14 \\ Integral equation method~\cite{srinivasanbabu1987} & 23.33 & 53.77 && - & - && 446.36 & 46.09 \\ Series solution.~\cite{srinivasanbabu1987} & 23.63 & 53.76 && - & - && 474.60 & 47.19 \\ Classical lamination theory~\cite{leecho1991} & 23.34 & 42.30 && - & - && 471.16 & 46.89 \\ \hline \end{tabular} \label{tab:meshconveflutter} \end{table}
Next, the influence of the boundary conditions, the plate thickness and the direction of the flow on the flutter bounds is numerically investigated. Table~\ref{tab:ahbcinfluence} presents the influence of the boundary conditions and the plate thickness for a square laminated plate with the following stack up sequence $[(0^\circ/90^\circ)_{\rm 2s}]$ immersed in a normal flow. It is seen that with increasing plate thickness, the flutter bounds decrease as expected. Also, the effect of damping is to increase the critical aerodynamic pressure and the critical frequency. The effect of the boundary conditions on the flutter bounds is also seen in Table~\ref{tab:ahbcinfluence}. The influence of the plate aspect ratio $a/b$ and the flow angle $\theta^\prime$ is shown in \frefs{fig:abratioonlambdaomega} - \ref{fig:thratioonlambdaomega}. The flow is considered to be along the x-direction. It is seen from \fref{fig:abratioonlambdaomega} that as the plate aspect ratio $(a/b)$ increases, the flutter bounds decrease for the stack up sequence considered here. The effect of the flow angle $\theta^\prime$ on the flutter bounds of a square plate is shown in \fref{fig:thratioonlambdaomega}. With increasing flow angle, the flutter bounds, viz., the critical aerodynamic pressure $\lambda^\ast_{\rm cr}$ and the critical frequency $\omega^\ast_{\rm cr}$, initially increase until $\theta^\prime=$ 20$^\circ$. Upon further increase of the flow angle, the flutter bounds decrease monotonically until they reach a minimum at $\theta^\prime=$ 90$^\circ$. The flutter behaviour is symmetric with respect to the flow angle $\theta^\prime=$ 90$^\circ$.
\begin{table}[htbp] \centering \renewcommand{\arraystretch}{1.2} \caption{Influence of the plate thickness $a/h$ and the support conditions on the flutter characteristics of laminated plates immersed in a supersonic flow with $a/b=$ 1 and flow angle $\theta^\prime=$ 0$^\circ$.} \begin{tabular}{lrrcrrrrrr} \hline Boundary & $a/h$& \multicolumn{2}{c}{Modes} && \multicolumn{2}{c}{Without damping} && \multicolumn{2}{c}{With Damping}\\ \cline{3-4}\cline{6-7}\cline{9-10} & & Mode 1 & Mode 2 && $\lambda^\ast_{cr}$ & $\omega^\ast_{cr}$ && $\lambda^\ast_{cr}$ & $\omega^\ast_{cr}$\\ \hline \multirow{2}{*}{CCCC} & 100 & 23.5094 & 42.6922 && 455.66 & 46.41 && 472.50 & 47.14 \\ & 10 & 15.7281 & 25.2179 && 139.26 & 27.05 && 145.51 & 27.74 \\ \cline{2-10} \multirow{2}{*}{SSSS} & 100 & 10.9018 & 26.4822 && 251.76 & 28.67 && 258.01 & 29.12 \\ & 10 & 9.6205 & 16.5652 && 154.88 & 24.39 && 166.14 & 25.62 \\ \hline \end{tabular} \label{tab:ahbcinfluence} \end{table}
\begin{figure}
\caption{Influence of the plate aspect ratio $a/b$ on the flutter parameters, viz., the critical pressure $\lambda^\ast_{\rm cr}$ and the critical frequency $\omega^\ast_{\rm cr}$ for a simply supported square plate with $a/h=$ 100. The flow is normal to the plate, i.e., flow angle $\theta^\prime=$ 0$^\circ$.}
\label{fig:abratioonlambdaomega}
\end{figure}
\begin{figure}\caption{Influence of the flow angle $\theta^\prime$ on the flutter parameters, viz., the critical pressure $\lambda^\ast_{\rm cr}$ and the critical frequency $\omega^\ast_{\rm cr}$, for a simply supported square plate with $a/h=$ 100.}\label{fig:thratioonlambdaomega}
\end{figure}
\section{Conclusions} The flutter characteristics of laminated composites immersed in a supersonic flow have been analyzed within the framework of unified formulation. The plate kinematics is based on sinusoidal theory and a quadratic variation of the transverse displacement through the thickness is considered. A shear flexible four-noded quadrilateral plate element was used to discretize the domain and the aerodynamic force is accounted for assuming the first-order Mach number approximation potential flow theory. The results from the present formulation are in very good agreement with the results available in the literature. The influence of the plate aspect ratio and the flow angle is numerically studied.
\section*{References}
\end{document} | arXiv | {
"id": "1312.4233.tex",
"language_detection_score": 0.6856604218482971,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Backward volume contraction for endomorphisms]{Backward volume contraction for endomorphisms with eventual volume expansion}
\author{Jos\'e F. Alves} \address{Departamento de Matem\'atica Pura, Faculdade de Ci\^encias do Porto\\ Rua do Campo Alegre 687, 4169-007 Porto, Portugal} \email{jfalves@fc.up.pt}
\author{Vilton Pinheiro} \address{Departamento de Matem\'atica, Universidade Federal da Bahia\\ Av. Ademar de Barros s/n, 40170-110 Salvador, Brazil.} \email{viltonj@ufba.br}
\author{Armando Castro} \address{Departamento de Matem\'atica, Universidade Federal da Bahia\\ Av. Ademar de Barros s/n, 40170-110 Salvador, Brazil.} \email{armando@im.ufba.br}
\date{\today}
\thanks{Work carried out at the Federal University of Bahia. Partially supported by FCT through CMUP and UFBA}
\maketitle
\begin{abstract} We consider smooth maps on compact Riemannian manifolds. We prove that under some mild condition of eventual volume expansion Lebesgue almost everywhere we have uniform backward volume contraction on every pre-orbit for Lebesgue almost every point.
\end{abstract}
\section{Statement of results}
Let $M$ be a compact Riemannian manifold and let $\operatorname{Leb}$ be a volume form on $M$ that we call Lebesgue measure. We take $f\colon M\to M$ any smooth map.
Let $0<a_1\le a_2 \le a_3\le \dots$ be a sequence converging to infinity. We define
\begin{equation}\label{aga}
h(x)=\min\{n>0 \colon |\det Df^n(x)|\ge a_n\}, \end{equation} if this minimum exists, and $h(x)=\infty$, otherwise. For $n\ge 1$, we take
\begin{equation}\label{gamma}
\Gamma_n=\{x\in M \colon h(x) \ge n\}. \end{equation}
\begin{theorem} \label{JCtheo} Assume that $h\in L^p(\operatorname{Leb})$, for some $p>3$, and take $\gamma<(p-3)/(p-1)$. Choose any sequence $0<b_1\le b_2 \le b_3\le \dots$ such that $b_kb_n\ge b_{k+n}$ for every $k,n\in \NN$, and assume that there is $n_0\in\NN$ such that $b_n\le \min\left\{a_n,\operatorname{Leb}(\Gamma_n)^{-\gamma}\right\} $ for every $n\ge n_0$. Then, for $\operatorname{Leb}$ almost every $x\in M$, there exists $C_x>0$
such that $| \det Df^n(y)|>C_x b_n$ for every $y\in f^{-n}(x).$
\end{theorem}
We say that $f\colon M\to M $ is {\em eventually volume expanding} if there exists $\lambda>0$ such that for Lebesgue almost every $x\in M$
\begin{equation}
\sup_{n\ge 1}\frac1n\log|\det Df^n(x)|> \lambda.
\end{equation} Let $h$ and $\Gamma_n$ be defined as in~(\ref{aga}) and~(\ref{gamma}), associated to the sequence $a_n=e^{\lambda n}$.
\begin{corollary} \label{Mtheo} If $f$ is eventually volume expanding, then for Le\-bes\-gue almost every point $x\in M$ there are $C_x>0$ and $\sigma_n\to \infty$
such that $| \det Df^n(y)|>C_x \sigma_n $ for every $y\in f^{-n}(x)$. Moreover, given $\alpha>0$ there is $\beta>0$ such that \begin{enumerate} \item if $\operatorname{Leb}(\Gamma_n)\le \co(e^{-\alpha n})$, then we may take $ \sigma_n \ge e^{\beta n}$;
\item if $\operatorname{Leb}(\Gamma_n)\le \co(e^{-\alpha n^\tau})$ for some $\tau>0$, then we may take $\sigma_n \ge e^{\beta n^\tau}$;
\item if $\operatorname{Leb}(\Gamma_n)\le \co(n^{-\alpha})$ and $\alpha>2$, then we may take $\sigma_n \ge n^\beta$. \end{enumerate}
\end{corollary}
Specific rates will be obtained in Section~\ref{se.examples} for some eventually volume expanding endomorphisms. In particular, non-uniformly expanding maps such as quadratic maps and Viana maps will be considered.
\section{Concatenated collections}
Let $(U_n)_n$ be a collection of measurable subsets
of $M$ whose union covers a full Lebesgue measure subset of $M$. We say that $(U_n)_n$ is a {\em concatenated collection}
if:
$$x\in U_n \quad\mbox{and}\quad f^n(x)\in U_m\quad\Rightarrow\quad x\in
U_{n+m}.$$ Given $x\in \bigcup_{n\ge 1} U_n$, we define
$u(x)$ as the minimum $n\in\NN$ for which $x\in U_n$. Note that by definition we have $x\in U_{u(x)}$.
We define the {\em chain generated by $x\in \bigcup_{n\ge 1} U_n$} as
$C(x)=\{x,f(x),\dots,f^{u(x)-1}(x)\}.$
\begin{lemma} \label{JClemma1} Let $(U_n)_n$ be a
concatenated collection. If $$\sum_{n\ge 1}\sum_{j=0}^{n-1}\operatorname{Leb}(f^j(u^{-1}(n)))<\infty,$$ then we have $ \sup\left\{\,u(y)\ \colon \; y\in \bigcup_{n\ge 1} U_n\;\mbox{and}\,\; x\in C(y)\,\right\}<\infty $ for Leb\-es\-gue almost every $x\in M$. \end{lemma}
\begin{proof}
Assume that for a given $x\in M$ there exists an infinite number of chains
$C_j=\left\{y_j,f(y_j), \dots ,f^{s_j-1}(y_j)\right\}$, $ j\ge 1$, containing $x$
with $s_j\to\infty$. For each $j\ge1$ let $1\le r_j<s_j$ be such that $x=f^{r_j}(y_j)$.
First we verify that $\lim r_j=\infty$. If not, then passing to a subsequence, we may assume that there is $N>0$ such that $r_j<N$ for every $j\ge1$. This implies that $y_j\in\bigcup_{i=1}^{N}f^{-i}(x)$ for every $j\ge1$. Since $\#(\bigcup_{i=1}^{N}f^{-i}(x))<\infty$ and the number of chains is infinite, we have a contradiction.
Since $r_j\to\infty$ and $x=f^{r_j}(y_j)\in f^{r_j}(u^{-1}(s_j))$, then we have $x\in\bigcup_{n\ge k}\bigcup_{j=0}^{n-1}f^j(u^{-1}(n))$ for every $k\ge 1$. Since we are assuming $\sum_{n\ge 1}\sum_{j=0}^{n-1}\operatorname{Leb}(f^j(u^{-1}(n)))<\infty$, we have $\operatorname{Leb}\big(\bigcup_{n\ge k}\bigcup_{j=0}^{n-1}f^j(u^{-1}(n))\big)\to 0, $ when $k\to\infty$. This completes the proof of Lemma~\ref{JClemma1}. \end{proof}
\begin{lemma} \label{JClemma2} Let $(U_n)_n$ be a concatenated collection. If $$\sup\left\{\,u(y)\ \colon \; y\in \cup_{n\ge 1} U_n\;\mbox{and}\,\; x\in C(y)\,\right\}\le N,$$ then $f^{-n}(x)\subset U_{n}\cup \dots \cup U_{n+N}$ for all $n\ge1$. \end{lemma}
\begin{proof} Assume that $\sup\left\{\,u(y)\ \colon \; y\in \cup_{n\ge 1} U_n\;\mbox{and}\,\; x\in C(y)\,\right\}\le N$, and take $z\in f^{-n}(x)$. Let $z_j=f^j(z)$ for each $j\ge 0$. We distinguish the cases $x\in C(z)$ and $x\notin C(z)$. If $x\in C(z)$, then $n\le u(z)\le n+N$. Hence
$z\in U_{u(z)}\subset U_n\cup\cdots\cup U_{n+N}.$ If $x\notin C(z)$, then letting $u_0=u(z)$ we must have $u_0<n$. Let $u_1=u(z_{u_0})$. If $u_0+u_1< n$ we take $u_2=u(z_{u_0+u_1})$. We proceed in this way until we find the first $s\le n $ such that $n\le u_0+ \dots +u_s$. Note that $u_s=u(z_{u_0+\cdots +u_{s-1}})$, and by the choice of $s$ we must have $x\in C(z_{u_0+\cdots +u_{s-1}})$. Our assumption implies that $u(z_{u_0+\cdots +u_{s-1}})\le N$, and so $u_0+ \dots +u_s\le n+N$. By construction we have
$$
z \in U_{u_0} $$
$$
f^{u_0}(z)=z_{u_0} \in U_{u_1} $$
$$
f^{u_0+u_1}(z)=z_{u_0+u_1} \in U_{u_2}$$
$$\vdots $$
$$
f^{u_0+\cdots+u_{s-1}}(z)=z_{u_0+\cdots+u_{s-1}}\in U_{u_s}
$$ By the definition of a concatenated collection we conclude that $z\in U_{u_0+u_1+\dots+u_s}$. \end{proof}
\section{Proofs of main results}\label{se.general}
Let us now prove Theorem \ref{JCtheo}. Suppose that $h\in L^p(\operatorname{Leb})$, for some $p>3$. This implies that $\sum_{n\ge1}n^p\operatorname{Leb}(h^{-1}(n))<\infty$, and so there exists some constant $K>0$ such that $$\operatorname{Leb}(h^{-1}(n))\le Kn^{-p},\quad\text{for every $n\ge1$.}$$ Now, taking $0<\gamma<(p-3)/(p-1)$ we have for some $K'>0$
$$ \sum_{n=1}^{\infty}n \left(\sum_{k= n}^{\infty}\operatorname{Leb}(h^{-1}(k))\right)^{1-\gamma}\le \sum_{n=1}^{\infty}n (K'/n^{p-1})^{1-\gamma} <\infty.
$$
Defining $$U_n=\{x\in M\ \colon |\det Df^n(x)|\ge b_n\},$$ then we have that $(U_n )_n$ is a concatenated collection with respect to the Lebesgue measure. Moreover, setting $$U^*_n= U_n\setminus(
U_1\cup...\cup U_{n-1})$$ one has
$U^*_n\subset \bigcup_{m\ge n}h^{-1}(m)$, for otherwise there would be $x\in U^*_n\cap h^{-1}(m)$ with $m<n$, and so $a_m\ge b_m>|\det Df^m(x)|\ge a_m,$ which is not possible. As $|\det Df^j(x)|< b_j$ for every $x\in U^*_n$ and $j<n$, we get $\operatorname{Leb}(f^j(U^*_n))\le b_j \operatorname{Leb}(U^*_n)$ for each $j<n$. Hence \begin{align*} \sum_{n=n_0+1}^{\infty}\sum_{j=0}^{n-1} \operatorname{Leb}(f^j(U^*_n))&\le \sum_{n=n_0+1}^{\infty}\sum_{j=0}^{n-1}b_j \operatorname{Leb}(U^*_n)\\ &\le \sum_{n=n_0+1}^{\infty}\sum_{j=0}^{n_0-1}b_j \operatorname{Leb}(U^*_n)+\sum_{n=n_0+1}^{\infty}\sum_{j=n_0}^{n-1}b_j \operatorname{Leb}(U^*_n)\\ &\le \sum_{j=0}^{n_0-1}b_j+\sum_{n=n_0+1}^{\infty}\sum_{j=n_0}^{n-1}b_j \operatorname{Leb}(U^*_n) \end{align*} Now we just have to check that the last term in the sum above is finite. Indeed, \begin{align*} \sum_{n=n_0+1}^{\infty}\sum_{j=n_0}^{n-1}b_j \operatorname{Leb}(U^*_n) &\le \sum_{n=n_0+1}^{\infty}\sum_{j=n_0}^{n-1}b_j\sum_{k= n}^{\infty} \operatorname{Leb}(h^{-1}(k))\\ &\le\sum_{n=n_0+1}^{\infty}n b_n\sum_{k= n}^{\infty} \operatorname{Leb}(h^{-1}(k))\\ &\le\sum_{n=n_0+1}^{\infty}n \left(\sum_{k=n}^{\infty} \operatorname{Leb}(h^{-1}(k))\right)^{-\gamma}\sum_{k= n}^{\infty} \operatorname{Leb}(h^{-1}(k))\\ &=\sum_{n=n_0+1}^{\infty}n \left(\sum_{k= n}^{\infty} \operatorname{Leb}(h^{-1}(k))\right)^{1-\gamma}<\infty. \end{align*}
Applying Lemmas~\ref{JClemma1}~and~\ref{JClemma2}, we get for each generic point $x\in M$ a positive integer number $N_x$ such that if $y\in f^{-n}(x)$ then $y\in U_{n+s}$ for some $0\le s\le N_x$. Therefore, $|\det Df^{n+s}(y)|>b_{n+s}\ge b_{n}$. Then, taking
$C_x=K^{-N_x}$, where $K=\sup\{|\det Df(z)|\colon z\in M\},$ we obtain the conclusion of Theorem~\ref{JCtheo}:
$$|\det Df^{n}(y)|=\frac{|\det Df^{n+s}(y)|}{|\det Df^{s}(x)|}>C_x b_{n}.$$
Now we explain how we use Theorem~\ref{JCtheo} to prove Corollary~\ref{Mtheo}. Recall that in Corollary~\ref{Mtheo} we have $a_n=e^{\lambda n}$ for each $n\in \NN$.
Assume first that $\operatorname{Leb}(\Gamma_n)\le \co(e^{-c^\prime n})$ for some $c^\prime>0$. Then it is possible to choose $c>0$ such that
$b_n =e^{cn},$ for $n\ge n_0$.
The other two cases are obtained under similar considerations.
\section{Examples: non-uniformly expanding maps}\label{se.examples}
An important class of dynamical systems where we can immediately apply our results are the non-uniformly expanding dynamical maps introduced in \cite{ABV}. As particular examples of this kind of systems we present below quadratic maps and the higher dimensional Viana maps.
\paragraph*{Quadratic maps.}
Let $f_a\colon [-1,1]\to [-1,1]$ be given by $f_a(x)=1-ax^2$, for $0<a\le 2$. Results in \cite{BC1,J} give that for a positive Lebesgue measure set of parameters $f_a$ is non-uniformly expanding. Ongoing work \cite{F} gives that for a positive Lebesgue measure set of parameters there are $C,c>0$ such that $\operatorname{Leb}(\Gamma_n)\le Ce^{-c n}$ for every $n\ge1$.
Thus, it follows from Corollary~\ref{Mtheo} that {\em we may find $\beta>0$ such that for Lebesgue almost every $x\in I$ there is $C_x>0$
such that $| (f^n)'(y)|>C_x e^{\beta n}$ for every $y\in f^{-n}(x)$.} \paragraph*{Viana maps.}
Let $a_0\in(1,2)$ be such that the critical point $x=0$ is pre-periodic for the quadratic map $Q(x)=a_0-x^2$. Let $S^1=\RR/\ZZ$ and $b:S^1\rightarrow \RR$ given by $b(s)=\sin(2\pi s)$. For fixed small $\alpha>0$, consider the map $\hat f$ from $S^1\times\RR$ into itself given by $\hat f(s, x) = \big(\hat g(s),\hat q(s,x)\big)$,
where $\hat q(s,x)=a(s)-x^2$ with $a(s)=a_0+\alpha b(s)$, and $\hat g$ is the uniformly expanding map of $S^1$ defined by $\hat{g}(s)=ds$ (mod $\ZZ$) for some integer $d\ge2$. For $\alpha>0$ small enough there is an interval $I\subset (-2,2)$ for which $\hat f(S^1\times I)$ is contained in the interior of $S^1\times I$. Thus, any map $f$ sufficiently close to $\hat f$ in the $C^0$ topology has $S^1\times I$ as a forward invariant region. Moreover, there are $C,c>0$ such that $\operatorname{Leb}(\Gamma_n)\le Ce^{-c\sqrt n}$ for every $n\ge1$; see \cite{AA,BST,V}.
Thus, it follows from Corollary~\ref{Mtheo} that {\em we may find
$\beta>0$ such that for Lebesgue almost every $X\in S^1\times I$ there is a constant $C_X>0$ such that $| \det Df^n(Y)|>C_X e^{\beta\sqrt n}$ for every $Y\in f^{-n}(X)$.}
\end{document} | arXiv | {
"id": "0403537.tex",
"language_detection_score": 0.5802704691886902,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{frontmatter}
\renewcommand{\arabic{footnote}}{\fnsymbol{footnote}}
\title{Solving Dependency Quantified Boolean Formulas \\ Using Quantifier Localization\textsuperscript{\footnotemark[1]}}
\author{Aile Ge-Ernst$^1$}
\author{Christoph Scholl$^1$}
\author{Juraj S\'\i\v c$^{2,3}$}
\author{Ralf Wimmer$^{4,1}$}
\address{$^1$Department of Computer Science, Albert-Ludwigs-Universit\"at Freiburg \\
Freiburg im Breisgau, Germany \\
{\upshape\texttt{\{\href{mailto:geernsta@informatik.uni-freiburg.de}{geernsta},
\href{mailto:scholl@informatik.uni-freiburg.de}{scholl},
\href{mailto:wimmer@informatik.uni-freiburg.de}{wimmer}\}@informatik.uni-freiburg.de}}\\[\baselineskip]
$^2$Faculty of Informatics, Masaryk University \\
Brno, Czech Republic \\[\baselineskip]
$^3$Faculty of Information Technology, Brno University of Technology \\
Brno, Czech Republic \\
{\upshape\texttt{\href{mailto:sicjuraj@fit.vutbr.cz}{sicjuraj@fit.vutbr.cz}}}\\[\baselineskip]
$^4$Concept Engineering GmbH, Freiburg im Breisgau, Germany \\
{\upshape\url{https://www.concept.de}}
}
\date{\today}
\begin{abstract}
Dependency quantified Boolean formulas (DQBFs) are a powerful formalism, which
subsumes quantified Boolean formulas (QBFs) and allows an explicit specification
of dependencies of existential variables on universal variables.
Driven by the needs of various applications which
can be encoded by DQBFs in a natural, compact, and elegant way,
research on DQBF solving has emerged in the past few years.
However, research focused on closed DQBFs in prenex form (where all quantifiers
are placed in front of a propositional formula),
while non-prenex DQBFs have almost not been studied in the literature.
In this paper, we provide a formal definition for syntax and semantics of
non-closed non-prenex DQBFs and prove useful properties enabling quantifier localization.
Moreover, we make use of our theory by integrating quantifier localization into
a state-of-the-art DQBF solver. Experiments with prenex DQBF benchmarks, including all instances from
the QBFEVAL'18--'20 competitions,
clearly show that quantifier localization pays off in this context. \end{abstract}
\begin{keyword}
Dependency Quantified Boolean Formulas \sep Henkin quantifier \sep quantifier localization \sep satisfiability \sep solver technology
\end{keyword} \end{frontmatter}
\renewcommand{\arabic{footnote}}{\fnsymbol{footnote}} \footnotetext[1]{This work is an extended version of \cite{geernst-et-al-fmcad-2019}. We added detailed proofs for all theorems, new theory on equisatisfiability under substituting subformulas (together
with the corresponding proofs), new algorithms adjusted to the extended theory, and updated experimental results.} \renewcommand{\arabic{footnote}}{\arabic{footnote}}
\section{Introduction} \label{sec:introduction} \noindent During the last two decades enormous progress in the solution of quantifier-free Boolean formulas (SAT) has been observed. Nowadays, SAT solving is successfully used in many applications, \eg in planning~\cite{RintanenHN06}, automatic test pattern generation~\cite{EggersglussD12,CzutroPLERB10}, and formal verification of hard- and software systems~\cite{BiereCCSZ03,ClarkeBRZ01,IYG+:2008}. Motivated by the success of SAT solvers, efforts have been made, \eg \cite{LonsingB10,Janota2012,JanotaM15,RabeT15}, to consider the more general formalism of quantified Boolean formulas (QBFs).
Although QBFs are capable of encoding decision problems in the PSPACE complexity class, they are not powerful enough to succinctly encode many natural and practical problems that involve decisions under partial information. For example, the analysis of games with incomplete information~\cite{PetersonRA01}, topologically constrained synthesis of logic circuits~\cite{BalabanovCJ14}, synthesis of safe controllers~\cite{BloemKS14}, synthesis of fragments of linear-time temporal logic (LTL)~\cite{CHOP:2013}, and verification of partial designs~\cite{SchollB01,gitina-et-al-iccd-2013} fall into this category and require an even more general formalism, which is known as \emph{dependency quantified Boolean formulas (DQBFs)} \cite{PetersonRA01}.
Unlike QBFs, where an existential variable implicitly depends on all the universal variables preceding its quantification level, DQBFs admit that arbitrary dependency sets are explicitly specified. Essentially, these quantifications with explicit dependency sets correspond to Henkin quantifiers~\cite{Henkin61}. The semantics of a DQBF can be interpreted from a game-theoretic viewpoint as a game played by one universal player and multiple non-cooperative existential players with incomplete information, each partially observing the moves of the universal player as specified by his/her own dependency set. A DQBF is true if and only if the existential players have winning strategies. This specificity of dependencies allows DQBF encodings to be exponentially more compact than their equivalent QBF counterparts. In contrast to the PSPACE-completeness of QBF, the decision problem of DQBF is NEXPTIME-complete~\cite{PetersonRA01}.
Driven by the needs of the applications mentioned above, research on DQBF solving has emerged in the past few years, leading to solvers such as \textsc{iDQ}~\cite{FrohlichKBV14}, HQS~\cite{gitina-et-al-date-2015,wimmer-et-al-sat-2015,WimmerKBS017}, dCAQE~\cite{TentrupR19}, iProver~\cite{Korovin08}, and DQBDD~\cite{Sic:2020}.
As an example for a DQBF, consider the formula \begin{equation*}
\forall x_1 \forall x_2 \exists y_1(x_1) \exists y_2(x_2) \, : \, (x_1 \wedge x_2) \equiv (y_1 \equiv y_2) \end{equation*} from \cite{Rabe17}. Here $\forall x_1 \forall x_2 \exists y_1(x_1) \exists y_2(x_2)$ is called the quantifier prefix and $(x_1 \wedge x_2) \equiv (y_1 \equiv y_2)$ the matrix of the DQBF. This DQBF asks whether there are choices for $y_1$ only depending on the value of $x_1$, denoted $\exists y_1(x_1)$, and for $y_2$ only depending on $x_2$, denoted $\exists y_2(x_2)$, such that the Boolean formula after the quantifier prefix evaluates to true for all assignments to $x_1$ and $x_2$.\footnote{We can interpret this as a \emph{game} played by $y_1$ and $y_2$ against $x_1$ and $x_2$, where $y_1$ and $y_2$ only have incomplete information on actions of $x_1$, $x_2$, respectively.} The Boolean formula in turn states that the existential variables $y_1$ and $y_2$ have to be equal iff $x_1$ and $x_2$ are true. Since $y_1$ can only `see' $x_1$ and $y_2$ only $x_2$, $y_1$ and $y_2$ `cannot coordinate' to satisfy the constraint. Thus, the formula is false. Now consider a straightforward modification of this DQBF into a QBF with only implicit dependency sets. Changing the quantifier prefix into a QBF quantifier prefix $\forall x_1 \exists y_1 \forall x_2 \exists y_2$ means that $y_1$ may depend on $x_1$, but $y_2$ may depend on $x_1$ and $x_2$. In that case the formula would be true. Changing the prefix into $\forall x_2 \exists y_2 \forall x_1 \exists y_1$ has a similar effect.
So far, syntax and semantics of DQBFs have been defined only for closed prenex forms (see for instance \cite{BalabanovCJ14}), \ie for DQBFs where all quantifiers are placed in front of the matrix and all variables occurring in the matrix are either universally or existentially quantified. In this paper, we consider quantifier localization for DQBF, which transforms prenex DQBFs into non-prenex DQBFs for more efficient DQBF solving.
Quantifier localization for \emph{QBF} has been used with great success for image and pre-image computations in the context of sequential equivalence checking and symbolic model checking where it has been called ``early quantification''. Here existential quantifiers were moved over \emph{AND} operations \cite{GB:94,HKB:96,CCL+:97,MKR+:2000}. In \cite{Benedetti05c} the authors consider quantifier localization for QBFs where the matrix is restricted to conjunctive normal form (CNF). They move universal and existential quantifiers over \emph{AND} operations and propose a method to construct a tree-shaped quantifier structure from a QBF instance with linear quantifier prefix. Moreover, they show how to benefit from this structure in the QBF solving phase. This work has been used and generalized in \cite{PigorschS09} for a QBF solver based on symbolic quantifier elimination.
To the best of our knowledge, quantifier localization has not been considered for DQBF so far, apart from the seminal theoretical work on DQBF by Balabanov et al. \cite{BalabanovCJ14}, which considers --~as a side remark~-- quantifier localization for DQBF, transforming prenex DQBFs into non-prenex DQBFs. For quantifier localization they gave two propositions. However, a formal definition of the semantics of non-prenex DQBFs was missing in that work and, in addition, the two propositions are not sound, as we will show in our paper.
In this paper, we provide a formal definition of syntax and semantics of non-prenex non-closed DQBFs. The semantics is based on Skolem functions and is a natural generalization of the semantics for closed prenex DQBFs known from the literature. We introduce an alternative constructive definition of the semantics and show that both semantics are equivalent. Then we define rules for transforming DQBFs into equivalent or equisatisfiable DQBFs, which enable the translation of prenex DQBFs into non-prenex DQBFs. The rules are similar to their QBF counterparts, but it turns out that some of them need additional conditions for being sound for DQBF as well. Moreover, the proof techniques are completely different from those for their corresponding QBF counterparts. We provide proofs for all the rules. Finally, we show a method that transforms a prenex DQBF into a non-prenex DQBF based on those rules. It is inspired by the method constructing a tree-shaped quantifier structure from \cite{Benedetti05c} and works for DQBFs with an arbitrary formula (circuit) structure for the matrix. The approach tries to push quantifiers ``as deep into the formula'' as possible. Whenever a sub-formula fulfills conditions, which we will specify in Section~\ref{sec:ncnp}, it is processed by symbolic quantifier elimination. When traversing the structure back, quantifiers which could not be eliminated are pulled back into the direction of the root. At the end, a prenex DQBF solver is used for the simplified formula. Experimental results demonstrate the benefits of our method when applied to a set of more than 5000 DQBF benchmarks (including all QBFEVAL'18--'20 competition~\cite{qbfeval18,qbfeval19,qbfeval20} benchmarks).
The paper is structured as follows: In Section~\ref{sec:preliminaries} we provide preliminaries needed to understand the paper, including existing transformation rules for QBFs. Section~\ref{sec:ncnp} contains the main conceptual results of the paper whereas Section~\ref{sec:algorithm} shows how to make use of them algorithmically. Section~\ref{sec:experiments} presents experimental results and Section~\ref{sec:conclusion} concludes the paper.
\section{Preliminaries} \label{sec:preliminaries}
\noindent Let $\varphi,\kappa$ be quantifier-free Boolean formulas over the set $V$ of variables and $v\in V$. We denote by $\varphi[\sfrac{\kappa}{v}]$ the Boolean formula which results from $\varphi$ by replacing all occurrences of $v$ (simultaneously) by $\kappa$. For a set $V'\subseteq V$ we denote by $\assign(V')$ the set of \emph{Boolean assignments} for $V'$, \ie
$\assign(V')=\bigl\{\mu\,\big|\,\mu:V'\to\{0,1\}\bigr\}$. As usual, for a Boolean assignment $\mu\in\assign(V')$ and $V'' \subseteq V'$
we denote the restriction of $\mu$ to $V''$ by $\mu|_{V''}$. For each formula $\varphi$ over $V$, a variable assignment $\mu\in\assign(V)$ induces a truth value $0$ or $1$ of $\varphi$, which we call $\mu(\varphi)$. If $\mu(\varphi)=1$ for all $\mu\in\assign(V)$, then $\varphi$ is a \emph{tautology}. In this case we write $\vDash\varphi$.
A \emph{Boolean function} with the set of input variables $V$ is a mapping $f:\assign(V)\to\{0,1\}$. The set of Boolean functions over $V$ is denoted by $\boolf{V}$. The \emph{support} $\support(f)$ of a function $f\in\boolf{V}$ is defined by
$\support(f) \colonequals \{v \in V \; | \; \exists \mu, \mu' \in \assign(V) \mbox{ with } \mu(w) = \mu'(w) \, \text{ for all }w \in V\setminus \{v\} \mbox{ and } f(\mu) \neq f(\mu')\}$. $\support(f)$ is the set of variables from $V$ on which $f$ ``really depends''. The constant zero and constant one function are $\fzero$ and $\fone$, respectively. $\mathrm{ITE}$ denotes the if-then-else operator, \ie $\mathrm{ITE}(f,g,h) = (f\land g)\lor (\neg f\land h)$.
A function $f:\assign(V)\to\{0,1\}$ is \emph{monotonically increasing (decreasing)} in $v\in V$, if $f(\mu) \leq f(\mu')$ ($f(\mu') \leq f(\mu)$) for all assignments $\mu, \mu' \in\assign(V)$ with $\mu(w) = \mu'(w)$ for all $w \in V \setminus \{v\}$ and $\mu(v) \leq \mu'(v)$.
A quantifier-free Boolean formula $\varphi$ over $V$ defines a Boolean function $f_{\varphi}:\assign(V)\to\{0,1\}$ by $f_{\varphi}(\mu) \colonequals \mu(\varphi)$. When clear from the context, we do not differentiate between quantifier-free Boolean formulas and the corresponding Boolean functions, \eg if $\varphi$ is a Boolean formula representing $f_{\varphi}$, we write $\varphi[\sfrac{v'}{v}]$ for the Boolean function where the input variable $v$ is replaced by a (new) input variable $v'$.
Now we consider Boolean formulas with quantifiers. The usual definition for a \emph{closed prenex} DQBF is given as follows:
\begin{definition}[Closed prenex DQBF]
\label{def:dqbf_cp}
Let $V=\{x_1,\ldots,x_n,\allowbreak y_1,\ldots,y_m\}$ be a
set of Boo\-lean variables.
A \emph{dependency quantified Boolean formula} (DQBF) $\psi$ over $V$ has
the form
\begin{equation*}
\psi\colonequals
\forall x_1\forall x_2\ldots\forall x_n
\exists y_1(D_{y_1})\exists y_2(D_{y_2})\ldots\exists y_m(D_{y_m}):
\varphi
\label{eq:dqbf_syntax_cp}
\end{equation*}
where $D_{y_i}\subseteq\{x_1,\ldots,x_n\}$ for $i=1,\ldots,m$ is
the \emph{dependency set} of $y_i$, and $\varphi$ is a quantifier-free
Boolean formula over $V$, called the \emph{matrix} of $\psi$. \end{definition}
We denote the set of universal variables of $\psi$ by $\varall[\psi]=\{x_1,\ldots,x_n\}$ and its set of existential variables by $\varex[\psi] = \{y_1,\ldots,y_m\}$. The former part of $\psi$, $\forall x_1\forall x_2\ldots\forall x_n\allowbreak\exists y_1(D_{y_1})\exists y_2(D_{y_2})\ldots\exists y_m(D_{y_m})$, is called its \emph{prefix}. Sometimes we abbreviate this prefix as $Q$ such that $\psi = Q:\varphi$.
The semantics of closed prenex DQBFs is given as follows:
\begin{definition}[Semantics of closed prenex DQBF]
\label{def:dqbf_semantics_cp}
Let $\psi$ be a DQBF with matrix $\varphi$ as above. $\psi$ is
\emph{satisfiable} iff
there are functions $s_{y_i}:\assign(D_{y_i})\to\{0,1\}$
for $1\leq i\leq m$ such that replacing each
$y_i$ by (a Boolean formula for) $s_{y_i}$ turns
$\varphi$ into a tautology. Then the functions $(s_{y_i})_{i=1,\ldots,m}$ are called
\emph{Skolem functions} for $\psi$. \end{definition}
A DQBF is a QBF, if its dependency sets satisfy certain conditions: \begin{definition}[Closed prenex QBF]
\label{def:qbf}
Let $V = \{x_1,\ldots,x_n,\allowbreak y_1,\ldots,y_m\}$ be a set of Boolean variables.
A \emph{quantified Boolean formula (QBF)} (more precisely, a closed
QBF in prenex normal form) $\psi$ over $V$ is given by
$
\psi\colonequals\forall X_1 \exists Y_1 \ldots \forall X_k \exists Y_k :\varphi
$,
where
$k\geq 1$, $X_1,\ldots,X_k$ is a partition of the universal variables $\{x_1,\ldots,x_n\}$,
$Y_1,\ldots,Y_k$ is a partition of the existential variables $\{y_1,\ldots,y_m\}$,
$X_i\neq\emptyset$ for $i=2,\ldots,k$, and $Y_j\neq\emptyset$ for $j=1,\ldots,k-1$, and
$\varphi$ is a quantifier-free Boolean formula over $V$. \end{definition}
A QBF can be seen as a DQBF where the dependency sets are linearly ordered. A QBF $\psi\colonequals\forall X_1 \exists Y_1 \ldots \forall X_k \exists Y_k : \varphi$ is equivalent to the DQBF $\psi'\colonequals\forall x_1\ldots\forall x_n\exists y_1(D_{y_1})\ldots$ $\exists y_m(D_{y_m}):\varphi$ with $D_{y_i} = \bigcup_{j=1}^\ell X_j$ where $Y_\ell$ is the unique set with $y_i \in Y_\ell$, $1 \leq \ell \leq k$, $1 \leq i \leq m$.
Quantifier localization for QBF is based on the following theorem (see, \eg\cite{Benedetti05c}) which can be used to transform prenex QBFs into equivalent or equisatisfiable non-prenex QBFs (where the quantifiers are not necessarily placed before the matrix). Two QBFs $\psi_1$ and $\psi_2$ are equisatisfiable ($\psi_1 \approx \psi_2$), when $\psi_1$ is satisfiable iff $\psi_2$ is satisfiable.
\begin{theorem} \label{th:qbfquantifierlocalization}
Let $\op\in\{{\land}, {\lor}\}$ and let $\mathcal{Q} \in \{\exists, \forall\}$; set $\overline{\mathcal{Q}} = \exists$
if $\mathcal{Q} = \forall$, and $\overline{\mathcal{Q}} = \forall$ otherwise.
Let $\varfreesupp[\psi]$ be the set of all variables occurring in $\psi$ which are not bound by a quantifier.
The following holds for all QBFs:
\begin{small}
\begin{subequations}
\begin{align}
\neg (\mathcal{Q} x : \psi) &\quad\approx\quad \overline{\mathcal{Q}} x : (\neg \psi) \label{qbfapprox:neg} \\
\mathcal{Q} x : \psi &\quad\approx\quad \psi, \; \text{ if } x \notin \varfreesupp[\psi] \label{qbfapprox:indep} \\
\forall x : (\psi_1 \land \psi_2) &\quad\approx\quad (\forall x : \psi_1) \land (\forall x : \psi_2) \label{qbfapprox:forall_and} \\
\exists x : (\psi_1 \lor \psi_2) &\quad\approx\quad (\exists x : \psi_1) \lor (\exists x : \psi_2) \label{qbfapprox:exists_or} \\
\mathcal{Q} x:(\psi_1 \op \psi_2) & \quad\approx\quad \bigl(\psi_1 \op (\mathcal{Q} x:\psi_2)\bigr), \;
\text{ if } x \notin \varfreesupp[\psi_1] \label{qbfapprox:q_op} \\
\mathcal{Q} x_1\,\mathcal{Q} x_2:\psi &\quad\approx \quad \mathcal{Q} x_2\,\mathcal{Q} x_1:\psi \label{qbfapprox:q_q}
\end{align}
\end{subequations}
\end{small} \end{theorem}
\section{Non-Closed Non-Prenex DQBFs} \label{sec:ncnp}
\subsection{Syntax and Semantics} \noindent In this section, we define syntax and semantics of non-prenex DQBFs. Since the syntax definition is recursive, we need non-closed DQBFs as well.
\begin{figure*}
\caption{Rules defining the syntax of non-prenex non-closed DQBFs in negation normal form.}
\label{eq:disjoint}
\label{fig:rules}
\end{figure*}
\begin{definition}[Syntax]
\label{def:syntax}
Let $V$ be a finite set of Boolean variables. Let $\varphi^{-v}$ result from
$\varphi$ by removing $v$ from the dependency sets of all existential variables in $\varphi$.
The set $\npnc$ of \emph{non-closed non-prenex DQBFs in negation normal form} (NNF)
over $V$, the existential and universal variables as well as the free variables in their
support are defined by the rules given in Figure~\ref{fig:rules}.
As usual, $\npnc$ is defined to be the smallest set satisfying those rules.
$\varex[\psi]$ is the set of existential variables of $\psi$,
$\varall[\psi]$ the set of universal variables of $\psi$,
and $\varfreesupp[\psi]$ the set of free variables in the support of $\psi$.
$\var[\psi] \colonequals \varex[\psi] \dcup \varall[\psi] \dcup \varfreesupp[\psi]$ is
the set of variables occurring in $\psi$,
$\var[\psi]^Q \colonequals \varex[\psi]\dcup\varall[\psi]$ is the set of quantified variables of $\psi$,
and $\varfree[\psi] \colonequals V \setminus \var[\psi]^Q$ is the set of free variables of $\psi$.\footnote{
In contrast to the variables from $\varfreesupp[\psi]$, the variables from $\varfree[\psi]$ do not necessarily occur in $\psi$.
} \end{definition}
\begin{remark} For the sake of simplicity, we assume in Definition~\ref{def:syntax} that variables are either free or bound by some quantifier, but not both, and that no variable is quantified more than once. Every formula that violates this assumption can easily be brought into the required form by renaming variables. We restrict ourselves to NNF, since prenex DQBFs are not syntactically closed under negation~\cite{BalabanovCJ14}. For closed prenex DQBFs the (quantifier-free) matrix can be simply transformed into NNF by applying De Morgan's rules and omitting double negations (exploiting that $x\equiv \neg\neg x$) at the cost of a linear blow-up of the formula. \end{remark}
For two DQBFs $\psi_1,\psi_2$ we write $\psi_1\subformula\psi_2$ if $\psi_1$ is a subformula of $\psi_2$.
\begin{definition}[Skolem function candidates]
\label{def:skolem_function_candidates}
For a DQBF $\psi$ over variables $V$
in NNF, we define a \emph{Skolem function candidate}
as a mapping from existential and free variables to functions over universal variables
$s:\varfree[\psi]\dcup\varex[\psi]\to\boolf{\varall[\psi]}$ with
\begin{enumerate}
\item $\support\bigl(s(v)\bigr)=\emptyset$ for all $v\in\varfree[\psi]$, \ie $s(v)\in\{\fzero,\fone\}$, and
\item $\support\bigl(s(v)\bigr)\subseteq \bigl(D_v\cap\varall[\psi]\bigr)$ for all $v\in\varex[\psi]$.
\end{enumerate}
$\sfunc{\psi}$ is the set of all such Skolem function candidates. \end{definition}
That is, $\sfunc{\psi}$ is the set of all Skolem function candidates satisfying the constraints imposed by the dependency sets of the existential and free variables.
\begin{notation} \label{not:skolem} Given $s\in\sfunc{\psi}$ for a DQBF $\psi\in\npnc$, we write $s(\psi)$ for the formula that results from $\psi$ by replacing each variable $v$ for which $s$ is defined by $s(v)$ and omitting all quantifiers from $\psi$, \ie $s(\psi)$ is a quantifier-free Boolean formula, containing only variables from $\varall[\psi]$. \end{notation}
\begin{definition}[Semantics of DQBFs in NNF]
\label{def:sem}
Let $\psi\in\npnc$ be a DQBF over variables $V$. We define the semantics $\sem{\psi}$ of $\psi$ as follows:
\[
\sem{\psi} \colonequals
\bigl\{s\in\sfunc{\psi}\,\big|\,\vDash s(\psi)\bigr\}
=
\bigl\{s\in\sfunc{\psi}\,\big|\,\forall\mu\in\assign(\varall[\psi]): \mu\bigl(s(\psi)\bigr) = 1\bigr\}.
\]
$\psi$ is \emph{satisfiable} if $\sem{\psi}\neq\emptyset$; otherwise we call it \emph{unsatisfiable}.
The elements of $\sem{\psi}$ are called \emph{Skolem functions} for $\psi$. \end{definition} The semantics $\sem{\psi}$ of $\psi$ is the subset of $\,\sfunc{\psi}$ such that for all $s\in\sem{\psi}$ we have: Replacing each free or existential variable $v\in\varfree[\psi]\dcup\varex[\psi]$ with a Boolean expression for $s(v)$ turns $\psi$ into a tautology.
\begin{example} \label{ex1} Consider the DQBF \[
\psi\colonequals \forall x_1 \forall x_2 : \Bigl(
\bigl(x_1\equiv x_2\bigr) \vee \bigl(\exists y_1(x_2) : (x_1\not\equiv y_1)\bigr)
\Bigr) \] over the set of variables $\{x_1, x_2, y_1\}$. $y_1$ with dependency set $\{x_2\}$ is the only existential variable in $\psi$ and there are no free variables. Thus $\sfunc{\psi} = \{y_1 \mapsto \fzero, y_1 \mapsto \fone, y_1 \mapsto x_2, y_1 \mapsto \neg x_2\}$. It is easy to see that $s \colonequals y_1 \mapsto x_2$ is a Skolem function for $\psi$, since $\vDash s(\psi) = \bigl((x_1\equiv x_2) \vee (x_1\not\equiv x_2)\bigr)$, and that the other Skolem function candidates do not define Skolem functions. \end{example}
\begin{remark}
For closed prenex DQBFs the semantics defined here obviously coincides with the usual semantics
as specified in Definition~\ref{def:dqbf_semantics_cp} if we transform the (quantifier-free)
matrix into NNF first. \end{remark}
\begin{remark}
A (non-prenex) DQBF $\psi$ is a (non-prenex) QBF if every existential variable depends on
all universal variables in whose scope it is (and possibly on free variables as well). \end{remark}
The following theorem provides a constructive characterization of the semantics of a DQBF. \begin{restatable}{theorem}{semantics}
\label{th:semantics}
The set $\sem{\psi}$ for a DQBF $\psi$ over variables $V$ in NNF can be characterized recursively as follows:
\begin{subequations}
{\small\allowdisplaybreaks
\begin{align}
\sem{v} &= \bigl\{ s\in\sfunc{v}\,\big|\,s(v)=\fone \bigr\} \text{ for } v\in V_\psi,
\label{th:semantics:var} \\
\sem{\neg v} &= \bigl\{ s\in\sfunc{\neg v}\,\big|\,s(v) = \fzero\bigr\} \text{ for } v\in V_\psi,
\label{th:semantics:notvar} \\
\sem{(\varphi_1\land\varphi_2)} &= \bigl\{ s\in\sfunc{\varphi_1\land\varphi_2}\,\big|\, \label{th:semantics:and}
s_{|\varfree[\varphi_1]\dcup\varex[\varphi_1]}\in\sem{\varphi_1} \land s_{|\varfree[\varphi_2]\dcup\varex[\varphi_2]}\in\sem{\varphi_2}\bigr\},
\\
\sem{(\varphi_1\lor\varphi_2)} &= \bigl\{ s\in\sfunc{\varphi_1\lor\varphi_2}\,\big|\, \label{th:semantics:or}
s_{|\varfree[\varphi_1]\dcup\varex[\varphi_1]}\in\sem{\varphi_1} \lor s_{|\varfree[\varphi_2]\dcup\varex[\varphi_2]}\in\sem{\varphi_2}\bigr\},
\\
\sem{\exists v(D_v):\varphi^{-v}} &= \sem{\varphi^{-v}},
\label{th:semantics:exists} \\
\sem{\forall v:\varphi} &= \Bigl\{ t\in\sfunc{\forall v:\varphi}\,\Big|\, \label{th:semantics:forall}
\exists s_0, s_1\in\sem{\varphi}: s_0(v)=\fzero\land s_1(v)=\fone \; \land \\*
& \qquad \forall w\in\varfree[\forall v:\varphi]: t(w) = s_0(w) = s_1(w) \; \land \nonumber \\*
& \qquad \forall w\in\varex[\forall v:\varphi], v\notin D_w : t(w) = s_0(w) = s_1(w) \; \land \nonumber \\*
& \qquad \forall w\in\varex[\forall v:\varphi], v\in D_w : t(w) = \mathrm{ITE}\bigl(v,\ s_1(w),\ s_0(w)\bigr) \Bigr\}\nonumber
\end{align}} \end{subequations} \end{restatable}
For the proof as well as for the following example, we denote the semantics defined in Definition~\ref{def:sem}
by $\semdef{\psi}$ (\ie $\semdef{\psi}=\{ s\in\sfunc{\psi}\,|\,\vDash s(\psi)\}$) and the set that is characterized by Theorem~\ref{th:semantics} by $\semth{\psi}$.
\begin{proof}
$\semdef{\psi}=\semth{\psi}$ is shown by induction on the structure of $\psi$;
for details we refer to \ref{app:semproof}. \end{proof}
The following example illustrates the recursive characterization of Theorem~\ref{th:semantics} (and again the recursive Definition~\ref{def:syntax}).
\begin{example} \label{ex2} Let us consider the DQBF \[
\psi\colonequals \forall x_1 \forall x_2 : \Bigl(
\bigl(x_1\equiv x_2\bigr) \lor \bigl(\exists y_1(x_2) : (x_1\not\equiv y_1)\bigr)
\Bigr) \] over the set of variables $\{x_1, x_2, y_1\}$ from Example~\ref{ex1} again. We compute $\semth{\psi}$ recursively.
As an abbreviation for $\bigl((\neg x_1 \land y_1) \lor (x_1 \land \neg y_1)\bigr)$, $(x_1\not\equiv y_1)$ is a DQBF based on rules 1--4 of Definition~\ref{def:syntax} with $\varex[x_1\not\equiv y_1] = \varall[x_1\not\equiv y_1] = \emptyset$, $\varfreesupp[x_1\not\equiv y_1] = \{x_1, y_1\}$, $\varfree[x_1\not\equiv y_1] = \{x_1, x_2, y_1\}$. With Theorem~\ref{th:semantics}, \eqref{th:semantics:var}--\eqref{th:semantics:or} we get
$\semth{x_1\not\equiv y_1} = \bigl\{s : \{x_1, x_2, y_1\} \to\boolf{\emptyset} \; \big| \; s(y_1) \neq s(x_1)\bigr\}$.
For $\psi' \colonequals \bigl(\exists y_1(x_2) : (x_1\not\equiv y_1)\bigr)$, we obtain by rule 5: $\varall[\psi'] = \emptyset$, $\varex[\psi'] = \{y_1\}$, $\varfreesupp[\psi'] = \{x_1\}$. $\varfree[\psi'] = \{x_1, x_2\}$. According to Theorem~\ref{th:semantics}, \eqref{th:semantics:exists} we have $\semth{\exists y_1(x_2) : (x_1\not\equiv y_1)} = \semth{x_1\not\equiv y_1}$.
Similarly we obtain
$\semth{x_1\equiv x_2} = \bigl\{s : \{x_1, x_2, y_1\} \to\boolf{\emptyset} \; \big| \; s(x_1) = s(x_2)\bigr\}$.
Then, for $\psi'' \colonequals \bigl(\bigl(x_1\equiv x_2\bigr) \vee \bigl(\exists y_1(x_2) : (x_1\not\equiv y_1)\bigr)\bigr)$ we have $\varall[\psi''] = \emptyset$, $\varex[\psi''] = \{y_1\}$, $\varfreesupp[\psi''] = \varfree[\psi''] = \{x_1, x_2\}$, and by Theorem~\ref{th:semantics}, \eqref{th:semantics:or}
$\semth{\psi''} = \bigl\{s : \{x_1, x_2, y_1\} \to\boolf{\emptyset} \; \bigr| \; \bigl(s(x_1), s(x_2), s(y_1)\bigr) \in \{(\fzero, \fzero, \fzero), (\fzero, \fzero, \fone), (\fzero, \fone, \fone), (\fone, \fzero, \fzero), (\fone, \fone, \fzero), (\fone, \fone, \fone) \} \bigr\}$.
Now we consider $\forall x_2 : \psi''$. $\varall[\forall x_2 : \psi''] = \{x_2\}$, $\varex[\forall x_2 : \psi''] = \{y_1\}$, $\varfreesupp[\forall x_2 : \psi''] = \varfree[\forall x_2 : \psi''] = \{x_1\}$. We use \eqref{th:semantics:forall} to construct $\semth{\forall x_2 : \psi''}$. In principle, there are three possible choices $s_0 \in \semth{\psi''}$ with $s_0(x_2) = \fzero$ and three possible choices $s_1 \in \semth{\psi''}$ with $s_1(x_2) = \fone$. Due to the constraint $s_0(x_1) = s_1(x_1)$ in the third line of \eqref{th:semantics:forall}, there remain only four possible combinations $s^{(1)}_0, s^{(1)}_1, \ldots, s^{(4)}_0, s^{(4)}_1$: \begin{itemize} \item $\bigl(s^{(1)}_0(x_1), s^{(1)}_0(x_2), s^{(1)}_0(y_1)\bigr) = (\fzero, \fzero, \fzero)$, \\
$\bigl(s^{(1)}_1(x_1), s^{(1)}_1(x_2), s^{(1)}_1(y_1)\bigr) = (\fzero, \fone, \fone)$, leading to \\
$t^{(1)}(x_1) = \fzero$, $t^{(1)}(y_1) = \mathrm{ITE}\bigl(x_2, s^{(1)}_1(y_1), s^{(1)}_0(y_1)\bigr) = \mathrm{ITE}(x_2, \fone, \fzero) = x_2$, \item $\bigl(s^{(2)}_0(x_1), s^{(2)}_0(x_2), s^{(2)}_0(y_1)\bigr) = (\fzero, \fzero, \fone)$, \\
$\bigl(s^{(2)}_1(x_1), s^{(2)}_1(x_2), s^{(2)}_1(y_1)\bigr) = (\fzero, \fone, \fone)$, leading to \\
$t^{(2)}(x_1) = \fzero$, $t^{(2)}(y_1) = \mathrm{ITE}\bigl(x_2, s^{(2)}_1(y_1), s^{(2)}_0(y_1)\bigr) = \mathrm{ITE}(x_2, \fone, \fone) = \fone$, \item $\bigl(s^{(3)}_0(x_1), s^{(3)}_0(x_2), s^{(3)}_0(y_1)\bigr) = (\fone, \fzero, \fzero)$, \\
$\bigl(s^{(3)}_1(x_1), s^{(3)}_1(x_2), s^{(3)}_1(y_1)\bigr) = (\fone, \fone, \fzero)$, leading to \\
$t^{(3)}(x_1) = \fone$, $t^{(3)}(y_1) = \mathrm{ITE}\bigl(x_2, s^{(3)}_1(y_1), s^{(3)}_0(y_1)\bigr) = \mathrm{ITE}(x_2, \fzero, \fzero) = \fzero$, \item $\bigl(s^{(4)}_0(x_1), s^{(4)}_0(x_2), s^{(4)}_0(y_1)\bigr) = (\fone, \fzero, \fzero)$, \\
$\bigl(s^{(4)}_1(x_1), s^{(4)}_1(x_2), s^{(4)}_1(y_1)\bigr) = (\fone, \fone, \fone)$, leading to \\
$t^{(4)}(x_1) = \fone$, $t^{(4)}(y_1) = \mathrm{ITE}\bigl(x_2, s^{(4)}_1(y_1), s^{(4)}_0(y_1)\bigr) = \mathrm{ITE}(x_2, \fone, \fzero) = x_2$. \end{itemize} Altogether, $\semth{\forall x_2 : \psi''} = \{t^{(1)}, t^{(2)}, t^{(3)}, t^{(4)}\}$.
Finally, for $\psi = \forall x_1 \forall x_2 : \psi''$ we have $\varall[\psi] = \{x_1, x_2\}$, $\varex[\psi] = \{y_1\}$, $\varfreesupp[\psi] = \varfree[\psi] = \emptyset$. For the choice of $s_0$ and $s_1$ in \eqref{th:semantics:forall} we need $s_0(x_1) = \fzero$, $s_1(x_1) = \fone$ and, due to $x_1 \notin D_{y_1}$, $s_0(y_1) = s_1(y_1)$ (see the third line of \eqref{th:semantics:forall}). Thus, the only possible choice is $s_0 \colonequals t^{(1)}$ and $s_1 \colonequals t^{(4)}$; $t(y_1) \colonequals x_2$ is the only possible Skolem function for $\psi$. This result agrees with the Skolem function computed using Definition~\ref{def:sem} in Example~\ref{ex1}. \end{example}
\subsection{Equivalent and Equisatisfiable Non-Closed Non-Prenex DQBFs}
\noindent Now we define rules for replacing DQBFs by equivalent and equisatisfiable ones. We start with the definition of equivalence and equisatisfiability:
\begin{definition}[Equivalence and equisatisfiability]
\label{def:equivalence}
Let $\psi_1,\psi_2\in\npnc$ be DQBFs over $V$. We call them \emph{equivalent} (written $\psi_1\equiv\psi_2$)
if $\sem{\psi_1} = \sem{\psi_2}$; they are
\emph{equisatisfiable} (written $\psi_1\approx\psi_2$)
if $\sem{\psi_1} = \emptyset\Leftrightarrow\sem{\psi_2} = \emptyset$ holds. \end{definition}
Now we prove Theorem~\ref{th:rules}, which is the DQBF counterpart to Theorem~\ref{th:qbfquantifierlocalization} for QBF.
\begin{restatable}{theorem}{rules}
\label{th:rules}
Let $\op\in\{{\land}, {\lor}\}$
and all formulas occurring on the left- and right-hand sides of the following rules
be DQBFs in $\npnc$ over the same set $V$ of variables.
We assume that $x'$ and $y'$ are fresh variables,
which do not occur in $\varphi$, $\varphi_1$, and $\varphi_2$.
The following equivalences and equisatisfiabilities hold for all DQBFs in NNF.
\begin{small}
\begin{subequations}
{\allowdisplaybreaks\begin{align}
\exists y(D_y): \varphi &\quad\equiv\quad \varphi
\label{equiv:indep_exists} \\
\forall x : \varphi &\quad\approx\quad \varphi^{-x}
\quad\text{ if } x \notin \var[\varphi]
\label{equiv:indep} \\
\forall x : \varphi &\quad\equiv\quad \varphi[\sfrac{0}{x}] \land \varphi[\sfrac{1}{x}]
\quad\text{ if } \varall[\varphi] = \varex[\varphi] = \emptyset
\label{th:rules3} \\
\exists y(D_y) : \varphi &\quad \approx \quad \varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]
\quad\text{ if } \varall[\varphi] = \varex[\varphi] = \emptyset\footnotemark
\label{equiv:exists} \\
\forall x:(\varphi_1\land\varphi_2) &\quad\approx\quad \bigl(\forall x:\varphi_1\bigr)\land\bigl(\forall x':\varphi_2[\sfrac{x'}{x}]\bigr)\footnotemark
\label{equiv:forall_and} \\
\forall x:(\varphi_1 \land \varphi_2) & \quad \approx \quad \bigl(\varphi_1^{-x} \land (\forall x:\varphi_2)\bigr)
\quad\text{ if } x\notin\var[\varphi_1]
\label{equiv:forall_and2} \\
\forall x:(\varphi_1 \op \varphi_2) & \quad\equiv\quad \bigl(\varphi_1 \op (\forall x:\varphi_2)\bigr)
\quad\text{ if } x\notin\var[\varphi_1] \text{ and } x\notin D_y \text{ for all } y\in\varex[\varphi_1]
\label{equiv:forall_or} \\
\exists y(D_y):(\varphi_1\lor\varphi_2) &\quad\approx\quad \bigl(\exists y(D_y):\varphi_1\bigr)
\lor\bigl(\exists y'(D_y):\varphi_2[\sfrac{y'}{y}]\bigr)
\label{equiv:exists_or} \\
\exists y(D_y):(\varphi_1\op\varphi_2) &\quad\equiv\quad \bigl(\varphi_1\op(\exists y(D_y):\varphi_2)\bigr)
\quad\text{ if } y\notin\var[\varphi_1]
\label{equiv:exists_and} \\
\exists y_1(D_{y_1})\,\exists y_2(D_{y_2}):\varphi &\quad\equiv\quad \exists y_2(D_{y_2})\,\exists y_1(D_{y_1}):\varphi
\label{equiv:exists_exists} \\
\forall x_1\,\forall x_2:\varphi &\quad\equiv\quad \forall x_2\,\forall x_1:\varphi
\label{equiv:forall_forall} \\
\forall x\,\exists y(D_y):\varphi &\quad\equiv\quad \exists y(D_y)\,\forall x:\varphi
\quad\text{ if } x\notin D_y.
\label{equiv:forall_exists}
\end{align}}
\end{subequations}
\end{small} \end{restatable} \addtocounter{footnote}{-1} \footnotetext{A generalization of this rule (which is not needed in this paper, however) holds also for the case that $\varall[\varphi] = \varex[\varphi] = \emptyset$ does not hold. In this case the quantified variables in $\varphi[\sfrac{0}{y}]$ or in $\varphi[\sfrac{1}{y}]$ have to be renamed in order to satisfy the conditions of Definition~\ref{def:syntax}.} \addtocounter{footnote}{1} \footnotetext{By $\varphi_2[\sfrac{x'}{x}]$ we mean that all occurrences of $x$ are replaced by $x'$, including the occurrences in dependency sets.}
Note that the duality of $\exists$ and $\forall$ under negation as in QBF ($\exists \varphi \equiv \neg\forall\neg\varphi$) does not hold for DQBF as DQBFs are not syntactically closed under negation~\cite{BalabanovCJ14}.
\begin{example} \label{ex3} We give an example that shows that --~in contrast to \eqref{qbfapprox:q_op} of Theorem~\ref{th:qbfquantifierlocalization} for QBF~-- the condition $x\notin D_y$ for all $y\in\varex[\varphi_1]$ is really needed in \eqref{equiv:forall_or} if $\op = \lor$. We consider the satisfiable DQBF $\psi \colonequals \forall x_1 \forall x_2 : \bigl((x_1\equiv x_2) \vee (\exists y_1(x_2) : (x_1\not\equiv y_1))\bigr)$ from Example~\ref{ex1} again. First of all, neglecting the above condition, we could transform $\psi$ into $\psi' \colonequals \forall x_1 : \bigl((\forall x_2 : (x_1\equiv x_2)) \vee (\exists y_1(x_2) : (x_1\not\equiv y_1))\bigr)$, which is not well-formed according to Definition~\ref{def:syntax}. However, by renaming $x_2$ into $x'_2$ in the dependency set of $y_1$ we would arrive at a well-formed DQBF $\psi'' \colonequals \forall x_1 : \bigl((\forall x_2 : (x_1\equiv x_2)) \vee (\exists y_1(x'_2) : (x_1\not\equiv y_1))\bigr)$. According to Definition~\ref{def:skolem_function_candidates} the only possible Skolem function candidates for $y_1$ in $\psi''$ are $\fzero$ and $\fone$. It is easy to see that neither inserting $\fzero$ nor $\fone$ for $y_1$ turns $\psi''$ into a tautology, thus $\psi''$ is unsatisfiable and therefore neither equivalent to nor equisatisfiable with $\psi$. \end{example}
Whereas the proof of Theorem~\ref{th:qbfquantifierlocalization} for QBF is rather easy using the equisatisfiabilities $\exists y : \varphi \approx \varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]$ and $\forall x : \varphi \approx \varphi[\sfrac{0}{x}] \land \varphi[\sfrac{1}{x}]$, the proof of Theorem~\ref{th:rules} is more involved. We provide a detailed proof in \ref{app:ruleproof}.
It is also important to note that some of the rules in Theorem~\ref{th:rules} establish equivalences and some establish equisatisfiabilities only. Whereas this might seem to be negligible if we are only interested in the question whether a formula is satisfiable or not, it turns out to be essential in the context of Section~\ref{sec:algorithm} where we replace \emph{subformulas} by equivalent or equisatisfiable formulas. Replacing a subformula of a formula $\psi$ by an equisatisfiable subformula does not necessarily preserve satisfiability / unsatisfiability. This observation is trivially true already for pure propositional logic (\eg $x_1$ is equisatisfiable with $x_1 \lor x_2$, but $x_1 \land \neg x_1$ is \emph{not} equisatisfiable with $(x_1 \lor x_2) \land \neg x_1$). Here we show a more complex example for DQBFs: \begin{example}[label=ex:counter] Let us consider the DQBF $\psi \colonequals \exists y(\emptyset): \bigl((x \land y) \lor (\neg x \land \neg y)\bigr)$, which is, according to \eqref{equiv:exists_or}, equisatisfiable with $\psi' \colonequals \bigl(\exists y(\emptyset) : (x \land y)\bigr) \lor \bigl(\exists y'(\emptyset): (\neg x \land \neg y')\bigr)$. $\forall x : \psi$ is unsatisfiable, since for the choice $s(y) = \fzero$ we have $s(\forall x : \psi) \equiv \neg x$ and for the choice $s(y) = \fone$ we have $s(\forall x : \psi) \equiv x$, \ie for both possible choices for the Skolem function candidates we do not obtain a tautology. However, $\forall x : \psi'$ is satisfiable by $s(y) = \fone$ and $s(y') = \fzero$. \end{example}
It is easy to see that situations like in Example~\ref{ex:counter} do not occur when we replace \emph{equivalent} subformulas:
\begin{theorem} \label{th:equiv_subformulas} Let $\psi$ be a DQBF and $\psi_1 \subformula \psi$ be a subformula of $\psi$. Let $\psi_2$ be a DQBF that is equivalent to $\psi_1$, $\varex[\psi_1] = \varex[\psi_2]$, and each existential variable $y$ has the same dependency set in $\psi_2$ as in $\psi_1$. Then the DQBF $\psi' \colonequals \psi[\sfrac{\psi_2}{\psi_1}]$, which results from replacing $\psi_1$ by $\psi_2$, is equivalent to $\psi$. \end{theorem}
\begin{proofsketch} The proof easily follows from the fact that the set of Skolem functions is identical for equivalent subformulas and from the recursive characterization of the semantics of DQBFs in Theorem~\ref{th:semantics}. If the existential variables in $\psi_1$ and $\psi_2$ as well as their dependency sets are identical, then the same holds for all $\psi'_1 \sqsubseteq \psi$ and $\psi'_2 \sqsubseteq \psi'$ with $\psi_1' \not\sqsubseteq \psi_1$ and $\psi_2' \not\sqsubseteq \psi_2$. We make use of this condition in part \eqref{th:semantics:forall} of the inductive proof that shows $\psi \equiv \psi'$. Assume $\psi_1\ \subformula\ \forall v:\varphi$. In part \eqref{th:semantics:forall}, free and existential variables of $\forall v:\varphi$ (resp.~of $\forall v:\varphi[\sfrac{\psi_2}{\psi_1}]$) are handled differently and therefore it is not enough to inductively assume that the Skolem functions for $\varphi$ and $\varphi[\sfrac{\psi_2}{\psi_1}]$ are identical, but existential / free variables should also not have ``changed their type''. Moreover, existential variables with different dependency sets are handled differently (see last two lines of \eqref{th:semantics:forall}), so we also have to assume that the dependency set of each existential variable $y$ in $\psi_1$ is the same as its dependency set in $\psi_2$. \qed \end{proofsketch}
\begin{remark} Theorem~\ref{th:equiv_subformulas} says that it is safe to apply rules \eqref{th:rules3}, \eqref{equiv:forall_or}, \eqref{equiv:exists_and}, \eqref{equiv:exists_exists}, \eqref{equiv:forall_forall}, and \eqref{equiv:forall_exists} to subformulas as well. Theorem~\ref{th:equiv_subformulas} does not imply this for rule~\eqref{equiv:indep_exists}, since the sets of existential variables of the left-hand side formula and the right-hand side formula are different. \end{remark}
Since we are still interested in obtaining equisatisfiable formulas by replacing equisatisfiable subformulas, we need to have a closer look at the rules \eqref{equiv:indep_exists}, \eqref{equiv:indep}, \eqref{equiv:exists}, \eqref{equiv:forall_and}, \eqref{equiv:forall_and2} and \eqref{equiv:exists_or}. Example~\ref{ex:counter} already shows that we will not be able to achieve our goal in all cases without considering additional conditions.
We start with rule \eqref{equiv:indep_exists}. With the restriction $y\notin\var[\varphi]$ rule~\eqref{equiv:indep_exists} can simply be generalized to subformulas: \begin{theorem} \label{th:equiv:indep_exists}
Let $\psi \in \npnc$ be a DQBF over $V$
and let $\exists y(D_y): \varphi\ \subformula\ \psi$ be a subformula of $\psi$
with $y\notin\var[\varphi]$.
Then $\psi \approx \psi'$ where $\psi'$ results from $\psi$ by replacing the subformula
$\exists y(D_y): \varphi$ by $\varphi$. \end{theorem} \begin{proof} For each $s \in \sem{\psi'}$ we have $s \in \sem{\psi}$ and $\vDash s(\psi')$ implies $\vDash s(\psi)$.
Now assume $\sem{\psi} \neq \emptyset$ and $s \in \sem{\psi}$ with $\vDash s(\psi)$. Define $s' \in \sfunc{\psi'}$ by $s'(y) \colonequals c$ for some $c \in \{\fzero, \fone\}$ and $s'(v) \colonequals s(v)$ for $v\neq y$. Since $y\notin\var[\varphi]$, the only occurrence of $y$ in $\psi$ is in $\exists y(D_y)$ due to the rules in Definition~\ref{def:syntax} and there is no occurrence of $y$ in $\psi'$, \ie $s(\psi) = s'(\psi')$. Thus $\vDash s(\psi)$ implies $\vDash s'(\psi')$. \end{proof}
Next we consider rule \eqref{equiv:indep}. Although this rule establishes equisatisfiability only, it may be generalized to the replacement of subformulas:
\begin{theorem} \label{th:equiv:indep}
Let $\psi \in \npnc$ be a DQBF
and let $\forall x : \varphi\ \subformula\ \psi$ be a subformula of $\psi$
with $x\notin\var[\varphi]$.
Then $\psi \approx \psi'$ where $\psi'$ results from $\psi$ by replacing the subformula
$\forall x : \varphi$ by $\varphi^{-x}$. \end{theorem} \begin{proof}
If $\sem{\psi'} \neq \emptyset$, then for each $s \in \sem{\psi'}$
with $\vDash s(\psi')$ we also have $\vDash s(\psi)$ and $s \in \sem{\psi}$.
$s$ is a Skolem function candidate for $\psi$ as well, since the supports
of the Skolem function candidates for $\psi$ cannot be more restricted than
those for $\psi'$.
Now assume $\sem{\psi} \neq \emptyset$ and $s \in \sem{\psi}$.
Choose $s'$ with $s'(v) \colonequals s(v)[\sfrac{c}{x}]$ for all existential
variables $v \in \varex[\varphi]$ for an arbitrary constant $c \in \{0, 1\}$
and $s'(v) \colonequals s(v)$ otherwise.
It is clear that $s'$ is a Skolem function candidate for $\psi'$.
Now consider an arbitrary assignment $\mu' \in \assign(\varall[\psi'])$.
Choose $\mu \in \assign(\varall[\psi])$ by $\mu(v) \colonequals \mu'(v)$ for all
$v \in \varall[\psi']$ and $\mu(x) \colonequals c$.
Because of $x\notin\var[\varphi]$, $x$ occurs in $s(\psi)$ only in Skolem functions.
Therefore $\mu'\bigl(s'(\psi')\bigr) = \mu\bigl(s(\psi)\bigr)$ and $\mu'\bigl(s'(\psi')\bigr) = 1$,
since $s(\psi)$ is a tautology.
This proves that $s'(\psi')$ is a tautology as well and $\sem{\psi'} \neq \emptyset$. \end{proof}
Next, we look into rule \eqref{equiv:exists}. Theorem~\ref{th:rules2} is a variant of this rule which is suitable for replacements of subformulas. Here we have the first situation that we need additional conditions for the proof to go through in the more general context of replacing subformulas. Theorem~\ref{th:rules2} is strongly needed for our algorithm taking advantage of quantifier localization. It shows that, under certain conditions, we can do symbolic quantifier elimination for non-prenex DQBFs as it is known from QBFs: \begin{restatable}{theorem}{rulesTwo}
\label{th:rules2}
Let $\psi \in \npnc$ be a DQBF and let $\exists y(D_y): \varphi\ \subformula\ \psi$ be a subformula of $\psi$
such that $\varall[\varphi] = \varex[\varphi] = \emptyset$
and
$\var[\varphi] \subseteq D_y \cup \varfree[\psi] \cup \{v \in \varex[\psi] \; | \; D_v \subseteq D_y\}$.
Then $\psi \approx \psi'$ where $\psi'$ results from $\psi$ by replacing the subformula
$\exists y(D_y): \varphi$ by $\varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]$. \end{restatable}
The proof of Theorem~\ref{th:rules2} is somewhat involved and uses results from \cite{Jiang09}. \begin{proofsketch} We show equisatisfiability by proving that $\sem{\psi'} \neq \emptyset$ implies $\sem{\psi} \neq \emptyset$ and vice versa.
First assume that there is a Skolem function $s' \in \sem{\psi'}$ with $\vDash s'(\psi')$. We define $s \in \sfunc{\psi}$ by $s(v) \colonequals s'(v)$ for all $v \in \varex[\psi'] \cup \varfree[\psi'] \setminus \{y\}$ and $s(y) \colonequals s'(\varphi[\sfrac{1}{y}])$. The fact that $s \in \sfunc{\psi}$ follows from the restriction that
$\varphi$ contains only variables from $D_y \cup \varfree[\psi] \cup \{v \in \varex[\psi] \; | \; D_v \subseteq D_y\}$, \ie $\support\bigl(s(y)\bigr) = \support\bigl(s'(\varphi[\sfrac{1}{y}])\bigr) \subseteq D_y$. $\vDash s(\psi)$ follows by some rewriting from a result in \cite{Jiang09} proving that quantifier elimination can be done by composition, \ie $\varphi[\sfrac{\varphi[\sfrac{1}{y}]}{y}]$ is equivalent to $\varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]$.
Now assume $s \in \sem{\psi}$ with $\vDash s(\psi)$ and define $s'$ just by removing $y$ from the domain of $s$. In a first step we change $s$ into $s''$ by replacing $s(y)$ with $s''(y) \colonequals s'(\varphi)[\sfrac{1}{y}]$. We conclude $\vDash s''(\psi)$ from \cite{Jiang09} and monotonicity properties of $\psi$ in negation normal form. In a second step we use \cite{Jiang09} again to show that $s''(\psi)$ is equivalent to $s'(\psi')$. Thus finally $\vDash s'(\psi')$. The detailed proof can be found in \ref{app:rules2}.\qed \end{proofsketch}
The generalization of rule \eqref{equiv:forall_and} to replacements of subformulas is formulated in Theorem~\ref{th:forall_and}. Here we do not need any additional conditions:
\begin{restatable}{theorem}{forallAnd}
\label{th:forall_and}
Let $\psi \in \npnc$ be a DQBF and let $\psi_1\colonequals \forall x:(\varphi_1\land\varphi_2)\ \subformula\ \psi$ be a subformula of $\psi$.
Then $\psi \approx \psi'$ where $\psi'$ results from $\psi$ by replacing the subformula
$\psi_1$ by $\psi_2\colonequals \bigl(\forall x:\varphi_1\bigr)\land\bigl(\forall x':\varphi_2[\sfrac{x'}{x}]\bigr)$. \end{restatable}
Before proving Theorem~\ref{th:forall_and}, we consider a lemma which will be helpful for the proofs of Theorems~\ref{th:forall_and}, \ref{th:equisat:forall_and}, and \ref{th:equisat:exists_or}.
\begin{lemma}\label{lemma:monotonic}
Let $\varphi_1 \subformula \varphi$ be quantifier-free Boolean formulas
such that $\varphi_1$ is not in the scope of any negation from $\varphi$.
Let $\varphi' \colonequals \varphi[\sfrac{\varphi_2}{\varphi_1}]$ be
the Boolean formula resulting from the replacement of $\varphi_1$ by the
quantifier-free Boolean formula $\varphi_2$ and let
$\mu \in \assign(\var[\varphi] \cup \var[\varphi'])$.
If $\mu(\varphi) = 1$ and $\mu(\varphi') = 0$, then $\mu(\varphi_1) = 1$ and
$\mu(\varphi_2) = 0$. \end{lemma}
\begin{proof}
By assumptions, $\varphi_1$ is only in the scope of conjunctions and disjunctions.
Due to monotonicity of conjunctions and disjunctions we have
$\mu(\varphi[\sfrac{0}{\varphi_1}])\leq\mu(\varphi[\sfrac{1}{\varphi_1}])$.
Moreover, by construction, we have
$\varphi'[\sfrac{0}{\varphi_2}] = \varphi[\sfrac{0}{\varphi_1}]$ and
$\varphi'[\sfrac{1}{\varphi_2}] = \varphi[\sfrac{1}{\varphi_1}]$.
Further, $\mu(\varphi) = \mu(\varphi[\sfrac{c}{\varphi_1}])$ if
$\mu(\varphi_1) = c \in \{0, 1\}$, and
$\mu(\varphi') = \mu(\varphi'[\sfrac{c}{\varphi_2}])$ if
$\mu(\varphi_2) = c \in \{0, 1\}$.
$\mu(\varphi_1) = \mu(\varphi_2)$ is not possible, since we assumed
$\mu(\varphi) = 1$ and $\mu(\varphi') = 0$, \ie
$\mu(\varphi) \neq \mu(\varphi')$.
From $\mu(\varphi_1) = 0$ and $\mu(\varphi_2) = 1$ we could conclude
$\mu(\varphi) = \mu(\varphi[\sfrac{0}{\varphi_1}]) \leq
\mu(\varphi[\sfrac{1}{\varphi_1}]) =
\mu(\varphi'[\sfrac{1}{\varphi_2}]) =
\mu(\varphi')$, which also contradicts $\mu(\varphi) = 1$ and $\mu(\varphi') = 0$. \end{proof}
\begin{proofsketch}[Theorem~\ref{th:forall_and}] The proof of Theorem~\ref{th:forall_and} uses Lemma~\ref{lemma:monotonic} to lift rule~\eqref{equiv:forall_and} to the more general case of replacements of subformulas. It is easy, but rather technical, and can be found in \ref{app:forall_and}.\qed \end{proofsketch}
The next theorem shows that rule \eqref{equiv:forall_and2} can also be generalized to replacements of subformulas without needing additional conditions. \begin{theorem}
\label{th:equisat:forall_and}
Let $\psi \in \npnc$ be a DQBF and let $\psi_1 \colonequals
\forall x:(\varphi_1 \land \varphi_2)\ \subformula\ \psi$ be a subformula of $\psi$
with $x\notin\var[\varphi_1]$.
Then $\psi \approx \psi'$ where $\psi'$ results from $\psi$ by replacing the subformula
$\psi_1$ by $\psi_2 \colonequals \bigl(\varphi_1^{-x} \land (\forall x:\varphi_2)\bigr)$. \end{theorem}
\begin{proof} The proof easily follows from Theorem~\ref{th:forall_and} and Theorem~\ref{th:equiv:indep}. By Theorem~\ref{th:forall_and}, $\psi \approx \psi''$ with $\psi'' \colonequals \psi[\sfrac{\psi_3}{\psi_1}]$ and $\psi_3 \colonequals \bigl(\forall x':\varphi_1[\sfrac{x'}{x}]\bigr) \land \bigl(\forall x:\varphi_2\bigr)$. Since $x\notin\var[\varphi_1]$, we have $x'\notin\var[{\varphi_{1}[\sfrac{x'}{x}]}]$, and due to Theorem~\ref{th:equiv:indep} $\psi'' \approx \psi'''$ with $\psi''' \colonequals \psi[\sfrac{\psi_4}{\psi_1}]$ and $\psi_4 \colonequals \varphi_1[\sfrac{x'}{x}]^{-x'} \land \bigl(\forall x:\varphi_2\bigr)$. Because of $\varphi_1[\sfrac{x'}{x}]^{-x'} = \varphi_1^{-x}$ we have $\psi_4 = \psi_2$ and thus $\psi \approx \psi'$. \end{proof}
Finally, in case of rule~\eqref{equiv:exists_or} we need non-trivial additional restrictions to preserve satisfiability / unsatisfiability of DQBFs where a subformula $\exists y(D_y):(\varphi_1\lor\varphi_2)$ is replaced by $\bigl(\exists y(D_y):\varphi_1\bigr) \lor \bigl(\exists y'(D_y):\varphi_2[y'/y]\bigr)$ (or vice versa).
\begin{theorem}
\label{th:equisat:exists_or}
Let $\psi \in \npnc$ be a DQBF and let
$\psi_1 \colonequals \exists y(D_y):(\varphi_1\vee\varphi_2) \sqsubseteq \psi$ be a subformula of $\psi$. Further, let
\begin{align*}
\varocc[\varphi_1] &\colonequals \Big[(\varall[\psi] \cap \var[\varphi_1]) \cup \bigcup_{v \in \varex[\psi] \cap \var[\varphi_1]} (\varall[\psi] \cap D_v) \Big] \setminus D_y,\\
\varocc[\varphi_2] &\colonequals \Big[(\varall[\psi] \cap \var[\varphi_2]) \cup \bigcup_{v \in \varex[\psi] \cap \var[\varphi_2]} (\varall[\psi] \cap D_v)\Big] \setminus D_y,\\
\varocc[\psi \setminus \psi_1] &\colonequals \Big[\varall[{\psi[\sfrac{0}{\psi_1}]}] \cup \bigcup_{v \in \varex[{\psi[\sfrac{0}{\psi_1}]}]} (\varall[\psi] \cap D_v)\Big]\,.
\end{align*}
If $\varocc[\varphi_1] \cap \varocc[\varphi_2] = \emptyset$ and
$\varocc[\varphi_1] \cap \varocc[\psi \setminus \psi_1] = \emptyset$
(or $\varocc[\varphi_2] \cap \varocc[\psi \setminus \psi_1] = \emptyset$),
then $\psi\approx\psi'$ where $\psi' \colonequals \psi[\sfrac{\psi_2}{\psi_1}]$ results from $\psi$ by replacing
$\psi_1$ by
$\psi_2 \colonequals (\exists y(D_y):\varphi_1)\vee(\exists y'(D_y):\varphi_2[\sfrac{y'}{y}])$
with $y'$ being a fresh variable. \end{theorem}
$\varocc[\varphi_1]$ ($\varocc[\varphi_2]$) is the set of all universal variables occurring in $\varphi_1$ ($\varphi_2$) or in dependency sets of existential variables occurring in $\varphi_1$ ($\varphi_2$), reduced by the dependency set $D_y$ of $y$. $\varocc[\psi \setminus \psi_1]$ is the set of universal variables occurring in $\psi$ outside the subformula $\psi_1$ or in dependency sets of existential variables occurring in $\psi$ outside the subformula $\psi_1$. In the proof we use that $\varocc[\varphi_1] \cap \varocc[\varphi_2] = \emptyset$ implies that --~after replacing existential variables by Skolem functions~-- $\varphi_1$ and $\varphi_2$ do not share universal variables other than those from $D_y$.
Before we present the proof of Theorem~\ref{th:equisat:exists_or}, we consider two examples to motivate that it is necessary to add the conditions in the theorem.
\begin{example}[continues=ex:counter] The first example is again the formula $\forall x \exists y(\emptyset): \bigl((x \land y) \lor (\neg x \land \neg y)\bigr)$, which showed that rule~\eqref{equiv:exists_or} cannot be always applied for replacing subformulas without changing the satisfiability of the formula. We now demonstrate that one of the conditions from Theorem~\ref{th:equisat:exists_or} does indeed not hold for this formula. Using the same notation as in Theorem~\ref{th:equisat:exists_or} we have $\psi_1 \colonequals \exists y(\emptyset) :\bigl((x \land y) \lor (\neg x \land \neg y)\bigr)$ with $\varphi_1 \colonequals (x \land y)$ and $\varphi_2 \colonequals (\neg x \land \neg y)$. Then $\varocc[\varphi_1] = \varocc[\varphi_2] = \{x\}$, which means that the condition $\varocc[\varphi_1] \cap \varocc[\varphi_2] = \emptyset$ is not fulfilled and Theorem~\ref{th:equisat:exists_or} cannot be applied. \end{example}
To show that for the correctness of the theorem it is also needed to add condition $\varocc[\varphi_1] \cap \varocc[\psi \setminus \psi_1] = \emptyset$ (or $\varocc[\varphi_2] \cap \varocc[\psi \setminus \psi_1] = \emptyset$), we give another example:
\begin{example}[label=ex:counter2] Let $\psi_1 \colonequals \exists y(\emptyset):\bigl((x_1 \land y) \lor (x_2 \land \neg y)\bigr)$ be a DQBF with $\varphi_1 \colonequals (x_1 \land y)$ and $\varphi_2 \colonequals (x_2 \land \neg y)$. Let $\psi_2 \colonequals \bigl(\exists y (\emptyset) :(x_1 \land y) \lor \exists y' (\emptyset): (x_2 \land \neg y')\bigr)$ be the DQBF that results from $\psi_1$ by applying rule~\eqref{equiv:exists_or}. Formula $\psi \colonequals \forall x_1 \forall x_2 : \psi_1 \lor (\neg x_1 \land \neg x_2)$ is then unsatisfiable, because for $s_0(y) = \fzero$ we have $s_0(\psi) \equiv x_2 \lor (\neg x_1 \land \neg x_2)$ and for $s_1(y) = \fone$ we have $s_1(\psi) \equiv x_1 \lor (\neg x_1 \land \neg x_2)$, \ie for both possible choices for the Skolem function candidates we do not obtain a tautology. However, formula $\psi' \colonequals \forall x_1 \forall x_2 : \psi_2 \lor (\neg x_1 \land \neg x_2)$ is satisfiable by $s(y) = \fone$ and $s(y') = \fzero$, since $s(\psi') \equiv (x_1 \lor x_2) \lor (\neg x_1 \land \neg x_2) \equiv \fone$.
Checking the conditions of Theorem~\ref{th:equisat:exists_or}, we can see that $\varocc[\varphi_1] = \{x_1\}$ and $\varocc[\varphi_2] = \{x_2\}$, thus $\varocc[\varphi_1] \cap \varocc[\varphi_2] = \emptyset$; however $\varocc[\psi \setminus \psi_1] = \{x_1, x_2\}$, which means that $\varocc[\varphi_1] \cap \varocc[\psi \setminus \psi_1] \neq \emptyset$ as well as $\varocc[\varphi_2] \cap \varocc[\psi \setminus \psi_1] \neq \emptyset$. Thus Theorem~\ref{th:equisat:exists_or} cannot be applied. \end{example}
Now we come to the proof which shows that the conditions in the theorem are sufficient. After the proof, we will give an illustration of the construction by considering Example~\ref{ex:counter2} again.
\begin{proof}
In this proof we assume that the condition $\varocc[\varphi_1] \cap \varocc[\psi \setminus \psi_1] = \emptyset$
holds. The proof with condition $\varocc[\varphi_2] \cap \varocc[\psi \setminus \psi_1] = \emptyset$ can be
done with exactly the same arguments.
To prove the correctness of Theorem~\ref{th:equisat:exists_or} we show that
$\sem{\psi}\neq\emptyset$ iff $\sem{\psi'}\neq\emptyset$.
First, assume $\sem{\psi}\neq\emptyset$ and $s\in\sem{\psi}$.
The function $s'$ with $s'(y')\colonequals s(y)$ and $s'(v) \colonequals s(v)$ otherwise
is a valid Skolem function for $\psi'$ as well,
since $s'(\psi') = s(\psi)$.
\noindent Now assume that $\sem{\psi'}\neq\emptyset$ and let $s'\in\sem{\psi'}$.
We construct a Skolem function candidate $s$ of $\psi$ as follows:
$s(v) \colonequals s'(v)$ for all $v \in \bigl(\varex[\psi] \setminus \{y\}\bigr) \cup \varfree[\psi]$.
The definition of $s(y)$
for each $\mu \in \assign(\varall[\psi])$
is derived from $s'(y)$ and $s'(y')$:
\[
s(y)(\mu) \colonequals \begin{cases}
s'(y)(\mu) & \text{if } \mu'\bigl(s'(\varphi_1)\bigr) = 1 \
\forall \mu' \in \assign(\varall[\psi']) \text{ with } \mu'|_{D_y} = \mu|_{D_y}; \\
s'(y')(\mu) & \mbox{otherwise.}
\end{cases}
\]
Note that by this definition $s(y)$ only depends on universal variables from $D_y$, \ie we have defined
a valid Skolem function candidate according to Definition~\ref{def:skolem_function_candidates}.
We prove that $s$ is a Skolem function for $\psi$ by contradiction:
Assume that there exists $\mu \in \assign(\varall[\psi]) = \assign(\varall[\psi'])$ with
$\mu\bigl(s(\psi)\bigr) = 0$. Moreover, we have $\mu\bigl(s'(\psi')\bigr) = 1$, since $s'(\psi')$ is a tautology.
According to Lemma~\ref{lemma:monotonic}, $\mu\bigl(s'(\psi')\bigr) = 1$ and $\mu\bigl(s(\psi)\bigr) = 0$ implies
$\mu\bigl(s'(\psi_2)\bigr) = 1$ and $\mu\bigl(s(\psi_1)\bigr) = 0$.
Remember that we have
$\psi_1 = \exists y(D_y):(\varphi_1\vee\varphi_2)$ and
$\psi_2 = (\exists y(D_y):\varphi_1)\vee(\exists y'(D_y):\varphi_2[\sfrac{y'}{y}])$.
Now we consider $\mu\bigl(s'(\varphi_1)\bigr)$ and $\mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr)$
and differentiate between four cases: \begin{description}
\item[Case 1:] $\mu\bigl(s'(\varphi_1)\bigr) = 0$, $\mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = 0$. \\
This would contradict $\mu\bigl(s'(\psi_2)\bigr) = \mu\bigl(s'(\varphi_1)\bigr) \lor \mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = 1$.
\item[Case 2:] $\mu\bigl(s'(\varphi_1)\bigr) = 1$, $\mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = 1$. \\
Since we either have $s(y)(\mu) = s'(y)(\mu)$ or $s(y)(\mu) = s'(y')(\mu)$, this
implies $\mu\bigl(s(\varphi_1)\bigr) = 1$ or $\mu\bigl(s(\varphi_2)\bigr) = 1$. In both cases we obtain
$\mu\bigl(s(\psi_1)\bigr) = \mu\bigl(s(\varphi_1)\bigr) \lor \mu\bigl(s(\varphi_2)\bigr) = 1$, which contradicts
$\mu\bigl(s(\psi_1)\bigr) = 0$.
\item[Case 3:] $\mu\bigl(s'(\varphi_1)\bigr) = 0$, $\mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = 1$. \\
From the definition of $s(y)$ we obtain $s(y)(\mu) = s'(y')(\mu)$
and therefore
$\mu\bigl(s(\varphi_2)\bigr) = \mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = 1$
and thus $\mu\bigl(s(\psi_1)\bigr) = \mu\bigl(s(\varphi_1)\bigr) \lor \mu\bigl(s(\varphi_2)\bigr) = 1$.
Again, this contradicts $\mu\bigl(s(\psi_1)\bigr) = 0$.
\item[Case 4:] $\mu\bigl(s'(\varphi_1)\bigr) = 1$, $\mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = 0$. \\
Our proof strategy is to show
\textbf{Fact 1:} $s(y)(\mu) = s'(y)(\mu)$.
\noindent Then we would have $\mu\bigl(s(\varphi_1)\bigr) = \mu\bigl(s'(\varphi_1)\bigr) = 1$ and thus
$\mu\bigl(s(\psi_1)\bigr) = \mu\bigl(s(\varphi_1)\bigr) \lor \mu\bigl(s(\varphi_2)\bigr) = 1$, which would again
contradict $\mu\bigl(s(\psi_1)\bigr) = 0$. \end{description}
So it remains to show Fact 1.
According to the definition of $s(y)$, $s(y)(\mu) = s'(y)(\mu)$
iff for all $\mu' \in \assign(\varall[\psi'])$ with $\mu'|_{D_y} = \mu|_{D_y}$
we have $\mu'\bigl(s'(\varphi_1)\bigr) = 1$.
Assume that this is not the case, \ie there is a $\mu' \in \assign(\varall[\psi'])$ with $\mu'|_{D_y} = \mu|_{D_y}$
and $\mu'\bigl(s'(\varphi_1)\bigr) = 0$.
Now we show that if this were the case, then we could construct another $\mu'' \in \assign(\varall[\psi'])$
with $\mu''\bigl(s'(\psi')\bigr) = 0$ which would contradict the fact that $s'(\psi')$ is a tautology.
Define $\mu'' \in \assign(\varall[\psi'])$ by
$\mu''(x) \colonequals \mu'(x)$ for all $x \in \varocc[\varphi_1]$, $\mu''(x) \colonequals \mu(x)$ otherwise.
Since $\mu'|_{D_y} = \mu|_{D_y}$, we have $\mu''|_{D_y} = \mu'|_{D_y} = \mu|_{D_y}$.
Since $s'(\varphi_1)$ only contains variables from $\varocc[\varphi_1]\cup D_y$ we have
$\mu''\bigl(s'(\varphi_1)\bigr) = \mu'\bigl(s'(\varphi_1)\bigr) = 0$.
Moreover, because $\varocc[\varphi_1] \cap \varocc[\varphi_2] = \emptyset$ (from the precondition of the theorem)
and $s'(\varphi_2[\sfrac{y'}{y}])$ contains only variables from $\varocc[\varphi_2] \cup D_y$, we have $\mu''\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = \mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = 0$.
Altogether we have
$\mu''\bigl(s'(\psi_2)\bigr) = \mu''\bigl(s'(\varphi_1)\bigr) \lor \mu''\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = 0$
and therefore $\mu''\bigl(s'(\psi')\bigr) = \mu''\bigl(s'(\psi'[\sfrac{0}{\psi_2}])\bigr)$.
Since $\mu''$ can differ from $\mu$ only for variables in $\varocc[\varphi_1]$
and those universal variables do not occur outside $s'(\psi_2)$ due to
the precondition $\varocc[\varphi_1] \cap \varocc[\psi \setminus \psi_1] = \emptyset$, we
further have $\mu''\bigl(s'(\psi'[\sfrac{0}{\psi_2}])\bigr) = \mu\bigl(s'(\psi'[\sfrac{0}{\psi_2}])\bigr)$.
By definition of $s$ as well as $\psi$ and $\psi'$, we have $\mu\bigl(s'(\psi'[\sfrac{0}{\psi_2}])\bigr)
= \mu\bigl(s(\psi[\sfrac{0}{\psi_1}])\bigr)$ which is $0$ by our initial assumption.
Taking the last equations together, we have $\mu''\bigl(s'(\psi')\bigr) = 0$ which is our announced contradiction to the fact
that $s'(\psi')$ is a tautology. This completes the proof of Fact 1.
In all four cases we were able to derive a contradiction and thus our assumption
that there exists $\mu \in \assign(\varall[\psi])$ with
$\mu\bigl(s(\psi)\bigr) = 0$ has to be wrong. Hence, $s(\psi)$ is a tautology and $\sem{\psi}\neq\emptyset$. \end{proof}
Now we illustrate the construction of the proof by demonstrating where the construction fails when the conditions of Theorem~\ref{th:equisat:exists_or} are not satisfied. To do so, we look into Example~\ref{ex:counter2} again.
\begin{example}[continues=ex:counter2] We look again at the DQBF $\psi \colonequals \forall x_1 \forall x_2 : \psi_1 \lor (\neg x_1 \land \neg x_2)$ with $\psi_1 \colonequals \exists y(\emptyset):(\varphi_1 \lor \varphi_2)$, $\varphi_1 \colonequals (x_1 \land y)$, and $\varphi_2 =(x_2 \land \neg y)$. $\psi'$ results from $\psi$ by replacing $\psi_1$ by $\psi_2 \colonequals \bigl(\exists y (\emptyset): (x_1 \land y)\bigr) \lor \bigl(\exists y' (\emptyset): (x_2 \land \neg y')\bigr)$.
We already observed that the conditions of Theorem~\ref{th:equisat:exists_or} are not satisfied for this DQBF, $\psi$ is not satisfiable, and $\psi'$ is satisfiable. The Skolem function candidate $s'$ with $s'(y) = \fone$ and $s'(y') = \fzero$ is the only one that satisfies $\psi'$. Now we consider where and why the construction of a Skolem function $s$ for $\psi$ (as shown in the proof) fails.
Since $s'(\varphi_1) \equiv x_1 \neq \fone$, the definition of $s(y)$ in the proof leads to $s(y) = s'(y') = \fzero$. In order to prove by contradiction that $s(\psi)$ is a tautology, the proof assumes an assignment $\mu \in \assign\bigl(\{x_1, x_2\}\bigr)$ with $\mu\bigl(s(\psi)\bigr) = 0$ and $\mu\bigl(s'(\psi')\bigr) = 1$. $\bigl(\mu(x_1), \mu(x_2)\bigr) = (0, 0)$ is not possible, since in this case $\mu\bigl(s(\psi)\bigr) = 1$ as well due to $\mu(\neg x_1 \land \neg x_2) = 1$. Also for the cases $\bigl(\mu(x_1), \mu(x_2)\bigr) = (1, 1)$ and $\bigl(\mu(x_1), \mu(x_2)\bigr) = (0, 1)$ we obtain contradictions as given in the proof. The interesting case (where the proof fails) is $\bigl(\mu(x_1), \mu(x_2)\bigr) = (1, 0)$. This implies $\mu\bigl(s'(\varphi_1)\bigr) = \mu(x_1) = 1$ and $\mu\bigl(s'(\varphi_2[\sfrac{y'}{y}])\bigr) = \mu(x_2) = 0$, \ie we are in Case 4, where we try to prove Fact 1 (\ie $s(y) = s'(y)$ for the constant Skolem functions in the example) by contradiction -- which does not work here. Reduced to our example, Fact 1 does not hold if there is an assignment $\mu' \in \assign\bigl(\{x_1, x_2\}\bigr)$ with $\mu'\bigl(s'(\varphi_1)\bigr) = 0$, which just means $\mu'(x_1) = 0$. The proof idea is to construct from $\mu'$ another assignment $\mu'' \in \assign\bigl(\{x_1, x_2\}\bigr)$ with $\mu''\bigl(s'(\psi')\bigr) = 0$ (contradicting the fact that $s'(\psi')$ is a tautology). $\mu''(x) = \mu'(x)$ for all $x \in \varocc[\varphi_1] = \{x_1\}$, $\mu''(x) = \mu(x)$ otherwise, \ie $\mu''(x_1) = 0$ and $\mu''(x_2) = 0$, leading to $\mu''\bigl(s'(\psi_2)\bigr) = \mu''(x_1 \lor x_2) = 0$.
The contradiction derived in the proof relies on the fact that $\mu''$ can differ from $\mu$ only for variables in $\varocc[\varphi_1]$, which implies by the precondition $\varocc[\varphi_1] \cap \varocc[\psi \setminus \psi_1] = \emptyset$ that the assignments to universal variables outside $s'(\psi_2)$ are identical both for $\mu$ and $\mu''$. This is not true in our example where $\varocc[\varphi_1] \cap \varocc[\psi \setminus \psi_1] = \{x_1\}$ and $\mu(\neg x_1 \land \neg x_2) = 0$, but $\mu''(\neg x_1 \land \neg x_2) = 1$. Thus $\mu''\bigl(s'(\psi')\bigr) = \mu''\bigl((x_1 \lor x_2) \lor (\neg x_1 \land \neg x_2)\bigr)=1$, \ie we do not obtain the contradiction $\mu''\bigl(s'(\psi')\bigr) = 0$. \end{example}
\subsection{Refuting Propositions~\ref{prop4} and \ref{prop5} from \cite{BalabanovCJ14}}
\noindent The first paper looking into quantifier localization for DQBF was \cite{BalabanovCJ14}. To this end, its authors proposed Propositions~\ref{prop4} and \ref{prop5}, which are unfortunately unsound. We literally repeat Proposition~\ref{prop4} from \cite{BalabanovCJ14}: \setcounter{proposition}{3} \begin{proposition}[\cite{BalabanovCJ14}]
\label{prop4}
The DQBF
\begin{equation*}
\label{eq1}
\forall \vec{x}\, \exists y_1(S_1) \ldots \exists y_m(S_m) : (\phi_A \vee \phi_B)
\end{equation*}
where $\forall \vec{x}$ denotes $\forall x_1 \ldots \forall x_n$, sub-formula $\phi_A$
(respectively $\phi_B$) refers to variables $X_A \subseteq X$ and $Y_A \subseteq Y$
(respectively $X_B \subseteq X$ and $Y_B \subseteq Y$), is logically equivalent to
\begin{align*}
\label{eq2}
\forall \vec{x}_c\bigl((\forall \vec{x}_a \exists y_{a_1}(S_{a_1} \cap X_A) \ldots \exists y_{a_p}(S_{a_p} \cap X_A): \phi_A) \vee {} \\
(\forall \vec{x}_b \exists y_{b_1}(S_{b_1} \cap X_B) \ldots \exists y_{b_q}(S_{b_q} \cap X_B): \phi_B)\bigr),
\end{align*}
where variables $\vec{x}_c$ are in $X_A \cap X_B$,
variables $\vec{x}_a$ are in $X_A \setminus X_B$,
variables $\vec{x}_b$ are in $X_B \setminus X_A$,
$y_{a_i} \in Y_A$, and $y_{b_j} \in Y_B$. \end{proposition}
\begin{lemma}
\label{lemma:prop4unsound}
Proposition~\ref{prop4} is unsound. \end{lemma}
\begin{proof} Consider the following DQBF \begin{equation*}
\label{eq:prop4_cex}
\psi^1 \colonequals \forall x_1 \forall x_2 \exists y_1(x_2) :
\bigl(\underbrace{(x_1\equiv x_2)}_{\phi_A} \vee \underbrace{(x_1\not\equiv y_1)}_{\phi_B}\bigr) \end{equation*}
By \eqref{equiv:exists_and}, $\psi^1$ is equisatisfiable with $\psi$ from Example~\ref{ex1} and thus satisfiable. According to Proposition~\ref{prop4} we can identify the sets $X_A = \{x_1,x_2\}$, $X_B = \{x_1\}$, $Y_A = \emptyset$, and $Y_B = \{y_1\}$, and rewrite the formula to \begin{equation}
\label{eq:prop4_cex2}
\psi^2 \colonequals \forall x_1 : \big( ( \forall x_2:(x_1\equiv x_2) ) \vee ( \exists y_1(\emptyset):(x_1\not\equiv y_1) ) \big). \end{equation} This formula, in contrast to $\psi^1$, is unsatisfiable because the only Skolem function candidates for $y_1$ are $\fzero$ and $\fone$. Neither of these Skolem function candidates turns $\psi^2$ into a tautology. \end{proof} In the example from the proof, the ``main mistake'' was to replace $D_{y_1} = \{x_2\}$ by $\emptyset$. If this \emph{were} correct, then the remainder would follow from \eqref{equiv:exists_and} and \eqref{equiv:forall_or}. \begin{remark}
Proposition~\ref{prop4} of \cite{BalabanovCJ14} is already unsound when we consider the
commonly accepted semantics of closed prenex DQBFs as stated in Definition~\ref{def:dqbf_semantics_cp}.
The proposition claims that $\psi^1$ is equisatisfiable with $\psi^2$. Additionally, it claims
that
\begin{equation*}
\label{eq:prop4_cex3}
\psi^3 \colonequals \forall x_1 \forall x_2 \exists y_1(\emptyset) :
\bigl((x_1\equiv x_2) \vee (x_1\not\equiv y_1)\bigr)
\end{equation*}
is equisatisfiable with $\psi^2$. Due to transitivity of equisatisfiability,
Proposition~\ref{prop4} claims that $\psi^1$ is equisatisfiable with $\psi^3$.
However, according to the
semantics in Definition~\ref{def:dqbf_semantics_cp}, $\psi^1$ is satisfiable and $\psi^3$
unsatisfiable. Also note that $\psi^1$ and $\psi^3$ are actually QBFs; so Proposition~\ref{prop4}
is also unsound when restricted to QBFs. \end{remark}
Next we literally repeat Proposition~\ref{prop5} from \cite{BalabanovCJ14}: \setcounter{proposition}{4} \begin{proposition}[\cite{BalabanovCJ14}]
\label{prop5}
The DQBF
\begin{equation*}
\label{eq3}
\forall \vec{x}\,\exists y_1(S_1) \ldots \exists y_k(S_k): (\phi_A \land \phi_B)
\end{equation*}
where $\forall \vec{x}$ denotes $\forall x_1 \ldots \forall x_n$, sub-formula $\phi_A$ (respectively $\phi_B$)
refers to variables $X_A \subseteq X$ and $Y_A \subseteq Y$ (respectively $X_B \subseteq X$ and $Y_B \subseteq Y$),
is logically equivalent to
\begin{equation*}
\label{eq4}
\forall \vec{x}\,\exists y_2(S_2) \ldots \exists y_k(S_k): \bigl((\exists y_1(S_1 \cap X_A): \phi_A) \land \phi_B\bigr)
\end{equation*}
for $y_1 \notin Y_B$. \end{proposition}
\begin{lemma}
\label{lemma:prop5unsound}
Proposition~\ref{prop5} is unsound. \end{lemma}
\begin{proof} For a counterexample, consider the formula \begin{equation*}
\label{eq:prop5_cex}
\psi^4 \colonequals
\forall x_1 \forall x_2 \exists y_1(x_1,x_2)\exists y_2(x_1,x_2) :
\underbrace{\bigl(y_1\equiv\neg y_2\bigr)}_{\phi_A}
\land
\underbrace{\bigl(y_2\equiv (x_1\land x_2)\bigr)}_{\phi_B} \end{equation*} with the corresponding variable sets $X_A = \emptyset$, $X_B = \{x_1,x_2\}$, $Y_A = \{y_1,y_2\}$, and $Y_B = \{y_2\}$. We have $y_1\notin Y_B$ and $\{x_1,x_2\}\cap X_A = \emptyset$. Proposition~\ref{prop5} says that $\psi^4$ is equisatisfiable with: \begin{equation*}
\psi^5 \colonequals
\forall x_1\forall x_2\exists y_2(x_1,x_2):
\bigl( \exists y_1(\emptyset): (y_1\equiv\neg y_2)\bigr)\land \bigl(y_2\equiv (x_1\land x_2)\bigr)\,. \end{equation*}
The formula $\psi^4$ is satisfiable; the Skolem function $s$ with $s(y_1)=\neg (x_1\land x_2)$ and $s(y_2)=(x_1\land x_2)$ is in $\sem{\psi^4}$.
The formula $\psi^5$, however, is unsatisfiable: Since $D_{y_1}^{\psi^5} = \emptyset$, there are only two Skolem function candidates for $y_1$, either $s(y_1)=\fzero$ or $s(y_1)=\fone$. In the first case, we need to find a function for $y_2$ such that $\bigl(\fzero \equiv\neg y_2\bigr)\land \bigl(y_2\equiv (x_1\land x_2)\bigr)$ becomes a tautology. In order to satisfy the first part, $\fzero \equiv\neg y_2$, we need to set $s(y_2)=\fone$. Then the formula can be simplified to $(x_1\land x_2)$, which is not a tautology. In the second case, $s(y_1)=\fone$, we get the expression $\bigl(\fone\equiv\neg y_2\bigr)\land \bigl(y_2\equiv (x_1\land x_2)\bigr)$. This requires setting $s(y_2)=\fzero$ in order to satisfy the first part, turning the formula into $\fzero\equiv (x_1\land x_2)$, or more concisely, $\neg(x_1\land x_2)$, which is not a tautology either. Therefore we can conclude that $\psi^5$ is unsatisfiable and, accordingly, Proposition~\ref{prop5} of \cite{BalabanovCJ14} is unsound. \end{proof}
For Proposition~\ref{prop5} we make a similar observation as for Proposition~\ref{prop4}: \begin{remark}
Also Proposition~\ref{prop5} of \cite{BalabanovCJ14} is already unsound when we consider the
commonly accepted semantics of closed prenex DQBFs as stated in Definition~\ref{def:dqbf_semantics_cp}.
The proposition claims that $\psi^4$ is equisatisfiable with $\psi^5$. Additionally, it claims
that
\begin{equation*}
\psi^6 \colonequals
\forall x_1\forall x_2\exists y_1(\emptyset)\exists y_2(x_1,x_2):
\bigl(y_1\equiv\neg y_2\bigr)\land \bigl(y_2\equiv(x_1\land x_2)\bigr)
\end{equation*}
is equisatisfiable with $\psi^5$. Due to transitivity of equisatisfiability,
Proposition~\ref{prop5} claims that $\psi^4 \approx \psi^6$ holds. However, according to the
semantics in Definition~\ref{def:dqbf_semantics_cp}, $\psi^4$ is satisfiable and $\psi^6$
unsatisfiable. Again, $\psi^4$ and $\psi^6$ are actually QBFs; so Proposition~\ref{prop5}
is also unsound when restricted to QBFs. \end{remark}
\section{Taking Advantage of Quantifier Localization} \label{sec:algorithm}
\noindent In this section, we explain the implementation of the algorithm that exploits the properties of non-prenex DQBFs to simplify a given formula. First, we define necessary concepts and give a coarse sketch of the algorithm. Then, step by step, we dive into the details.
Benedetti introduced in \cite{Benedetti05c} \textit{quantifier trees} for pushing quantifiers into a CNF. In a similar way we construct a \textit{quantifier graph}, which is a structure similar to an And-Inverter Graph (AIG)~\cite{Kuehlmann2002}. It allows us to perform quantifier localization according to Theorems~\ref{th:rules}, \ref{th:forall_and}, \ref{th:equisat:forall_and}, and \ref{th:equisat:exists_or}.
\begin{definition} [Quantifier graph]
\label{def:quantGraph}
For a non-prenex DQBF $\psinp$, a \emph{quantifier graph} is a directed acyclic graph $G_{\psinp} = (N_{\psinp},E_{\psinp})$.
$N_{\psinp}$ denotes the set of nodes of $G_{\psinp}$. Each node $n\in N_{\psinp}$ is labeled with an operation
${\op}\in \{\wedge,\vee\}$ from $\psinp$ if $n$ is an inner node, or with a variable $v\in\var[\psinp]$ if it is a terminal node.
$E_{\psinp}$ is a set of edges. Each edge is possibly augmented with quantified variables and\,/\,or negations. \end{definition}
The input to the basic algorithm for quantifier localization (\textit{DQBFQuantLoc}), which is shown in Algorithm~\ref{algo:DQBFQuLo}, is a closed prenex DQBF $\psi$. The matrix $\varphi$ of $\psi$ is represented as an AIG, and the prefix $Q$ is a set of quantifiers as stated in Definition~\ref{def:dqbf_cp}. (If the matrix is initially given in CNF, we preprocess it by circuit extraction (see for instance \cite{PigorschS09,wimmer-et-al-sat-2015,wimmer-et-al-jsat-2019}) and the resulting circuit is then represented by an AIG.) The output of \textit{DQBFQuantLoc} is a DQBF in closed prenex form again. In intermediate steps, we convert $\psi$ into a non-prenex DQBF $\psinp$, represented as a quantifier graph, by pushing quantifiers of the prefix into the matrix. After pushing the quantified variables as deep as possible into the formula, we eliminate quantifiers wherever it is possible. If a quantifier cannot be eliminated, it is pulled out of the formula again. In this manner we finally obtain a modified and possibly simplified prenex DQBF $\psi'$.
\begin{algorithm}[tb] \SetKwInOut{Input}{Input} \SetKwInOut{Output}{Output} \SetKwFunction{NormalizeToNNF}{NormalizeToNNF} \SetKwFunction{BuildMacroGates}{BuildMacroGates} \SetKwFunction{EliminatePures}{EliminatePures} \SetKwFunction{Localize}{Localize} \SetKwFunction{Eliminate}{Eliminate}
\Input{Prenex DQBF $\psi\colonequals Q : \varphi$, where\\
-- $Q = \forall x_1\ldots\forall x_n\exists y_1(D_{y_1})\ldots\exists y_m(D_{y_m})$\\
-- $\varphi$ has an arbitrary structure given as an AIG} \Output{prenex DQBF $\psi'$} \BlankLine
$G_{\psinp} \colonequals$ \NormalizeToNNF{$\psi$}\;\label{algo:qloc:norm} $G_{\psinp} \colonequals$ \BuildMacroGates{$G_{\psinp}$}\;\label{algo:qloc:macro} $G_{\psinp} \colonequals$ \Localize{$G_{\psinp}$}\;\label{algo:qloc:loc} $G_{\psi'} \colonequals$ \Eliminate{$G_{\psinp}$}\;\label{algo:qloc:elim} \Return {$\psi'$} \caption{DQBFQuantLoc} \label{algo:DQBFQuLo} \end{algorithm}
\subsection{Building NNF and Macrogates} \label{ssec:create_nnf}
\begin{figure}
\caption{Quantifier graph in NNF.}
\label{ex:quantgraph}
\end{figure}
\noindent In Line~\ref{algo:qloc:norm} of Algorithm~\ref{algo:DQBFQuLo}, we first translate the matrix $\varphi$ of the DQBF $\psi$ into negation normal form (NNF) by pushing the negations of the circuit to the primary inputs (using De Morgan's law). The resulting formula containing the matrix in NNF is represented as a quantifier graph as in Definition~\ref{def:quantGraph}, where we only have negations at those edges which point to terminals. Figure~\ref{ex:quantgraph} shows a quantifier graph as returned by \textit{NormalizeToNNF}. We will use it as a running example to illustrate our algorithm.
Then, in Line~\ref{algo:qloc:macro} of Algorithm~\ref{algo:DQBFQuLo}, we combine subcircuits into \emph{AND}\,/\,\emph{OR} \emph{macrogates}. The combination into macrogates is essential to increase the degrees of freedom given by different decompositions of \emph{AND}s\,/\,\emph{OR}s that enable different applications of the transformation rules according to rules~\eqref{equiv:forall_or}, \eqref{equiv:exists_and}, and Theorems~\ref{th:forall_and}, \ref{th:equisat:forall_and}, and \ref{th:equisat:exists_or}. A \emph{macrogate} is a multi-input gate, which we construct by collecting consecutive nodes representing the same logic operation ($\land$, $\lor$). Except for the topmost node within a macrogate no other node may have more than one incoming edge, \ie macrogates are subtrees of fanout-free cones. During the collection of nodes, we stop the search along a path when we visit a node with multiple parents. From this node we later start a new search. The nodes which are the target of an edge leaving a macrogate are the \emph{macrochildren} of the macrogate and the parents of its root are called the \emph{macroparents}. It is clear that a macrogate consisting of only one node has exactly two children like a standard node. For such nodes we use the terms macrogate and node interchangeably. In Figure~\ref{ex:macrogate} we show a macrogate found in the running example.
\subsection{Quantifier Localization} \label{ssec:localize}
\noindent After calling \textit{NormalizeToNNF} and \textit{BuildMacroGates} the only edge that carries quantified variables is the root edge. By shifting quantified variables to edges below the root node we push them into the formula. Sometimes we say that we push a quantified variable to a child by which we mean that we write the variable to the edge pointing to this child.
On the quantifier graph for formula $\psinp$ we perform the localization of quantifiers according to Theorems~\ref{th:rules}, \ref{th:forall_and}, \ref{th:equisat:forall_and}, and \ref{th:equisat:exists_or} with the function \textit{Localize} in Line~\ref{algo:qloc:loc} of Algorithm~\ref{algo:DQBFQuLo}. Algorithm~\ref{algo:local} presents the details.
\iffalse \begin{algorithm}[tb] \SetKwInOut{Input}{Input} \SetKwInOut{Output}{Output} \SetKwFunction{PushVariables}{PushVariables} \SetKwFunction{SeparateIncomings}{SeparateIncomings}
\Input{Quantifier graph $G_{\psinp}$} \Output{Modified quantifier graph $G_{\psinp}$} \BlankLine
queue $\colonequals [ g_{\text{root}} ]$\tcp*{$g_{\text{root}}$ is root gate of $G_{\psinp}$}
\While{queue $\neq\emptyset$}{\label{algo:local:while}
$g\colonequals$ get and remove the first element of queue\;
\PushVariables{g}\;\label{algo:local:push}
\uIf{there are variables left which do not occur on all incoming edges of $g$}{
$v\colonequals$ variable that occurs on the fewest incoming edges\;\label{algo:local:pickvar}
$g'\colonequals$ \SeparateIncomings{g,v}\;\label{algo:local:separate}
append $g$ and $g'$ to queue\;
}
\Else{
append all macrochildren of $g$ to queue\;\label{algo:local:children}
} } \Return modified quantifier graph $G_{\psinp}$\;\label{algo:local:end} \caption{Localize} \end{algorithm} \fi
\begin{algorithm}[tb]
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\SetKwFunction{PushVariables}{PushVariables}
\SetKwFunction{SeparateIncomings}{SeparateIncomings}
\Input{Quantifier graph $G_{\psinp}$}
\Output{Modified quantifier graph $G_{\psinp}$}
\BlankLine
nodeList $\colonequals$ double linked list of all nodes of $G_{\psinp}$ in
topological order, \ie starting with the root gate\;
$g\colonequals$ nodeList.first \;
\While{$g \neq \mathrm{nodeList.end}$}{\label{algo:local:while}
\PushVariables{g}\;\label{algo:local:push}
\uIf{there are variables left which do not occur on all incoming edges of $g$}{
$v\colonequals$ variable that occurs on most incoming edges of $g$\;\label{algo:local:pickvar}
$g'\colonequals$ \SeparateIncomings{g,v}\;\label{algo:local:separate}
insert $g'$ into nodeList after $g$\;
}\Else{
$g\colonequals$ nodeList.next\;
}
}
\Return modified quantifier graph $G_{\psinp}$\;\label{algo:local:end}
\caption{Localize} \label{algo:local} \end{algorithm}
The graph is traversed in topological order (given by nodeList), starting with the macrogate $g_{root}$, which is the root of the graph representing $\psinp$. Note that despite the transformations made so far, the graph passed to \textit{Localize} still represents a prenex DQBF. For each macrogate $g$ from the graph we first call the function \textit{PushVariables} in Algorithm~\ref{algo:local}, Line~\ref{algo:local:push} (the details of \textit{PushVariables} are listed in Algorithm~\ref{algo:push}). This function pushes as many variables as possible over $g$.
Only variables that are common to \emph{all} incoming edges of $g$ can be pushed over $g$. Thus, after \textit{PushVariables} there might be remaining variables, which only occur on some but not all edges. In Line~\ref{algo:local:pickvar} of Algorithm~\ref{algo:local} we pick such a variable $v$ that occurs on at least one, but not all incoming edges. To allow $v$ to be pushed over $g$, we apply \textit{SeparateIncomings} to $g$ \wrt $v$ (Algorithm~\ref{algo:local}, Line~\ref{algo:local:separate}). This function creates a copy $g'$ of $g$ and removes from $g'$ all incoming edges containing $v$. From $g$ it removes all incoming edges without $v$, \ie $v$ occurs on all incoming edges to $g$ when returning from this function. Then, the procedure that pushes variables and possibly copies gates is repeated for $g$ and $g'$. If there is no more quantified variable for which the incoming edges need to be separated, we continue the procedure for the next macrogates in topological order given by nodeList.
Now we take a closer look at the function \textit{PushVariables} from Line~\ref{algo:local:push} of Algorithm~\ref{algo:local}, which pushes quantified variables over a single macrogate $g$ according to Theorems~\ref{th:rules}, \ref{th:forall_and}, \ref{th:equisat:forall_and}, and \ref{th:equisat:exists_or}.
\begin{algorithm}[tb] \SetKwInOut{Input}{Input} \SetKwInOut{Output}{Output} \SetKwFunction{BuildMacroGates}{BuildMacroGates} \SetKwFunction{CollectCommonVariables}{CollectCommonVariables} \SetKwFunction{FindBestVariableConj}{FindBestVariableConj} \SetKwFunction{FindBestVariableDisj}{FindBestVariableDisj} \SetKwFunction{IsVarPushable}{IsVarPushable}
\Input{macrogate $g$} \BlankLine
$V_{\text{com}}\colonequals$ \CollectCommonVariables{g}\;
\uIf{g is a conjunction}{\label{algo:push:conj}
\While{$V_{\text{com}}\cap\varex\neq\emptyset$}{
$v\colonequals$ \FindBestVariableConj{$V_{\text{com}}$}\;\label{algo:push:bestconj}
try to push $v$ to macrochildren\;
delete $v$ from $V_{\text{com}}$\;
}
\For{each universal variable $x$ in $V_{\text{com}}$}{
try to push $x$ to macrochildren\;
delete $x$ from $V_{\text{com}}$\;
} } \Else{\label{algo:push:disj}
\For{each existential variable $y$ in $V_{\text{com}}$}{ \label{algo:push:foreachex}
\If{\IsVarPushable{y,g}}{\label{algo:push:ispushable}
push $y$ to macrochildren\;
delete $y$ from $V_{\text{com}}$\;
}
}\label{algo:push:foreachexend}
\While{$V_{\text{com}}\neq\emptyset$}{\label{algo:push:remaining}
$v\colonequals$ \FindBestVariableDisj{$V_{\text{com}}$}\;\label{algo:push:bestdisj}
try to push $v$ to macrochildren\;
delete $v$ from $V_{\text{com}}$\;
}\label{algo:push:remainingend} } \caption{PushVariables} \label{algo:push} \end{algorithm}
For macrogate $g$ we first determine the set of quantified variables $V_{\text{com}}$ that occur on \emph{all} incoming edges of $g$ by \textit{CollectCommonVariables}. These are the only ones which we can push further into the graph.
We annotate the edges by sets of quantifiers, not by sequences of quantifiers, although in the DQBF formulas we have sequences of quantifiers. The reason for this approach lies in rules \eqref{equiv:exists_exists}, \eqref{equiv:forall_forall}, and \eqref{equiv:forall_exists}. The order of quantifiers of the same type can be changed arbitrarily due to rules \eqref{equiv:exists_exists} and \eqref{equiv:forall_forall}. For quantifiers $\forall x$ and $\exists y(D_y)$ there are only two possible cases: If $x\in D_y$, then $\forall x$ has to be to the left of $\exists y(D_y)$, since $\exists y(D_y)\,\forall x:\varphi$ with $x\in D_y$ cannot occur in a valid DQBF by construction. If $x \notin D_y$, then both orders of $\forall x$ and $\exists y(D_y)$ are possible due to rule~\eqref{equiv:forall_exists}. Thus, by rule~\eqref{equiv:forall_exists} we can easily derive from a set of quantifiers all orders that are allowed in a valid DQBF. Since in both cases ($x\in D_y$ and $x \notin D_y$), $\exists y(D_y)$ can be on the right, it is always possible to push existential quantifiers first. Universal variables $x$ can only be pushed, if all existential variables $y$ on the same edge do not contain $x$ in their dependency set (see \eqref{equiv:forall_exists}). So pushing existential variables first may have the advantage that this enables pushing of universal variables.
In Lines~\ref{algo:push:conj} and \ref{algo:push:disj} of Algorithm~\ref{algo:push} we distinguish between a conjunction and a disjunction.
\subsubsection{Pushing over Conjunctions} \label{ssec:push_conj}
\noindent As already said, we push existential variables first. If $g$ is a conjunction, we can push existential variables using rule \eqref{equiv:exists_and} only. Before pushing an existential variable $y$, we collect the set $C_y$ of all children containing $y$. (Note that we do not differentiate here between a child $c_i$ and the subformula represented by $c_i$, which would be more precise.)
If $C_y = \emptyset$, then we can just remove the existential quantification of $y$ from the edges towards $g$ by Theorem~\ref{th:equiv:indep_exists}. If $C_y = \{c_i\}$, then we simply push the existential variable to the edge leading from $g$ to $c_i$ ($c_i$ can then be regarded as $\varphi_2$ from \eqref{equiv:exists_and}). If $C_y$ contains all children of $g$, then $y$ cannot be pushed. In all other cases a decomposition of the macrogate $g$ takes place. All children from $C_y$ are merged and treated as $\varphi_2$ from \eqref{equiv:exists_and}, \ie the \emph{AND} macrogate is decomposed into one \emph{AND} macrogate $g'$ combining the children in $C_y$, and another macrogate $g''$ (replacing $g$) whose children are the remaining children of $g$ as well as the new $g'$. Pushing $y$, we write $y$ on the incoming edge of $g'$.
(Of course, $g'$ has to be inserted after $g$ into the topological order nodeList used in Algorithm~\ref{algo:local}.) According to rule \eqref{equiv:exists_exists} we can push existential variables in an arbitrary order. Here we apply \textit{FindBestVariableConj} (Line~\ref{algo:push:bestconj}, Algorithm~\ref{algo:push}) to heuristically determine a good order of pushing. We first choose the existential variable $y$ that occurs in the fewest children of $g$,
\ie we choose the variable $y$ where $|C_y|$ is minimal. Remember that the children in $C_y$ are combined into an \emph{AND} macrogate $g'$ and $y$ (as well as all universal variables $y$ depends on) cannot be pushed over $g'$. So our goal is to find a variable $y$ which is the least obstructive for pushing other variables.
Subsequently, only universal variables are left for pushing. This is done by Theorems~\ref{th:forall_and} and \ref{th:equisat:forall_and}. As mentioned above, a universal variable $x$ cannot be pushed if there is some existential variable $y$ with $x \in D_y$ left on the incoming edges of $g$, because $y$ could not be pushed before. For all other universal variables $x$ we collect the set $C_x$ of all children containing $x$. If $C_x = \emptyset$, then we just remove the universal quantification of $x$ by Theorem~\ref{th:equiv:indep}. Otherwise, for all children $c \notin C_x$ we remove $x$ from the dependency sets of all existential variables $y$ on the edge from $g$ to $c$ according to Theorem~\ref{th:equisat:forall_and}. For all children $c \in C_x$ we push $x$ to the edge from $g$ to $c$ together with renaming $x$ into a fresh variable $x'$ according to Theorem~\ref{th:forall_and}.
\subsubsection{Pushing over Disjunctions} \label{ssec:push_disj}
\noindent In case $g$ is a disjunction, at first we check for each existential variable $y$ (Lines~\ref{algo:push:foreachex}--\ref{algo:push:foreachexend}, Algorithm~\ref{algo:push}) whether it can be distributed to its children. This is not always the case, since the preconditions of Theorem~\ref{th:equisat:exists_or} may possibly not be fulfilled. Function \textit{IsVarPushable} from Line~\ref{algo:push:ispushable} in Algorithm~\ref{algo:push} performs this check based on Theorem~\ref{th:equisat:exists_or} and / or rule~\eqref{equiv:exists_and}. If the function returns \textit{true}, $y$ can be distributed to certain children of $g$. For the check of function \textit{IsVarPushable}, whose details are listed in Algorithm~\ref{algo:isvarpushable}, we collect for each existential variable $y$ the set $C_y$ of children containing $y$. For the check in \textit{IsVarPushable} we do not have to consider the children which are not in $C_y$, since we do not need to push $y$ to those children according to rule~\eqref{equiv:exists_and}. If $C_y = \emptyset$, we can just remove the existential quantification of $y$ from the edges towards $g$ by Theorem~\ref{th:equiv:indep_exists}. If $C_y = \{c_i\}$, then we can push $y$ to the edge leading to $c_i$ according to rule~\eqref{equiv:exists_and}. In both cases \textit{IsVarPushable} returns true, see Lines~\ref{algo:isvarpushable:trivial1}--\ref{algo:isvarpushable:trivial2} of Algorithm~\ref{algo:isvarpushable}. The remaining cases are handled by Theorem~\ref{th:equisat:exists_or}. Let $\psinp$ be the DQBF represented by the root node of the quantifier graph. 
For each $c_i \in C_y$ we compute as in Theorem~\ref{th:equisat:exists_or} $\varocc[c_i] = \Big[(\varall[\psinp] \cap \var[c_i]) \cup \bigcup_{v \in \varex[\psinp] \cap \var[c_i]} (\varall[\psinp]\cap D_v)\Big] \setminus D_y$, the set of all universal variables occurring in $c_i$ or in dependency sets of existential variables occurring in $c_i$, reduced by the dependency set $D_y$ of $y$ (Lines~\ref{algo:isvarpushable:vocc}--\ref{algo:isvarpushable:voccend}, Algorithm~\ref{algo:isvarpushable}). Moreover we compute in Line~\ref{algo:isvarpushable:collectoutside} of Algorithm~\ref{algo:isvarpushable} $\varocc[\psinp \setminus g] = \Big[ \varall[{\psinp[\sfrac{0}{g}]}] \cup \bigcup_{v \in \varex[{\psinp[\sfrac{0}{g}]}]} (\varall[\psinp]\cap D_v)\Big]$, the set of universal variables outside the subformula (represented by the macrogate) $g$ or in dependency sets of existential variables outside the subformula $g$. If all $\varocc[c_i]$ are pairwise disjoint and $\varocc[c_i] \cap \varocc[\psinp \setminus g]=\emptyset$ for all $c_i \in C_y$ except at most one (which then ``plays the role of $\varphi_2$ in Theorem~\ref{th:equisat:exists_or}''), then \textit{IsVarPushable} returns \textit{true} and $y$ can be pushed to all $c_i \in C_y$. Again, according to Theorem~\ref{th:equisat:exists_or}, we have to replace $y$ by fresh variables after pushing to the children $c_i \in C_y$. As already mentioned, we do not need to push $y$ to the children $c_i \notin C_y$ due to rule~\eqref{equiv:exists_and}.
After checking Theorem~\ref{th:equisat:exists_or} for all existential variables, \textit{PushVariables} continues with all variables left in $V_{com}$ in Lines~\ref{algo:push:remaining}--\ref{algo:push:remainingend} of Algorithm~\ref{algo:push}. They can be universal variables and those existential variables which have been determined to be stuck due to \textit{IsVarPushable}. An existential variable $y$ can still be pushed according to \eqref{equiv:exists_and} by decomposing $g$ into a macrogate $g'$ combining all children in $C_y$ and a macrogate $g''$ combining all other children together with $g'$, with $g''$ replacing $g$ (as in the case of $g$ being a conjunction). For universal variables $x$ we compute the set $C_x$ of all children containing $x$ or an existential variable $y$ with $x \in D_y$, see rule~\eqref{equiv:forall_or}. Then we decompose $g$ with a new macrogate combining all children in $C_x$. Similar to the case of conjunctions, we determine the next variable to be considered for splitting by \textit{FindBestVariableDisj} (Line~\ref{algo:push:bestdisj}, Algorithm~\ref{algo:push}). \textit{FindBestVariableDisj} selects the variable that has the fewest children in $C_y$ resp.~$C_x$ to be split off. For universal variables $x$ we have to take additionally into account that $x$ is only eligible by \textit{FindBestVariableDisj}, if $x$ is not in the dependency set of any $y$ on an incoming edge of $g$ anymore, see rule~\eqref{equiv:forall_exists}.
\begin{algorithm}[tb] \SetKwInOut{Input}{Input} \SetKwInOut{Output}{Output}
\Input{Existential variable $y$, disjunctive macrogate $g$} \Output{true/false} \BlankLine
$C_y\colonequals$ all children $c_i$ of $g$ with $y\in\var[c_i]$\;\label{algo:isvarpushable:collect}
\If{$|C_y| \leq 1$}{\label{algo:isvarpushable:trivial1}
\Return true\;\label{algo:isvarpushable:trivial2} }
\For{each child $c_i\in C_y$}{\label{algo:isvarpushable:vocc}
$V^{\forall,occ}_{c_i}\colonequals\Big[(\varall[\psinp]\cap V_{c_i})\cup\bigcup_{v\in\varex[\psinp]\cap V_{c_i}}(\varall[\psinp]\cap D_v)\Big]\setminus D_y$\; }\label{algo:isvarpushable:voccend}
\For{each pair $c_i,c_j\in C_y,$ $i\neq j$}{\label{algo:isvarpushable:childrendisj}
\If{$V^{\forall,occ}_{c_i}\cap V^{\forall,occ}_{c_j}\neq\emptyset$}{
\Return false\;
} }
$V^{\forall,occ}_{\psinp\setminus g}\colonequals \Big[ \varall[{\psinp[\sfrac{0}{g}]}]\cup\bigcup_{v\in\varex[{\psinp[\sfrac{0}{g}]}]}(\varall[\psinp]\cap D_v) \Big]$\;\label{algo:isvarpushable:collectoutside}
\BlankLine nonDisjoint $\colonequals$ 0\;
\For{each child $c_i\in C_y$}{\label{algo:isvarpushable:outsidedisj}
\If{$V^{\forall,occ}_{c_i}\cap V^{\forall,occ}_{\psinp\setminus g}\neq\emptyset$}{
nonDisjoint $\colonequals$ nonDisjoint + 1\;
\uIf{nonDisjoint $> 1$}{
\Return false\;
}
} } \Return true\;
\caption{IsVarPushable} \label{algo:isvarpushable} \end{algorithm}
The complete procedure is illustrated in Figure~\ref{ex:localize}.
\begin{figure}
\caption{A macrogate, marked in red.}
\label{ex:macrogate}
\caption{$G_{\psinp}$ after distributing $y_1$, $y_2$ according\\ to Theorem~\ref{th:equisat:exists_or} and \eqref{equiv:exists_and}.}
\label{ex:localize_distr}
\caption{$G_{\psinp}$ after splitting the macrogate to \\ enable pushing $x_2$.}
\label{ex:localize_restruct}
\caption{$G_{\psinp}$ after processing all macrogates.}
\label{ex:localize_final}
\caption{\textit{BuildMacroGates} and \textit{Localize}.}
\label{ex:localize}
\end{figure}
\subsection{Eliminating Variables} \label{ssec:elim}
\noindent Finally, in Line~\ref{algo:qloc:elim} of Algorithm~\ref{algo:DQBFQuLo}, we try to eliminate those variables which can be symbolically quantified after quantifier localization. The conditions are given by Theorem~\ref{th:rules2} and rule~\eqref{th:rules3}. We proceed from the terminals to the root and check each edge with at least one quantified variable written to it. If a variable could not be eliminated, we pull it back to the incoming edges of this edge's source node. If a variable has been duplicated according to Theorems~\ref{th:forall_and} or \ref{th:equisat:exists_or} and some duplications are brought back to one edge, then we merge them into a single variable again.
As Figure~\ref{ex:localize_final} shows, we can eliminate $y_1$ and $y_1'$, since there are no other variables in the support of the target nodes. The same holds for $y_2$ because $x_2$ is the only variable different from $y_2$ in the support of the target node and $x_2$ is in the dependency set of $y_2$, see Theorem~\ref{th:rules2}. Subsequently, $x_2$ and $x_1$ can be eliminated according to rule~\eqref{th:rules3}, such that we obtain a constant function.
Note that in our implementation we avoid copying and renaming variables when we apply Theorems~\ref{th:forall_and} or \ref{th:equisat:exists_or}. This saves additional effort when pulling back variables which could not be eliminated and avoids copying shared subgraphs which would become different by renaming. On the other hand, we have to consider this implementation detail in the check of conditions of Theorem~\ref{th:equisat:exists_or}, of course. Sets $\varocc[c_i]$ and $\varocc[c_j]$ which contain only common universal variables $x$ which are ``virtually different'' (\ie different if we performed renaming) are then considered to be disjoint. Fortunately, it is easy to decide this by checking whether $c_i$ and $c_j$ are in the scope of $\forall x$ or not.
Finally, having all remaining variables pulled back to the root edge, we return to a closed prenex DQBF with potentially fewer variables, fewer dependencies and a modified matrix, which we can pass back to a solver for prenex DQBFs.
\section{Experimental Results} \label{sec:experiments}
\noindent We embedded our algorithm into the DQBF solver HQS, which was the winner of the DQBF track at the QBF\-EVAL'18 and '19 competitions \cite{qbfeval18,qbfeval19}. HQS includes the powerful DQBF-preprocessor HQSpre~\cite{wimmer-et-al-tacas-2017,wimmer-et-al-jsat-2019}. After preprocessing has finished, we call the algorithm \textit{DQBFQuantLocalization} to simplify the formula. HQS augmented with the localization of quantifiers is denoted as \textit{\HQSnp}.\footnotemark \footnotetext{A recent binary of \HQSnp and all DQBF benchmarks we used are provided at \url{https://abs.informatik.uni-freiburg.de/src/projects_view.php?projectID=21}}
The experiments were run on one core of an Intel Xeon CPU E5-2650v2 with 2.6~GHz. The runtime per benchmark was limited to 30~min and the memory consumption to 4~GB. We tested our theory with the same 4811 instances as in \cite{WimmerKBS017,gitina-et-al-sat-skolem-2016,wimmer-et-al-sat_tr-2015,gitina-et-al-date-2015}. They encompass equivalence checking problems for incomplete circuits \cite{SchollB01,gitina-et-al-iccd-2013,FinkbeinerT14,FrohlichKBV14}, controller synthesis problems \cite{BloemKS14} and instances from \cite{BalabanovJ15} where a DQBF has been obtained by succinctly encoding a SAT problem.
Out of 4811 DQBF instances we focus here on those 991 which actually reach our algorithm. The remaining ones are solved by the preprocessor HQSpre or already exceed the time / memory limit either during preprocessing or while translating the formula into an AIG, \ie in those cases the results for HQS and \HQSnp do not differ.
When we reach the function \textit{DQBFQuantLocalization} from Algorithm~\ref{algo:DQBFQuLo}, for 989 out of 991 instances we can perform the localization of quantifiers. Quantifier localization enables the elimination of variables in subformulas of 848 instances. Local quantifier elimination takes place 57971 times and reduces the overall number of variables in all 848 benchmarks which allow variable elimination in subformulas; 49107 variables have been eliminated in this manner. Note that if a variable has been duplicated according to Theorems~\ref{th:forall_and} or \ref{th:equisat:exists_or} and not all of the duplicates can be eliminated locally, this variable is not counted among the eliminated variables as some duplicates will be pulled back to the root.
\begin{figure}
\caption{HQS vs.\ \HQSnp\ -- solved instances}
\caption{HQS vs.\ \HQSnp\ -- computation time}
\caption{Impact of Quantifier Localization}
\label{fig:hqsvshqsEQ}
\label{fig:hqsvshqsEQScatter}
\end{figure}
Altogether 701 instances out of 991 were solved by \HQSnp in the end, whereas HQS could only solve 542. This increases the number of solved instances by more than 29\% (for a cactus plot comparing HQS with \HQSnp see Figure~\ref{fig:hqsvshqsEQ}). The largest impact of quantifier localization has been observed on equivalence checking benchmarks for incomplete circuits from \cite{FinkbeinerT14}.
Figure~\ref{fig:hqsvshqsEQScatter} shows the computation times of HQS resp.~\HQSnp for all individual benchmark instances. The figure reveals that quantifier localization, in its current implementation, does not lead to a better result in every case. 186 instances have been solely solved by \HQSnp, but the opposite is true for 27 benchmarks. In all of these 27 instances the AIG sizes have grown during local quantifier elimination, and processing larger AIGs resulted in larger run times. Altogether, the size of the AIG after \textit{DQBFQuantLocalization} has been decreased in 545 cases and increased in 300 cases (in 3 modified instances the number of AIG nodes did not change), although in general it is not unusual that the symbolic elimination of quantifiers must be paid by increasing the sizes of AIGs. Nevertheless, Figure~\ref{fig:hqsvshqsEQScatter} shows that in most cases the run times of \HQSnp are shorter than those of HQS (and, as already mentioned, the number of solved instances is increased by more than 29\%).
We also tested our algorithm on the competition benchmarks from QBFEVAL'18 to QBFEVAL'20~\cite{qbfeval18,qbfeval19,qbfeval20}. Here the situation is pretty similar. In those competitions 660 different benchmark instances have been used. 141 out of 660 benchmark instances reach our core algorithm and 65 of them could be solved by the original HQS algorithm. In all those 141 instances variables are pushed into the formula, and in 39 instances pushing enabled 3641 local eliminations of variables in total. This made it possible to newly solve 13 benchmark instances and to decrease the runtime for further 10 instances.
\section{Conclusions} \label{sec:conclusion} \noindent In this paper, we presented syntax and semantics of non-prenex DQBFs and proved rules to transform prenex DQBFs into non-prenex DQBFs. We could demonstrate that we can achieve significant improvements by extending the DQBF solver HQS based on this theory. Simplifications of DQBFs were due to symbolic quantifier eliminations that were enabled by pushing quantifiers into the formula based on our rules for non-prenex DQBFs.
In the future, we aim at improving the results of quantifier localization, \eg by introducing estimates on costs and benefits of quantifier localization operations as well as local quantifier elimination and by using limits on the growth of AIG sizes caused by local quantifier elimination.
\section{Proof of Theorem~\ref{th:semantics}} \label{app:semproof}
\semantics*
\begin{proof}
We show that $\semdef{\psi}=\semth{\psi}$ holds by induction on the structure of $\psi$. \begin{description}
\item[\eqref{th:semantics:var}:]
$v$ is a free variable in $\psi$. Therefore $\sfunc{\psi} = \{v\mapsto \fzero,v\mapsto\fone\}$.
Only replacing $v$ by $\fone$ turns
$\psi$ into a tautology, \ie $\semdef{\psi}=\{v\mapsto\fone\} = \semth{\psi}$.
\item[\eqref{th:semantics:notvar}:]
Like in the first case, $v$ is a free variable in $\psi$. Therefore $\sfunc{\psi} = \{v\mapsto\fzero,v\mapsto \fone\}$.
Only replacing $v$ by $\fzero$ turns $\neg v$
into a tautology, \ie $\semdef{\psi}=\{v\mapsto\fzero\} = \semth{\psi}$.
\item[\eqref{th:semantics:and} $\psi = (\varphi_1 \land \varphi_2)$:] ~ \\
$ \semdef{\psi} = \bigl\{ s\in\sfunc{\psi}\,\big|\,\vDash s(\psi)\bigr\} = \bigl\{ s\in\sfunc{\psi}\,\big|\,\vDash s(\varphi_1)\land s(\varphi_2)\bigr\}$. \\
The conjunction $s(\varphi_1)\land s(\varphi_2)$ is a tautology iff both $s(\varphi_1)$ and $s(\varphi_2)$ are tautologies, \ie
$\semdef{\psi} = \bigl\{ s\in\sfunc{\psi}\,\big|\,{\vDash s(\varphi_1)} \land {\vDash s(\varphi_2)}\bigr\}$.
We can restrict $s$ to the variables that actually occur in the sub-formulas, \ie
$\semdef{\psi} = \bigl\{ s\in\sfunc{\psi}\,\big|\,{\vDash s_{|\varfree[\varphi_1]\dcup\varex[\varphi_1]}(\varphi_1)} \land
{\vDash s_{|\varfree[\varphi_2]\dcup\varex[\varphi_2]}(\varphi_2)} \bigr\}$.
By using Definition~\ref{def:sem} of $\semdef{\cdot}$:
$\semdef{\psi} = \bigl\{s\in\sfunc{\psi}\,\big|\,s_{|\varfree[\varphi_1]\dcup\varex[\varphi_1]}\in\semdef{\varphi_1} \land
s_{|\varfree[\varphi_2]\dcup\varex[\varphi_2]}\in\semdef{\varphi_2}\bigr\}$.
Due to the induction assumption we have $\semdef{\varphi_1}=\semth{\varphi_1}$ and $\semdef{\varphi_2}=\semth{\varphi_2}$ and thus:
$\semdef{\psi} = \bigl\{s\in\sfunc{\psi}\,\big|\,s_{|\varfree[\varphi_1]\dcup\varex[\varphi_1]}\in\semth{\varphi_1} \land
s_{|\varfree[\varphi_2]\dcup\varex[\varphi_2]}\in\semth{\varphi_2}\bigr\}$.
With the definition of $\semth{\cdot}$ in \eqref{th:semantics:and} we finally obtain: \\
$\semdef{\psi} = \bigl\{s\in\sfunc{\psi}\,\big|\,s\in\semth{\psi}\bigr\} = \semth{\psi}.$
\item[\eqref{th:semantics:or} $\psi = (\varphi_1 \lor \varphi_2)$:] ~\\
This case is analogous to the previous case, however it needs an additional argument.
Here we need the statement ``The disjunction $s(\varphi_1)\lor s(\varphi_2)$ is a tautology
iff $s(\varphi_1)$ or $s(\varphi_2)$ is a tautology'', which is not true in general.
Nevertheless, we can prove it here with the following argument:
$s(\varphi_1)$ only contains variables from $\varall[\varphi_1]$,
and similarly $s(\varphi_2)$ only variables from $\varall[\varphi_2]$.
According to our assumption from Definition~\ref{def:syntax}
$\varall[\varphi_1]\cap\varall[\varphi_2]=\emptyset$ holds.
Therefore $s(\varphi_1)\lor s(\varphi_2)$ is a tautology iff at least one of its parts is a tautology.
\item[\eqref{th:semantics:exists} $\psi = \exists v(D_v):\varphi^{-v}$:]~\\
$\semdef{\psi} = \bigl\{s\in\sfunc{\psi}\,\big|\,\vDash s(\exists v(D_v):\varphi^{-v})\bigr\}$.
The first observation is that $\sfunc{\psi} = \sfunc{\varphi^{-v}}$, since
$D_v \subseteq V\setminus(\varex[\varphi]\dcup\varall[\varphi]\dcup\{v\})$, \ie $D_v \cap \varall[\varphi] = \emptyset$, and thus
Skolem function candidates for $v$ are restricted to constant functions,
no matter whether $v$ is a free variable as in $\varphi^{-v}$ or an existential variable without universal variables in its dependency set as in $\psi$.
For all other existential variables in $\varphi$, $\varphi^{-v}$ removes $v$ from the dependency sets of all existential variables, but this does not have any effect on the corresponding
Skolem function candidates, since $v \notin \varall[\varphi]$.
Second, for each $s\in\sfunc{\psi}$ we have $s\bigl(\exists v(D_v):\varphi\bigr)=s\bigl(\varphi^{-v}\bigr)$. Therefore we get:
$\semdef{\psi} = \bigl\{s\in\sfunc{\varphi^{-v}}\,\big|\,\vDash s(\varphi^{-v})\bigr\} = \semdef{\varphi^{-v}}$.
By applying the induction assumption we get
$\semdef{\varphi^{-v}} = \semth{\varphi^{-v}}$
and finally, because of the definition of $\semth{\cdot}$:
$\semdef{\psi} = \semth{\psi}$.
\item[\eqref{th:semantics:forall} $\psi = \forall v:\varphi$:]~\\
$\semdef{\psi} = \bigl\{t\in\sfunc{\psi}\,\big|\,\vDash t(\forall v:\varphi)\bigr\}
= \bigl\{t\in\sfunc{\psi}\,\big|\,\vDash t(\varphi)\bigr\}
= \bigl\{t\in\sfunc{\psi}\,\big|\,\vDash t(\varphi)[\sfrac{0}{v}]
\ \land \vDash t(\varphi)[\sfrac{1}{v}]\bigr\}$.
For a function $t\in\sfunc{\psi}$, we define two functions $s_0^t,s_1^t\in\sfunc{\varphi}$ by:
$s_0^t(v)=\fzero$, $s_1^t(v)=\fone$,
$s_0^t(w)=s_1^t(w)=t(w)$ if $w\in\varex[\varphi]$ with $v\not\in D_w$ or $w\in\varfree[\varphi]\setminus\{v\}$, and
$s_0^t(w)= t(w)[\sfrac{0}{v}]$, $s_1^t(w)=t(w)[\sfrac{1}{v}]$ for $w\in\varex[\varphi]$ with $v\in D_w$. Then we have:
$t(\varphi)[\sfrac{0}{v}] = s_0^t(\varphi)$ and $t(\varphi)[\sfrac{1}{v}] = s_1^t(\varphi)$.
$\semdef{\psi} = \bigl\{t\in\sfunc{\psi}\,\big|\,{\vDash s_0^t(\varphi)} \land {\vDash s_1^t(\varphi)}\bigr\}
= \bigl\{t\in\sfunc{\psi}\,\big|\,s_0^t\in\semdef{\varphi} \land s_1^t\in\semdef{\varphi}\bigr\}$.
The induction assumption gives us: $\semdef{\varphi}=\semth{\varphi}$ and therefore:
$\semdef{\psi} = \bigl\{t\in\sfunc{\psi}\,\big|\,s_0^t\in\semth{\varphi} \land s_1^t\in\semth{\varphi}\bigr\}$.
With the equality $t(w) = \mathrm{ITE}\bigl(v,t(w)[\sfrac{1}{v}], t(w)[\sfrac{0}{v}]\bigr) = \mathrm{ITE}\bigl(v,s_1^t(w), s_0^t(w)\bigr)$ for $w\in\varex[\varphi]=\varex[\psi]$ with $v\in D_w$
and $t(w)=s_0^t(w)=s_1^t(w)$ for the remaining existential or free variables, we obtain:
\begin{align*}
\semdef{\psi} &= \bigl\{t\in\sfunc{\psi}\,\big|\,\exists s_0,s_1\in\semth{\varphi}: s_0(v)=\fzero\land s_1(v)=\fone \\
&\quad \land \; t(w) = s_0(w)=s_1(w)\text{ for } w\in\varfree[\psi] = \varfree[\varphi]\setminus\{v\} \\
&\quad \land \; t(w) = s_0(w)=s_1(w)\text{ for } w\in\varex[\psi]\text{ with }v\notin D_w \\
&\quad \land \; t(w) = \mathrm{ITE}\bigl(v,s_1(w), s_0(w)\bigr) \text{ for } w\in\varex[\psi]\text{ with }v\in D_w\bigr\} \\
&= \semth{\psi}.
\end{align*}
\end{description} \end{proof}
\section{Proof of Theorem~\ref{th:rules}} \label{app:ruleproof}
\rules*
\begin{proof}
\begin{description}
\item[\eqref{equiv:indep_exists}:]
Since the left-hand side $\exists y(D_y): \varphi$ of rule \eqref{equiv:indep_exists} is a well-defined DQBF, we
have $\varphi = \varphi^{-y}$ according to Definition~\ref{def:syntax}.
The rule simply follows from $\sem{\exists y(D_y):\varphi^{-y}} = \sem{\varphi^{-y}}$ shown in
Theorem~\ref{th:semantics}.
\item[\eqref{equiv:indep}:]
We omit the proof here, as it immediately follows from the more general Theorem~\ref{th:equiv:indep}.
\item[\eqref{th:rules3}:]
The statement easily follows from Theorem~\ref{th:semantics}, \eqref{th:semantics:forall} considering
that $\varphi$ contains only free variables.
Let $\psi_1 \colonequals \forall x : \varphi$ and
$\psi_2 \colonequals \varphi[\sfrac{0}{x}] \land \varphi[\sfrac{1}{x}]$.
We have $\sfunc{\psi_1} = \sfunc{\psi_2}$ and
$\sem{\psi_1}
= \bigl\{ t\in\sfunc{\psi_1}\,\big|\,
\exists s_0, s_1\in\sem{\varphi}: s_0(x)=\fzero\land s_1(x)=\fone \; \land
\forall w\in\varfree[\psi_1]: t(w)\colonequals s_0(w) = s_1(w) \bigr\}
= \sem{\psi_2}$.
\item[\eqref{equiv:exists}:]
Let $\psi_1 \colonequals \exists y(D_y) : \varphi$ and
$\psi_2 \colonequals \varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]$.
Assume that $\sem{\psi_1}\neq\emptyset$ and let $t\in\sem{\psi_1}$.
$t(y)$ has to be a constant function, \ie $t(y) = \fzero$ or $t(y) = \fone$.
We choose a Skolem function $t'$ for $\psi_2$ by
$t'(v) = t(v)$ for all $v\in \varfree[\psi_2]$.
It is clear that $t'$ is a Skolem function candidate for $\psi_2$.
Assume \wlogen that $t(y) = \fzero$.
Since $t\bigl(\exists y(D_y) : \varphi\bigr) = t\bigl(\varphi\bigr)$ is a tautology,
$t'\bigl(\varphi[\sfrac{0}{y}]\bigr) = t\bigl(\varphi\bigr)$ is a tautology, too.
Thus, $t'\bigl(\varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]\bigr)
= t'\bigl(\varphi[\sfrac{0}{y}]\bigr) \lor t'\bigl(\varphi[\sfrac{1}{y}]\bigr)$
is a tautology. This shows that $t'\in\sem{\psi_2}$ and therefore $\sem{\psi_2}\neq\emptyset$.
For the opposite direction assume that $\sem{\psi_2}\neq\emptyset$ and let $t'\in\sem{\psi_2}$.
$t'\bigl(\varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]\bigr)$ is a tautology.
Since $\varphi$ contains only free variables, $t'\bigl(\varphi[\sfrac{0}{y}]\bigr)$ and
$t'\bigl(\varphi[\sfrac{1}{y}]\bigr)$ are (equivalent to) constants. At least one of them is $\fone$,
assume \wlogen $t'\bigl(\varphi[\sfrac{0}{y}]\bigr)$. Now we choose
$t(v) = t'(v)$ for all $v \in \varfree[\psi_2] \setminus \{y\}$ and $t(y) = \fzero$.
Since $t\bigl(\exists y(D_y) : \varphi\bigr) = t\bigl(\varphi\bigr) = t'\bigl(\varphi[\sfrac{0}{y}]\bigr) = \fone$,
we have $t\in\sem{\psi_1}$ and therefore $\sem{\psi_1}\neq\emptyset$.
\item[\eqref{equiv:forall_and}:]
We omit the proof here, since it immediately follows from the more general Theorem~\ref{th:forall_and}.
\item[\eqref{equiv:forall_and2}:]
We omit the proof here, since it immediately follows from the more general Theorem~\ref{th:equisat:forall_and}.
\item[\eqref{equiv:forall_or}:]
Let $\psi_1\colonequals\forall x:(\varphi_1\op \varphi_2)$ and $\psi_2\colonequals\bigl(\varphi_1\op (\forall x:\varphi_2)\bigr)$
and assume that $x\notin V_{\varphi_1}$ and $x\notin D_y$ for any $y\in\varex[\varphi_1]$.
Note that we need $x\notin V_{\varphi_1}$, since otherwise $\psi_2$ would not be
well-formed according to Definition~\ref{def:syntax}.
From $x\notin D_y$ for any $y\in\varex[\varphi_1]$ we conclude that $\sfunc{\psi_1}=\sfunc{\psi_2}$.
Then we have:
$\sem{\psi_1}
= \bigl\{s\in\sfunc{\psi_1}\,\big|\,\vDash s(\forall x:(\varphi_1\op \varphi_2))\bigr\}
= \bigl\{s\in\sfunc{\psi_1}\,\big|\,\vDash s(\varphi_1\op \varphi_2)\bigr\}
= \bigl\{s\in\sfunc{\psi_1}\,\big|\,\vDash s(\varphi_1 \op (\forall x: \varphi_2))\bigr\}
= \bigl\{s\in\sfunc{\psi_2}\,\big|\,\vDash s(\varphi_1 \op (\forall x: \varphi_2))\bigr\}$,
since $\sfunc{\psi_1}=\sfunc{\psi_2}$, and finally $\sem{\psi_1} = \sem{\psi_2}$.
\item[\eqref{equiv:exists_or}:]
We set $\psi_1\colonequals \exists y(D_y):(\varphi_1\lor\varphi_2)$
and $\psi_2\colonequals \bigl(\exists y(D_y):\varphi_1\bigr)\lor\bigl(\exists y'(D_y):\varphi_2[\sfrac{y'}{y}]\bigr)$.
\begin{align*}
\sem{\psi_1} &= \bigl\{t\in\sfunc{\psi_1}\,\big|\,{\vDash t(\exists y(D_y):(\varphi_1\lor\varphi_2))} \bigr\} \\
&= \bigl\{t\in\sfunc{\psi_1}\,\big|\,{\vDash t(\varphi_1\lor\varphi_2)}\bigr\} \\
&= \bigl\{t\in\sfunc{\psi_1}\,\big|\,{\vDash t(\varphi_1)} \lor {\vDash t(\varphi_2)}\bigr\}
\intertext{The last equality holds, because the variables occurring in $t(\varphi_1)$ and $t(\varphi_2)$ are disjoint.
On the other hand we have}
\sem{\psi_2} &= \bigl\{t'\in\sfunc{\psi_2}\,\big|\,{\vDash t'((\exists y(D_y):\varphi_1)\lor(\exists y'(D_y):\varphi_2[\sfrac{y'}{y}]))} \bigr\} \\
&= \bigl\{t'\in\sfunc{\psi_2}\,\big|\,{\vDash t'(\varphi_1 \lor \varphi_2[\sfrac{y'}{y}])}\bigr\} \\
&= \bigl\{t'\in\sfunc{\psi_2}\,\big|\,{\vDash t'(\varphi_1)} \lor {\vDash t'(\varphi_2[\sfrac{y'}{y}])}\bigr\}.
\end{align*}
Again, the last equality holds, because the variables occurring in $t'(\varphi_1)$ and $t'\bigl(\varphi_2[\sfrac{y'}{y}]\bigr)$ are disjoint.
Assume that $\sem{\psi_1}\neq\emptyset$ and let $t\in\sem{\psi_1}$.
Then $t(\varphi_1)$ or $t(\varphi_2)$ is a tautology.
We choose a Skolem function $t'$ for $\psi_2$ by
$t'(v) = t(v)$ for all
$v\in\varfree[\psi_1]\dcup\varex[\psi_1]$
and
$t'(y') = t(y)$. (Note that $t(y)$ as well as $t'(y')$ have to be constant functions according
to Def.~\ref{def:skolem_function_candidates}.)
If $t(\varphi_1)$ is a tautology, then $t'(\varphi_1) = t(\varphi_1)$ is a
tautology as well, $t'\in\sem{\psi_2}$ and therefore $\sem{\psi_2}\neq\emptyset$.
If $t(\varphi_1)$ is not a tautology, then $t(\varphi_2)$ has to be a tautology and
$t'\bigl(\varphi_2[\sfrac{y'}{y}]\bigr) = t(\varphi_2)$ is a tautology as well,
$t'\in\sem{\psi_2}$ and therefore $\sem{\psi_2}\neq\emptyset$.
For the opposite direction assume $\sem{\psi_2}\neq\emptyset$ and let $t'\in\sem{\psi_2}$.
Then $t'(\varphi_1)$ or $t'(\varphi_2[\sfrac{y'}{y}])$ is a tautology.
If $t'(\varphi_1)$ is a tautology, we choose
the Skolem function for $\psi_1$ by
$t(v) = t'(v)$ for all $v\in\varfree[\psi_1]\dcup\varex[\psi_1]$.
It immediately follows that $t(\varphi_1) = t'(\varphi_1)$ is a tautology as well,
$t\in\sem{\psi_1}$ and therefore $\sem{\psi_1}\neq\emptyset$.
If $t'(\varphi_1)$ is not a tautology, then $t'(\varphi_2[\sfrac{y'}{y}])$ has to be a tautology
and we choose the Skolem function for $\psi_1$ by
$t(v) = t'(v)$ for all $v\in (\varfree[\psi_1]\dcup\varex[\psi_1]) \setminus \{y\}$
and $t(y) = t'(y')$.
Then $t(\varphi_2) = t'\bigl(\varphi_2[\sfrac{y'}{y}]\bigr)$ is also a tautology
and again $t\in\sem{\psi_1}$ and therefore $\sem{\psi_1}\neq\emptyset$.
\item[\eqref{equiv:exists_and}:]
Let $\psi_1 \colonequals\exists y (D_y):(\varphi_1 \op \varphi_2)$ and
$\psi_2\colonequals\varphi_1\op \bigl(\exists y(D_y):\varphi_2\bigr)$.
Note that we need $y\not\in\var[\varphi_1]$, since otherwise $\psi_2$ would not be
well-formed according to Definition~\ref{def:syntax}.
The following equalities hold:
\begin{align*}
\sem{\psi_1} &= \bigl\{s\in\sfunc{\psi_1}\,\big|\,{\vDash s(\exists y(D_y):(\varphi_1\op\varphi_2))}\bigr\} \\
&= \bigl\{s\in\sfunc{\psi_1}\,\big|\,{\vDash s(\varphi_1\op\varphi_2)}\bigr\} \\
&= \bigl\{s\in\sfunc{\psi_1}\,\big|\,\vDash s(\varphi_1\op (\exists y(D_y):\varphi_2)) \bigr\} \\
&= \bigl\{s\in\sfunc{\psi_2}\,\big|\,\vDash s(\varphi_1\op (\exists y(D_y):\varphi_2)) \bigr\}
\text{ since $\sfunc{\psi_1}=\sfunc{\psi_2}$} \\
&= \sem{\psi_2}\,.
\end{align*}
\item[\eqref{equiv:exists_exists}:]
By applying Theorem~\ref{th:semantics}, Equation~\eqref{th:semantics:exists} multiple times, we get:
$\sem{\exists y_1(D_{y_1})\exists y_2(D_{y_2}):\varphi}
= \sem{\exists y_2(D_{y_2}):\varphi}
= \sem{\varphi}
= \sem{\exists y_1(D_{y_1}):\varphi}
= \sem{\exists y_2(D_{y_2})\exists y_1(D_{y_1}):\varphi}$.
\item[\eqref{equiv:forall_forall}:]
We set
$\psi_1\colonequals\forall x_1 \forall x_2:\varphi$ and $\psi_2\colonequals\forall x_2\forall x_1:\varphi$. Then we have:
$\sem{\psi_1} = \sem{\forall x_1 \forall x_2:\varphi}
= \bigl\{ s\in\sfunc{\psi_1}\,\big|\, {\vDash s(\forall x_1\forall x_2:\varphi)} \bigr\}
= \bigl\{ s\in\sfunc{\psi_1}\,\big|\, {\vDash s(\varphi)} \bigr\}
= \bigl\{ s\in\sfunc{\psi_2}\,\big|\, {\vDash s(\varphi)} \bigr\}$,
since $\sfunc{\psi_1}=\sfunc{\psi_2}$, and then
$\sem{\psi_1} = \bigl\{ s\in\sfunc{\psi_2}\,\big|\, {\vDash s(\forall x_2\forall x_1:\varphi)} \bigr\} = \sem{\psi_2}$.
\item[\eqref{equiv:forall_exists}:]
We set $\psi_1\colonequals \forall x\exists y(D_y):\varphi$ and $\psi_2\colonequals \exists y(D_y)\forall x:\varphi$.
First note that $\exists y(D_y)\forall x:\varphi$ is not well-formed according to
Definition~\ref{def:syntax} if $x\in D_y$, because $x$ is universal in $\forall x:\varphi$.
With $x\notin D_y$ we show that $\sem{\psi_1}=\sem{\psi_2}$.
We have:
$\sem{\psi_1} = \bigl\{ s\in\sfunc{\psi_1}\,\big|\,{\vDash s(\forall x\exists y(D_y):\varphi)} \bigr\}
= \bigl\{ s\in\sfunc{\psi_1}\,\big|\,{\vDash s(\varphi)} \bigr\}$.
Because $x\notin D_y$, the Skolem function candidates for $y$ in $\psi_1$ are restricted to constant functions.
The same holds for $y$ in $\psi_2$. Therefore $\sfunc{\psi_1}=\sfunc{\psi_2}$ is true. So we can write:
$\sem{\psi_1} = \bigl\{ s\in\sfunc{\psi_2}\,\big|\,{\vDash s(\varphi)} \bigr\}
= \bigl\{ s\in\sfunc{\psi_2}\,\big|\,{\vDash s(\exists y(D_y)\forall x:\varphi)}\bigr\}
= \sem{\psi_2}$. \end{description} \end{proof}
\section{Proof of Theorem~\ref{th:rules2}} \label{app:rules2}
\rulesTwo*
\begin{proof} We show equisatisfiability by proving that $\sem{\psi'} \neq \emptyset$ implies $\sem{\psi} \neq \emptyset$ and vice versa. First assume that there is a Skolem function $s'\in\sem{\psi'}$ with $\vDash s'(\psi')$. We define $s \in \sfunc{\psi}$ by $s(v) \colonequals s'(v)$ for all $v \in (\varex[\psi'] \cup \varfree[\psi']) \setminus \{y\}$
and $s(y) \colonequals s'(\varphi[\sfrac{1}{y}])$. Since $\varphi$ contains only variables from $D_y \cup \varfree[\psi] \cup \{v \in \varex[\psi] \; | \; D_v \subseteq D_y\}$, $\support\bigl(s(y)\bigr) = \support\bigl(s'(\varphi[\sfrac{1}{y}])\bigr) \subseteq D_y$, \ie $s \in \sfunc{\psi}$. By definition of $s(y)$, $s(\psi)$ is the same as $s'(\psi'')$ where $\psi''$ results from $\psi$ by replacing the subformula $\varphi$ by $\varphi[\sfrac{\varphi[\sfrac{1}{y}]}{y}]$. According to \cite{Jiang09}, quantifier elimination can be done by composition as well and $\varphi[\sfrac{\varphi[\sfrac{1}{y}]}{y}]$ is equivalent to $\varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]$, \ie $s(\psi) = s'(\psi')$ and thus $\vDash s(\psi)$.
Now assume $s \in \sem{\psi}$ with $\vDash s(\psi)$. Consider $s'$ which results from $s$ by removing $y$ from the domain of $s$. Then $s'(\varphi)$ can be regarded as a Boolean function depending on $D_y \cup \{y\}$. $s(\varphi) = s'(\varphi)[\sfrac{s(y)}{y}]$ is a function which (1) does not depend on $y$ and which (2) has the property that for each assignment $\mu$ to the variables from $D_y \cup \{y\}$ $\mu\bigl(s'(\varphi)[\sfrac{s(y)}{y}]\bigr) = \mu\bigl(s'(\varphi)\bigr)$ or $\mu'\bigl(s'(\varphi)[\sfrac{s(y)}{y}]\bigr) = \mu'\bigl(s'(\varphi)\bigr)$ with $\mu'$ resulting from $\mu$ by flipping the assignment to $y$. $s'(\varphi)[\sfrac{s'(\varphi)[\sfrac{1}{y}]}{y}]$ which corresponds to the existential quantification of $y$ in $s'(\varphi)$ is \emph{the largest function} fulfilling (1) and (2), \ie $s(\varphi) \leq s'(\varphi)[\sfrac{s'(\varphi)[\sfrac{1}{y}]}{y}]$. We derive $s''$ from $s$ by replacing $s(y)$ by $s'(\varphi)[\sfrac{1}{y}]$ and obtain also $s(\psi) \leq s''(\psi)$, since $\psi$ is in NNF, \ie contains negations only at the inputs, thus $\varphi$ is not in the scope of any negation in $\psi$, but only in the scope of conjunctions and disjunctions which are monotonic functions. Thus $\vDash s(\psi)$ implies $\vDash s''(\psi)$. Again, due to the equivalence of $\varphi[\sfrac{\varphi[\sfrac{1}{y}]}{y}]$ and $\varphi[\sfrac{0}{y}] \lor \varphi[\sfrac{1}{y}]$, we conclude $s''(\psi) = s'(\psi')$ and thus $\vDash s'(\psi')$. \end{proof}
\section{Proof of Theorem~\ref{th:forall_and}} \label{app:forall_and}
\forallAnd*
\begin{proof} First, we assume that $\sem{\psi}\neq\emptyset$ and let $t\in\sem{\psi}$, \ie $t(\psi)$ is a tautology. For $\psi'$ we construct a Skolem function $t'$ by $t'(v) \colonequals t(v)[\sfrac{x'}{x}]$ for all $v\in\varex[\varphi_2]$, $t'(v) \colonequals t(v)$ otherwise. It is easy to see that $t'$ is a Skolem function candidate for $\psi'$.
Now assume that $t'(\psi')$ is not a tautology, \ie there is an assignment $\mu' \in \assign(\varall[\psi'])$ with $\mu'\bigl(t'(\psi')\bigr) = 0$. Since $\mu'\bigl(t(\psi)\bigr) = 1$, we have $\mu'\bigl(t(\psi_1)\bigr) = 1$ and $\mu'\bigl(t'(\psi_2)\bigr) = 0$ according to Lemma~\ref{lemma:monotonic}. With $\psi_2 = \bigl(\forall x:\varphi_1\bigr)\land\bigl(\forall x':\varphi_2[\sfrac{x'}{x}]\bigr)$ we obtain $\mu'\bigl(t'(\forall x:\varphi_1)\bigr) = \mu'\bigl(t'(\varphi_1)\bigr) = 0$ or $\mu'\bigl(t'(\forall x':\varphi_2[\sfrac{x'}{x}])\bigr) = \mu'\bigl(t'(\varphi_2[\sfrac{x'}{x}])\bigr) = 0$. In the first case we have $\mu'\bigl(t(\varphi_1)\bigr) = \mu'\bigl(t'(\varphi_1)\bigr) = 0$ which contradicts $\mu'\bigl(t(\psi_1)\bigr) = \mu'\bigl(t(\varphi_1\land\varphi_2)\bigr) = \mu'\bigl(t(\varphi_1)\bigr) \land \mu'\bigl(t(\varphi_2)\bigr) = 1$. In the second case we define $\mu \in \assign(\varall[\psi])$ by $\mu(v) = \mu'(v)$ for all $v \in \varall[\psi] \setminus \{x\}$ and $\mu(x) = \mu'(x')$. In this case we obtain $\mu\bigl(t(\varphi_2)\bigr) = \mu'\bigl(t'(\varphi_2[\sfrac{x'}{x}])\bigr) = 0$. Thus we obtain $\mu\bigl(t(\forall x:(\varphi_1\land\varphi_2))\bigr) = \mu\bigl(t(\varphi_1) \land t(\varphi_2)\bigr) = \mu\bigl(t(\varphi_1)\bigr) \land \mu\bigl(t(\varphi_2)\bigr) = 0$. Since $\psi$ and $\psi'$ only differ in the $\psi_1$-/$\psi_2$-part and the only occurrences of $x$ in $t(\psi)$ are in $t(\psi_1)$, this leads to $\mu\bigl(t(\psi)\bigr) = \mu'\bigl(t'(\psi')\bigr) = 0$, which is a contradiction to the fact that $t(\psi)$ is a tautology. Thus, $t'(\psi')$ has to be a tautology as well and $\sem{\psi'}\neq\emptyset$.
For the opposite direction we assume that $\sem{\psi'}\neq\emptyset$ and let $t'\in\sem{\psi'}$. We obtain $\sem{\psi}\neq\emptyset$ with similar arguments: We construct a Skolem function $t$ for $\psi$ as follows: $t(v) = t'(v)[\sfrac{x}{x'}]$ for $v\in\varex[{\varphi_2}]$ and $t(v) = t'(v)$ otherwise. Assume that $t(\psi)$ is not a tautology, \ie there is an assignment $\mu \in \assign(\varall[\psi])$ with $\mu\bigl(t(\psi)\bigr) = 0$ and define $\mu' \in \assign(\varall[\psi'])$ by $\mu'(v) \colonequals \mu(v)$ for all $v \in \varall[\psi]$ as well as $\mu'(x') \colonequals \mu(x)$. $\mu'\bigl(t'(\psi')\bigr) = 1$ because $t'(\psi')$ is a tautology. We obtain $\mu'\bigl(t'(\psi_2)\bigr) = 1$ and $\mu'\bigl(t(\psi_1)\bigr) = \mu\bigl(t(\psi_1)\bigr) = 0$ as above by Lemma~\ref{lemma:monotonic}. From $\mu\bigl(t(\psi_1)\bigr) = 0$ we conclude $\mu\bigl(t(\varphi_1)\bigr) = 0$ or $\mu\bigl(t(\varphi_2)\bigr) = 0$. Since $\mu'\bigl(t'(\varphi_1)\bigr) = \mu\bigl(t(\varphi_1)\bigr)$ and $\mu'\bigl(t'(\varphi_2[\sfrac{x'}{x}])\bigr) = \mu\bigl(t(\varphi_2)\bigr)$, this implies $\mu'\bigl(t'((\forall x:\varphi_1)\land(\forall x':\varphi_2[\sfrac{x'}{x}]))\bigr) = 0$ which contradicts $\mu'\bigl(t'(\psi_2)\bigr) = 1$ derived above. Thus, $t(\psi)$ is a tautology and $\sem{\psi}\neq\emptyset$. \end{proof}
\end{document}
\begin{document}
\title{Hybrid Bounds on Twisted L-Functions Associated to Modular Forms} \author{Chan Ieong Kuan} \date{\today} \begin{abstract}
For $f$ a primitive holomorphic cusp form of even weight $k \geq 4$, level $N$, and $\chi$ a Dirichlet character mod $Q$ with $(Q,N) = 1$, we establish the following hybrid subconvex bound for $t \in \mathbb{R}$:
\[ L(\half + it, f_\chi) \ll Q^{\frac{3}{8} + \varepsilon} (1+|t|)^{\frac{1}{3-2\theta} + \varepsilon} \] where $\theta$ is the best bound toward the Ramanujan-Petersson conjecture at the infinite place. The implied constant only depends on $f$ and $\varepsilon$. This is done via amplification and taking advantage of a shifted convolution sum of two variables as defined and analyzed in \cite{Jeff}.
\end{abstract} \maketitle
\section{Introduction} \label{sec:intro}
\subsection{Hybrid Bounds} The growth of $L$-functions on the critical line $\operatorname{Re} s = \half$ has been one of the most studied problems in analytic number theory. This paper is concerned with $L$-functions of a holomorphic cusp form $f$, twisted by a character $\chi$ of conductor $Q$. By using functional equation and Phragm\'en-Lindel\"of principle, one can obtain the convexity bound
\[ L(\half+it, f_\chi) \ll (Q(1+|t|))^{\half+\varepsilon}, \] where we suppress the level and weight aspects here.
Throughout the years, there have been many attempts at lowering the exponents, most of which have focused on one chosen aspect. Since our result concerns $Q$- and $t$-aspects, we will state some known results in these directions.
In the $t$-aspect, Good showed in \cite{good1983square} that for $f$ a holomorphic cusp form of the full modular group,
\[ L(\half+it,f) \ll (1+|t|)^{\frac{1}{3} + \varepsilon} \] Meurman showed the same result for $f$ Maass forms of full modular group in \cite{meurman1990}. For number fields, subconvexity results were proved in Petridis and Sarnak \cite{PetridisSarnak2001} and Diaconu and Garrett \cite{Diaconu2010}.
In the $Q$-aspect, the first subconvexity result was obtained by Duke, Friedlander and Iwaniec \cite{Duke1993} for holomorphic cusp forms of full level. Later, Bykovskii showed in \cite{bykovskii1998trace} that for general level, \[ L(\half+it, f_\chi) \ll_t Q^{\frac{3}{8} + \varepsilon}, \]
with a polynomial dependence in $(1+|t|)$, provided that the nebentypus of $f$ is trivial. This same bound without the nebentypus restriction is obtained in Hoffstein and Hulse \cite{Jeff}, and Blomer and Harcos \cite{blomer2008hybrid}. In \cite{blomer2008hybrid}, $f$ can also be taken as a Maass form.
Hybrid bounds in $Q$- and $t$-aspects have been worked on by Blomer and Harcos in \cite{blomer2008hybrid}, Munshi \cite{munshi2012circle} and Han Wu \cite{Wu2012Burgess}, which, following the method of Michel and Venkatesh, uses amplification. The bound obtained is:
\[ L(\half+it,f_\chi) \ll (Q(1+|t|))^{\frac{3}{8} + \frac{\theta}{4} + \varepsilon}, \] where no complementary series with parameter $>\theta$ appears as a component of a cuspidal automorphic representation of $GL_2(\mathbb{A})$.
One thing to note is that these hybrid bounds do not reach the best known exponents in the $t$-aspect. In this work, we partially resolve this situation by proving the following result. \begin{theorem} \label{thm_main} For $f$ a primitive holomorphic cusp form of even weight $k \geq 4$, level $N$, and $\chi$ a Dirichlet character mod $Q$, where $(Q,N) = 1$, we have
\[ L(\half + it, f_\chi) \ll (1+|t|)^{\frac{1}{3-2\theta} + \varepsilon} Q^{\frac{3}{8}+\frac{\theta}{4} + \varepsilon}, \] where $\theta$ is a bound toward the Ramanujan-Petersson conjecture. \end{theorem} \begin{remark}
A bound of $\theta$ for congruence subgroups of $\operatorname{SL}_2(\mathbb{Z})$ is $\frac{7}{64}$ by Kim and Sarnak \cite{Kim:2003aa}. It should be noted that our theorem does not currently cover the case of Maass forms as the corresponding shifted convolution is not analyzed yet. \end{remark} Our work also uses amplification. The major difference between this work and \cite{Wu2012Burgess} is that we treat non-Archimedean and Archimedean places differently while Wu treated them uniformly. As such, we need more precise control on the $t$-aspect, which is achieved by relating the problem to the shifted convolution sum of two variables analyzed in \cite{Jeff}.
\subsection{Structure of this paper}
Our goal is to bound $L(\half + it, f_\chi)$ in the $Q$ and $t$-aspects. First, we quote relevant results from \cite{Jeff} in section \ref{sec:prep}. We then apply amplification methods in section \ref{sec:amp}, reducing the problem to understanding the growth in $Q$ and $t$ of the following expression, where $G$ and $\mathcal{L}$ are amplification parameters and $\alpha = \frac{1}{\log (Q(1+|t|))}$. \begin{align*} &\varphi(Q) G \sum_{\substack{l_1,l_2 \sim \mathcal{L} \\ l_1,l_2 \text{ prime}}} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{m_1,m_2 \geq 1 \\ m_1l_1 \equiv m_2l_2 (Q)}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0]\\
&\phantom{\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \times V \left( \frac{m_1}{x} \right) V \left( \frac{m_2}{x} \right) e^{-G \left| \log \left(\tfrac{l_1m_1}{l_2m_2} \right) \right|} \end{align*} We then separate the analysis of this expression into the ``diagonal" portion ($m_1l_1 = m_2l_2$) and two ``off-diagonal" portions ($m_1l_1 = m_2l_2 + h_0Q$ and $m_2l_2 = m_1l_1 + h_1Q$, for $h_0, h_1 \geq 1$).
In section \ref{sec:diag}, we analyze the diagonal term with inverse Mellin transforms (propositions \ref{eq_S_d1_prop} and \ref{eq_S_d2_propo}).
For the off-diagonal portions, the first thing to note is they have the same contribution up to conjugation. Our analysis relies heavily on the shifted convolution sum of two variables $Z_Q(s,w)$ from \cite{Jeff}. By inverse Mellin transforms, we relate the off-diagonal term to a four-fold integral involving $Z_Q(s,w)$. This is done in section \ref{sec:off-d-setup} (proposition \ref{eq_S_o1_int}).
The analysis of the off-diagonal then splits into the discrete part and the continuous part, due to the fact that such a splitting exists for $Z_Q(s,w)$. The analysis of each part is done by moving lines of integration, with the primary goal of reducing the $x$-exponent as much as possible, and a secondary goal of reducing contribution of the $t$-aspect where possible. The results can be found in propositions \ref{eq_S_o1d_Zres_propo}, \ref{eq_S_o1d_Z_sp2}, and \ref{propo_total_cts}.
In the last section, we put the results of the previous sections together. Choosing $\mathcal{L}$ and $G$ optimally yields the theorem.
\section{Preparations}\label{sec:prep} Throughout this paper, fix a holomorphic cusp form $f$ of even weight $k \geq 4$, level $N$: \[ f(z) = \sumkinf{n}{1} A(n) n^{\frac{k-1}{2}} e(nz) = \sumkinf{n}{1} a(n) e(nz), e(z) = e^{2\pi iz} \]
\subsection{Shifted convolution of two variables} The most crucial object of this paper is the shifted convolution of two variables $Z_Q(s,w)$, analyzed in \cite{Jeff}. We quote several of the results here for convenience. Fix $\ell_1,\ell_2$ be primes relatively prime to $NQ$ and of size $\mathcal{L}$. The definition of $Z_Q(s,w)$ is as follows: \begin{equation} \label{eq_Zq_defn2}
Z_Q(s,w) := \sum_{\substack{h_0,m_2 \geq 1 \\ m_1l_1 = m_2l_2 + h_0Q}} \frac{A(m_1) \conj{A(m_2)} \left( 1+\frac{h_0Q}{l_2m_2} \right)^{\frac{k-1}{2}}}{(l_2m_2)^s (h_0Q)^{w + \frac{k-1}{2}}} \end{equation} In \cite{Jeff}, it is shown that \[ \lim_{\delta \to 0} Z_Q(s,u;\delta) = Z_Q(s,u) \] and that $Z_Q(s,u;\delta)$ has the following spectral expansion \begin{align}
&Z_Q(s,s'-s-\tfrac{k}{2} + 1;\delta) \notag\\ =& \frac{(4\pi)^k (l_1l_2)^{\frac{k-1}{2}} 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \Bigg( \sum_j L_Q(s',\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \notag \\
&\qquad + \mathcal{V}_{N[l_1,l_2]} \sum_{\mathfrak{a}} \invmellin{0} \zeta_{\mathfrak{a},Q}(s',-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \,d z \Bigg) \label{eq_Z_decomp} \end{align} where the $\mathfrak{a}$-sum is over cusps of $\Gamma_0(N[l_1,l_2])$ and \begin{align}
L_Q(s',\conj{u_j}) :&= \sum_{h \geq 1} \frac{\conj{\rho_j(-hQ)}}{(hQ)^{s'}}, \notag \\
\zeta_{\mathfrak{a},Q}(s',z) :&= \sum_{h \geq 1} \frac{\rho_\mathfrak{a}(-hQ,z)}{2(hQ)^{s'}}, \notag \\
U(z) :&= y^k f(l_1z) \conj{f(l_2z)}, \label{eq_L_kappa_U} \\
\mathcal{V}_{N} :&= \frac{\pi [\operatorname{SL}_2(\mathbb{Z}):\Gamma_0(N)]}{3} \notag \end{align} $\rho_j(n)$ being $n$-th Fourier coefficient of Maass form $u_j$ and $\rho_{\mathfrak{a}}(n,z)$ being $n$-th Fourier coefficient of Eisenstein series at cusp $\mathfrak{a}$ with holormophic argument at $\half + z$. $[l_1,l_2]$ denotes the least common multiple of $l_1$ and $l_2$.
In \cite{Jeff}, it is also shown that in \eqref{eq_Z_decomp}, if we are to take the limit as $\delta$ goes to $0$, we actually require $\operatorname{Re} s < \half - \frac{k}{2}$ for the sum and integral there to be absolutely convergent.
\subsection{Some useful analytic information} The properties of the $M$ and $Z$ functions that are relevant for this work are quoted in the following two propositions: \begin{proposition} \label{eq_M_s_poles} Let $z \in \mathbb{C} - \half \mathbb{Z}$. Then $M(s,\tfrac{z}{i};\delta)$ has simple poles at $s = \half \pm z - r$, for $r$ a nonnegative integer. We denote the following:
\[ \res{s=\half \pm z - r} M(s,\tfrac{z}{i}; \delta) = c_r(\pm z; \delta), \] where $c_r(\pm z,\delta)$ has the following explicit expression as $\delta \to 0$:
\[ \lim_{\delta \to 0} c_r(\pm z, \delta) = \frac{(-1)^r \sqrt{\pi} 2^{\mp z + r} \Gamma(\pm 2z - r) \Gamma(\half \mp z + r)}{r! \Gamma(\half + z) \Gamma(\half - z)} \] And we have the following values at $z = \pm \half$, as $\delta \to 0$:
\begin{align}
c_r(-\half;\delta) &\to -\frac{2^{r+\half} \sqrt{\pi}}{2(r+1)!} \text{ for } r \geq 0 \\
c_0(\half; \delta) &\to \sqrt{\frac{\pi}{2}} \\
c_r(\half; \delta) &\to \frac{2^{r-\half} \sqrt{\pi}}{2r!} \text{ for } r \geq 1
\end{align}
Also for $\operatorname{Re} (s+z) \leq \half + \max(0, |\operatorname{Re} z|)$, with $s$ and $z$ at least a distance $\varepsilon$ away from poles,
\begin{equation} \label{eq_M_delta0}
\lim_{\delta \to 0} M(s,\tfrac{z}{i}; \delta) = \frac{\sqrt{\pi} 2^{\half - s} \Gamma(s-\half+z) \Gamma(s-\half-z) \Gamma(1-s)}{\Gamma(\half+z) \Gamma(\half - z)}
\end{equation} \end{proposition}
\begin{proposition} \label{eq_Z_s_poles} $Z(s,u;\delta)$ has simple poles at $s = \half \pm it_j - r$, where $r \in \mathbb{Z}_{\geq 0}$. Taking the residue at those points and $\delta \to 0$, we have the following:
\[ \lim_{\delta \to 0} \res{s=\half + it_j - r} Z(s,s'-s-\tfrac{k}{2} + 1;\delta) = (l_1l_2)^{\frac{k-1}{2}} c_{r,j} L_Q(s'; \conj{u_j}), \] where $c_{r,j}$ has growth, when $\widetilde{T} \gg 1$,
\begin{equation} \label{eq_crj_growth}
\sum_{|t_j| \sim \widetilde{T}} |c_{r,j}|^2 e^{\pi|t_j|} \ll \log (\widetilde{T}) (l_1l_2)^{-k} \widetilde{T}^{2r+1}
\end{equation} \end{proposition}
We will also give an explicit expression for $\zeta_{\mathfrak{a},Q}(s',-z)$: \begin{proposition} \label{eq_Zeta_explicit}
For cusp $\mathfrak{a}=\frac{b}{c}$ of $\Gamma_0(N)$, we write $c = c_0 c_1$, where $(c_0, \frac{N}{c}) = 1$. Then we have the following:
\begin{align}
&\zeta_{\mathfrak{a},Q}(s',-z) \notag \\
=& \frac{\pi^{\frac{1}{2}-z}}{\Gamma(\frac{1}{2}-z)} \left( \frac{(c,\frac{N}{c})}{cN}\right)^{\frac{1}{2}-z} \left( \frac{c}{(c,\frac{N}{c})} \right)^{1-s'-z} \frac{Q^{-(s'+z)}}{\varphi((c,\frac{N}{c}))} \\
&\qquad \qquad \times \sum_{\chi ((c,\frac{N}{c}))} \frac{\conj{\chi(-Qu)} L^{((c,\frac{N}{c}))}(s'+z,\conj{\chi}) L^{(\frac{N}{c})}(s'-z,\chi)}{L^{(N)}(1-2z,\chi^2)} \sum_{n | c_1^\infty} \frac{G_n(\chi)}{n^{s'+z}} \notag \\
&\qquad \qquad \times \prod_{p | c_0} (1-p^{-(1-z-s')}\chi(p)) \prod_{p^\gamma \| Q} (\sigma_{2z}^{(\chi^2)}(p^\gamma) - \conj{\chi(p)} p^{-(s'-z)} \sigma_{2z}^{(\chi^2)}(p^{\gamma-1})) \notag
\end{align}
where $n | c_1^\infty$ means that $n$ runs over all integers such that $n | c_1^k$ for sufficiently large integer $k$,
\begin{align*}
L^{(c)}(s,\chi) &= \prod_{p \nmid c} (1-\chi(p)p^{-s})^{-1} \\
\sigma_{z}^{\chi}(n) &= \sum_{d | n} \chi(d) d^z \\
G_n(\chi) &= \sum_{\substack{a \pmod{(c,\frac{N}{c})} \\ (a,c,\frac{N}{c}) = 1}} \chi(a) e \left(\frac{a n}{(c,\frac{N}{c})} \right), e(x) = e^{2\pi ix}
\end{align*}
For $\operatorname{Re} (s'+z) > 0$ and $\operatorname{Re} (s'-z) > 0$, the only poles come from the trivial character term. \end{proposition}
\subsection{Some miscellaneous estimates and bounds} We will also require the following estimate concerning $L$-functions: \begin{proposition}
For $\operatorname{Re} s' \geq \half$ and $\theta$ being a bound toward Ramanujan-Petersson conjecture,
\begin{equation} \label{eq_L_growth}
\sum_{|t_j| \sim \widetilde{T}} |L_Q(s', \conj{u_j})|^2 e^{-\pi |t_j|} \ll Q^{-2s' + 2\theta} \mathcal{L}^{2+\varepsilon} (1+|s'|+ \widetilde{T})^{2+\varepsilon}
\end{equation} \end{proposition}
Together with \eqref{eq_crj_growth} and the following fact
\[ \sum_{|t_j| \sim \widetilde{T}} |\langle U, u_j \rangle|^2 e^{\pi |t_j|} \ll \mathcal{L}^{-2k} \widetilde{T}^{2k} \log t,\] we have the following proposition: \begin{proposition} \label{propo_j_sums}
With the same notations as before,
\begin{align}
\sum_{|t_j| \sim \widetilde{T}} L_Q(s', \conj{u_j}) \conj{\langle U, u_j \rangle} &\ll Q^{-s'+\theta} \mathcal{L}^{1-k+\varepsilon} \widetilde{T}^{1+k+\varepsilon} \label{eq_L_inner} \\
\sum_{|t_j| \sim \widetilde{T}} L_Q(s', \conj{u_j}) c_{r,j} &\ll Q^{-s'+\theta} \mathcal{L}^{1-k+\varepsilon} \widetilde{T}^{\frac{3}{2}+r+\varepsilon} \label{eq_L_crj}
\end{align} \end{proposition} These equations can be proved by Cauchy's inequality with facts quoted before the proposition.
We will finally note some equivalent facts with Eisenstein series and a very particular functional equation involving Eisenstein series at the $0$-cusp: \begin{proposition} \label{inner_prod_props}
The inner product $\langle U, E_\mathfrak{a}(*,s) \rangle$ has the following properties:
\begin{enumerate}[leftmargin=1.5em]
\item $\displaystyle \res{s=1} \langle U, E_\mathfrak{a}(*,s) \rangle = \frac{(l_1l_2)^{-\frac{k-1}{2}}}{l_1 \mathcal{V}_{Nl_1}} \langle f,f \rangle$ if $l_1 = l_2$.
\item $\displaystyle \res{s=1} \langle U, E_\mathfrak{a}(*,s) \rangle = \frac{(l_1l_2)^{-\frac{k-1}{2}}}{l_1l_2 \mathcal{V}_{Nl_1l_2}} \langle f,f \rangle E_{l_1,l_2}(1)$ if $l_1 \neq l_2$.
\item $\displaystyle [l_1,l_2] \mathcal{V}_{N[l_1,l_2]} \int_{-T}^T \sum_{\mathfrak{a}} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-it) \conj{\langle U, E_\mathfrak{a}(*,\frac{1}{2}+it) \rangle} \,dt \ll Q^{-s'} \mathcal{L}^{1-k+\varepsilon} T^{1+k+\varepsilon}$
\end{enumerate} \end{proposition}
\begin{proposition} \label{FE_Eisen_0}
The Eisenstein series at $0$-cusp has the following functional equation:
\[ E_0(z,s) = \sum_{\mathfrak{a} = \frac{b}{c}} \frac{\sqrt{\pi} \Gamma(s-\frac{1}{2})}{\Gamma(s)} \frac{\varphi(\frac{N}{c})}{\varphi((c,\frac{N}{c}))} \left( \frac{(c,\frac{N}{c})}{N \cdot \frac{N}{c}} \right)^s \frac{\zeta^{(c)}(2s-1)}{\zeta^{(N)}(2s)} E_\mathfrak{a}(z,1-s) \] \end{proposition}
\section{Amplifying both aspects} \label{sec:amp} Our aim here is to understand the growth of $L(\half +it, f_\chi)$ in $t$- and $Q$-aspects. Since it is sufficient to prove the result for eigenforms, we assume $f$ is an eigenform. We perform our investigation by averaging around $\half + it$ for a small interval as well as applying the amplification technique.
For this, we choose a rapidly decreasing function $V: \mathbb{R} \to \mathbb{R}$ such that its Mellin transform $v(s)$ is meromorphic between $-5 < \operatorname{Re} s < 5$. Moreover, $v(s)$ should only have a simple pole at $s=0$ with residue $1$ and exponential decay in $\operatorname{Im} s$ as $\operatorname{Im} s \to \infty$. An example of this is $v(s) = \frac{1}{5} \Gamma \left(\frac{s}{5} \right)$. Specifying $v(s)$ is enough, as: \begin{equation}
V(x) = \invmellin{2} v(s) x^{-s} \,d s \label{eq_easy_mellin} \end{equation}
We start by writing the $L$-function as a rapidly converging series: \begin{lemma} \label{lemma_approx}
As $x \to \infty$,
\begin{equation} \label{eq_approx}
L\left( \half + it, f_\chi \right) = \sum_{n \geq 1} \frac{A(n)\chi(n)}{n^{\half + it}} V \left( \frac{n}{x} \right) + O(x^{-\varepsilon}).
\end{equation} \end{lemma}
\begin{proof}
Consider the following inverse Mellin transform:
\begin{equation}
I_0 := \invmellin{2} L \left( \half + it + s, f_\chi \right) v(s) x^s \,d s
\end{equation}
On the one hand, since the argument of $L$-function in $I_0$ is in the region of absolute convergence, we have:
\begin{align*}
I_0= \invmellin{2} \sum_{n \geq 1} \frac{A(n) \chi(n)}{n^{\half + s + it}} v(s) x^s \,d s = \sum_{n \geq 1} \frac{A(n) \chi(n)}{n^{\half + it}} V \left( \frac{n}{x} \right)
\end{align*}
On the other hand, we can move the line of integration to $-\half - \varepsilon$, picking up the only simple pole at $s = 0$ and obtain:
\begin{equation*}
I_0 = L\left(\half + it, f_\chi \right) + O(x^{-\varepsilon})
\end{equation*}
Putting the two equivalent expressions of $I_0$ together proves the lemma.
\end{proof}
Our aim here is to get the bound on $L(\half + it, f_\chi)$. To this end, we first amplify the character and obtain: \begin{equation} \label{eq_most_important}
|L(\half + it, f_\chi)|^2 |\sum_{l \sim \mathcal{L}} 1|^2 \leq \sum_{\psi (Q)} |L(\half+it,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)}\psi(l)|^2 \end{equation} where the first summation runs over all Dirichlet characters $\psi$ mod $Q$ and the $l$-sums are running over primes that are relatively prime to $QN$. The parameter $\mathcal{L}$ is to be chosen optimally later, subject to $\mathcal{L} < Q$.
Next we perform amplification on the $t$-aspect, with modified ideas based upon \cite{Hansen}. The result is the following: \begin{lemma} \label{lemma_one_use} \begin{align}
&|L(\half+it,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \notag \\
\ll & \log^{4}(Q(1+|t|)) \!\!\!\!\! \int \limits_{|r| \leq A} \!\!\!\!\! \inttoinf{-\infty} \!\!\!\! |L(\half+i(t+r+r'),f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir'} |^2 \,\frac{d r' d r}{\pi (1+\left( \frac{r'}{G} \right)^2)} \notag \\
& \qquad \qquad + O(\log^3 (Q(1+|t|))) + O(|\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2), \label{eq_amp_t_single} \end{align}
where $A := \sqrt{10 \log (Q(1+|t|))}$, $\alpha := \frac{1}{\log (Q(1+|t|))}$ and $G \geq 2A$. \end{lemma} \begin{remark}
In the proof, we will see that the introduction of $G$ into the integral is via the positivity of the integrand. This leads to the desire to minimize $G$ subject to the constraint above. \end{remark}
\begin{proof} The proof relies on estimating $L(\sigma + it, f_\psi)$ by averaging $L(\sigma - \alpha + ir, f_\psi)$ over $r$ in a small interval centered around $t$. Each integral expression defined below is essentially illustrating this fact.
First we will show that $L(\half + it, f_\psi)$ is approximable by averaging the $L$-function over a small interval. To this end, consider the following integral: \begin{equation} \label{eq_I1_defn}
I_1 := \invmellin{2} L(\half + it + s, f_\psi) \frac{e^{s^2}}{s} \,d s \end{equation} On the one hand, $I_1$ is $O(1)$ by bounding the $L$-function by a constant. On the other hand, if we move the line of integration down to $\operatorname{Re} s = -\alpha$, then we have: \[ I_1 = L(\half+it, f_\psi) + \invmellin{-\alpha} L(\half+it+s,f_\psi) \frac{e^{s^2}}{s} \,d s. \] When put together with \eqref{eq_I1_defn}, we derive that \[ L(\half+it,f_\psi) = O(1) + \frac{1}{2\pi} \inttoinf{-\infty} L(\half-\alpha+it+ir, f_\psi) \frac{e^{(-\alpha+ir)^2}}{\alpha-ir} \,d r. \] After taking absolute values and squaring both sides, one gets: \begin{equation} \label{eq_critline}
| L(\half+it,f_\psi) |^2 \ll \left( \inttoinf{-\infty} | L(\half-\alpha+it+ir, f_\psi)| \frac{e^{\alpha^2-r^2}}{\sqrt{\alpha^2 + r^2}} \,d r \right)^2 + O(1) \end{equation}
To continue our investigation, we will split the integral into two parts, $|r| \leq A$ and $|r| > A$, where $A = \sqrt{10 \log (Q(1+|t|))}$.
We start by examining the part of the integral with $|r| > A$, applying convexity for the $L$-function: \begin{align*}
\int_{|r| > A} |L(\half-\alpha+it+ir, f_\psi)| \frac{e^{\alpha^2 - r^2}}{\sqrt{\alpha^2 + r^2}} \,d r &\ll \int_{|r| > A} (Q|t+r|)^{\half+\alpha} \frac{e^{\alpha^2 - r^2}}{r} \,d r \\
& \ll (Q(1+|t|))^{\half + \alpha} e^{\alpha^2 - A^2} \ll 1 \end{align*}
For the part $|r| \leq A$, we apply Cauchy's inequality and functional equation: \begin{align*}
&\left( \int_{|r| \leq A} |L(\half-\alpha+it+ir, f_\psi)| \frac{e^{\alpha^2 - r^2}}{\sqrt{\alpha^2 + r^2}} \,d r \right)^2 \\
\leq &\int_{|r| \leq A} |L(\half-\alpha+it+ir, f_\psi)|^2 \,d r \cdot \int_{|r| \leq A} \frac{e^{2\alpha^2 - 2r^2}}{\alpha^2 + r^2} \,d r \\
\ll &\alpha^{-2} (Q(1+|t|))^{4\alpha} \int_{|r| \leq A} |L(\half+\alpha+it+ir, f_\psi)|^2 \,d r \end{align*}
Putting these into \eqref{eq_critline}, recalling $A = \sqrt{10 \log (Q(1+|t|))}$ and $\alpha = \frac{1}{\log (Q(1+|t|))}$, we get \begin{align}
|L(\half+it,f_\psi)|^2 &\ll \log^{2}(Q(1+|t|)) \int_{|r| \leq A} |L(\half+\alpha + i(t+r) ,f_\psi)|^2 \,d r + O(1) \notag \end{align}
We will multiply both sides by $|\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2$, obtaining: \begin{align}
&|L(\half+it,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \notag \\
\ll &\log^{2}(Q(1+|t|)) \!\!\!\! \int \limits_{|r| \leq A} \!\!\!\! |L(\half+\alpha + i(t+r) ,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \,d r \!+\! O(|\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2) \label{eq_critline_1} \end{align}
Now, we approximate $L(\half + \alpha + it_2, f_\psi) \sum_{l \sim \mathcal{L}} \psi(l) \conj{\chi(l)}$ by an integral over a small interval on the critical line. To achieve this, we construct the following auxiliary integral: \begin{equation} \label{eq_I2_defn}
I_2 := \invmellin{2} L(\half + \alpha + it_2 + s, f_\psi) \left(\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{-s} \right) \frac{e^{s^2}}{s} \,d s \end{equation}
Doing the same analysis as before and combining expressions, we obtain: \begin{align}
&|L(\half+it,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \notag \\
\ll & \log^{4}(Q(1+|t|)) \int \limits_{|r| \leq A} \int \limits_{|r'| \leq A_r} |L(\half+i(t+r)+ir',f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir'} |^2 \,d r' \,d r \notag \\
&\qquad \qquad + O(\log^2 (Q(1+|t|)) A) + O(|\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2) \label{eq_final_form_1} \end{align}
where $A_r = \sqrt{10 \log(Q|t+r|)}$. Note that all inequalities are independent of $Q$, $\mathcal{L}$ and $t$, as long as $\mathcal{L} \ll Q$. Also note that we can make $A_r$ uniform by enlarging the region to $|r'| \leq \sqrt{40 \log(Q(1+|t|))} = 2A$, the inequality still holding due to positivity of the integrand.
Continuing to use the positivity of the integrand, for $G \geq 3A$, the integral is bounded as follows: { \allowdisplaybreaks \begin{align}
& \int_{|r| \leq A} \int_{|r'| \leq 2A} |L(\half+i(t+r)+ir',f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir'} |^2 \,d r' \,d r \notag \\
\ll & \int_{|r| \leq G} \! |L(\half+i(t+r),f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir} |^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \notag \\
\ll & \inttoinf{-\infty} |L(\half+it+ir,f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir} |^2 \,\frac{dr}{\pi (1+\left( \frac{r}{G} \right)^2)} \label{eq_final_form_2} \end{align} } Putting this into \eqref{eq_final_form_1} gives the proposition. \end{proof}
Putting this proposition together with \eqref{eq_most_important}, we derive the following: \begin{proposition} \label{propo_amp} With the same values of $A$, $\alpha$ and the same constraint on $G$ as in lemma \ref{lemma_one_use}, \begin{align}
&|L(\half + it, f_\chi)|^2 |\sum_{l \sim \mathcal{L}} 1|^2 \notag \\
\ll & \log^{4}(Q(1+|t|)) \inttoinf{-\infty} \sum_{\psi (Q)} |L(\half+i(t+r),f_\psi)|^2 |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l) l^{\alpha - ir} |^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \notag \\
&+ O(Q \log^3 (Q(1+|t|))) + O(Q\mathcal{L}) \label{eq_final_form_3} \end{align} \end{proposition}
\begin{proof}
The only part that requires a proof is the last error term. In particular, we should show that
\[ \sum_{\psi (Q)} |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 \ll Q\mathcal{L} \]
Starting with the left-hand side, we have:
\begin{align*}
\sum_{\psi(Q)} |\sum_{l \sim \mathcal{L}} \conj{\chi(l)} \psi(l)|^2 &= \sum_{\psi(Q)} \sum_{l_1, l_2 \sim \mathcal{L}} \conj{\chi(l_1)} \psi(l_1) \chi(l_2) \conj{\psi(l_2)} = \varphi(Q) \sum_{l \sim \mathcal{L}} 1 \ll Q\mathcal{L}
\end{align*}
The second equality is obtained by summing over the characters, which implies that $l_1 = l_2$ since $l_1 \equiv l_2 (Q)$ and $\mathcal{L} < Q$.
\end{proof}
Our next immediate goal is to execute the character sum and the $r$-integral in \eqref{eq_final_form_3}. Replacing the $L$-series with \eqref{eq_approx}, up to $O(x^{-\varepsilon})$, one obtains: \begin{equation} \label{eq_S_defn}
S := \inttoinf{-\infty} \sum_{\psi (Q)} \left| \sum_{m \geq 1} \sum_{l \sim \mathcal{L}} \frac{A(m)\psi(m)\psi(l) \conj{\chi(l)} l^{\alpha} }{m^{\half+it + ir} l^{ir}} V\left(\frac{m}{x} \right) \right|^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \end{equation} We apply Parseval here to obtain: {\allowdisplaybreaks \begin{align*}
S &= \inttoinf{-\infty} \sum_{\substack{a \text{ mod } Q \\ (a,Q) = 1}} \varphi(Q) \left| \sum_{m \geq 1} \sum_{\substack{l \sim \mathcal{L} \\ l \text{ prime} \\ ml \equiv a (Q)}} \frac{A(m) \conj{\chi(l)} l^{\alpha}}{m^{\half+it + ir} l^{ir}} V\left(\frac{m}{x} \right) \right|^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \\
&\leq \inttoinf{-\infty} \sum_{a \text{ mod } Q} \varphi(Q) \left| \sum_{m \geq 1} \sum_{\substack{l \sim \mathcal{L} \\ l \text{ prime} \\ ml \equiv a (Q)}} \frac{A(m) \conj{\chi(l)} l^{\alpha}}{m^{\half+it + ir} l^{ir}} V\left(\frac{m}{x} \right) \right|^2 \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \\
&= \varphi(Q) \inttoinf{-\infty} \sum_{m_1, m_2 \geq 1} \sum_{\substack{l_1,l_2 \sim \mathcal{L} \\ l_1,l_2 \text{ prime} \\ m_1l_1 \equiv m_2l_2 (Q)}} \frac{A(m_1)\conj{A(m_2)} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\alpha}}{m_1^{\half+it+ir} m_2^{\half-it-ir} l_1^{ir} l_2^{-ir}} \allowdisplaybreaks[0] \\
&\phantom{=\varphi(Q)} \qqquad \qqquad \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) \,\frac{d r}{\pi (1+\left( \frac{r}{G} \right)^2)} \\
&=\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{m_1,m_2 \geq 1 \\ m_1l_1 \equiv m_2l_2 (Q)}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0]\\
&\phantom{=\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right) V\left(\frac{m_2}{x} \right) e^{-G \left| \log \left(\tfrac{l_1m_1}{l_2m_2} \right) \right|} \\
&=: S_d + S_{o_1} + S_{o_2} \end{align*} } where the diagonal portion $S_d$ and the off-diagonal portions, $S_{o_1}$ and $S_{o_2}$, are defined as follows: {\allowdisplaybreaks \begin{align}
S_d :&= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{m_1,m_2 \geq 1 \\ m_1l_1 = m_2l_2}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) \label{eq_diag_defn} \\
S_{o_1} :&= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{h,m_2 \geq 1 \\ m_1l_1 = m_2l_2 + hQ}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0] \\
&\phantom{=\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) e^{-G \left| \log \left(\tfrac{l_1m_1}{l_2m_2} \right) \right|} \label{eq_o1_defn} \\
S_{o_2} :&= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{h,m_1 \geq 1 \\ m_2l_2 = m_1l_1 + hQ}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0] \\
&\phantom{=\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) e^{-G \left| \log \left(\tfrac{l_1m_1}{l_2m_2} \right) \right|} \label{eq_o2_defn} \end{align} }
\begin{remark}
$S_{o_2} = \conj{S_{o_1}}$. \end{remark} At this point, we have converted the problem into studying $S_d$ and $S_{o_1}$.
\section{The diagonal portion $S_d$} \label{sec:diag} In this section, we focus on analyzing $S_d$. The analysis breaks $S_d$ into two sums, $S_{d_1}$ corresponding to $l_1 = l_2$ and $S_{d_2}$ corresponding to $l_1 \neq l_2$: \[ S_d = S_{d_1} + S_{d_2}, \] where \begin{align}
S_{d_1} :&= \varphi(Q) G \sum_{l} l^{2\alpha} \sum_{m \geq 1} \frac{|A(m)|^2}{m} V\left(\frac{m}{x} \right)V\left(\frac{m}{x} \right) \label{eq_S_d1} \\
S_{d_2} :&= \varphi(Q) G \sum_{\substack{l_1,l_2 \\ l_1 \neq l_2}} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{m_1,m_2 \geq 1 \\ m_1l_1 = m_2l_2}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) \label{eq_d2_defn} \end{align}
\subsection{The case of $l_1 = l_2$} \label{sec:diag_ez} For $S_{d_1}$, note that the $m$-sum does not depend on $l$. The contribution of $S_{d_1}$ is as follows: \begin{proposition} \label{eq_S_d1_prop}
As $x \to \infty$,
\begin{equation}
S_{d_1} = \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \frac{(4\pi)^k}{\Gamma(k)}\langle f,f\rangle \log x + O(QG\mathcal{L}^{1 + 2\alpha}) + O(x^{-\varepsilon})
\end{equation} \end{proposition}
\begin{proof}
Applying \eqref{eq_easy_mellin} twice to \eqref{eq_S_d1}, we obtain: \begin{align}
S_{d_1} &= \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \dblinvmellin{2}{2} \sum_{m \geq 1} \frac{|A(m)|^2}{m^{1+s+w}} x^{s+w} v(s) v(w) \,d s \,d w \notag \\
&= \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \dblinvmellin{2}{4} \sum_{m \geq 1} \frac{|A(m)|^2}{m^{1+s}} x^s v(s-w)v(w) \,d s \,d w \label{eq_S_d1_int} \end{align}
Moving the line of integration of $s$ down to $\operatorname{Re} s = -\frac{1}{3} - \varepsilon$, we pick up simple poles at $s = 0, s = w$, obtaining: \begin{equation} \label{eq_S_d1_break}
S_{d_1} = \res{s=w} S_{d_1} + \res{s=0} S_{d_1} + O(x^{-\varepsilon}) \end{equation} For the residue at $s = w$, by moving the line of integration $\operatorname{Re} w = 2$ down to $-\frac{1}{3}- \varepsilon$, we have: \begin{align}
S_{d_1, \res{s=w}} &= \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \left( \frac{(4\pi)^k}{\Gamma(k)}\langle f,f\rangle \log x + O(1) + O(x^{-\frac{1}{3} - \varepsilon}) \right). \label{eq_S_d1_sw} \end{align}
We continue with the residue at $s=0$: \begin{equation} \label{eq_S_d1_s0}
S_{d_1, \res{s=0}} = \varphi(Q) G \left| \sum_l l^{2\alpha} \right| \frac{(4\pi)^k}{\Gamma(k)} \langle f,f\rangle \left( \invmellin{2} v(-w)v(w) \,d w \right) \end{equation} This is just $O(QG\mathcal{L}^{1 + 2\alpha})$, upon noting the $l$-sum is $O(\mathcal{L}^{1+2\alpha})$ and the $w$-integral above is a constant.
Now plugging \eqref{eq_S_d1_sw} and \eqref{eq_S_d1_s0} into \eqref{eq_S_d1_break}, we have the proposition once we note that the $l$-sum is $O(\mathcal{L}^{1+2\alpha})$. \end{proof}
\subsection{The case of $l_1 \neq l_2$} \label{sec:diag_nez} In $S_{d_2}$, we have the condition $m_1l_1 = m_2l_2$. When $l_1 \neq l_2$, it implies $m_1 = l_2 m$, $m_2 = l_1m$ for some positive integer $m$. Hence, we have: \begin{equation} \label{eq_S_d2}
S_{d_2} = \varphi(Q) G \sum_{\substack{l_1,l_2 \\ l_1 \neq l_2}} \frac{\conj{\chi(l_1)}\chi(l_2)}{l_2^{\half+it} l_1^{\half-it}} (l_1l_2)^{\alpha} \sum_{m \geq 1} \frac{A(l_2m)\conj{A(l_1m)}}{m} V\left(\frac{l_2m}{x} \right)V\left(\frac{l_1m}{x} \right) \end{equation} Using the same methods as in the proof of proposition \ref{eq_S_d1_prop}, we have the following: \begin{proposition} \label{eq_S_d2_propo}
As $x \to \infty$,
\begin{align}
S_{d_2} = \varphi(Q) G \sum_{\substack{l_1,l_2\\ l_1 \neq l_2}} \frac{\conj{\chi(l_1)}\chi(l_2)}{l_2^{\half+it} l_1^{\half-it}} (l_1l_2)^{\alpha} \frac{(4\pi)^k}{\Gamma(k)}\langle f,f \rangle &E_{l_1,l_2}(1) \log (x/l_2) \notag \\
&+ O(QG\mathcal{L}^{1+2\alpha+\varepsilon}) + O(x^{-\varepsilon}),
\end{align}
where $E_{l_1,l_2}(s)$ is defined as follows:
\[ E_{l_1,l_2}(s) := \left( \sum_{m \geq 1} \frac{A(l_2m)\conj{A(l_1m)}}{m^s} \right) \left( \sum_{m \geq 1} \frac{|A(m)|^2}{m^s} \right)^{-1}, \]
which is essentially a product of ratios of Euler factors at the primes $l_1$ and $l_2$. $E_{l_1,l_2}(s)$ is analytic for $\operatorname{Re} s > 0$ and is bounded independent of $l_1,l_2$ in the region. \end{proposition}
We have now obtained a complete understanding of the diagonal sum, so our next focus is to understand the off-diagonal sums $S_{o_1}$ and $S_{o_2}$. By the remark after \eqref{eq_o2_defn}, it is sufficient for us to understand $S_{o_1}$.
\section{Off-diagonal portion $S_{o_1}$, setting up the integrals} \label{sec:off-d-setup} Recall from \eqref{eq_o1_defn}, \begin{align}
S_{o_1} &= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)} \chi(l_2) \sum_{\substack{h,m_2 \geq 1 \\ m_1l_1 = m_2l_2 + hQ}} \frac{A(m_1)\conj{A(m_2)}(l_1l_2)^{\alpha}}{m_1^{\half+it} m_2^{\half-it}} \notag \allowdisplaybreaks[0] \\
&\phantom{=\varphi(Q) \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2)} \qquad \qquad \qquad \times V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) e^{-G \log \left(\tfrac{l_1m_1}{l_2m_2} \right)} \label{eq_S_o1_sum} \end{align}
We will show that this object can be converted into studying a four-fold integral involving the $Z_Q(s,w)$ function: \begin{proposition} \label{eq_S_o1_int} As $G \to \infty$, we have \begin{align}
S_{o_1} &= \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\quad \times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_3}{\gamma_4} Z_Q\left(s, s'-s-\tfrac{k}{2} + 1\right) \notag \\
&\phantom{\invmellin{c_1}} \times \betaF{s' - s + \half - \beta}{w+s+\beta + \frac{k}{2}-1-s'+it}{w+\frac{k-1}{2}+it} l_1^{w-\half} l_2^{s'-w} \notag\\
&\phantom{\invmellin{c_1}} \times x^{s' - \half} v(s'-w)v(w-\half) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s' \,d s \,d w \end{align} where $\operatorname{Re} w = \gamma_1 = 1 + 2\theta + 2\varepsilon$, $\operatorname{Re} s = \gamma_2 = \frac{3}{4}$, $\operatorname{Re} s' = \gamma_3 = \frac{5}{4}$ and $\operatorname{Re} \beta = \gamma_4 = \frac{1}{2}+\theta+\varepsilon$. \end{proposition}
\begin{remark}
As long as $G$ is chosen such that $G = (1+|t|)^{a} \log^b(Q)$ with $a > 0$ and $b > 4$, then $G \geq 3A = \sqrt{90 \log (Q(1+|t|))}$ for large $Q$ and $t$. \end{remark}
\begin{proof} We will focus on the innermost sum of $S_{o_1}$. For convenience, we define: \[ T_{o_1} := \sum_{\substack{h_0,m_2 \geq 1\\ m_1l_1 = m_2l_2 + h_0Q}} \!\!\!\!\!\! \frac{A(m_1)\conj{A(m_2)}}{m_1^{\half+it}m_2^{\half-it}} V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right) e^{- G \log \left( \frac{m_1l_1}{m_2l_2} \right)} \] Starting with the definition of $T_{o_1}$, we will substitute in $m_1l_1 = m_2l_2 + h_0Q$ in several places: \[ T_{o_1} = l_1^{\half+it} l_2^{\half-it} \sum_{m_2,h_0} \frac{A(m_1)\conj{A(m_2)}V\left(\frac{m_1}{x} \right)V\left(\frac{m_2}{x} \right)} {(m_2l_2+h_0Q)^{\half+it}(m_2l_2)^{\half-it}} e^{- G \log \left( 1 + \frac{h_0Q}{m_2l_2} \right)} \]
We apply \eqref{eq_easy_mellin} twice, resulting in: \begin{align}
T_{o_1} = l_1^{\half+it} l_2^{\half-it} &\dblinvmellin{c_1}{c_2} \! \sum_{m_2,h_0} \!\! \frac{A(m_1) \conj{A(m_2)}}{(m_2l_2+h_0Q)^{\half+w+it} (m_2l_2)^{\half + s-it}} \notag \\
&\qquad \quad \times v(s)v(w) x^{s+w} l_2^s l_1^w \left( 1 + \frac{h_0Q}{m_2l_2} \right)^{-G} \,d s \,d w \notag \end{align} We quote this identity from \cite{GradsteynRhyzik}: \begin{equation}
\invmellin{\gamma} \betaF{u}{\beta-u}{\beta} t^{-u} \,d u = (1+t)^{-\beta}, \label{eq_neg_bino} \end{equation} where $0 < \gamma < \operatorname{Re} \beta$. Manipulating the expression from before and using \eqref{eq_neg_bino}, we end up introducing the $Z_Q$ function defined above: { \allowdisplaybreaks \begin{align*}
T_{o_1} = &l_1^{\half+it} l_2^{\half-it} \triinvmellin{c_1}{c_2}{c_3} \! \sum_{m_2,h_0} \!\! \frac{A(m_1) \conj{A(m_2)} \left(1 + \frac{h_0Q}{m_2l_2}\right)^{\frac{k-1}{2}}}{(m_2l_2)^{s+w+1}} x^{s+w}\allowdisplaybreaks[0] \\
& \quad \times \left(1+\frac{h_0Q}{m_2l_2}\right)^{-(w+\frac{k}{2}+it)} \!\! v(s)v(w) l_2^s l_1^w \left( \frac{h_0Q}{ m_2l_2} \right)^{-\beta} \!\! \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d s \,d w \,d \beta \\
= &l_1^{\half+it} l_2^{\half-it} \! \quadinvmellin{c_1}{c_2}{c_3}{c_4} \!\!\!\! Z_Q\left(s+w+1-u-\beta, u+\beta-\tfrac{k-1}{2}\right) \allowdisplaybreaks[0] \\
& \times \betaF{u}{w+\frac{k}{2}-u+it}{w+\frac{k}{2}+it} v(s)v(w)x^{s+w} l_2^s l_1^w \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d u \,d s \,d w \,d \beta \end{align*} } From here, we will do a series of change of variables. First we change $u \mapsto u - \beta$: \begin{align*} =&l_1^{\half+it} l_2^{\half-it} \quadinvmellin{c'_1}{c'_2}{c'_3}{c'_4} Z_Q(s+w+1-u, u-\tfrac{k-1}{2}) x^{s+w} l_2^s l_1^w \\
&\times \betaF{u-\beta}{w+\frac{k}{2}+\beta- u+it}{w+\frac{k}{2}+it} v(s)v(w) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d u \,d s \,d w \,d \beta \end{align*} Now we do $s \mapsto s+u-1$: \begin{align*} =&l_1^{\half+it} l_2^{\half-it} \! \quadinvmellin{c''_1}{c''_2}{c''_3}{c''_4} \!\!\! Z_Q(s\!+\!w, u-\tfrac{k-1}{2}) x^{s-1+u+w} l_2^{s+u-1} l_1^w \\
&\times \betaF{u-\beta}{w+\frac{k}{2}+\beta - u+it}{w+\frac{k}{2}+it}v(s+u-1) v( w) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d u \,d s \,d w \,d \beta \end{align*} Then we will change $s \mapsto s-w, u \mapsto u+\frac{k-1}{2}$: \begin{align*}
=&l_1^{\half+it} l_2^{\half-it} \! \quadinvmellin{c'''_1}{c'''_2}{c'''_3}{c'''_4} \!\! Z_Q(s, u) x^{s+u+\frac{k-3}{2}} l_2^{s-w+u+\frac{k-3}{2}} l_1^w\\ &\times \betaF{u+\frac{k-1}{2}-\beta}{w-u+\half+\beta+it}{w+\frac{k}{2}+it}v(s-w+u+\frac{k-1}{2}-1) v( w) \\ & \times \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d u \,d s \,d w \,d \beta \end{align*} Finally we will get rid of $u$ and introduce $s' = s + u + \frac{k}{2} - 1$, while also doing $w \mapsto w-\half$: \begin{align*}
=&l_1^{\half+it} l_2^{\half-it} \! \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_3}{\gamma_4} \!\!\! Z_Q\left(s, s'-s-\tfrac{k}{2} + 1\right) x^{s' - \half} l_2^{s'-w} l_1^{w-\half} \\
&\qquad \qquad \qquad \qquad \qquad \times \betaF{s' - s + \half -\beta}{s + w+\tfrac{k}{2} - 1 - s' +\beta+it}{w+\tfrac{k-1}{2}+it}\\
&\qquad \qquad \qquad \qquad \qquad \times v(s'-w) v(w-\half) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s' \,d s \,d w \end{align*} This ends the change of variables. We can take the following values for the $\gamma_i$'s: $\operatorname{Re} s = 2$, $\operatorname{Re} s' = 2 + \frac{k}{2} + \varepsilon$, $\operatorname{Re} w = 1 + 2\theta + 2\varepsilon$ and $\operatorname{Re} \beta = 2$.
We can move $\operatorname{Re} \beta$ down to $\frac{3}{4}$ without hitting poles. Next, we can move $\operatorname{Re} s'$ to $\frac{5}{2} + \varepsilon$ without hitting poles. Now we can move $\operatorname{Re} s$ down to $\frac{3}{4}$ without picking up poles. Then we will move $\operatorname{Re} s'$ to $\frac{5}{4}$, again without poles. Finally, moving $\operatorname{Re} \beta$ to $\frac{1}{2}+\theta+\varepsilon$ does not hit any pole. This proves the proposition. \end{proof}
We will separate our analysis of $S_{o_1}$ into two parts, the first part corresponding to the discrete spectrum $S_{o_1}^d$ and the second to the continuous spectrum $S_{o_1}^c$. We will also replace $Z_Q(s,s'-s-\tfrac{k}{2} + 1)$ with $\lim_{\delta \to 0} Z_Q(s,s'-s-\tfrac{k}{2} + 1; \delta)$, taking the $\delta$ limit where it is convenient to do so. In our analysis, our top priority is to bring down the effects of $x$; the second priority is to bring down the $t$-contribution.
\section{Off-diagonal, discrete spectrum} We will be looking at the growth of the discrete terms in this section: \begin{align}
S_{o_1}^d = & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_3}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} x^{s'-\half} l_2^{s'-w} l_1^{w-\half} \notag \\
&\times v( s'-w) v( w-\half) \betaF{s' - s + \half -\beta}{s + w+\beta - s' + \tfrac{k}{2} - 1+it}{w+ \tfrac{k-1}{2}+it} \notag \\
&\times\sum_{t_j} L_Q(s',\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s' \,d s \,d w \label{eq_S_o1d_5int} \end{align}
Our goal here is twofold: to bring down the $x$-exponent as much as possible and to take the $\delta$-limit. As such, the natural thing to do is to bring $\operatorname{Re} s'$ down. Hence, we move $\operatorname{Re} s'$ down to $\operatorname{Re} s' = \frac{1}{2} - \varepsilon$, hitting simple poles at $s' = w$ and $s' = s + \beta - \frac{1}{2}$. We break our analysis into bounding these two residues and the moved integral.
\subsection{The residue at $s'=w$} In this section, we will show the following: \begin{proposition} \label{eq_S_o1d_Zres_propo}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\mathcal{L} \ll Q$,
\[ \res{s'=w} S_{o_1}^d \ll G^{1+\varepsilon} Q^{\half+\theta+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon} + x^{-\varepsilon}. \] \end{proposition}
\begin{remark} In the proof of this proposition, we also justify the choice of this condition on $G$. \end{remark}
\begin{proof} Taking the residue at $s'=w$, we have: \begin{align}
\res{s'=w} S_{o_1}^d = & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_1}{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} (xl_1)^{w-\half} \notag \\
&\times v( w-\half) \betaF{w- s + \half -\beta}{s + \beta + \tfrac{k}{2} - 1+it}{w+ \tfrac{k-1}{2}+it} \notag \\
&\times\sum_{t_j} L_Q(w,\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \,d w \label{eq_S_o1d_leadsp} \end{align} This really suggests moving $\operatorname{Re} w$ to $\frac{1}{2}-\varepsilon$, which picks up simple poles at $w=s+\beta-\frac{1}{2}$ and $w=\frac{1}{2}$.
\subsubsection{The residue at $w=s+\beta-\frac{1}{2}$} We first write down the residue: \begin{align}
&\res{w=s+\beta-\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} (xl_1)^{s+\beta-1} v( s+\beta-1) \notag \\
&\times\sum_{t_j} L_Q(s+\beta-\frac{1}{2},\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \label{eq_S_o1d_leadswp} \end{align} Here, we move $\operatorname{Re} \beta$ down to $\frac{1}{4}-\varepsilon$, passing through a simple pole at $\beta = 1-s$. The residue is: \begin{align}
&\res{\beta=1-s} \res{w=s+\beta-\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) \conj{\langle U, u_j \rangle} \notag \\
&\times \invmellin{\gamma_2} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} M(s,t_j,\delta) \frac{\Gamma(1-s)\Gamma(G-1+s)}{\Gamma(G)} \,ds \label{eq_S_o1d_lead} \end{align} Now to take the limit as $\delta \to 0$, it is necessary for us to move $\operatorname{Re} s$ to $\frac{1}{2}-\frac{k}{2}-\theta -\varepsilon$, which results in us picking up poles at $s = \frac{1}{2} \pm it_j - r$, for integers $0 \leq r \leq \frac{k}{2}$. For integer $r$, using the notation from proposition \ref{eq_Z_s_poles} the residue is: \begin{align}
&\sum_{\pm t_j} \res{s=\frac{1}{2}+it_j-r} \res{\beta=1-s} \res{w=s+\beta-\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\qquad \qquad \times \sum_{t_j} (l_1l_2)^{\frac{k-1}{2}} c_{r,j} L_Q(\frac{1}{2},\conj{u_j}) \frac{\Gamma(\frac{1}{2}-it_j+r)\Gamma(G-\frac{1}{2}-it_j+r)}{\Gamma(G)} \label{eq_S_o1d_end1} \end{align} Using the fact that $\Gamma(G-\frac{1}{2}-it_j-r) \ll \Gamma(G-\frac{1}{2}+\theta-r)$ and \eqref{eq_L_crj}, we can see that the above expression is bounded by $O(Q^{\frac{1}{2}+\theta+\varepsilon} G^{\frac{1}{2}+\theta-r+\varepsilon} \mathcal{L}^{3+2\alpha +\varepsilon})$.
For the moved integral with $\operatorname{Re} s = \gamma_2' = \frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$, we take $\delta \to 0$, obtaining: \begin{align}
&\res{\beta=1-s} \res{w=s+\beta-\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \invmellin{\gamma_2'} \frac{(4\pi)^k (l_1l_2)^{\frac{k-1}{2}}}{2\Gamma(s+k-1)} \notag \\
&\quad \times \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) \frac{\Gamma(1-s)^2\Gamma(s-\frac{1}{2}+it_j)\Gamma(s-\frac{1}{2}-it_j)}{\Gamma(\frac{1}{2}+it_j)\Gamma(\frac{1}{2}-it_j)} \conj{\langle U, u_j \rangle} \frac{\Gamma(G-1+s)}{\Gamma(G)} \,d s \label{eq_S_o1d_lead_moved} \end{align}
By using Stirling's approximation, \eqref{eq_L_inner} and splitting the integral according to the relative sizes of $|t_j|$ and $|\operatorname{Im} s|$, one can derive the bound $O(Q^{\frac{1}{2}+\theta+\varepsilon} G^{\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon} \mathcal{L}^{3+2\alpha +\varepsilon})$.
\subsubsection{Residue at $w=\frac{1}{2}$} The residue is: \begin{align}
&\res{w=\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \notag \\
&\times \betaF{1- s -\beta}{s + \beta + \tfrac{k}{2} - 1+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \label{eq_S_o1d_2ndw} \end{align} Similar as before, we need to move $\operatorname{Re} s$ to $\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$ to take the limit. In doing so, we pick up simple poles at $s = 1-\beta$, $s=1-\beta-\frac{k}{2}-it$ and $s=\frac{1}{2}\pm it_j-r$ for $0 \leq r \leq \frac{k}{2}$.
Investigating the pole at $s=1-\beta$, the residue is: \begin{align}
&\res{s=1-\beta} \res{w=\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= &- \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) \conj{\langle U, u_j \rangle} \notag \\
&\times \invmellin{\gamma_4} \frac{(4\pi)^k 2^{\half-\beta} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(k-\beta) 2\sqrt{\pi}} M(1-\beta,t_j,\delta) \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d\beta \label{eq_S_o1d_2ndws} \end{align} Changing variable $\beta = 1-s$, we see that the contour line becomes $\operatorname{Re} s = \frac{1}{2}-\theta-\varepsilon$. This is pretty much the same term as \eqref{eq_S_o1d_lead}. Thus, the same bound applies.
Next up is the residue at $s=1-\beta-\frac{k}{2}-it$, for which we also take $\delta \to 0$: \begin{align}
&\res{s=1-\beta-\frac{k}{2}-it} \res{w=\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\qquad \qquad \times \invmellin{\gamma_4} \frac{(4\pi)^k(l_1l_2)^{\frac{k-1}{2}}}{2\Gamma(\frac{k}{2}-\beta-it) } \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j})\conj{\langle U, u_j \rangle}\notag \\
&\qquad \qquad \times \frac{\Gamma(\beta+\frac{k}{2}+it)\Gamma(\frac{1}{2}-\beta-\frac{k}{2}-it+it_j)\Gamma(\frac{1}{2}-\beta-\frac{k}{2}-it-it_j)}{\Gamma(\frac{1}{2}+it_j)\Gamma(\frac{1}{2}-it_j)} \,d \beta \label{eq_S_o1d_2ndwLs} \end{align} Changing variable $\beta \mapsto \beta - it$, the expression becomes: \begin{align}
&\res{s=1-\beta-\frac{k}{2}-it} \res{w=\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\qquad \qquad \times \invmellin{\gamma_4} \frac{(4\pi)^k(l_1l_2)^{\frac{k-1}{2}}}{2\Gamma(\frac{k}{2}-\beta-it) } \frac{\Gamma(\beta-it)\Gamma(G-\beta+it)}{\Gamma(G)} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j})\conj{\langle U, u_j \rangle}\notag \\
&\qquad \qquad \times \frac{\Gamma(\beta+\frac{k}{2})\Gamma(\frac{1}{2}-\beta-\frac{k}{2}+it_j)\Gamma(\frac{1}{2}-\beta-\frac{k}{2}-it_j)}{\Gamma(\frac{1}{2}+it_j)\Gamma(\frac{1}{2}-it_j)} \,d \beta \label{eq_S_o1d_2ndwLs_break} \end{align} Note that there is enough exponential decay in $\beta$ even if we sum $t_j$ first. Hence, we can easily see that this term is bounded by $O(G^{\frac{1}{2}+\varepsilon} Q^{\frac{1}{2}+\theta+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon})$.
We now keep track of the residues at $s=\frac{1}{2}+it_j-r$: \begin{align}
&\sum_{\pm t_j} \res{s=\frac{1}{2}+it_j-r} \res{w=\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \invmellin{\gamma_4} \sum_{t_j} (l_1l_2)^{\frac{k-1}{2}} c_{r,j} L_Q(\frac{1}{2},\conj{u_j}) \notag \\
&\times \betaF{\frac{1}{2}-it_j+r -\beta}{\beta + \tfrac{k-1}{2} +it_j-r+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \label{eq_dis_res_rj} \end{align}
Upon changing variable $\beta \mapsto \beta-it_j$, the expression above is related to the upcoming lemma.
\begin{lemma} \label{propo_crazy} For $\operatorname{Re} \beta = a \geq \frac{1}{2}+r+\varepsilon$ and $\mathcal{L} \ll Q$, \begin{align}
& G \sum_{t_j} L_Q(\half,\conj{u_j}) c_{r,j} \notag \\
&\qquad \times \invmellin{a} \!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j) \Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
\ll & G^{1-a + \theta} \left( (1+|t|)^{a-r-\half+\varepsilon} + (1+|t|)^{1 + r+ \varepsilon} \right) Q^{-\half+\theta} \mathcal{L}^{1-k+\varepsilon} \label{eq_dis_res_bound} \end{align}
In fact, to minimize this, the optimal choices are $\operatorname{Re} \beta = a = \frac{3}{2} + 2r + \varepsilon$ and $G \asymp (1+|t|)^\frac{2}{3-2\theta} \log^5 Q$. With these choices, the bound is $O(G^{1+\varepsilon} Q^{-\half+\theta} \mathcal{L}^{1-k+\varepsilon})$. \end{lemma} This relates to \eqref{eq_dis_res_rj} by moving lines of integration and picking up relevant poles. We will delay the proof of this lemma to the end of this section.
Going back to the integral in \eqref{eq_dis_res_rj}, we will deal with the case $r=0$ first. We move the line of integration up to $\operatorname{Re} \beta = \frac{3}{2} + \varepsilon$, picking up a residue at $\beta = \frac{3}{2}$. Then by lemma \ref{propo_crazy} with the optimal choice of $G$, the moved integral is $O(G^{1+\varepsilon} Q^{\half+\theta} \mathcal{L}^{3 + 2\alpha + \varepsilon})$. The residue is: \begin{align}
- \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}&l_2^{-it} (\tfrac{k}{2}+it) \notag \\
&\times \sum_{t_j} L_Q(\half,\conj{u_j}) c_{0,j} \frac{\Gamma(\tfrac{3}{2}-it_j) \Gamma(G-\frac{3}{2}+it_j)}{\Gamma(G)} \notag \end{align} With the same choice of $G$ as above, this residue is also $O(G^{1+\varepsilon} Q^{\half+\theta} \mathcal{L}^{3+2\alpha+\varepsilon})$. We assume the same choice of $G$ from now on.
For $1 \leq r \leq \frac{k}{2}$, we move the line of integration in \eqref{eq_dis_res_rj} up to $\operatorname{Re} \beta = \frac{3}{2} + 2r + \varepsilon$. The moved integral is $O(G^{1+\varepsilon} Q^{\half+\theta} \mathcal{L}^{3+2\alpha+\varepsilon})$ as shown in lemma \ref{propo_crazy}. The residues for a particular $r$ are as follows: \begin{align}
&\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} L_Q(\half,\conj{u_j}) c_{r,j} \notag \\
&\times \sum_{m=0}^{r+1} \frac{(-1)^m}{m!} \frac{\Gamma(\tfrac{k}{2}+m+it)}{\Gamma(\tfrac{k}{2}+it)} \frac{\Gamma(\half+r+m-it_j)\Gamma(G-\frac{1}{2}-r-m+it_j)}{\Gamma(G)} \notag \end{align} This whole sum is $o(GQ^{\half+\theta}\mathcal{L}^{3+2\alpha+\varepsilon})$.
Last but not least, we bound the moved integral, where $\operatorname{Re} s = \gamma_2'=\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$: \begin{align}
&\res{w=\frac{1}{2}}\res{s'=w} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2'}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} \sum_{t_j} L_Q(\frac{1}{2},\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \notag \\
&\times \betaF{1- s -\beta}{s + \beta + \tfrac{k}{2} - 1+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \label{eq_dis_moved} \end{align}
For $\operatorname{Re} s < \half - \frac{k}{2}$, we note the following bound, which holds regardless of the relative sizes of $|t_j|$ and $|\operatorname{Im} s|$:
\[ \lim_{\delta \to 0} \frac{2^{s-\half}M(s,t_j,\delta)}{\Gamma(s+k-1) 2\sqrt{\pi}} \ll (1+|t_j|)^{2\operatorname{Re} s - 2} (1+|\operatorname{Im} s|)^{3-3\operatorname{Re} s - k} \] Hence, at $\operatorname{Re} s = \half - \frac{k}{2} - \theta - \varepsilon$, using the above bound and \eqref{eq_L_inner}, we have: \begin{align*}
\lim_{\delta \to 0} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} &\sum_j L_Q(\half,\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \notag \\
&\ll (1+|\operatorname{Im} s|)^{\frac{3}{2} +\frac{k}{2} + 3\theta +3\varepsilon} Q^{-\half+\theta} \mathcal{L}^{1-k+\varepsilon} \end{align*}
Using the fact noted above and the methods of proving lemma \ref{propo_crazy}, we have the following auxiliary lemma: \begin{lemma} \label{propo_crazy2}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\operatorname{Re} \beta = a = \frac{5}{2} + k + 5\theta + 2\varepsilon$, \begin{align}
&G \dblinvmellin{\gamma'_2}{a} \sum_{t_j} \lim_{\delta \to 0} \frac{(4\pi)^k 2^{s-\half}M(s,t_j,\delta)}{\Gamma(s+k-1) 2\sqrt{\pi}} L_Q(\half,\conj{u_j}) \conj{\langle U, u_j \rangle} \notag \\
& \qquad \qquad \qquad \qquad \quad \times \betaF{1 - s - \beta}{s+\beta + \tfrac{k}{2} - 1 +it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \notag \\
\ll & G^{1+\varepsilon} Q^{-\half+\theta} \mathcal{L}^{1-k+\varepsilon} \label{eq_dis_moved_bound} \end{align} \end{lemma}
Again in \eqref{eq_dis_moved}, we move $\operatorname{Re} \beta$ to $\frac{5}{2} + k + 5\theta + 2\varepsilon$, hitting poles at $\beta = 1 - s + \ell$, where $0 \leq \ell \leq \frac{k}{2} + 2$. Using the lemma above and $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$, the moved integral is $O(G^{1+\varepsilon} Q^{\half+\theta+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon})$. The residue at $\beta = 1-s + \ell$ is: \begin{align}
\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} &\conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \invmellin{\gamma'_2} \sum_j \frac{(4\pi)^k 2^{s-\half}M(s,t_j,\delta)}{\Gamma(s+k-1) 2\sqrt{\pi}} L_Q(\half,\conj{u_j}) \conj{\langle U, u_j \rangle} \notag \\
& \qquad \qquad \times \frac{(-1)^\ell}{\ell !} \frac{\Gamma(\tfrac{k}{2} + \ell +it)}{\Gamma(\tfrac{k}{2}+it)} \frac{\Gamma(1-s+\ell)\Gamma(G+s-1-\ell)}{\Gamma(G)} \,d s \notag \end{align} These residues are $o(G Q^{\half+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon} )$. Putting all these together, we see the residue at $w=\frac{1}{2}$ is bounded by $O(G Q^{\half+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon} )$.
\subsubsection{The moved integral at $w=\frac{1}{2}-\varepsilon$} We see that the $x$-exponent is negative. Moving the line of integration for $\operatorname{Re} s$ such that we can take the $\delta$-limit just as demonstrated before, we see that this term is $O(x^{-\varepsilon})$.
Putting all the cases so far together, we finish proving proposition \ref{eq_S_o1d_Zres_propo}.
\end{proof}
\subsection{Contribution from the pole $s'=s+\beta-\frac{1}{2}$} \begin{proposition} \label{eq_S_o1d_Z_sp2}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$,
\[ \res{s'=s+\beta-\frac{1}{2}} S_{o_1}^d \ll x^{-\varepsilon}. \] \end{proposition}
\begin{proof}
We start by writing down the residue at $s'=s+\beta-\frac{1}{2}$:
\begin{align}
&\res{s'=s+\beta-\frac{1}{2}} S_{o_1}^d \notag \\
= & \lim_{\delta \to 0}\, \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\half+\alpha} l_1^{it}l_2^{-it} \notag \\
&\!\!\times \triinvmellin{\gamma_1}{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half} (l_1l_2)^{\frac{k-1}{2}}}{\Gamma(s+k-1) 2\sqrt{\pi}} x^{s+\beta-1} l_2^{s+\beta-\frac{1}{2}-w} l_1^{w-\half} v( w-\tfrac{1}{2}) \notag \\
&\!\!\times v( s+\beta-\tfrac{1}{2}-w) \sum_{t_j} L_Q(s+\beta-\tfrac{1}{2},\conj{u_j}) M(s,t_j,\delta) \conj{\langle U, u_j \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \,d s \,d w \label{eq_S_o1d_sp2_first} \end{align} We move $\operatorname{Re} \beta$ down to $\frac{1}{4}-\varepsilon$, during which we do not encounter any poles. Hence, this term is $O(x^{-\varepsilon})$, after moving $\operatorname{Re} s$ to $\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$ to enable us to take $\delta \to 0$. \end{proof}
\subsection{Contribution of the moved integral at $\operatorname{Re} s' = \frac{1}{2}-\varepsilon$} We see that the $x$-exponent is negative. Moving $\operatorname{Re} s$ down to enable us to take the $\delta$-limit, this term is also $O(x^{-\varepsilon})$.
\subsection{Proof of lemma \ref{propo_crazy}} For convenience, we write $\beta = a + it_\beta$, where $a, t_\beta \in \mathbb{R}$. We will split the object to be analyzed as follows: \begin{align}
& G \sum_{t_j} L_Q(\half,\conj{u_j}) c_{r,j} \notag \\
&\qquad \times \invmellin{a} \!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
=& G \sum_{t_j} L_Q(\half,\conj{u_j}) c_{r,j} (P_1 + P_2 + P_3), \notag \end{align} where \begin{align}
P_1 &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ |t_\beta| \leq |t| - \log^4 |t|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
P_2 &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ \left| |t_\beta| - |t| \right| \leq \log^4 |t|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
P_3 &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ |t_\beta| \geq |t| + \log^4 |t|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \end{align} In each part, we seek to bound the integrand using Stirling's formula.
\subsubsection{The case where $|t_\beta| \leq |t| - \log^4 |t|$: $P_1$} The ratio of gammas is bounded by: \begin{align}
&\betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \notag \\
\ll &G^{-a+\theta} (1 + |t|)^{a-r-\half} (1+|t_\beta|)^{r-a} (1+|t_\beta - t_j|)^{a+\theta-\half} e^{-\frac{\pi}{2} |t_\beta-t_j|} \notag \end{align}
We further separate this case by the relative sizes of $|t_j|$ and $|t|$: \begin{enumerate}[leftmargin=*]
\item If $|t_j| \geq |t|$, then we can further conclude that the integrand is bounded by:
\[ (1 + |t|)^{a-r-\half} (1+|t_\beta|)^{r-a} (1+|t_j|)^{a+\theta-\half} e^{-\frac{\pi}{2} |t_j|} e^{\frac{\pi}{2} |t_\beta|} G^{-a+\theta} \]
Executing the $\beta$-integral and then summing over such $|t_j|$'s using the bound in proposition \ref{propo_j_sums}, we have that:
\[ G \sum_{|t_j| \geq |t|} L_Q(\half,\conj{u_j}) c_{r,j} P_1 \ll G^{1-a+\theta} (1 + |t|)^{a-r-\half+\varepsilon} Q^{-\half} L^{1-k+\varepsilon} \]
\item If $|t_j| \leq |t|$, then we separate the integral as follows:
\[ P_1 = P_{1,1} + P_{1,2} + P_{1,3}, \]
where
\begin{align*}
P_{1,1} &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ |t_\beta| \leq |t_j| - \log^4 |t_j|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
P_{1,2} &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ \left| |t_\beta| - |t_j| \right| \leq \log^4 |t_j|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
P_{1,3} &= \frac{1}{2\pi i} \!\!\!\!\!\!\!\! \int \limits_{\substack{\beta = a + it_\beta \\ |t_\beta| \geq |t_j| + \log^4 |t_j| \\ |t_\beta| \leq |t| - \log^4 |t|}} \!\!\!\!\!\!\!\!\!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag
\end{align*}
\begin{enumerate}[leftmargin=*]
\item The subcase $|t_\beta| \leq |t_j| - \log^4 |t_j|$ has the same effect as case 1:
\[ G \sum_{|t_j| \leq |t|} L_Q(\half,\conj{u_j}) c_{r,j} P_{1,1} \ll G^{1-a+\theta} (1 + |t|)^{a-r-\half+\varepsilon} Q^{-\half} L^{1-k+\varepsilon} \]
\item In the subcase $\left| |t_\beta| - |t_j| \right| \leq \log^4 |t_j|$, the integrand is bounded by
\[ (1 + |t|)^{a-r-\half+\varepsilon} (1+|t_\beta|)^{r-a} G^{-a+\theta}. \]
Now executing the $\beta$-integral and then summing $|t_j|$, we obtain that
\[ G \sum_{|t_j| \leq |t|} L_Q(\half,\conj{u_j}) c_{r,j} P_{1,2} \ll G^{1-a+\theta} (1 + |t|)^{1+r+\varepsilon} Q^{-\half} L^{1-k+\varepsilon} \]
\item In the last subcase, $|t_j| + \log^4 |t_j| \leq |t_\beta| \leq |t| - \log^4 |t|$, the integrand is bounded by:
\[ (1 + |t|)^{a-r-\half} (1+|t_\beta|)^{r+\theta-\half} e^{-\frac{\pi}{2} |t_\beta|} e^{\frac{\pi}{2}|t_j|} G^{-a+\theta}.\]
Now executing the $\beta$-integral and then summing $|t_j|$, we obtain that
\[ G \sum_{|t_j| \leq |t|} L_Q(\half,\conj{u_j}) c_{r,j} P_{1,3} \ll G^{1-a+\theta} (1 + |t|)^{a-r-\half+\varepsilon} Q^{-\half} L^{1-k+\varepsilon} \]
\end{enumerate} \end{enumerate} Hence, in total: \begin{align}
G \sum_{j} L_Q(\half,\conj{u_j}) &c_{r,j} P_1 \notag \\
&\ll G^{1-a+\theta} \left( (1 + |t|)^{1+r+\varepsilon} + (1+|t|)^{a-r-\half+\varepsilon} \right) Q^{-\half} L^{1-k+\varepsilon} \notag \end{align}
\subsubsection{The case where $\left| |t_\beta| - |t| \right| \leq \log^4 |t|$: $P_2$} The ratio of gammas is bounded by: \begin{align}
&\betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)}\notag \\
&\qqquad \qqquad \ll G^{-a+\theta} (1 + |t|)^{r-a-\frac{k-1}{2}} (1+|t_\beta - t_j|)^{a+\theta-\half} e^{-\frac{\pi}{2} |t_\beta-t_j|} \notag \end{align}
Separating cases by the relative sizes of $|t_j|$ and $|t|$, we can show that this term is completely overshadowed by $P_1$.
\subsubsection{The case where $|t_\beta| \geq |t| + \log^4 |t|$: $P_3$} The ratio of gammas is bounded by: \begin{align}
&\betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \notag \\
\ll &G^{-a+\theta} (1 + |t|)^{-\frac{k-1}{2}} (1+|t_\beta|)^{\frac{k}{2}-1} e^{-\pi|t_\beta|} e^{\pi|t|} (1+|t_\beta - t_j|)^{a+\theta-\half} e^{-\frac{\pi}{2} |t_\beta-t_j|} \notag \end{align}
Executing the $\beta$-integral will show that the result will have decay in $|t|$ that is faster than every polynomial. Hence, this case is also negligible.
Putting the cases together, we realize that: \begin{align}
& G \sum_{t_j} L_Q(\half,\conj{u_j}) c_{r,j} \notag \\
&\qquad \times \invmellin{a} \!\! \betaF{\half + r -\beta}{\beta+\tfrac{k-1}{2} -r+i t}{\tfrac{k}{2}+it} \frac{\Gamma(\beta-it_j)\Gamma(G-\beta+it_j)}{\Gamma(G)} \,d \beta \notag \\
\ll& G^{1-a+\theta} \left( (1 + |t|)^{1+r+\varepsilon} + (1+|t|)^{a-r-\half+\varepsilon} \right) Q^{-\half} L^{1-k+\varepsilon} \notag \end{align}
Our goal is to minimize $G$. In order to do this, we want to increase $a$, looking at the term $(1+|t|)^{1+r+\varepsilon}$, which has a fixed exponent. However, the other term dominates if $a$ is too large, with increasingly worse behavior as $a$ increases. Hence, we set the exponents of the terms to equal each other. This gives $a = \frac{3}{2} + 2r + \varepsilon$ (the $\varepsilon$ is added in to avoid the pole on the line $\operatorname{Re} \beta = \frac{3}{2} + 2r$.)
Finally, we can proceed to get a lower bound on $G$ such that the above is $O(G^{1+\varepsilon} Q^{-\half} L^{1-k+\varepsilon})$, so that the contribution in the $(1+|t|)$-aspect is at most as much as in $S_d$. It turns out that $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ when $r=0$. The $G$ required for larger values of $r$ has a smaller $(1+|t|)$-exponent. This proves the lemma.
\section{Off-diagonal, continuous spectrum} Quoting the continuous spectrum from \eqref{eq_Z_decomp}, the object to analyze here is: \begin{align}
S_{o_1}^c = &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_3}{\gamma_4} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \invmellin{0} \zeta_{\mathfrak{a},Q}(s',-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times \betaF{s' - s + \half - \beta}{s + w+ \beta - s' + \tfrac{k}{2} - 1+it}{w+ \tfrac{k-1}{2}+it} \notag \\
&\times v( s'-w) v( w-\half) x^{s'-\half} l_2^{s'-w} l_1^{w-\half} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \,d s' \,d s \,d w \label{eq_S_o1c_6int} \end{align} We will move $\operatorname{Re} z$ slightly to the right to a curve $C$, which has the property that if $z$ is any complex number between 0 and $C$, $\zeta^*(1-2z) \neq 0$.
For the most part, there will be a lot of similarities in how we analyze this 5-fold integral compared to how we analyze the discrete spectrum expression in the last section. In particular, we start by moving $\operatorname{Re} s'$ down to $\frac{1}{2}-\varepsilon$, passing through simple poles at $s'=w$, $s'=1+z$, $s'=1-z$ and $s'=s+\beta-\frac{1}{2}$. As seen in the case of the discrete spectrum, the moved integral here is $O(x^{-\varepsilon})$.
\subsection{The residue at $s'=w$} This is the pole that has most of the contribution from the continuous part. \begin{proposition} \label{propo_total_cts}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$, \begin{align}
\res{s'=w} S_{o_1}^c =&-\frac{\varphi(Q) G}{2 \Gamma(k)} \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{1}{2}+\alpha} l_1^{it}l_2^{-it} (4\pi)^k \langle f,f \rangle \log x \cdot b_{l_1,l_2} \notag \\
&+ O(G^{1+\varepsilon} Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha+\varepsilon}) + O(G^{1+\varepsilon} Q^{\half+\varepsilon} \mathcal{L}^{3 + 2\alpha + \varepsilon}) + O(x^{-\varepsilon}), \label{prop_total_cts} \end{align} where \begin{align}
b_{l_1,l_2} = \begin{cases} l_1^{-1} & \text{ if $l_1 = l_2$} \\ (l_1l_2)^{-1} E_{l_1,l_2}(1) & \text{ if $l_1 \neq l_2$} \end{cases} \label{eq_defn_b_l1l2} \end{align} \end{proposition} \begin{remark}
Note the sum of the $(\log x)$-terms above is exactly $-\half$ of the $(\log x)$-terms in $S_d$. \end{remark}
\begin{proof}
We first write down the residue at $s'=w$:
\begin{align}
\res{s'=w} S_{o_1}^c
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} M(s,\tfrac{z}{i},\delta) \betaF{w - s + \half - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{w+ \tfrac{k-1}{2}+it} \notag \\
&\times v( w-\half) (xl_1)^{w-\half} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(w,-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \,d z \,d \beta \,d s \,d w \label{eq_S_o1c_sp_lead}
\end{align}
At this point, it is natural to move $\operatorname{Re} w$ to $\frac{1}{2}-\varepsilon$, which yields residue terms at $w=1+z$, $w=1-z$, $w=s+\beta-\frac{1}{2}$ and $w=\frac{1}{2}$. The moved integral is $O(x^{-\varepsilon})$.
\subsubsection{The residue at $w=1+z$}
The residue here is:
\begin{align}
&\res{w=1+z} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\pi^{\frac{1}{2}-z}}{\Gamma(\frac{1}{2}-z)}\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a=\frac{b}{c}} \left( \frac{(c,\frac{N}{c})}{N\frac{N}{c}}\right)^{\frac{1}{2}-z} \frac{\varphi(\frac{N}{c})}{ \varphi((c,\frac{N}{c}))} \frac{\zeta(1+2z)}{\zeta^{(N)}(1-2z)} \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times \betaF{\frac{3}{2}+z - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{z+ \tfrac{k+1}{2}+it} M(s,\tfrac{z}{i},\delta) \frac{\Gamma(\beta)\Gamma(G-\beta)}{N^{\frac{1}{2}+z}\Gamma(G)}\notag \\
&\times v( \tfrac{1}{2}+z) (xl_1)^{\half+z} \prod_{p | c} (1-p^{2z}) Q^{-(1+2z)} \prod_{p^\gamma \| Q} (\sigma_{2z}(p^\gamma) - p^{-1} \sigma_{2z}(p^{\gamma-1})) \,d z \,d \beta \,d s\label{eq_S_o1c_sw_lead}
\end{align}
Applying the functional equation to $\zeta(1+2z)$ and doing some simplifications, we obtain:
\begin{align}
&\res{w=1+z} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}\pi^{\frac{1}{2}+z}Q^{-1}}{\Gamma(s+k-1) 2\sqrt{\pi}} \!\!\! \prod_{p^\gamma \| Q} \!\! (\sigma_{-2z}(p^\gamma) - p^{-1-2z} \sigma_{-2z}(p^{\gamma-1}))\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a=\frac{b}{c}}\frac{\sqrt{\pi}\Gamma(-z)}{\Gamma(\frac{1}{2}-z)} \left( \frac{(c,\frac{N}{c})}{N\frac{N}{c}}\right)^{\frac{1}{2}-z} \frac{\varphi(\frac{N}{c})}{ \varphi((c,\frac{N}{c}))} \frac{\zeta^{(c)}(-2z)}{\zeta^{(N)}(1-2z)} \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times \betaF{\frac{3}{2}+z - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{z+ \tfrac{k+1}{2}+it} M(s,\tfrac{z}{i},\delta) \frac{\Gamma(\beta)\Gamma(G-\beta)}{N^{\frac{1}{2}+z}\Gamma(G)\Gamma(\frac{1}{2}+z)}\notag \\
&\times v( \half+z) (xl_1)^{\half+z} \,d z \,d \beta \,d s\label{eq_S_o1c_sw_lead2}
\end{align}
At this point, we use the functional equation of the Eisenstein series at the $0$-cusp \eqref{FE_Eisen_0}, simplifying this further to:
\begin{align}
&\res{w=1+z} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}\pi^{\frac{1}{2}+z}Q^{-1}}{\Gamma(s+k-1) 2\sqrt{\pi}} \!\!\! \prod_{p^\gamma \| Q} \!\! (\sigma_{-2z}(p^\gamma) - p^{-1-2z} \sigma_{-2z}(p^{\gamma-1}))\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,\half-z) \rangle} \betaF{\frac{3}{2}+z - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{z+ \tfrac{k+1}{2}+it} \notag \\
&\times M(s,\tfrac{z}{i},\delta) \frac{\Gamma(\beta)\Gamma(G-\beta)}{N^{\frac{1}{2}+z}\Gamma(G)\Gamma(\frac{1}{2}+z)} v( \half+z) (xl_1)^{\half+z} \,d z \,d \beta \,d s\label{eq_S_o1c_sw_sim}
\end{align}
We now move $\operatorname{Re} z$ to $-\frac{1}{2}-\varepsilon$, picking up poles at $z=s+\beta-\frac{3}{2}$ and $z=\frac{1}{2}-s$. There is technically also a pole at $z=-\frac{1}{2}$, but its residue becomes $0$ when $\delta \to 0$. The moved integral is $O(x^{-\varepsilon})$.
We investigate the residue at $z=s+\beta-\frac{3}{2}$:
\begin{align}
&\res{z=s+\beta-\frac{3}{2}} \res{w=1+z} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \dblinvmellin{\gamma_2}{\gamma_4} \frac{(4\pi)^k 2^{s-\half}\pi^{s+\beta-1}}{\Gamma(s+k-1) 2\sqrt{\pi}} \notag \\
&\times Q^{-1} \prod_{p^\gamma \| Q} (\sigma_{3-2s-2\beta}(p^\gamma) - p^{2-2s-2\beta} \sigma_{3-2s-2\beta}(p^{\gamma-1}))M(s,\tfrac{s+\beta-\frac{3}{2}}{i},\delta)\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,2-s-\beta) \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)v(s+\beta-1)}{N^{s+\beta-1}\Gamma(G)\Gamma(s+\beta-1)} (xl_1)^{s+\beta-1} \,d \beta \,d s\label{eq_S_o1c_key}
\end{align}
We move $\operatorname{Re} \beta$ to $\frac{1}{4}-\varepsilon$, hitting a pole from the $M$-function at $\beta = 2-2s$. Again, there is technically a pole at $\beta=1-s$, but the residue vanishes upon taking $\delta \to 0$. The moved integral is again $O(x^{-\varepsilon})$. Taking $\delta \to 0$, the residue at $\beta = 2-2s$ is:
\begin{align}
&\res{\beta=2-2s} \res{z=s+\beta-\frac{3}{2}} \res{w=1+z} \res{s'=w} S_{o_1}^c \notag \\
= &\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \invmellin{\gamma_2} \frac{(4\pi)^k \pi^{1-s}}{2\Gamma(s+k-1)} \notag \\
&\times Q^{-1} \prod_{p^\gamma \| Q} (\sigma_{2s-1}(p^\gamma) - p^{2s-2} \sigma_{2s-1}(p^{\gamma-1})) \frac{\Gamma(2s-1)}{\Gamma(s)}\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,s) \rangle} \frac{\Gamma(2-2s)\Gamma(G-2+2s)v(1-s)}{N^{1-s}\Gamma(G)\Gamma(1-s)} (xl_1)^{1-s} \,d s\label{eq_S_o1c_key2}
\end{align}
We move $\operatorname{Re} s$ up to $1+\varepsilon$, encountering a double pole at $s=1$. The moved integral is $O(x^{-\varepsilon})$. The residue here is:
\begin{align}
&\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{1}{2}+\alpha} l_1^{it}l_2^{-it} \left( - \frac{(4\pi)^k}{4\Gamma(k)} \langle f, f\rangle b_{l_1,l_2} \log(xl_1) + c_1 \right),
\end{align}
where $b_{l_1,l_2}$ is as defined from \eqref{eq_defn_b_l1l2} and
\begin{align*}
c_1 = \res{s=1} \Big(& \frac{ (4\pi)^k \pi^{1-s}}{2\Gamma(s+k-1)} Q^{-1} \prod_{p^\gamma \| Q} (\sigma_{2s-1}(p^\gamma) - p^{2s-2} \sigma_{2s-1}(p^{\gamma-1})) \frac{\Gamma(2s-1)}{\Gamma(s)} \\
&\qquad \qquad \times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,s) \rangle} \frac{\Gamma(2-2s)\Gamma(G-2+2s)v(1-s)}{N^{1-s}\Gamma(G)\Gamma(1-s)} \Big)
\end{align*}
The one thing we need to know about $c_1$ is that it is $O(Q^{\varepsilon} G^{\varepsilon} \mathcal{L}^{1-k+\varepsilon})$.
Continuing the investigation of the residue terms, we look at the residue at $z=\frac{1}{2}-s$:
\begin{align}
&\res{z=\frac{1}{2}-s} \res{w=1+z} \res{s'=w} S_{o_1}^c \notag \\
= &\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2}{\gamma_4} \frac{(4\pi)^k \pi^{1-s}}{2\Gamma(s+k-1)} Q^{-1} \prod_{p^\gamma \| Q} (\sigma_{2s-1}(p^\gamma) - p^{2s-2} \sigma_{2s-1}(p^{\gamma-1}))\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \conj{ \langle U,E_0(*,s) \rangle} \betaF{2 - 2s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{ \tfrac{k}{2}+1-s+it} \notag \\
&\times \frac{\Gamma(2s-1)}{\Gamma(s)}\frac{\Gamma(\beta)\Gamma(G-\beta)}{N^{1-s}\Gamma(G)\Gamma(1-s)} v(1-s) (xl_1)^{1-s}\,d \beta \,d s\label{eq_S_o1c_sw_2nd}
\end{align}
We move $\operatorname{Re} s$ up to $1+\varepsilon$, hitting a simple pole at $s=1$. The moved integral is $O(x^{-\varepsilon})$. The residue at $s=1$ is:
\begin{align}
&\res{s=1} \res{z=\frac{1}{2}-s} \res{w=1+z} \res{s'=w} S_{o_1}^c \notag \\
= &\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{1}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \invmellin{\gamma_4} \frac{(4\pi)^k}{2\Gamma(k)} \langle f, f\rangle b_{l_1,l_2} \betaF{- \beta}{\beta+ \tfrac{k}{2} +it}{ \tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d \beta \notag \\
\ll& Q \mathcal{L}^{1+2\alpha+\varepsilon} G^{1+\varepsilon} \label{eq_S_o1c_sw_2nd_d}
\end{align}
Putting together all the cases here, we have:
\begin{lemma}
For $G \asymp (1+|t|)^\frac{2}{3-2\theta} \log^5 Q$, \begin{align}
& \res{w=1+z}\res{s' = w} S_{o_1}^c \notag \\
= &-\varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{1}{2}+\alpha} l_1^{it}l_2^{-it} (4\pi)^k \langle f,f \rangle \frac{\log x}{4\Gamma(k)} \cdot b_{l_1,l_2} \notag \\
&+ O(G^{1+\varepsilon} Q^{1+\varepsilon} L^{1+2\alpha+\varepsilon}) + O(x^{-\varepsilon}), \label{prop_w_+z} \end{align} where $b_{l_1,l_2}$ is defined as in \eqref{eq_defn_b_l1l2}. \end{lemma}
\subsubsection{Residue at $w=1-z$} Looking at the $\zeta_{\mathfrak{a},Q}(w,-z)$, it turns out that the pole only occurs at the $0$-cusp. Hence, the residue term here is:
\begin{align}
&\res{w=1-z} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}Q^{-1}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\pi^{\frac{1}{2}-z}}{\Gamma(\frac{1}{2}-z)} \left( \frac{1}{N}\right)^{\frac{1}{2}-z} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)}\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_0(*,\half+z) \rangle} \prod_{p^\gamma \| Q} (\sigma_{2z}(p^\gamma) - p^{-(1-2z)} \sigma_{2z}(p^{\gamma-1})) \notag \\
&\times \betaF{\frac{3}{2}-z - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{-z+ \tfrac{k+1}{2}+it} v( \tfrac{1}{2}-z) (xl_1)^{\half-z} \,d z \,d \beta \,d s \label{eq_S_o1c_sp_lead}
\end{align}
Moving the $z$-line of integration to $-C$, and changing variable from $z \mapsto -z$, we see that this term is exactly the same as the residue at $w=1+z$.
\subsubsection{Residue at $w=s+\beta-\frac{1}{2}$} \begin{align}
&\res{w=s+\beta-\frac{1}{2}} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)}v( s+\beta-1)\notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(s+\beta-\frac{1}{2},-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} (xl_1)^{s+\beta-1} \,d z \,d \beta \,d s \label{eq_S_o1c_sp_be} \end{align} Here we move $\operatorname{Re} \beta$ to $\frac{1}{4}-\varepsilon$, picking up a simple pole at $\beta = 1-s$. The moved integral is again $O(x^{-\varepsilon})$. The residue is: \begin{align}
&\res{\beta=1-s} \res{w=s+\beta-\frac{1}{2}} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_2}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \frac{\Gamma(1-s)\Gamma(G-1+s)}{\Gamma(G)} \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \,d z \,d s \label{eq_S_o1c_sp_be_p} \end{align} In order to take the $\delta$-limit, we move $\operatorname{Re} s$ down to $\frac{1}{2}-\frac{k}{2}-\varepsilon$, encountering poles at $s=\frac{1}{2} \pm z - r$, where $r$ is an integer such that $0 \leq r \leq \frac{k}{2}$. For such an integer $r$, taking the limit as $\delta \to 0$, we have the residue: \begin{align}
&\left( \res{s=\frac{1}{2}+z-r} + \res{s=\frac{1}{2}-z-r} \right) \res{\beta=1-s} \res{w=s+\beta-\frac{1}{2}} \res{s'=w} S_{o_1}^c \notag \\
= & \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \invmellin{C} \frac{(-1)^r (4\pi)^k}{2r!} \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times \Big( \frac{\Gamma(2z-r) \Gamma(\frac{1}{2}-z+r) \Gamma(\frac{1}{2}-z+r)\Gamma(G-\frac{1}{2}+z-r)}{\Gamma(\frac{1}{2}+z)\Gamma(\frac{1}{2}-z) \Gamma(k-\frac{1}{2}+z-r)\Gamma(G)} \notag \\
&\qquad + \frac{\Gamma(-2z-r) \Gamma(\frac{1}{2}+z+r) \Gamma(\frac{1}{2}+z+r)\Gamma(G-\frac{1}{2}-z-r)}{\Gamma(\frac{1}{2}+z)\Gamma(\frac{1}{2}-z) \Gamma(k-\frac{1}{2}-z-r)\Gamma(G)} \Big)\,d z \label{eq_S_o1c_sp_beps} \end{align} We see that there is enough exponential decay to guarantee convergence, and that this term is $O(G^{\frac{1}{2}-r+\varepsilon} Q^{\frac{1}{2}+\varepsilon} \mathcal{L}^{3+2\alpha+\varepsilon})$.
The moved integral is treated in the same way as \eqref{eq_S_o1d_lead_moved}, and can be shown to be $O(Q^{\frac{1}{2}+\varepsilon} G^{\frac{1}{2}-\frac{k}{2}-\varepsilon} \mathcal{L}^{3+2\alpha +\varepsilon})$.
\subsubsection{The residue at $w=\frac{1}{2}$} The residue is:
\begin{align}
&\res{w=\frac{1}{2}} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \triinvmellin{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}\mathcal{V}_{N[l_1,l_2]}}{\Gamma(s+k-1) 2\sqrt{\pi}} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle}\notag \\
&\times M(s,\tfrac{z}{i},\delta) \betaF{1 - s - \beta}{s + \beta+ \tfrac{k}{2} - 1+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \,d s \label{eq_S_o1c_sp_Lw}
\end{align} Now we move $\operatorname{Re} s$ down to $\frac{1}{2}-\frac{k}{2}-\theta-\varepsilon$ to enable us to take the $\delta$-limit. In doing so, we pick up poles at $s=1-\beta$, $s=1-\frac{k}{2}-\beta-it$, and $s=\frac{1}{2}\pm z -r$, where $0 \leq r \leq \frac{k}{2}$ is an integer.
The residue at $s=1-\beta$ is:
\begin{align}
&\res{s=1-\beta} \res{w=\frac{1}{2}} \res{s'=w} S_{o_1}^c \notag \\
= -&\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_4}{C} \frac{(4\pi)^k 2^{\half-\beta}}{\Gamma(k-\beta) 2\sqrt{\pi}} M(1-\beta,\tfrac{z}{i},\delta) \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \label{eq_S_o1c_sp_Lw_Hs}
\end{align}
Changing variable $\beta =1-s$, we see that the contour line becomes $\operatorname{Re} s = \frac{1}{2}-\theta-\varepsilon$. This is pretty much the same term as \eqref{eq_S_o1c_sp_be_p} and the bound there applies.
Taking the $\delta$-limit, the analysis of the residue at $s=1-\frac{k}{2}-\beta-it$ is very similar to that of \eqref{eq_S_o1d_2ndwLs}, yielding the bound $O(G^{\half+\varepsilon} Q^{\half + \varepsilon} \mathcal{L}^{3 + 2\alpha + \varepsilon})$.
The residues at $s=\frac{1}{2} \pm z -r $ are:
\begin{align}
&\left( \res{s=\frac{1}{2}+z-r} + \res{s=\frac{1}{2}-z-r} \right) \res{w=\frac{1}{2}} \res{s'=w} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \frac{(4\pi)^k }{2\sqrt{\pi}} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \dblinvmellin{\gamma_4}{C} \mathcal{V}_{N[l_1,l_2]}\sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\frac{1}{2},-z)\conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)}\notag \\
&\times \Big( \frac{c_r(z,\delta) 2^{z-r}}{\Gamma(k-\frac{1}{2}+z-r)}\betaF{\frac{1}{2}-z+r - \beta}{z-r + \beta+ \tfrac{k-1}{2}+it}{\tfrac{k}{2}+it} \notag \\
&\qquad + \frac{c_r(-z,\delta) 2^{-z-r}}{\Gamma(k-\frac{1}{2}-z-r)}\betaF{\frac{1}{2}+z+r - \beta}{-z-r + \beta+ \tfrac{k-1}{2}+it}{\tfrac{k}{2}+it} \Big) \,d z \,d \beta \label{eq_S_o1c_sp_Lws}
\end{align}
Similarly to the previous section, we can prove the following lemma as in lemma \ref{propo_crazy} with slight modifications: \begin{lemma} \label{propo_crazy3}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\operatorname{Re} \beta = a = \frac{3}{2} + 2r + \varepsilon$, \begin{align}
&\lim_{\delta \to 0} G \dblinvmellin{a}{C} \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\half,-z) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)}\notag \\
&\times \Big( \frac{2^{z-r}c_r(z,\delta)}{2\sqrt{\pi}\Gamma(k-\half + z -r)} \betaF{\half - z + r - \beta}{ \beta + \tfrac{k-1}{2} + z - r+it}{\tfrac{k}{2}+it} \notag \\
&\phantom{\times} + \frac{2^{-z-r}c_r(-z,\delta) }{2\sqrt{\pi}\Gamma(k-\half - z -r)} \betaF{\half + z + r -\beta}{ \beta + \tfrac{k-1}{2} - z - r+it}{\tfrac{k}{2}+it} \Big) \,d z \,d \beta \notag \\
\ll & G^{1+\varepsilon} \mathcal{L}^{1-k+\varepsilon} Q^{-\half + \varepsilon} \times [l_1,l_2]^{-\frac{1}{2}} \label{eq_cts_res_bound} \end{align} \end{lemma} By the same arguments that we made after proving lemma \ref{propo_crazy}, we can conclude that the double integral \eqref{eq_S_o1c_sp_Lws} is $O(G^{1+\varepsilon} Q^{\half + \varepsilon} \mathcal{L}^{2 + 2\alpha + \varepsilon})$.
For the moved integral, we estimate with the following lemma, which can be proved in the same way as lemma \ref{propo_crazy}: \begin{lemma} \label{propo_crazy4}
For $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\operatorname{Re} \beta = a = \frac{5}{2} + k + 2\varepsilon$, \begin{align}
&\lim_{\delta \to 0} G \triinvmellin{\gamma'_2}{\gamma_4}{C} \frac{(4\pi)^k \mathcal{V}_{N[l_1,l_2]} 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \notag \\
&\qquad \times \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(\half,-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\qquad \times \betaF{1 - s-\beta}{s +\beta+\tfrac{k}{2} - 1+it}{\tfrac{k}{2}+it} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \,d s \notag \\
\ll & G^{1+\varepsilon} \mathcal{L}^{1-k+\varepsilon} Q^{-\half+\varepsilon} [l_1,l_2]^{-\frac{1}{2}} \end{align} \end{lemma} By the same arguments that we made after stating lemma \ref{propo_crazy2}, we can conclude that the moved triple integral \eqref{eq_S_o1c_sp_Lw} is $O(G^{1+\varepsilon} Q^{\half+\varepsilon} \mathcal{L}^{2+2\alpha+\varepsilon})$.
All these results combined give proposition \ref{propo_total_cts}. \end{proof}
\subsection{The residue at $s'=1 \pm z$} Calculating the contribution of the residue at $s'=1+z$ is very similar to the subcase $w=1+z$ in the previous section, except that we do not have any poles coming from the $v$ functions. As such, we will omit the details and only state the result here: \begin{lemma}
For $G \asymp (1+|t|)^\frac{2}{3-2\theta} \log^5 Q$, \begin{align}
& \res{s' = 1+z} S_{o_1}^c \ll G^{1+\varepsilon} Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha+\varepsilon} + x^{-\varepsilon}, \label{prop_s_+z} \end{align} \end{lemma} The residue at $s'=1-z$ can be shown to be the same as $s'=1+z$, and hence has the same contribution as above. \subsection{The residue at $s'=s+\beta-\frac{1}{2}$} The residue is: \begin{align}
&\res{s'=s+\beta-\frac{1}{2}} S_{o_1}^c \notag \\
= &\lim_{\delta \to 0} \varphi(Q) G \sum_{l_1,l_2} \conj{\chi(l_1)}\chi(l_2) (l_1l_2)^{\frac{k}{2}+\alpha} l_1^{it}l_2^{-it} \notag \\
&\times \quadinvmellin{\gamma_1}{\gamma_2}{\gamma_4}{C} \frac{(4\pi)^k 2^{s-\half}}{\Gamma(s+k-1) 2\sqrt{\pi}} \notag \\
&\times \mathcal{V}_{N[l_1,l_2]} \sum_\mathfrak{a} \zeta_{\mathfrak{a},Q}(s+\beta-\frac{1}{2},-z) M(s,\tfrac{z}{i},\delta) \conj{ \langle U,E_\mathfrak{a}(*,\half+z) \rangle} \notag \\
&\times v( s+\beta-\frac{1}{2}-w) v( w-\half) x^{s+\beta-1} l_2^{s+\beta-\frac{1}{2}-w} l_1^{w-\half} \frac{\Gamma(\beta)\Gamma(G-\beta)}{\Gamma(G)} \,d z \,d \beta \,d s \,d w \label{eq_S_o1c_Lsp} \end{align} Moving the line of integration of $\operatorname{Re} \beta$ to $\frac{1}{4}-\varepsilon$ does not pass through any poles, and hence this is $O(x^{-\varepsilon})$.
\section{Proof of Theorem \ref{thm_main}} Putting the results of the sections together (in particular, propositions \ref{eq_S_d1_prop}, \ref{eq_S_d2_propo}, \ref{eq_S_o1d_Zres_propo}, \ref{eq_S_o1d_Z_sp2}, and \ref{propo_total_cts}), we obtain that $S$ as defined in \eqref{eq_S_defn} has the following bound: \[ S = O(G^{1+\varepsilon} Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha + \varepsilon}) + O(G^{1+\varepsilon} Q^{\half + \theta+\varepsilon} \mathcal{L}^{3+2\alpha+ \varepsilon}) + O(x^{-\varepsilon}), \]
where $G \asymp (1+|t|)^{\frac{2}{3-2\theta}} \log^5 Q$ and $\alpha = \frac{1}{\log (Q(1+|t|))}$.
Plugging this into the right-hand side of proposition \ref{propo_amp}, we have: \begin{align*}
&|L(\half + it, f_\chi)|^2 |\sum_{l \sim \mathcal{L}} 1|^2 \\
\ll & (1+|t|)^{\frac{2}{3-2\theta}+\varepsilon} \left( Q^{1+\varepsilon} \mathcal{L}^{1+2\alpha + \varepsilon} + Q^{\half +\theta+ \varepsilon} \mathcal{L}^{3+2\alpha+ \varepsilon} \right) + x^{-\varepsilon} + Q\mathcal{L} + (1+|t|)^\varepsilon Q^{1+\varepsilon} \end{align*}
Taking $x \to \infty$, we can drop the $x$-term above. Note that \[ \sum_{\substack{l \sim \mathcal{L} \\ l \text{ prime} \\ (l,QN) = 1}} 1 \asymp \frac{\mathcal{L}}{\log \mathcal{L}}. \]
Hence, we can conclude that: \begin{align*}
|L(\half + it, f_\chi)|^2 \ll &(1+|t|)^{\frac{2}{3-2\theta}+\varepsilon} \left( Q^{1+\varepsilon} \mathcal{L}^{-1+2\alpha + \varepsilon} + Q^{\half +\theta+ \varepsilon} \mathcal{L}^{1+2\alpha+ \varepsilon} \right) \end{align*}
In order to balance the effects of the two terms, we set $\mathcal{L} = Q^{\frac{1}{4} - \frac{\theta}{2} + \varepsilon}$. Recalling $\alpha = \frac{1}{\log (Q(1+|t|))}$, we have:
\[ |L(\half + it, f_\chi)|^2 \ll (1+|t|)^{\frac{2}{3-2\theta}+\varepsilon} Q^{\frac{3}{4}+\frac{\theta}{2}+\varepsilon} \]
Taking the square root of the above yields the theorem.
\end{document} | arXiv | {
"id": "1311.1826.tex",
"language_detection_score": 0.42534804344177246,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Test of Causal Non-Linear Quantum Mechanics by Ramsey Interferometry on the vibrational mode of a trapped ion}
\author{Joseph Broz} \affiliation{
Department of Physics, University of California, Berkeley, California 94720, USA } \affiliation{Challenge Institute for Quantum Computation, University of California, Berkeley, CA 94720} \author{Bingran You} \affiliation{
Department of Physics, University of California, Berkeley, California 94720, USA } \affiliation{Challenge Institute for Quantum Computation, University of California, Berkeley, CA 94720} \author{Sumanta Khan} \affiliation{
Department of Physics, University of California, Berkeley, California 94720, USA } \affiliation{Challenge Institute for Quantum Computation, University of California, Berkeley, CA 94720} \author{Hartmut Häffner} \affiliation{
Department of Physics, University of California, Berkeley, California 94720, USA } \affiliation{Challenge Institute for Quantum Computation, University of California, Berkeley, CA 94720}
\author{David E. Kaplan and Surjeet Rajendran} \affiliation{
Department of Physics and Astronomy, The Johns Hopkins University, Baltimore, Maryland 21218, USA }
\date{\today}
\begin{abstract} Kaplan and Rajendran have recently demonstrated that non-linear and state-dependent terms can be consistently added to quantum field theory to yield causal non-linear time evolution in quantum mechanics. Causal non-linear theories have the unavoidable feature that their quantum effects are dramatically sensitive to the full physical spread of the quantum state of the system. As a result, such theories are not well tested by conventional atomic and nuclear spectroscopy. By using a well-controlled superposition of vibrational modes of a $^{40}$Ca$^+$ ion trapped in a harmonic potential, we set a stringent limit of $5.4\times10^{-12}$ on the magnitude of the unitless scaling factor $\tilde{\epsilon}_{\gamma}$ of the predicted causal, non-linear perturbation. \end{abstract}
\maketitle
\textcolor{purple}{\textit{Introduction.}}--A basic feature of quantum mechanics (QM) is that the time evolution of the wave function is described by a linear equation of motion. This is treated as an axiom in deriving many elementary results, like conservation of probability or the no-cloning theorem \cite{Wigner1939OnUR, Wootters1982Oct}. But, interestingly, there is no proof that linearity is necessary to obtain a physically consistent theory, raising the possibility that QM evolution could be non-linear.
This is evidenced by a number of successful non-linear generalizations of QM (NLQM) for {\it single} particles that have been independently fleshed out in the literature \cite{Bialynicki-Birula1976Sep, Weinberg1989Jan, Weinberg1989Sep, doebner-goldin}. These examples demonstrate that state-dependent terms can be added to the non-relativistic Schrödinger Hamiltonian while maintaining a compatible physical interpretation and correspondence with linear QM. The rigorous experimental tests that have followed \cite{Shull1980Mar, Gahler1981Apr, Bollinger1989, Chupp1990May, Walsworth1990May, Majumder1990Dec} further highlight the seriousness of these proposals and have also imposed stringent bounds on their predictions.
\begin{comment} Time evolution in quantum mechanics (QM) is described by a linear equation of motion. This is one of the fundamental axioms of QM \cite{Wigner1939OnUR} and is used to prove results such as the no-cloning theorem \cite{Wootters1982Oct}. Given the central role of QM in physics, it is important to question this axiom.
Interestingly, linearity is not necessary to reproduce many central elements of QM such as the conservation of probability or the existence of energy eigenstates. This is evidenced by a number of successful non-linear generalizations of QM (NLQM) for {\it single} particles that have been independently fleshed out in the literature \cite{Bialynicki-Birula1976Sep, Weinberg1989Jan, Weinberg1989Sep, doebner-goldin}. These examples demonstrate that state-dependent terms can be added to the non-relativistic Schrö dinger Hamiltonian while maintaining a compatible physical interpretation and correspondence with linear QM. The rigorous experimental tests that have followed \cite{Shull1980Mar, Gahler1981Apr, Bollinger1989, Chupp1990May, Walsworth1990May, Majumder1990Dec} further highlight the seriousness of these proposals and have also imposed stringent bounds on their predictions. \end{comment}
But QM needs to be able to describe multiple particles that can exist together in an arbitrarily complicated entangled state. Naive generalizations of \cite{Bialynicki-Birula1976Sep, Weinberg1989Jan, Weinberg1989Sep, doebner-goldin} to such states generally lead to violations of causality \cite{GISIN1, Gisin2, Polchinski}, contributing to a widespread belief that linearity is necessary for causality \cite{Bassi2015Aug}. However, as first pointed out by Polchinski \cite{Polchinski}, causal NLQM evolution of multi-particle states is possible if the non-linear terms in the Schrödinger equation are restricted to a specific form.
\begin{comment} But the significance of these results was challenged when it was shown that the underlying non-linear theories generally violated another key tenet of physics: causality \cite{GISIN1, Gisin2}. Evidently, it is the linearity of time evolution that ultimately prevents the nonlocal quantum correlations in an entangled system from being leveraged for nonlocal communication in standard QM. This has led to a persistent notion that linearity is the \textit{expression} of causality in the Schrö dinger equation \cite{Bassi2015Aug}. However, Polchinski has since described a restricted class of non-linear observables \footnote{Specifically, non-linear observables that depend only on the full density matrix of the system.} that also preserve causality under reasonable assumptions \cite{Polchinski}. Thus, causality seems to be yet another physical requirement for which linear time evolution is a sufficient but not necessary condition.
\end{comment}
Recently, Kaplan and Rajendran \cite{KaplanandRajendran}, building on earlier work by Kibble \cite{Kibble:1978vm}, have developed a systematic approach for incorporating causal non-linear evolution into quantum field theory (QFT). The introduction of non-linearities directly into QFT as opposed to the single particle Schrödinger equation is motivated by the fact that QFT is the natural framework to describe the causal evolution of multi-particle states. Excitingly, it was shown in \cite{KaplanandRajendran} that the non-linear structure demanded by \cite{Polchinski} for multi-particle states was a natural consequence of QFT.
\begin{comment} Recently Kaplan and Rajendran have built on Polchinski's work by developing a compatible (and thus, causal) NLQM framework \cite{KaplanandRajendran}, which is also capable of handling relativistic systems. A novelty of their approach is the treatment of the non-linearity as a completely physical (albeit unusual) interaction, rather than a purely mathematical abstraction. This is accomplished by starting with a given (relativistic) quantum field theory (QFT) Hamiltonian and then pairing each existing interaction with an additional term obtained by replacing the participating field operators with their expectation values. In essence, this describes a systematic procedure for reintroducing an element of classical field theory into the fully quantum picture. However, when applied to low-energy systems the generic result is a non-linear Schrödinger equation. \end{comment}
The basic approach of \cite{KaplanandRajendran} is to start with a given QFT and introduce non-linearities by shifting bosonic field operators by a small amount proportional to the expectation value of the field operator acting on the full quantum state. When applied to single particle systems, the procedure yields a non-linear Schrödinger equation. For example, the time evolution of a single particle with charge $q$ and Hamiltonian $H$ is described in this theory by:
\begin{align}
i\hbar\partial_t\Psi(t,\mathbf{x}) = &\bigg(H + \tilde{\epsilon}_{\gamma} \frac{q^2}{4\pi\varepsilon_0}\int d^4 \mathbf{x}_1 |\Psi(t_1,\mathbf{x}_1)|^2G_r(t, \mathbf{x}; t_1, \mathbf{x_1})\bigg)\nonumber\\
&\times\Psi(t,\mathbf{x}) \label{relativisticcorrection} \end{align}
\noindent where $\tilde{\epsilon}_{\gamma}$ is a small unitless parameter scaling the non-linearity of the theory \footnote{The $\gamma$ subscript specifies that this perturbation is specific to electromagnetic fields. The causal theory is field-dependent and does not explicitly require that the scaling of the perturbation be the same for self-interactions mediated by different quantum field theories.} and $G_r$ is the relativistic retarded Green's function from the spacetime coordinates $(t_1,\mathbf{x}_1)$ to $(t,\mathbf{x})$ . $G_r$ naturally appears in this expression from the underlying QFT derivation and enforces causality. The new term added to the Hamiltonian in Eq. \eqref{relativisticcorrection} admits the simple interpretation of a classical Coulomb potential causally sourced by the quantum probability distribution of the particle's position.
\begin{comment} In this sense, the self-interaction of the wave function is mediated by the ghost of its past position-space distribution such that causality is directly enforced. \end{comment}
As with past efforts, Kaplan and Rajendran's theory also maintains a satisfactory correspondence with standard QM, preserving important features like conservation of probability and energy, the existence of stationary states and a consistent notion of measurement. One might expect, however, that the strong bounds set from previous searches for NLQM \cite{Shull1980Mar, Gahler1981Apr, Bollinger1989, Chupp1990May, Walsworth1990May, Majumder1990Dec} would largely carry over. But this expectation turns out to be false for the following fundamental reason. The tests performed by \cite{Shull1980Mar, Gahler1981Apr, Bollinger1989, Chupp1990May, Walsworth1990May, Majumder1990Dec} are on energy levels of various bound states. In linear QM, the level structure is independent of the center of mass spread of the bound state wave-function. This is not true in causal NLQM where non-linear effects alter time evolution via the position space wavefunction as in Eq.~\eqref{relativisticcorrection}. These effects are highly suppressed if the center of mass wave-function is spread out.
To illustrate this point, it is helpful to take the non-relativistic limit of Eq. \eqref{relativisticcorrection}. When $||H||/\hbar \ll c/|\mathbf{x}_1-\mathbf{x}|$ the non-linear Schrödinger equation becomes:
\begin{equation} \label{nonrelativisticcorrection}
i\hbar\partial_t\Psi(t,\mathbf{x}) = \bigg(H + \tilde{\epsilon}_{\gamma} \frac{q^2}{4\pi\varepsilon_0}\int d^3 \mathbf{x}_1 \frac{|\Psi(t,\mathbf{x}_1)|^2}{|\mathbf{x}_1 - \mathbf{x}|}\bigg) \Psi(t,\mathbf{x}) \end{equation}
Here one can see that the denominator of the integrand scales with the full position-space spread of the wave function, damping the perturbation accordingly. This is a simple consequence of the Coulomb potential that sources the non-linearity, but the implication is that any sensitive test based on standard atomic or nuclear spectroscopy must also pin down the test system's center of mass motion to a dimension comparable to the spread of the internal degrees of freedom. This condition was not well-satisfied in previous tests for NLQM \cite{Shull1980Mar, Gahler1981Apr, Bollinger1989, Chupp1990May, Walsworth1990May, Majumder1990Dec}, but by requiring the non-linear correction to be smaller than the uncertainty in recent Lamb shift measurements of hydrogen, Kaplan and Rajendran have set a modest bound of $|\tilde{\epsilon}_{\gamma}| \lessapprox 10^{-2}$, giving a sense for the limitations of atomic spectroscopy \cite{KaplanandRajendran}.
For a more precise test, one might perform Ramsey spectroscopy \cite{Ramsey} on a superposition of the Fock states $|n\rangle$ of a harmonic vibrational mode of a trapped ion \cite{KaplanandRajendran}. The state $|\psi(t{=}0)\rangle=\alpha_n|n\rangle + \alpha_m|m\rangle$ can be prepared and then allowed to freely evolve for an interrogation time $\tau$. The Coulomb field sourced by the position-space expectation value of $|\psi\rangle$ interacts differently with the two branches of the wave function leading to an energy shift and thus the accumulation of a measurable phase difference between them \cite{Schmidt_Kaler_2003}. The advantage of this method is that a superposition can be created where 1) the physical spread of the center-of-mass wave function is well localized with respect to the size of the wavefunction and 2) there is still very little overlap between the position space distributions of the two branches. The first point ensures that non-linear perturbation is not small and the second point ensures that the effect it has on the two branches of the wave function is dissimilar -- maximizing the phase difference.
These conditions are satisfied when $n{=}0$ and $m{=}1$, i.e. the initial state is a superposition of the ground and first excited state. If one replaces $H$ in Eq. \eqref{nonrelativisticcorrection} with the Hamiltonian for a three-dimensional, isotropic harmonic oscillator \footnote{For an anisotropic potential, Eq. \eqref{phi} will incur an $\mathcal{O}(\tilde{\epsilon}_{\gamma})$ correction.} and assumes that the vibrational modes in the two transverse directions remain in their ground state, the phase difference accumulated between the ground and first excited state of the superposition after a time $\tau$ is given by:
\begin{equation} \label{phi}
\phi_{NL}(\tau; \{\alpha_i\}) = \tilde{\epsilon}_{\gamma}\frac{10 \alpha_0^2 + \alpha_1^2}{30\sqrt{2\pi}\hbar} \frac{e^2}{4\pi\varepsilon_0 x_0} \tau \end{equation}
\noindent where the $\alpha_i$ are assumed to be real and $x_0 = \sqrt{\hbar/(m\nu)}$ is the characteristic length scale of a harmonic oscillator with mass $m$ and natural frequency $\nu$. Note that the state-dependence of $\phi_{NL}$, i.e. its dependence on the weight of the energy eigenstates via $\alpha_i$, is a characteristic non-linear effect, which has no analog in the linear theory. For an ion localized to $x_0 = 10$\,nm, a phase of up to order $10^{10}\times \tilde{\epsilon}_{\gamma}$ is accumulated for every millisecond of interrogation time. In this Letter we perform such a Ramsey experiment designed to maximize the signal $\phi_{NL}$ and thus tighten the bound on $\tilde{\epsilon}_{\gamma}$ by 8 orders of magnitude relative to the current best estimate.
\begin{figure*}
\caption{
\textbf{Experimental implementation.} \textbf{(a)} A $^{40}$Ca$^+$ ion is trapped using a combination of RF and DC electric fields. In a time-averaged sense, the confinement is well modeled by a 3-dimensional harmonic potential. \textbf{(b)} Motion along the $x$-direction is excited by resonantly coupling to the internal electronic Zeeman sublevels of the $4^2S_{1/2} \leftrightarrow 3^2D_{5/2}$ transition using narrow band light near 729\,nm. The degeneracy of the Zeeman states is broken through application of a strong magnetic field of $\approx$4\,G. Measurement is performed by scattering photons of the short-lived $4^2S_{1/2}\leftrightarrow 4^2P_{1/2}$, which are then focused onto an EMCCD camera. \textbf{(c)} The experimental pulse sequence. Pulses that address the qubit are colored gray and those that address the blue sideband, blue. After preparing the state $|S,0\rangle$, the first pair of pulses is used to generate the state $|\psi(t=0)\rangle = |D\rangle (\alpha_0|0\rangle + \alpha_1|1\rangle)$. This is then allowed to freely evolve for a time $\tau$, accumulating a relative phase of $\Phi(\tau)$, which is sensitive to the proposed causal non-linear perturbation. Afterwards, the information is mapped onto the qubit with a blue sideband pulse and then the expectation value of the Pauli spin operator $\text{cos}(\xi_L)\sigma_x + \text{sin}(\xi_L)\sigma_y$ is measured. \textbf{(d)} An illustration of the two-step process for generating the state $|\psi(t=0)\rangle$, as described in more detail in the main text. The key feature is the fact that the state $|D,0\rangle$ is transparent to the resonant blue sideband drive as illustrated in \textbf{(e)}. This allows us to map an arbitrary qubit state onto the ground and first excited state of the vibrational mode. }
\label{fig:experiment-illustration}
\end{figure*}
\textcolor{purple}{\textit{Experimental implementation.}}-- Experiments were performed using a single $^{40}$Ca$^+$ ion confined inside of a radio-frequency (RF) Paul trap in a parameter regime where the center of mass motion is well modelled as a 3-dimensional anisotropic harmonic oscillator (Fig. \ref{fig:experiment-illustration}a) with vibrational frequencies $\nu_x \approx 2\pi \times 1.01$ MHz, $\nu_y \approx 2\pi \times 2.52$ MHz and $\nu_z \approx 2\pi \times 2.79$ MHz.
The ion's internal state is manipulated by shining resonant
laser light on various electronic transitions (Fig. \ref{fig:experiment-illustration}b). The short-lived $4^2S_{1/2}\leftrightarrow{4^2P_{1/2}}$ and $3^2D_{5/2}\leftrightarrow{4^2P_{3/2}}$ dipole transitions are used for entropy-altering operations like cooling and measurement. Measurement, in particular, is performed via the electron shelving method on $4^2S_{1/2}\leftrightarrow{4^2P_{1/2}}$ and allows us to determine the population of the $4^2S_{1/2}$ manifold \cite{electron_shelving}. For coherent operations, narrowband light at 729\,nm is used to couple the $|4^2S_{1/2},m_J{=}-1/2\rangle$ and $|{3^2D_{5/2}},m_J{=}-1/2\rangle$ states, whose degeneracy is broken with a static magnetic field of $B\approx$ 4\,G. We call this our qubit transition and reference it as $|S\rangle\leftrightarrow|D\rangle$.
To prepare the ion in a well-defined state, we first cool its temperature to several hundred microkelvin using Doppler cooling and then optically pump its electronic state into $|S\rangle$. Afterwards, resolved sideband cooling is applied along the $x$-direction, driving the axial vibrational mode into its ground state with high probability \cite{sidebandcooling}. Once this process is complete, the ion is measured to be in the state $|S, n_x=0\rangle$ with a confidence greater than $99\%$, where $n_x$ refers to the phonon number of the vibrational mode along the $x$-direction. The two transverse vibrational modes are left in thermal states with mean phonon occupations determined by the Doppler-limit of $\langle n_{y,z}\rangle\approx3$. These modes remain separated from the $|S, n_x\rangle$ state and so we ignore them in what follows except for taking into account the additional spread of the wave function in position space to determine the nonlinearity in Eqs.~\ref{nonrelativisticcorrection} and \ref{phi}.
In order to create the desired superposition state, we use laser light resonant with a motional sideband of the qubit transition. From the ion's perspective, a laser pointing along one of its vibrational axes will appear to be phase modulated by motion along that direction. By detuning the laser from the qubit frequency by an amount equal to $+\nu_x$, this effect can be used to couple the states ${|S,n\rangle\leftrightarrow|D,n+1\rangle}$, which we refer to as blue sideband transitions (Fig. \ref{fig:experiment-illustration}e) \cite{Leibfried2003Mar}. The energy of the blue sideband transitions is already sensitive to the non-linear perturbation and, in principle, can be used for our Ramsey experiment. But the electronic states are first-order sensitive to ambient magnetic field fluctuations leading to a coherence time an order of magnitude less than that of the vibrational mode -- unnecessarily limiting the Ramsey interrogation time.
So, instead we first map the desired Ramsey superposition onto the ion's internal states by resonantly driving the qubit transition for a fixed duration, generating the state $(\alpha_1 |S\rangle + \alpha_0 |D\rangle)|0\rangle$, in an appropriate rotating frame. Here $\alpha_0 = \sin(\theta/2)$, $\alpha_1 = \cos(\theta/2)$ and the value of $\theta$ is controlled by adjusting the intensity of the addressing laser. Next, we drive a blue sideband $\pi$-pulse that nominally transfers all of the population from $|S,0\rangle$ to $|D,1\rangle$ but leaves the population in $|D,0\rangle$ untouched (Fig. \ref{fig:experiment-illustration}d-e). Together, these operations result in the separated state ${|\psi(t=0)\rangle = |D\rangle(\alpha_0|0\rangle + \alpha_1|1\rangle)}$, where the qubit state information has been written onto the vibrational mode \cite{Schmidt_Kaler_2003}.
Once the state $|\psi\rangle$ has been prepared, it is allowed to evolve freely for a time $\tau$ so that a relative phase $\Phi(\tau;\theta)$ is accumulated and ${|\psi(\tau)\rangle = |D\rangle(\alpha_0|0\rangle + e^{i\Phi(\tau,\theta)}\alpha_1|1\rangle)}$. To extract this phase, we repeat the steps used to generate $|\psi(0)\rangle$ in a time-reversed order (with the value of $\theta$ now fixed at $\pi/2$ where the signal is maximized) and then measure $|D\rangle$, which will be occupied with a probability of:
\begin{equation} \label{ramseysignal}
P(\tau) = B - \frac{A(\tau)}{2}\;\cos[\Phi(\tau;\theta) + \xi_L] \end{equation}
\noindent Here $0\le A(\tau)\le 1$ is the signal contrast which will generally be less than one when $\theta\ne\pi/2$, $B\approx1/2$ is an offset whose precise value is sensitive to errors in state preparation/measurement and $\xi_L$ is the laser phase of the final qubit $\pi/2$-pulse relative to the initial $\theta$-pulse. Since $P(\tau)$ is an expectation value, a single estimate is obtained by repeating the experiment 200 times, which is large enough that the propagation of the quantum projection noise (QPN) converges when inverting Eq. \eqref{ramseysignal}. The full pulse sequence is illustrated in Fig. \ref{fig:experiment-illustration}c.
In order to gauge the performance of the Ramsey experiment, we conduct a test experiment where we apply a detuning $\Delta$ from resonance of several kHz to the first blue sideband pulse. In the rotating frame, this breaks the degeneracy of $|0\rangle$ and $|1\rangle$ leading to a phase of $\Phi(\tau) = \Delta\tau$ and, thus, sinusoidal oscillations of $P(\tau)$. The result is plotted in Fig. \ref{fig:optimal_time}a, where one can see that the signal contrast $A(\tau)$ exhibits a clear time-dependence due to zero-mean noise effects beyond the simple model described in Eq. \eqref{ramseysignal}.
The dominant source of this noise is found to be a Markovian heating of the vibrational mode caused by ambient electric field fluctuations at the position of the ion and, perhaps, high frequency noise on the trapping potential \cite{Schneider1999May}. This means that during free evolution, the vibrational mode may spontaneously absorb a phonon from its environment with a probability that grows linearly in time. When $n$ phonons are absorbed, the state of the system after the final blue sideband pulse will be $|S\rangle (\alpha_0|n\rangle + \alpha_1|n+1\rangle)$ and the result of the final $\pi/2$-pulse, regardless of $\Phi$, will be a symmetric distribution of $\{|S\rangle,\;|D\rangle\}$ -- diminishing the averaged signal contrast. The dashed line in Fig. \ref{fig:optimal_time}a is a simulated decay envelope computed assuming only this heating process as characterized by the heating rate $\dot{\bar{n}}\approx10$\, quanta/s, independently measured by monitoring the red sideband \cite{Leibfried2003Mar}. The good agreement between the simulated and measured decay validates our earlier claim that the contrast is limited by environmental heating.
\begin{figure}
\caption{
\textbf{Experimental Performance.} \textbf{(a)} Measured $P(\tau)$, as described by Eq. \eqref{ramseysignal} (red). The black dashed line is the predicted decay envelope taking into account only heating of the vibrational mode at a rate of 10 quanta/s. The reasonable agreement between the predicted and measured decay suggests that the Ramsey signal contrast is dominated by this heating process. \textbf{(b)} The black circles represent the sample standard deviation from repeated measurements of $\Delta\phi_{NL}(\tau)$ taken at various interrogation times and normalized to an integration time of 1\,s. The blue shaded region bounds the simulated predictions assuming only QPN and a heating rate between 7 and 13 quanta/s (lower and upper edge of the region, respectively). The dark blue line corresponds to 10 quanta/s. }
\label{fig:optimal_time}
\end{figure}
For a precise determination of the non-linearity it is most convenient to estimate $\Phi(\tau)$ at a fixed $\tau$. But since $A$, $B$ and $\Phi$ are all empirical quantities, inverting Eq. \eqref{ramseysignal} requires at least three independent measurements. We obtain these by repeating the experiment for three different values of $\xi_L$ spaced by ninety degrees such that $\xi^{(3)}_L = \xi^{(2)}_L + \pi/2 = \xi^{(1)}_L + \pi$. The targeted value of $\xi^{(1)}_L$ is chosen to minimize the standard deviation of $\Phi(\tau)$:
\begin{equation} \label{stdphi}
|\delta \Phi(\tau)|=\sqrt{\sum_{i}\left(\frac{\partial \Phi}{\partial \mathrm{P}_{i}} \delta \mathrm{P}_{i}\right)^{2}} \end{equation}
\noindent which occurs when $\Phi(\tau) + \xi^{(1)}_L = \pi/2$. Here $P_i$ is the population measurement associated with $\xi_L^{(i)}$ and $\delta P_i$ is its standard deviation, nominally dominated by QPN.
A single measurement of $\Phi(\tau)$ contains the non-linear signature $\phi_{NL}(\tau)$, as described by Eq. \eqref{phi}, but also includes information about the detuning $\Delta$ of the blue sideband pulses from resonance and any AC Stark shifts that occur during state preparation and readout. Explicitly: $\Phi(\tau;\theta) = \phi_{NL}(\tau;\theta) + \Delta\tau + \phi_{SS}$, where $\phi_{SS}$ is the phase imprinted by the Stark shifts. Ideally, the frequency of the blue sideband pulses is calibrated such that $\Delta=0$, but slow drifts of the trapping potential on a time scale that is long relative to the Ramsey interrogation time generally cause this condition to be violated.
Likewise, Stark shifts incurred while driving the blue sideband cause a phase offset. But importantly, both $\Delta$ and $\phi_{SS}$ are independent of $\theta$ meaning that we can obtain an unbiased estimate of the non-linearity by repeating the measurement for two different values of $\theta$ and taking their difference:
\begin{align} \label{deltaphinl}
\Delta\phi_{NL}(\tau; \{\theta_i\}) &= \Phi(\tau; \theta_1) - \Phi(\tau; \theta_2) \nonumber\\
&= \phi_{NL}(\tau; \theta_1) - \phi_{NL}(\tau;\theta_2) \end{align}
\noindent We choose $\theta_1$ and $\theta_2$ such that the ground state population of $|\psi(t=0)\rangle$ is 0.2 and 0.8, respectively. We also verify that there is no phase difference due to the Stark shift for both preparation sequences.
The non-linear signal $\Delta\phi_{NL}$ grows linearly with interrogation time $\tau$. But this effect must contend with the contrast decay and QPN, both of which increase the uncertainty of the signal (Eq. \eqref{stdphi}) and both of which favor shorter, more frequent measurements \cite{Huelga1997Nov}. The combination of these effects results in an optimal interrogation time, which we determine experimentally by measuring $\Delta\phi_{NL}(\tau)$ at various $\tau$ and computing the sample standard deviation. These results are normalized to an integration time of 1\,s and plotted in Fig. \ref{fig:optimal_time}b. The blue shaded region is a corresponding simulation that assumes only QPN and vibrational heating bounded by $7\le\dot{\bar{n}}\le13$\,quanta/s. Based on this data, we fix $\tau = 15$\,ms.
\begin{comment} \textcolor{purple}{\textit{Results and discussion.}}-- In order to obtain a single estimate of $\tilde{\epsilon}_{\gamma}$, we perform the following steps:
\begin{enumerate}
\item \end{enumerate} \end{comment}
To determine a more rigorous bound on $\tilde{\epsilon}_{\gamma}$ we repeat the measurement of $\Delta\phi_{NL}(\tau{=}15\,\text{ ms})$ many times. Before each $\phi_{NL}$ measurement, we independently measure the initial qubit excitation to determine the precise values of $\theta_i$ which may change slightly over time due to intensity drifts of the addressing light. We also perform a preliminary 3-point Ramsey measurement with the population of $|0\rangle$ set to 0.5 to produce a maximum signal that we use to optimally bias $\xi^{(1)}_L$. Next, we perform 1200 measurements of the Ramsey signal, 200 for each of the 3 Ramsey points for $\theta_1$ and $\theta_2$. The ordering of these experiments is randomized so as to avoid a bias due to drifts in $\Phi(\tau)$. From this data we compute $\Delta\phi_{NL}$, the average contrast $A$ of the two runs and the average offset $B$. For a single day of data, this is plotted in Fig. \ref{fig:results}a. The blue dots show data taken at $\tau=15$ ms. The red dots show data taken at 15\,ms divided by the golden ratio $(1+\sqrt{5})/2$, i.e.\ at ${\approx}9.27$\,ms, which does not improve the estimate of the non-linearity but allows us to rule out the remote possibility that $\Delta\phi_{NL}(\tau{=}15\,\text{ ms})$ modulo $2\pi$ vanishes even though the perturbation is not small.
\begin{figure}
\caption{
\textbf{Results.} \textbf{(a)} (top to bottom) The measured contrast, frequency and offset over a full day of data collection. The blue circles represent data taken at an interrogation time of 15\,ms and the red circles were taken at a time of 15\,ms divided by the golden ratio ($\approx$9.3\,ms). \textbf{(b)} The distribution of $\tilde{\epsilon}_{\gamma}$ estimated from the data. The mean value is $(5 \pm 5.4) \times 10^{-12}$. The black curve is a Gaussian fit to the distribution. }
\label{fig:results}
\end{figure}
The distribution of $\tilde{\epsilon}_{\gamma}$ computed from the measured values of $\Delta\phi_{NL}(\tau{=}15\,\text{ ms})$ and $\theta_i$ is shown in Fig. \ref{fig:results}b. The black curve is a Gaussian fit. The mean value is determined to be $(5 \pm 5.4) \times 10^{-12}$, where the reported uncertainty corresponds to 1 standard deviation. The average uncertainty of the individual measurements computed using standard propagation of error when solving the system of equations Eqs. \eqref{stdphi}, \eqref{deltaphinl} and assuming only QPN is found to be $7.7 \times 10^{-11}$, which is in good agreement with the sample standard deviation $8.2 \times 10^{-11}$.
In summary, we have improved the bound on potential nonlinearities of a causal extension to QM from $\tilde{\epsilon}_{\gamma}\lessapprox 10^{-2}$ to $|\tilde{\epsilon}_{\gamma}| \lessapprox 5.4 \times 10^{-12}$. Further improvements could be achieved with longer averaging times, longer coherence times, or sophisticated quantum measurement protocols such as using squeezed states. Similarly, tighter bounds can also be achieved by localizing the test particle better, for instance, by increasing the mass $m$ or the confinement $\nu$.
During the preparation of this manuscript, we became aware of related work \cite{Polkovnikov2022Apr}.
\begin{acknowledgments}
J.B., S.K. and H.H. acknowledge funding by the U.S. Department of Energy, Office of Science, Office of Basic Energy Sciences under Award Number DE{-}SC0019376. \end{acknowledgments}
\begin{comment} {\color{red} SR: The following are notes on how to deal with the fact that the cooling gives a thermal occupation of the harmonic oscillator states}
Non-linear quantum mechanics requires knowledge of the full quantum state. When you cool an ion, you expose the ion to a fixed number of cooling transitions which results in the ion undergoing a bunch of spontaneous emission all of which results in lowering the energy of the ion. The emitted photons will go and hit various atoms in the apparatus and undergo decoherence with the environment. But since the rest of the apparatus is not actively tracking these photons, the full quantum state of the system (ion and lab) at $t = 0$, when the ion has been cooled, is:
\begin{equation}
|\chi \left(t = 0\right)\rangle = \left(\sum_{ij} c_{ij} |N_x = i, N_y = j, N_z = 1\rangle \otimes |E_{ij}\rangle \right) \otimes |L\rangle \end{equation}
That is, the laser cooling gets us to the ground state $|N_{z} = 1\rangle$ in the $z$ direction, while in the $x$ and $y$ direction the cooling gets us to various excitation states with probability $|c_{ij}|^2$. The entanglement with the environment $|E_{ij}\rangle$ implies that the reduced density matrix which describes the ion alone is simply a classical probability distribution without any off diagonal terms. We also denote the lab state by $|L\rangle$ --- since the macroscopic behavior of the lab is not changing depending upon the cooling process, we simply write it as an overall factorized state.
When we place the ion in a spatial superposition at time $t = t_0$, the state becomes:
\begin{equation}
|\chi \left(t = t_0\right)\rangle = \left(\sum_{ij} c_{ij} |N_x = i, N_y = j\rangle\otimes \left(\alpha |N_z = 1\rangle + \beta |N_z = 2\rangle \right) \otimes |E_{ij}\rangle \right) \otimes |L\rangle \end{equation}
In non-linear quantum mechanics, the key point is that the ion interacts with the expectation value of the electromagnetic field in the full quantum state: $\langle \chi\left(t_0\right) | A_{\mu} | \chi\left(t_0\right) \rangle$.
Thus the Coulomb potential is:
\begin{equation}
\tilde{\epsilon}_{\gamma} \frac{q^2}{4 \pi \epsilon_0} \sum_{ij} |c_{ij}|^2 \int d^3 x_1 \frac{|\Psi_{ij}\left(t, x_1\right)|^2}{|x_1 - x|} \end{equation}
So basically the suppression of the effect relative to the answer that you initially had goes like:
\begin{equation}
\sum |c_{ij}|^2 \frac{1}{ij} \end{equation}
That is, we have used the fact that the square of the wavefunction of the $i^{th}$ state of the harmonic oscillator is $1/i$ times more spread out than the ground state. \end{comment}
\end{document} | arXiv | {
"id": "2206.12976.tex",
"language_detection_score": 0.8352997899055481,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\def\e#1\e{\begin{equation}#1\end{equation}} \def\ea#1\ea{\begin{align}#1\end{align}} \def\eq#1{{\rm(\ref{#1})}} \theoremstyle{plain} \newtheorem{thm}{Theorem}[section] \newtheorem{lem}[thm]{Lemma} \newtheorem{prop}[thm]{Proposition} \newtheorem{cor}[thm]{Corollary} \theoremstyle{definition} \newtheorem{dfn}[thm]{Definition} \newtheorem{ex}[thm]{Example} \newtheorem{rem}[thm]{Remark} \newtheorem{conv}[thm]{Convention} \numberwithin{figure}{section} \def\mathop{\rm dim}\nolimits{\mathop{\rm dim}\nolimits} \def\mathop{\rm codim}\nolimits{\mathop{\rm codim}\nolimits} \def\mathop{\rm depth}\nolimits{\mathop{\rm depth}\nolimits} \def\mathop{\rm Ker}{\mathop{\rm Ker}} \def\mathop{\rm Coker}{\mathop{\rm Coker}} \def\mathop{\rm sign}{\mathop{\rm sign}} \def\mathop{\rm GL}\nolimits{\mathop{\rm GL}\nolimits} \def\mathop{\rm Im}{\mathop{\rm Im}} \def\mathop{\rm id}\nolimits{\mathop{\rm id}\nolimits} \def\mathop{\rm Or}{\mathop{\rm Or}} \def{\mathop{\bf Man}}{{\mathop{\bf Man}}} \def{\mathop{\bf Man^b}}{{\mathop{\bf Man^b}}} \def{\mathop{\bf Man^c}}{{\mathop{\bf Man^c}}} \def{\mathop{\bf dMan^c}}{{\mathop{\bf dMan^c}}} \def\geqslant{\geqslant} \def\leqslant\nobreak{\leqslant\nobreak} \def{\mathbin{\mathbb R}}{{\mathbin{\mathbb R}}} \def{\mathbin{\mathbb Z}}{{\mathbin{\mathbb Z}}} \def{\mathbin{\mathbb N}}{{\mathbin{\mathbb N}}} \def{\mathbin{\mathbb{RP}}}{{\mathbin{\mathbb{RP}}}} \def{\mathbin{\smash{\,\,\overline{\!\!\mathcal M\!}\,}}}{{\mathbin{\smash{\,\,\overline{\!\!\mathcal M\!}\,}}}} \def\alpha{\alpha} \def\beta{\beta} \def\gamma{\gamma} \def\delta{\delta} \def\iota{\iota} \def\epsilon{\epsilon} \def\lambda{\lambda} \def\kappa{\kappa} \def\theta{\theta} \def\zeta{\zeta} \def\upsilon{\upsilon} \def\varphi{\varphi} \def\sigma{\sigma} \def\omega{\omega} \def\Delta{\Delta} \def\Lambda{\Lambda} \def\Omega{\Omega} \def\Upsilon{\Upsilon} \def\Gamma{\Gamma} \def\Sigma{\Sigma} \def\Theta{\Theta} \def\partial{\partial} \def\textstyle{\textstyle} 
\def\scriptscriptstyle{\scriptscriptstyle} \def\wedge{\wedge} \def\setminus{\setminus} \def\bullet{\bullet} \def\boldsymbol{\boldsymbol} \def\oplus{\oplus} \def\odot{\odot} \def\oplus{\oplus} \def\overline{\overline} \def\bigoplus{\bigoplus} \def\bigotimes{\bigotimes} \def\infty{\infty} \def\emptyset{\emptyset} \def\rightarrow{\rightarrow} \def\Rightarrow{\Rightarrow} \def\allowbreak{\allowbreak} \def\longrightarrow{\longrightarrow} \def\hookrightarrow{\hookrightarrow} \def\dashrightarrow{\dashrightarrow} \def\times{\times} \def\circ{\circ} \def\tilde{\tilde} \def{\rm d}{{\rm d}} \def\md#1{\vert #1 \vert} \def\bmd#1{\big\vert #1 \big\vert} \def\an#1{\langle #1 \rangle}
\title{On manifolds with corners} \author{Dominic Joyce} \date{} \maketitle \begin{abstract} Manifolds without boundary, and manifolds with boundary, are universally known and loved in Differential Geometry, but {\it manifolds with corners\/} (locally modelled on $[0,\infty)^k\times{\mathbin{\mathbb R}}^{n-k}$) have received comparatively little attention. The basic definitions in the subject are not agreed upon, there are several inequivalent definitions in use of manifolds with corners, of boundary, and of smooth map, depending on the applications in mind.
We present a theory of manifolds with corners which includes a new notion of smooth map $f:X\rightarrow Y$. Compared to other definitions, our theory has the advantage of giving a category ${\mathop{\bf Man^c}}$ of manifolds with corners which is particularly well behaved as a category: it has products and direct products, boundaries $\partial X$ behave in a functorial way, and there are simple conditions for the existence of fibre products $X\times_ZY$ in ${\mathop{\bf Man^c}}$.
Our theory is tailored to future applications in Symplectic Geometry, and is part of a project to describe the geometric structure on moduli spaces of $J$-holomorphic curves in a new way. But we have written it as a separate paper as we believe it is of independent interest. \end{abstract}
\section{Introduction} \label{mc1}
Most of the literature in Differential Geometry discusses only manifolds without boundary (locally modelled on ${\mathbin{\mathbb R}}^n$), and a smaller proportion manifolds with boundary (locally modelled on $[0,\infty)\times{\mathbin{\mathbb R}}^{n-1}$). Only a few authors have seriously studied {\it manifolds with corners} (locally modelled on $[0,\infty)^k\times{\mathbin{\mathbb R}}^{n-k}$). They were first developed by Cerf \cite{Cerf} and Douady \cite{Doua} in 1961, who were primarily interested in their Differential Geometry. J\"anich \cite{Jani} used manifolds with corners to classify actions of transformation groups on smooth manifolds. Melrose \cite{Melr1,Melr2} and others study analysis of elliptic operators on manifolds with corners. Laures \cite{Laur} defines a cobordism theory for manifolds with corners, which has been applied in Topological Quantum Field Theory, by Lauda and Pfeiffer \cite{LaPf} for instance. Margalef-Roig and Outerelo Dominguez \cite{MaOu} generalize manifolds with corners to infinite-dimensional Banach manifolds.
How one sets up the theory of manifolds with corners is not universally agreed, but depends on the applications one has in mind. As we explain in Remarks \ref{mc2rem}, \ref{mc3rem} and \ref{mc6rem2} which relate our work to that of other authors, there are at least four inequivalent definitions of manifolds with corners, two inequivalent definitions of boundary, and (including ours) four inequivalent definitions of smooth map in use in the literature. The purpose of this paper is to carefully lay down the foundations of a theory of manifolds with corners, which includes a new notion of {\it smooth map\/} $f:X\rightarrow Y$ between manifolds with corners.
The main issue here is that (in our theory) an $n$-manifold with corners $X$ has a boundary $\partial X$ which is an $(n\!-\!1)$-manifold with corners, and so by induction the $k$-fold boundary $\partial^kX$ is an $(n\!-\!k)$-manifold with corners. How to define smooth maps $f:X\rightarrow Y$ in the interiors $X^\circ,Y^\circ$ is clear, but one must also decide whether to impose compatibility conditions on $f$ over $\partial^kX$ and $\partial^lY$, and it is not obvious how best to do this. Our definition gives a nicely behaved category ${\mathop{\bf Man^c}}$ of manifolds with corners, and in particular we can give simple conditions for the existence of {\it fibre products\/} in~${\mathop{\bf Man^c}}$.
The author's interest in manifolds with corners has to do with applications in Symplectic Geometry. Moduli spaces ${\mathbin{\smash{\,\,\overline{\!\!\mathcal M\!}\,}}}_{g,h}^{l,m}(M,L,J,\beta)$ of stable $J$-holomorphic curves in a symplectic manifold $(M,\omega)$ with boundary in a Lagrangian $L$ are used in Lagrangian Floer cohomology, open Gromov--Witten invariants, and Symplectic Field Theory. What geometric structure should we put on ${\mathbin{\smash{\,\,\overline{\!\!\mathcal M\!}\,}}}_{g,h}^{l,m}(M,L,J,\beta)$? Hofer, Wysocki and Zehnder \cite{HWZ} make ${\mathbin{\smash{\,\,\overline{\!\!\mathcal M\!}\,}}}_{g,h}^{l,m}(M,L,J,\beta)$ into a {\it polyfold\/} (actually, the zeroes of a Fredholm section of a polyfold bundle over a polyfold). Fukaya, Oh, Ohta and Ono \cite{FOOO} make ${\mathbin{\smash{\,\,\overline{\!\!\mathcal M\!}\,}}}_{g,h}^{l,m}(M,L,J,\beta)$ into a {\it Kuranishi space}. But the theory of Kuranishi spaces is still relatively unexplored, and in the author's view even the definition is not satisfactory.
In \cite{Joyc} the author will develop theories of {\it d-manifolds\/} and {\it d-orbifolds}. Here {\it d-manifolds\/} are a simplified version of Spivak's {\it derived manifolds\/} \cite{Spiv}, a diff\-er\-ent\-ial-geo\-met\-ric offshoot of Jacob Lurie's Derived Algebraic Geometry programme, and d-orbifolds are the orbifold version of d-manifolds. We will argue that the `correct' way to define Kuranishi spaces is as {\it d-orbifolds with corners}, which will help to make \cite{FOOO} more rigorous. In future the author hopes also to show that polyfolds can be truncated to d-orbifolds with corners, building a bridge between the theories of Fukaya et al.\ \cite{FOOO} and Hofer et al.~\cite{HWZ}.
To define d-manifolds and d-orbifolds with corners we first need a theory of manifolds with corners, and we develop it here in a separate paper as we believe it is of independent interest. For \cite{Joyc} and later applications in Symplectic Geometry, it is important that boundaries and fibre products in ${\mathop{\bf Man^c}}$ should be well-behaved. The author strongly believes that the theory we set out here, in particular our definition of smooth map, is the `right' definition for these applications. As evidence for this, note that in \cite{Joyc} we will show that if $\boldsymbol X,\boldsymbol Y$ are d-manifolds with corners, $Z$ is a manifold with corners, and $f:\boldsymbol X\rightarrow Z$, $g:\boldsymbol Y\rightarrow Z$ are arbitrary smooth maps, then a fibre product $\boldsymbol X\times_{f,Z,g}\boldsymbol Y$ exists in the 2-category ${\mathop{\bf dMan^c}}$ of d-manifolds with corners. The fact that this works is crucially dependent on the details of our definition of smooth map.
We begin in \S\ref{mc2} with definitions and properties of manifolds with corners $X$, their boundaries $\partial X$, $k$-boundaries $\partial^kX$ and $k$-corners $C_k(X)\cong \partial^kX/S_k$. Section \ref{mc3} defines and studies smooth maps $f:X\rightarrow Y$ of manifolds with corners, and \S\ref{mc4} explains two ways to encode how a smooth $f:X\rightarrow Y$ relates $\partial^kX$ and $\partial^lY$ for $k,l\geqslant 0$. Sections \ref{mc5}--\ref{mc7} discuss submersions, transversality and fibre products of manifolds with corners, and orientations and orientation conventions. The proofs of Theorems \ref{mc6thm1} and \ref{mc6thm2}, two of our main results on fibre products, are postponed to \S\ref{mc8} and~\S\ref{mc9}.
The author would like to thank Franki Dillen for pointing out reference~\cite{MaOu}.
\section{Manifolds with corners, and boundaries} \label{mc2}
We define {\it manifolds without boundary}, {\it with boundary}, and {\it with corners}.
\begin{dfn} Let $X$ be a paracompact Hausdorff topological space. \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[(i)] An {\it $n$-dimensional chart on\/ $X$ without boundary\/} is a pair $(U,\phi)$, where $U$ is an open subset in ${\mathbin{\mathbb R}}^n$, and $\phi:U\rightarrow X$ is a homeomorphism with a nonempty open set $\phi(U)$ in~$X$. \item[(ii)] An {\it $n$-dimensional chart on\/ $X$ with boundary\/} for $n\geqslant 1$ is a pair $(U,\phi)$, where $U$ is an open subset in ${\mathbin{\mathbb R}}^n$ or in $[0,\infty)\times{\mathbin{\mathbb R}}^{n-1}$, and $\phi:U\rightarrow X$ is a homeomorphism with a nonempty open set $\phi(U)$. \item[(iii)] An {\it $n$-dimensional chart on\/ $X$ with corners\/} for $n\geqslant 1$ is a pair $(U,\phi)$, where $U$ is an open subset in ${\mathbin{\mathbb R}}^n_k=[0,\infty)^k\times{\mathbin{\mathbb R}}^{n-k}$ for some $0\leqslant\nobreak k\leqslant\nobreak n$, and $\phi:U\rightarrow X$ is a homeomorphism with a nonempty open set~$\phi(U)$. \end{itemize} These are in increasing order of generality, that is, (i) $\Rightarrow$ (ii) $\Rightarrow$ (iii).
For brevity we will use the notation ${\mathbin{\mathbb R}}^n_k=[0,\infty)^k\times{\mathbin{\mathbb R}}^{n-k}$, following~\cite[\S 1.1]{Melr2}.
Let $A\subseteq{\mathbin{\mathbb R}}^m$ and $B\subseteq{\mathbin{\mathbb R}}^n$ and $\alpha:A\rightarrow B$ be continuous. We call $\alpha$ {\it smooth\/} if it extends to a smooth map between open neighbourhoods of $A,B$, that is, if there exists an open subset $A'$ of ${\mathbin{\mathbb R}}^m$ with $A\subseteq A'$ and a smooth map $\alpha':A'\rightarrow{\mathbin{\mathbb R}}^n$ with $\alpha'\vert_A\equiv\alpha$. If $A$ is open we can take $A'=A$ and $\alpha'=\alpha$. When $m=n$ we call $\alpha:A\rightarrow B$ a {\it diffeomorphism\/} if it is a homeomorphism and $\alpha:A\rightarrow B$, $\alpha^{-1}:B\rightarrow A$ are smooth.
Let $(U,\phi),(V,\psi)$ be $n$-dimensional charts on $X$, which may be without boundary, or with boundary, or with corners. We call $(U,\phi)$ and $(V,\psi)$ {\it compatible\/} if $\psi^{-1}\circ\phi:\phi^{-1}\bigl(\phi(U)\cap\psi(V)\bigr)\rightarrow \psi^{-1}\bigl(\phi(U)\cap\psi(V)\bigr)$ is a diffeomorphism between subsets of ${\mathbin{\mathbb R}}^n$, in the sense above.
An $n$-{\it dimensional atlas\/} for $X$ {\it without boundary}, {\it with boundary}, or {\it with corners}, is a system $\{(U^i,\phi^i):i\in I\}$ of pairwise compatible $n$-dimensional charts on $X$ with $X=\bigcup_{i\in I}\phi^i(U^i)$, where the $(U^i,\phi^i)$ are without boundary, or with boundary, or with corners, respectively. We call such an atlas {\it maximal\/} if it is not a proper subset of any other atlas. Any atlas $\{(U^i,\phi^i):i\in I\}$ is contained in a unique maximal atlas of the same type, the set of all charts $(U,\phi)$ of this type on $X$ which are compatible with $(U^i,\phi^i)$ for all~$i\in I$.
An $n$-{\it dimensional manifold without boundary}, or {\it with boundary}, or {\it with corners}, is a paracompact Hausdorff topological space $X$ equipped with a maximal $n$-dimensional atlas without boundary, or with boundary, or with corners, respectively. Usually we refer to $X$ as the manifold, leaving the atlas implicit, and by a {\it chart\/ $(U,\phi)$ on the manifold\/} $X$, we mean an element of the maximal atlas. When we just say manifold, we will usually mean a manifold with corners. \label{mc2def1} \end{dfn}
Here is some notation on (co)tangent spaces.
\begin{dfn} Let $X$ be an $n$-manifold with corners. A map $f:X\rightarrow{\mathbin{\mathbb R}}$ is called {\it smooth\/} if whenever $(U,\phi)$ is a chart on the manifold $X$ then $f\circ\phi:U\rightarrow{\mathbin{\mathbb R}}$ is a smooth map between subsets of ${\mathbin{\mathbb R}}^n,{\mathbin{\mathbb R}}$, in the sense of Definition \ref{mc2def1}. Write $C^\infty(X)$ for the ${\mathbin{\mathbb R}}$-algebra of smooth functions~$f:X\rightarrow{\mathbin{\mathbb R}}$.
Following \cite[p.~4]{KoNo}, for each $x\in X$ define the {\it tangent space\/} $T_xX$ by \begin{align*} T_xX=\bigl\{v:\,&\text{$v$ is a linear map $C^\infty(X)\rightarrow{\mathbin{\mathbb R}}$ satisfying}\\ &\text{$v(fg)=v(f)g(x)+f(x)v(g)$ for all $f,g\in C^\infty(X)$}\bigr\}, \end{align*} and define the {\it cotangent space\/} $T_x^*X=(T_xX)^*$. Both are vector spaces of dimension $n$. For each $x\in X$, we define the {\it inward sector\/} $IS(T_xX)$ of vectors which `point into $X$', as follows: let $(U,\phi)$ be a chart on $X$ with $U\subseteq{\mathbin{\mathbb R}}^n_k$ open and $0\in U$ with $\phi(0)=x$. Then ${\rm d}\phi\vert_0:T_0{\mathbin{\mathbb R}}^n_k={\mathbin{\mathbb R}}^n\rightarrow T_xX$ is an isomorphism. Set $IS(T_xX)={\rm d}\phi\vert_0({\mathbin{\mathbb R}}^n_k)\subseteq T_xX$. This is independent of the choice of~$(U,\phi)$. \label{mc2def2} \end{dfn}
We now study the notion of {\it boundary\/} $\partial X$ for $n$-manifolds $X$ with corners.
\begin{dfn} Let $U\subseteq{\mathbin{\mathbb R}}^n_k$ be open. For each $u=(u_1,\ldots,u_n)$ in $U$, define the {\it depth\/} $\mathop{\rm depth}\nolimits_Uu$ of $u$ in $U$ to be the number of $u_1,\ldots,u_k$ which are zero. That is, $\mathop{\rm depth}\nolimits_Uu$ is the number of boundary faces of $U$ containing~$u$.
Let $X$ be an $n$-manifold with corners. For $x\in X$, choose a chart $(U,\phi)$ on the manifold $X$ with $\phi(u)=x$ for $u\in U$, and define the {\it depth\/} $\mathop{\rm depth}\nolimits_Xx$ of $x$ in $X$ by $\mathop{\rm depth}\nolimits_Xx=\mathop{\rm depth}\nolimits_Uu$. This is independent of the choice of $(U,\phi)$. For each $k=0,\ldots,n$, define the {\it depth\/ $k$ stratum\/} of $X$ to be \begin{equation*} S^k(X)=\bigl\{x\in X:\mathop{\rm depth}\nolimits_Xx=k\bigr\}. \end{equation*}
\label{mc2def3} \end{dfn}
The proof of the next proposition is elementary.
\begin{prop} Let\/ $X$ be an $n$-manifold with corners. Then \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[{\bf(a)}] $X=\coprod_{k=0}^nS^k(X),$ and\/ $\overline{S^k(X)}=\bigcup_{l=k}^n S^l(X);$ \item[{\bf(b)}] Each\/ $S^k(X)$ has the structure of an $(n-k)$-manifold without boundary; \item[{\bf(c)}] $X$ is a manifold without boundary if and only if\/ $S^k(X)=\emptyset$ for\/ $k>0;$ \item[{\bf(d)}] $X$ is a manifold with boundary if and only if\/ $S^k(X)=\emptyset$ for\/ $k>1;$ and \item[{\bf(e)}] If\/ $x\in S^k(X)$ then $IS(T_xX)$ in $T_xX$ is isomorphic to ${\mathbin{\mathbb R}}^n_k$ in ${\mathbin{\mathbb R}}^n$. Also the intersection $IS(T_xX)\cap -IS(T_xX)$ in $T_xX$ is~$T_xS^k(X)\cong{\mathbin{\mathbb R}}^{n-k}$. \end{itemize} \label{mc2prop1} \end{prop}
\begin{dfn} Let $X$ be a manifold with corners, and $x\in X$. A {\it local boundary component\/ $\beta$ of\/ $X$ at\/} $x$ is a local choice of connected component of $S^1(X)$ near $x$. That is, for each sufficiently small open neighbourhood $V$ of $x$ in $X$, $\beta$ gives a choice of connected component $W$ of $V\cap S^1(X)$ with $x\in\overline W$, and any two such choices $V,W$ and $V',W'$ must be compatible in the sense that~$x\in\overline{(W\cap W')}$. \label{mc2def4} \end{dfn}
The meaning of local boundary components in coordinate charts is easy to explain. Suppose $(U,\phi)$ is a chart on $X$ with $\phi(u)=x$, where $U$ is an open set in ${\mathbin{\mathbb R}}^n_k$, and write $u=(u_1,\ldots,u_n)$. Then \begin{equation*} S^1(U)=\textstyle\coprod_{i=1}^k\bigl\{(x_1,\ldots,x_n)\in U:\text{$x_i=0$, $x_j\ne 0$ for $j\ne i$, $j=1,\ldots,k$}\bigr\}. \end{equation*} If $u_i=0$ for some $i=1,\ldots,k$, then $\bigl\{(x_1,\ldots,x_n)\in U:x_i=0$, $x_j\ne 0$ for $j\ne i$, $j=1,\ldots,k\bigr\}$ is a subset of $S^1(U)$ whose closure contains $u$, and the intersection of this subset with any sufficiently small open ball about $u$ is connected, so this subset uniquely determines a local boundary component of $U$ at $u$, and hence a local boundary component of $X$ at $x$. Thus, the local boundary components of $X$ at $x$ are in 1-1 correspondence with those $i=1,\ldots,k$ with $u_i=0$. But the number of such $i$ is the depth $\mathop{\rm depth}\nolimits_Xx$. Hence {\it there are exactly $\mathop{\rm depth}\nolimits_Xx$ distinct local boundary components\/ $\beta$ of\/ $X$ at\/ $x$ for each\/}~$x\in X$.
\begin{dfn} Let $X$ be a manifold with corners. As a set, define the {\it boundary\/} \e \partial X=\bigl\{(x,\beta):\text{$x\in X$, $\beta$ is a local boundary component for $X$ at $x$}\bigr\}. \label{mc2eq1} \e Define a map $i_X:\partial X\rightarrow X$ by $i_X:(x,\beta)\mapsto x$. Note that $i_X$ {\it need not be injective}, as $\bmd{i_X^{-1}(x)}=\mathop{\rm depth}\nolimits_Xx$ for all~$x\in X$. \label{mc2def5} \end{dfn}
If $(U,\phi)$ is a chart on the manifold $X$ with $U\subseteq{\mathbin{\mathbb R}}^n_k$ open, then for each $i=1,\ldots,k$ we can define a chart $(U_i,\phi_i)$ on $\partial X$ by \e \begin{split} &U_i=\bigl\{(x_1,\ldots,x_{n-1})\in {\mathbin{\mathbb R}}^{n-1}_{k-1}: (x_1,\ldots,x_{i-1},0,x_i,\ldots,x_{n-1})\in U\subseteq{\mathbin{\mathbb R}}^n_k\bigr\},\\ &\phi_i:(x_1,\ldots,x_{n-1})\longmapsto\bigl(\phi (x_1,\ldots,x_{i-1}, 0,x_i,\ldots,x_{n-1}),\phi_*(\{x_i=0\})\bigr). \end{split} \label{mc2eq2} \e All such charts on $\partial X$ are compatible, and induce a manifold structure on $\partial X$. Thus as in Douady \cite[\S 6]{Doua} we may prove:
\begin{prop} Let\/ $X$ be an $n$-manifold with corners. Then $\partial X$ is naturally an $(n\!-\!1)$-manifold with corners for $n>0,$ and\/ $\partial X=\emptyset$ if\/~$n=0$. \label{mc2prop2} \end{prop}
The map $i_X:\partial X\rightarrow X$ is continuous, and we will see in \S\ref{mc3} that it is smooth. By considering the local models ${\mathbin{\mathbb R}}^n_k$ for $X$, we see:
\begin{lem} As a map between topological spaces, $i_X:\partial X\rightarrow X$ in Definition {\rm\ref{mc2def5}} is continuous, finite (that is, $i_X^{-1}(x)$ is finite for all\/ $x\in X$), and proper (that is, if\/ $S\subseteq X$ is compact then $i_X^{-1}(S)\subseteq\partial X$ is compact). \label{mc2lem} \end{lem}
As $\partial X$ is a manifold with corners we can iterate the boundary construction to obtain $\partial X,\partial^2X,\ldots,\partial^nX$, with $\partial^kX$ an $(n-k)$-manifold with corners.
\begin{prop} Let\/ $X$ be an $n$-manifold with corners. Then for $k=0,\ldots,n$ there are natural identifications \e \begin{split} \partial^kX\cong\bigl\{(x,\beta_1,\ldots,\beta_k):\,&\text{$x\in X,$ $\beta_1,\ldots,\beta_k$ are distinct}\\ &\text{local boundary components for $X$ at $x$}\bigr\}. \end{split} \label{mc2eq3} \e \label{mc2prop3} \end{prop}
\begin{proof} Consider first the case $k=2$. Points of $\partial^2X$ are of the form $\bigl((x,\beta_1),\tilde\beta_2\bigr)$, where $x\in X$, and $\beta_1$ is a local boundary component of $X$ at $x$, and $\tilde\beta_2$ is a local boundary component of $\partial X$ at $(x,\beta_1)$. Suppose $(U,\phi)$ is a chart for $X$ with $x=\phi(u)$ for some $u\in U$, where $U$ is open in ${\mathbin{\mathbb R}}^n_l$ for $l\geqslant 2$, and $u=(u_1,\ldots,u_n)$, with $u_{i_1}=0$ and $\phi^{-1}(\beta_1)$ the local boundary component $x_{i_1}=0$ in $U$. Then \eq{mc2eq2} gives a chart $(U_{i_1},\phi_{i_1})$ for $\partial X$ with $\phi_{i_1}\bigl((u_1,\ldots,u_{i_1-1},u_{i_1+1},\ldots, u_n)\bigr)=(x,\beta_1)$. Thus $\phi_{i_1}^{-1}(\tilde\beta_2)$ is a local boundary component for $U_{i_1}$, and so is of the form $x_j=0$ for $j=1,\ldots,l-1$. Write $i_2=j$ if $j<i_1$ and $i_2=j+1$ if $j\geqslant i_1$. Then $u_{i_2}=0$, as $u_{i_2}$ is the $j^{\rm th}$ coordinate of~$(u_1,\ldots,u_{i_1-1},u_{i_1+1},\ldots,u_n)$.
Let $\beta_2$ be the local boundary component $\phi_*(\{x_{i_2}=0\})$ of $X$ at $x$. Then $\beta_2\ne \beta_1$ as $i_2\ne i_1$. We have constructed a 1-1 correspondence between local boundary components $\tilde\beta_2$ of $\partial X$ at $(x,\beta_1)$ and local boundary components $\beta_2$ of $X$ at $x$ with $\beta_2\ne\beta_1$. This 1-1 correspondence is independent of the choice of chart $(U,\phi)$. Identifying $\bigl((x,\beta_1),\tilde\beta_2\bigr)$ with $(x,\beta_1,\beta_2)$ gives \eq{mc2eq3} for~$k=2$.
We prove the general case by induction on $k$. The case $k=0$ is trivial, $k=1$ is \eq{mc2eq1}, and $k=2$ we have proved above. For the inductive step, having proved \eq{mc2eq3} for $k\leqslant\nobreak l<n$, we show that at a point of $\partial^lX$ identified with $(p,\beta_1,\ldots,\beta_l)$ under \eq{mc2eq3}, local boundary components of $\partial^lX$ are in 1-1 correspondence with local boundary components $\beta_{l+1}$ of $X$ at $p$ distinct from~$\beta_1,\ldots,\beta_l$. \end{proof}
\begin{dfn} Write $S_k$ for the symmetric group on $k$ elements, the group of bijections $\sigma:\{1,\ldots,k\}\rightarrow\{1,\ldots,k\}$. From \eq{mc2eq3} we see that $\partial^kX$ has a natural, free action of $S_k$ by permuting $\beta_1,\ldots,\beta_k$, given by \begin{equation*} \sigma:(x,\beta_1,\ldots,\beta_k)\longmapsto (x,\beta_{\sigma(1)},\ldots,\beta_{\sigma(k)}). \end{equation*} Each $\sigma\in S_k$ acts on $\partial^kX$ as an isomorphism of $(n-k)$-manifolds with corners (a diffeomorphism). Thus, if $G$ is a subgroup of $S_k$, then the quotient $(\partial^kX)/G$ is also an $(n-k)$-manifold with corners.
In particular, taking $G=S_k$, we define the $k$-{\it corners\/} $C_k(X)$ of $X$ to be \e \begin{aligned} C_k(X)=\bigl\{(x,&\{\beta_1,\ldots,\beta_k\}):\text{$x\in X,$ $\beta_1,\ldots,\beta_k$ are distinct}\\ &\text{local boundary components for $X$ at $x$}\bigr\}\cong \partial^kX/S_k, \end{aligned} \label{mc2eq4} \e an $(n-k)$-manifold with corners. Note that $\beta_1,\ldots,\beta_k$ are unordered in \eq{mc2eq4}, but ordered in \eq{mc2eq3}. We have isomorphisms $C_0(X)\cong X$ and~$C_1(X)\cong\partial X$. \label{mc2def6} \end{dfn}
\begin{rem} We review how our definitions so far relate to those in use by other authors. For manifolds without or with boundary, all definitions the author has found (see Kobayashi and Nomizu \cite{KoNo} or Lang \cite{Lang}, for instance) are equivalent to Definition \ref{mc2def1}. However, for manifolds with corners, there are four main {\it inequivalent\/} definitions. Our terminology for (a),(b),(d) follows J\"anich \cite[\S 1.1]{Jani}, and for (c) follows Monthubert~\cite[\S 2.2]{Mont}. \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[(a)] {\it Manifolds with corners\/} are as in Definition~\ref{mc2def1}. \item[(b)] A manifold with corners $X$ is called a {\it manifold with faces\/} if each $x\in X$ lies in the image of $\mathop{\rm depth}\nolimits_Xx=\bmd{i_X^{-1}(x)}$ different connected components of $\partial X$ under~$i_X:\partial X\rightarrow X$. \item[(c)] A manifold with corners $X$ is called a {\it manifold with embedded corners\/} if there exists a decomposition $\partial X=\partial_1X\amalg \partial_2 X\amalg \cdots\amalg\partial_NX$ for finite $N\geqslant 0$ with $\partial_iX$ open and closed in $\partial X$, such that $i_X\vert_{\partial_iX}:\partial_iX\rightarrow X$ is injective for $i=1,\ldots,N$. We allow~$\partial_iX=\emptyset$. \item[(d)] For each integer $N\geqslant 0$, an $\an{N}$-{\it manifold\/} is a manifold with corners $X$ together with a given decomposition $\partial X=\partial_1X\amalg\cdots\amalg\partial_NX$ with $\partial_iX$ open and closed in $\partial X$, such that $i_X\vert_{\partial_iX}:\partial_iX\rightarrow X$ is injective for $i=1,\ldots,N$. We allow $\partial_iX=\emptyset$. Note that $N$ has no relation to $\mathop{\rm dim}\nolimits X$. A $\an{0}$-manifold is a manifold without boundary, and a $\an{1}$-manifold is a manifold with boundary. \end{itemize}
Note that (c) implies (b) implies (a), and (d) becomes (c) after forgetting the decomposition $\partial X=\partial_1X\amalg\cdots\amalg \partial_NX$. An example satisfying (a) but not (b)--(d) is the {\it teardrop\/} $T=\bigl\{(x,y)\in{\mathbin{\mathbb R}}^2:x\geqslant 0$, $y^2\leqslant\nobreak x^2-x^4\bigr\}$, shown in Figure \ref{fig1}. For compact $X$ (b) also implies (c), but one can find pathological examples of noncompact $X$ which satisfy (b), but not (c) or (d). Cerf \cite{Cerf}, Douady \cite{Doua}, Margalef-Roig and Outerelo Dominguez \cite{MaOu}, and others define manifolds with corners as in (a). Melrose \cite{Melr1,Melr2} and authors who follow him define manifolds with corners as in (c); Melrose \cite[\S 1.6]{Melr2} calls manifolds with corners in sense (a) {\it t-manifolds}. J\"anich \cite[\S 1.1]{Jani} defines manifolds with corners in senses (a),(b) and (d), but is primarily interested in (d). Laures \cite{Laur} also works with $\an{N}$-manifolds, in sense~(d). \begin{figure}
\caption{The teardrop, a 2-manifold with corners of type (a)}
\label{fig1}
\end{figure}
The {\it boundary\/} $\partial X$ of a manifold with corners $X$ is also defined in different ways in the literature. In our picture, $\partial X$ is a manifold with corners, with an immersion $i_X:\partial X\rightarrow X$ which is not necessarily injective, so that $\partial X$ may not be a subset of $X$. This follows Douady \cite[\S 6]{Doua}, who defines $\partial^kX$ (in his notation) to be equivalent to our $C_k(X)$ in \eq{mc2eq4}, so that his $\partial^1X$ agrees with our $\partial X$. All the other authors cited define $\partial X$ to be $i_X(\partial X)$ in our notation, so that $\partial X$ is a subset of $X$, but is not necessarily a manifold with corners. But in (c),(d) above, the $\partial_iX$ are both subsets of $X$ and manifolds with corners. \label{mc2rem} \end{rem}
If $X,Y$ are manifolds with corners of dimensions $m,n$, there is a natural way to make the product $X\times Y$ into a manifold with corners, of dimension $m+n$. The following result on boundary and $k$-corners of $X\times Y$ is easy to prove by considering local models ${\mathbin{\mathbb R}}^{m+n}_{a+b}\cong {\mathbin{\mathbb R}}^m_a\times{\mathbin{\mathbb R}}^n_b$ for~$X\times Y$.
\begin{prop} Let\/ $X,Y$ be manifolds with corners. Then there are natural isomorphisms of manifolds with corners \ea \partial(X\times Y)&\cong (\partial X\times Y)\amalg (X\times\partial Y), \label{mc2eq5}\\ C_k(X\times Y)&\cong \textstyle\coprod_{i,j\geqslant 0,\; i+j=k}C_i(X)\times C_j(Y). \label{mc2eq6} \ea \label{mc2prop4} \end{prop}
Note that \eq{mc2eq5} and \eq{mc2eq6} imply that \ea \partial^k(X\times Y)&\cong\textstyle \coprod_{i=1}^k\binom{k}{i}\, \partial^iX\times\partial^{k-i}Y, \label{mc2eq7}\\ \textstyle\coprod_{k=0}^{\mathop{\rm dim}\nolimits X\times Y}C_k(X\times Y)&\cong \textstyle\bigl[\coprod_{i=0}^{\mathop{\rm dim}\nolimits X}C_i(X) \bigr]\times\bigl[\coprod_{j=0}^{\mathop{\rm dim}\nolimits Y}C_j(Y)\bigr]. \label{mc2eq8} \ea We will see in \S\ref{mc4} that if $X$ is a manifold with corners then we can make $\coprod_{i=0}^{\mathop{\rm dim}\nolimits X}C_i(X)$ behave functorially under smooth maps.
The map $X\mapsto C_k(X)$ commutes with boundaries. The proof is again an easy exercise by considering local models ${\mathbin{\mathbb R}}^m_a$ for~$X$.
\begin{prop} Let\/ $X$ be a manifold with corners and\/ $k\geqslant 0$. Then there are natural identifications, with the first a diffeomorphism: \e \begin{split} \partial\bigl(C_k(X)\bigr)\cong C_k(\partial X)\cong \bigl\{(x,\beta_1,\{\beta_2,\ldots,\beta_{k+1}\}):\text{$x\in X,$ $\beta_1,\ldots,\beta_{k+1}$}&\\ \text{are distinct local boundary components for\/ $X$ at\/ $x$}&\bigr\}. \end{split} \label{mc2eq9} \e \label{mc2prop5} \end{prop}
The next definition will be used in defining smooth maps in~\S\ref{mc3}.
\begin{dfn} Let $X$ be a manifold with corners, and $(x,\beta)\in\partial X$. A {\it boundary defining function for\/ $X$ at\/} $(x,\beta)$ is a pair $(V,b)$, where $V$ is an open neighbourhood of $x$ in $X$ and $b:V\rightarrow[0,\infty)$ is a map, such that $b:V\rightarrow{\mathbin{\mathbb R}}$ is smooth in the sense of Definition \ref{mc2def2}, and ${\rm d} b\vert_x:T_xV\rightarrow T_0[0,\infty)$ is nonzero, and there exists an open neighbourhood $\tilde V$ of $(x,\beta)$ in $i_X^{-1}(V)\subseteq\partial X$, with $b\circ i_X\vert_{\tilde V}\equiv 0$, and $i_X\vert_{\tilde V}:\tilde V\longrightarrow\bigl\{x'\in V:b(x')=0\bigr\}$ is a homeomorphism between $\tilde V$ and an open subset of~$\bigl\{x'\in V:b(x')=0\bigr\}$.
Thus the boundary $\partial X$ is defined near $(x,\beta)$ by the equation $b=0$ in $X$ near $x$. Using the ideas on fibre products of manifolds with corners in \S\ref{mc6}, one can say more: $\partial X$ near $(x,\beta)$ is naturally isomorphic to the fibre product of manifolds with corners $V\times_{b,[0,\infty),i}\{0\}$ near $(x,0)$, where $i:\{0\}\rightarrow[0,\infty)$ is the inclusion, and the fibre product exists near~$(x,0)$. \label{mc2def7} \end{dfn}
Here are some properties of such $(V,b)$. The proofs are elementary.
\begin{prop} Let\/ $X$ be an $n$-manifold with corners, and\/~$(x,\beta)\in\partial X$. \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[{\bf(a)}] There exists a boundary defining function\/ $(V,b)$ for\/ $X$ at\/ $(x,\beta)$. \item[{\bf(b)}] Let\/ $(V,b)$ and\/ $(V',b')$ be boundary defining functions for $X$ at\/ $(x,\beta)$. Then there exists an open neighbourhood\/ $V''$ of\/ $x$ in $V\cap V'$ and a smooth function $g:V''\rightarrow(0,\infty)\subset{\mathbin{\mathbb R}}$ such that\/ $b'\vert_{V''}\equiv b\vert_{V''}\cdot g$. \item[{\bf(c)}] Let\/ $(V,b)$ be a boundary defining function for $X$ at\/ $(x,\beta)$. Then there exists a chart\/ $(U,\phi)$ on the manifold\/ $X,$ such that\/ $U$ is open in\/ ${\mathbin{\mathbb R}}^n_k$ for\/ $0<k\leqslant\nobreak n$ and\/ $0\in U$ with\/ $\phi(0)=x,$ and\/ $\beta$ is the image of the local boundary component\/ $x_1=0$ of\/ $U$ at\/ $0,$ and\/ $\phi(U)\subseteq V,$ and\/ $b\circ\phi\equiv x_1:U\rightarrow[0,\infty)$. \item[{\bf(d)}] Let\/ $(U,\phi)$ be a chart on the manifold\/ $X,$ such that\/ $U$ is open in\/ ${\mathbin{\mathbb R}}^n_k$ and\/ $u\in U$ with\/ $\phi(u)=x,$ and\/ $\beta$ is the image of the local boundary component $x_i=0$ of\/ $U$ at\/ $u$ for $i\leqslant\nobreak k$. Then $\bigl(\phi(U),x_i\circ\phi^{-1}\bigr)$ is a boundary defining function for $X$ at\/~$(x,\beta)$. \end{itemize} \label{mc2prop6} \end{prop}
\section{Smooth maps of manifolds with corners} \label{mc3}
Here is our definition of {\it smooth maps\/} $f:X\rightarrow Y$ of manifolds with corners~$X,Y$.
\begin{dfn} Let $X,Y$ be manifolds with corners of dimensions $m,n$. A continuous map $f:X\rightarrow Y$ is called {\it weakly smooth\/} if whenever $(U,\phi),(V,\psi)$ are charts on the manifolds $X,Y$ then \begin{equation*} \psi^{-1}\circ f\circ\phi:(f\circ\phi)^{-1}(\psi(V))\longrightarrow V \end{equation*} is a smooth map from $(f\circ\phi)^{-1}(\psi(V))\subset{\mathbin{\mathbb R}}^m$ to $V\subset{\mathbin{\mathbb R}}^n$, where smooth maps between subsets of ${\mathbin{\mathbb R}}^m,{\mathbin{\mathbb R}}^n$ are defined in Definition~\ref{mc2def1}.
A weakly smooth map $f:X\rightarrow Y$ is called {\it smooth\/} if it satisfies the following additional condition over $\partial X,\partial Y$. Suppose $x\in X$ with $f(x)=y\in Y$, and $\beta$ is a local boundary component of $Y$ at $y$. Let $(V,b)$ be a boundary defining function for $Y$ at $(y,\beta)$. Then $f^{-1}(V)$ is an open neighbourhood of $x$ in $X$, and $b\circ f:f^{-1}(V)\rightarrow[0,\infty)$ is a weakly smooth map. We require that either $b\circ f\equiv 0$ on an open neighbourhood of $x$ in $f^{-1}(V)$, or $(f^{-1}(V),b\circ f)$ is a boundary defining function for $X$ at $(x,\tilde\beta)$, for some unique local boundary component $\tilde\beta$ of $X$ at~$x$. \label{mc3def1} \end{dfn}
We also define five special classes of smooth maps:
\begin{dfn} Let $X,Y$ be manifolds with corners of dimensions $m,n$, and $f:X\rightarrow Y$ a weakly smooth map. If $x\in X$ with $f(x)=y$ then in the usual way there is an induced linear map on tangent spaces ${\rm d} f\vert_x:T_xX\rightarrow T_yY$. In the notation of Definition \ref{mc2def2}, ${\rm d} f\vert_x:T_xX\rightarrow T_yY$ maps $IS(T_xX)\rightarrow IS(T_yY)$, that is, ${\rm d} f\vert_x$ maps inward-pointing vectors to inward-pointing vectors.
Let $x\in S^k(X)$ and $y\in S^l(Y)$. Then the inclusion $T_x(S^k(X))\subseteq IS(T_xX)\subseteq T_xX$ is modelled on $\{0\}\times{\mathbin{\mathbb R}}^{m-k}\subseteq [0,\infty)^k\times{\mathbin{\mathbb R}}^{m-k}\subseteq {\mathbin{\mathbb R}}^m$. Hence $T_x(S^k(X))=IS(T_xX)\cap -IS(T_xX)$, and similarly $T_y(S^l(Y))=IS(T_yY)\cap -IS(T_yY)$. Since ${\rm d} f\vert_x$ maps $IS(T_xX)\rightarrow IS(T_yY)$ it maps $IS(T_xX)\cap -IS(T_xX)\rightarrow IS(T_yY) \cap -IS(T_yY)$, that is, ${\rm d} f\vert_x$ maps $T_x(S^k(X))\rightarrow T_y(S^l(Y))$. Hence there is an induced linear map \e ({\rm d} f\vert_x)_*:T_xX/T_x(S^k(X))\longrightarrow T_yY/T_y(S^l(Y)). \label{mc3eq1} \e
Now let $f:X\rightarrow Y$ be a smooth map. \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[(i)] We call $f$ a {\it diffeomorphism\/} if $f$ has a smooth inverse $f^{-1}:Y\rightarrow X$. \item[(ii)] We call $f$ an {\it immersion\/} if ${\rm d} f\vert_x:T_xX\rightarrow T_{f(x)}Y$ is injective for all~$x\in X$. \item[(iii)] We call $f$ an {\it embedding\/} if it is an injective immersion. \item[(iv)] We call $f$ a {\it submersion\/} if ${\rm d} f\vert_x:T_xX\rightarrow T_{f(x)}Y$ and ${\rm d} f\vert_x:T_x(S^k(X))\rightarrow T_{f(x)}(S^l(Y))$ are surjective for all $x\in X$, where $x\in S^k(X)$, $f(x)\in S^l(Y)$. \item[(v)] We call $f$ {\it boundary-submersive}, or {\it b-submersive}, if $({\rm d} f\vert_x)_*$ in \eq{mc3eq1} is surjective for all $x\in X$. Note that ${\rm d} f\vert_x$ surjective implies $({\rm d} f\vert_x)_*$ surjective, so submersions are automatically b-submersive. \end{itemize} \label{mc3def2} \end{dfn}
Here is how Definition \ref{mc3def1} relates to other definitions in the literature:
\begin{rem} Weakly smooth maps $f:X\rightarrow Y$ are just the obvious generalization of the usual definition \cite[\S I.1]{KoNo} of smooth maps for manifolds without boundary. If $\partial Y=\emptyset$ the additional condition in Definition \ref{mc3def1} is vacuous, and weakly smooth maps are smooth. Note that the definition of smooth maps $f:X\rightarrow{\mathbin{\mathbb R}}$ in Definition \ref{mc2def2} is equivalent to Definition \ref{mc3def1} when~$Y={\mathbin{\mathbb R}}$.
Our definition of smooth maps between manifolds with corners is not equivalent to any other definition that the author has found in the literature, though it is related. Most authors, such as Cerf \cite[\S I.1.2]{Cerf}, define smooth maps of manifolds with corners to be weakly smooth maps, in our notation. But there are also two more complex definitions. Firstly, Monthubert \cite[Def.~2.8]{Mont} defines {\it morphisms of manifolds with corners\/} $f:X\rightarrow Y$. One can show that these are equivalent to {\it b-submersive smooth maps}, in our notation. We prefer our definition, as b-submersive smooth maps do not have all the properties we want. In particular, Theorem \ref{mc3thm}(iv),(vi) below fail for b-submersive smooth maps.
Secondly, Melrose \cite[\S 1.12]{Melr2} defines {\it b-maps\/} between manifolds with corners. Let $f:X\rightarrow Y$ be a weakly smooth map. We call $f$ a {\it b-map\/} if the following holds. Let $x\in X$ with $f(x)=y$, and let the local boundary components of $X$ at $x$ be $\tilde\beta_1,\ldots,\tilde\beta_k$, and of $Y$ at $y$ be $\beta_1,\ldots,\beta_l$. Suppose $(\tilde V_i,\tilde b_i)$ is a boundary defining function for $X$ at $(x,\tilde\beta_i)$, $i=1,\ldots,k$, and $(V_j,b_j)$ a boundary defining function for $Y$ at $(y,\beta_j)$, $j=1,\ldots,l$. Then for all $j=1,\ldots,l$ either $b_j\circ f$ should be zero near $x$ in $X$, or there should exist $e_{1j},\ldots,e_{kj}\in{\mathbin{\mathbb N}}$ such that $b_j\circ f\equiv G_j\cdot\prod_{i=1}^k\tilde b{}_i^{e_{ij}}$ near $x$ in $X$ for smooth $G_j>0$. Thus, a smooth map in the sense of Definition \ref{mc3def1} is exactly a b-map $f:X\rightarrow Y$ such that for all such $x,y$ and $j=1,\ldots,l$, one of $e_{1j},\ldots,e_{kj}$ is 1 and the rest are zero. So our smooth maps are a special class of Melrose's b-maps. \label{mc3rem} \end{rem}
Here are some properties of smooth maps. The proofs are elementary.
\begin{thm} Let\/ $W,X,Y,Z$ be manifolds with corners. \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[{\rm(i)}] If\/ $f:X\rightarrow Y$ and\/ $g:Y\rightarrow Z$ are smooth then\/ $g\circ f:X\rightarrow Z$ is smooth. \item[{\rm(ii)}] The identity map\/ $\mathop{\rm id}\nolimits_X:X\rightarrow X$ is smooth. \item[{\rm(iii)}] Diffeomorphisms\/ $f:X\rightarrow Y$ are equivalent to isomorphisms of smooth manifolds, that is, to homeomorphisms of topological spaces $f:X\rightarrow Y$ which identify the maximal atlases on $X$ and~$Y$. \item[{\rm(iv)}] The map $i_X:\partial X\rightarrow X$ in Definition\/ {\rm\ref{mc2def5}} is a smooth immersion. \item[{\rm(v)}] If\/ $f:W\rightarrow Y$ and\/ $g:X\rightarrow Z$ are smooth, the \begin{bfseries}product\end{bfseries} $f\times g:W\times X\rightarrow Y\times Z$ given by $(f\times g)(w,x)=\bigl(f(w),g(x)\bigr)$ is smooth. \item[{\rm(vi)}] If\/ $f:X\rightarrow Y$ and\/ $g:X\rightarrow Z$ are smooth, the \begin{bfseries}direct product\end{bfseries} $(f,g):X\rightarrow Y\times Z$ given by $(f,g)(x)=\bigl(f(x),g(x)\bigr)$ is smooth. \item[{\rm(vii)}] Regarding the empty set\/ $\emptyset$ as a manifold and the point\/ $\{0\}$ as a $0$-manifold, the unique maps\/ $\emptyset:\emptyset\rightarrow X$ and\/ $\pi:X\rightarrow\{0\}$ are smooth. \end{itemize} \label{mc3thm} \end{thm}
Theorem \ref{mc3thm}(i),(ii) show that manifolds with corners form a {\it category\/}, which we write ${\mathop{\bf Man^c}}$, with objects manifolds with corners $X$ and morphisms smooth maps $f:X\rightarrow Y$. We write ${\mathop{\bf Man^b}}$ for the full subcategory of ${\mathop{\bf Man^c}}$ whose objects are manifolds with boundary, and ${\mathop{\bf Man}}$ for the full subcategory of ${\mathop{\bf Man^c}}$ whose objects are manifolds without boundary, so that ${\mathop{\bf Man}} \subset{\mathop{\bf Man^b}}\subset{\mathop{\bf Man^c}}$. Theorem \ref{mc3thm}(v),(vi) have a category-theoretic interpretation in terms of products in ${\mathop{\bf Man^c}}$, and (vii) says that $\emptyset$ is an {\it initial object\/} in ${\mathop{\bf Man^c}}$, and $\{0\}$ is a {\it terminal object\/} in ${\mathop{\bf Man^c}}$. Here are some examples.
\begin{ex}{\bf(a)} If $X$ is a manifold with corners, the diagonal map $\Delta_X:X\rightarrow X\times X$, $\Delta_X:x\mapsto(x,x)$, is a smooth embedding. This follows from Theorem \ref{mc3thm}(ii),(vi), as $\Delta_X=(\mathop{\rm id}\nolimits_X,\mathop{\rm id}\nolimits_X)$. If $\partial X\ne\emptyset$ then $\Delta_X$ is not b-submersive, so it is not a morphism of manifolds in the sense of Monthubert~\cite[Def.~2.8]{Mont}.
\noindent{\bf(b)} If $X,Y$ are manifolds with corners then the projection $\pi_X:X\times Y\rightarrow X$ is a smooth submersion. This follows from Theorem \ref{mc3thm}(ii),(v),(vii), by identifying $\pi_X:X\times Y\rightarrow X$ with $\mathop{\rm id}\nolimits_X\times\pi:X\times Y\rightarrow X\times\{0\}$.
\noindent{\bf(c)} The inclusion $i:[0,\infty)\rightarrow{\mathbin{\mathbb R}}$ is smooth, but it is not a submersion, since at $0\in[0,\infty)$ the map ${\rm d} i\vert_0:T_0S^0\bigl([0,\infty)\bigr)\rightarrow T_0S^1({\mathbin{\mathbb R}})$ is not surjective.
\noindent{\bf(d)} The map $f:{\mathbin{\mathbb R}}\rightarrow[0,\infty)$, $f(x)=x^2$ is weakly smooth but {\it not\/} smooth, as the additional condition in Definition \ref{mc3def1} fails at~$x=0$.
\noindent{\bf(e)} The map $f:[0,\infty)^2\rightarrow[0,\infty)$, $f(x,y)=x+y$ is weakly smooth but {\it not\/} smooth, as Definition \ref{mc3def1} fails at~$(x,y)=(0,0)$.
\noindent{\bf(f)} The map $f:[0,\infty)^2\rightarrow[0,\infty)$, $f(x,y)=xy$ is weakly smooth but {\it not\/} smooth, as Definition \ref{mc3def1} fails at $(x,y)=(0,0)$. However, $f$ is a b-map in the sense of Melrose \cite[\S 1.12]{Melr2}, with~$e_{11}=e_{21}=1$. \label{mc3ex} \end{ex}
\section{Describing how smooth maps act on corners} \label{mc4}
If $f:X\rightarrow Y$ is a smooth map of manifolds with corners, then $f$ may relate $\partial^kX$ to $\partial^lY$ for $k,l\geqslant 0$ in complicated ways. We now explain two different ways to describe these relations. The first involves a decomposition $X\times_Y\partial Y=\Xi^f_+\amalg\Xi^f_-$ and maps $\xi^f_+:\Xi^f_+\rightarrow X$ and $\xi^f_-:\Xi^f_-\rightarrow\partial X$. This will be important in \cite{Joyc} when we define {\it d-manifolds with corners\/} and {\it d-orbifolds with corners}, which are `derived' versions of manifolds and orbifolds with corners. To make this generalization we find it helpful to replace a manifold with corners $X$ by the triple $(X,\partial X,i_X)$, so we need to characterize smooth maps $f:X\rightarrow Y$ in terms of the triples~$(X,\partial X,i_X),(Y,\partial Y,i_Y)$.
\begin{dfn} Let $X,Y$ be manifolds with corners, and $f:X\rightarrow Y$ a smooth map. Consider the smooth maps $f:X\rightarrow Y$ and $i_Y:\partial Y\rightarrow Y$ as continuous maps of topological spaces. Then we may form the fibre product of topological spaces $X\times_Y\partial Y=X\times_{f,Y,i_Y}\partial Y$, given explicitly by \begin{equation*} X\times_Y\partial Y=\bigl\{\bigl(x,(y,\beta)\bigr)\in X\times\partial Y:f(x)=y=i_Y(y,\beta)\bigr\}. \end{equation*} This is a closed subspace of the topological space $X\times\partial Y$, since $X,\partial Y$ are Hausdorff, and so it is a topological space with the subspace topology.
By Definition \ref{mc3def1}, for each $\bigl(x,(y,\beta)\bigr)\in X\times_Y\partial Y$, if $(V,b)$ is a boundary defining function for $Y$ at $(y,\beta)$, then either $b\circ f\equiv 0$ near $x$, or $(f^{-1}(V),b\circ f)$ is a boundary defining function for $X$ at some $(x,\tilde\beta)$. Define subsets $\Xi^f_+,\Xi^f_-$ of $X\times_Y\partial Y$ by $\bigl(x,(y,\beta)\bigr)\in\Xi^f_+$ if $b\circ f\equiv 0$ near $x$, and $\bigl(x,(y,\beta)\bigr)\in\Xi^f_-$ otherwise. Define maps $\xi^f_+:\Xi^f_+\rightarrow X$ by $\xi^f_+\bigl(x,(y,\beta)\bigr)=x$ and $\xi^f_-:\Xi^f_-\rightarrow\partial X$ by $\xi^f_-\bigl(x,(y,\beta)\bigr) =(x,\tilde\beta)$, for $(x,\tilde\beta)$ as above. It is easy to show that $\Xi^f_\pm,\xi^f_\pm$ can also be defined solely in terms of $\bigl(x,(y,\beta)\bigr)$ and ${\rm d} f\vert_x$, and so they are independent of the choice of $(V,b)$, and are well-defined. \label{mc4def1} \end{dfn}
Here are some properties of these $\Xi^f_\pm,\xi^f_\pm$. A continuous map $g:X\rightarrow Y$ is a {\it finite covering map\/} if every $y\in Y$ has an open neighbourhood $U$ such that $g^{-1}(U)$ is homeomorphic to $U\times T$ for some finite set $T$ with the discrete topology, and $g:g^{-1}(U)\rightarrow U$ corresponds to the projection~$U\times T\rightarrow U$.
\begin{prop} Let\/ $f:X\rightarrow Y$ be a smooth map of manifolds with corners, and\/ $\Xi^f_\pm,\xi^f_\pm$ be as in Definition {\rm\ref{mc4def1},} and set\/ $n=\mathop{\rm dim}\nolimits X$. Then \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[{\rm(a)}] $\Xi^f_\pm$ are open and closed subsets of\/ $X\times_Y\partial Y,$ with\/~$X\times_Y\partial Y=\Xi^f_+\amalg\Xi^f_-$. \item[{\rm(b)}] $\xi^f_+:\Xi^f_+\rightarrow X$ and\/ $\xi^f_-:\Xi^f_-\rightarrow\partial X$ are proper, finite covering maps of topological spaces, with\/ $\smash{\xi^f_+\equiv\pi_X\vert_{\Xi^f_+}}$ and\/~$\smash{i_X\circ \xi^f_-\equiv\pi_X\vert_{\Xi^f_-}}$. \item[{\rm(c)}] Part\/ {\rm(b)} implies there is a unique way to make\/ $\smash{\Xi^f_+}$ into an $n$-manifold with corners and\/ $\Xi^f_-$ into an $(n\!-\!1)$-manifold with corners so that\/ $\xi^f_\pm$ are local diffeomorphisms, and so covering maps of manifolds. Then the projections $\pi_X:\Xi^f_\pm\rightarrow X$ and\/ $\pi_{\partial Y}:\Xi^f_\pm\rightarrow\partial Y$ are smooth maps. \end{itemize} \label{mc4prop1} \end{prop}
\begin{proof} For (a), clearly $X\times_Y\partial Y=\Xi^f_+\amalg\Xi^f_-$. Let $\bigl(x,(y,\beta)\bigr)\in X\times_Y\partial Y$, and $(V,b)$ be a boundary defining function for $Y$ at $(y,\beta)$. Then $(V,b)$ is also a boundary defining function for $Y$ at any $(y',\beta')$ sufficiently close to $(y,\beta)$ in $\partial Y$. Hence if $\bigl(x',(y',\beta')\bigr)$ is sufficiently close to $\bigl(x,(y,\beta)\bigr)\in X\times_Y\partial Y$ then $(V,b)$ is also a boundary defining function for $Y$ at $(y',\beta')$. We have $\bigl(x,(y,\beta)\bigr)\in\Xi^f_+$ if $b\circ f\equiv 0$ near $x$. Fixing $(V,b)$ this is an open condition in $x$, so $\Xi^f_+$ is open in $X\times_Y\partial Y$, and thus $\Xi^f_-=(X\times_Y\partial Y)\setminus\Xi^f_+$ is closed in $X\times_Y\partial Y$. Similarly, $\bigl(x,(y,\beta)\bigr) \in\Xi^f_-$ if $(f^{-1}(V),b\circ f)$ is a boundary defining function for $X$ at some $(x,\tilde\beta)$. Fixing $(V,b)$ this is an open condition in $(x,\tilde\beta)$, so $\Xi^f_-$ is open, and $\Xi^f_+=(X\times_Y\partial Y)\setminus\Xi^f_-$ is closed, proving~(a).
For (b), the identities $\smash{\xi^f_+\equiv\pi_X\vert_{\Xi^f_+}}$ and $\smash{i_X\circ \xi^f_-\equiv\pi_X\vert_{\Xi^f_-}}$ are immediate. First consider $\xi^f_+:\Xi^f_+\rightarrow X$. Since $i_Y:\partial Y\rightarrow Y$ is proper and finite by Lemma \ref{mc2lem}, $\pi_X:X\times_Y\partial Y\rightarrow X$ is proper and finite by properties of topological fibre products, and so $\xi^f_+=\pi_X\vert_{\smash{\Xi^f_+}}:\Xi^f_+\rightarrow X$ is proper and finite as $\Xi^f_+$ is closed in $X\times_Y\partial Y$. To see that $\xi^f_+$ is a covering map, note that it is a local homeomorphism, since as above, given $\bigl(x,(y,\beta)\bigr)\in\Xi^f_+$, if $x'$ is close to $x$ in $X$ then setting $y'=f(x')$, $(V,b)$ is a boundary defining function for $Y$ at $(y',\beta')$ for some unique local boundary component $\beta'$ of $Y$ at $y'$, and then $\bigl(x',(y',\beta')\bigr)\in\Xi^f_+$ with $\xi^f_+\bigl(x',(y',\beta')\bigr)=x'$. We have constructed a local inverse $x'\mapsto\bigl(x',(y',\beta')\bigr)$ for $\xi^f_+$ which is clearly continuous, so $\xi^f_+$ is a local homeomorphism, and thus a finite covering map, as it is finite.
Next consider $\xi^f_-:\Xi^f_-\rightarrow\partial X$. As above, given $\bigl(x,(y,\beta)\bigr)\in\Xi^f_-$ we may fix a boundary defining function $(V,b)$ for $Y$ at $(y,\beta)$, and then for $\bigl(x',(y',\beta')\bigr)$ near $\bigl(x,(y,\beta)\bigr)$ in $\Xi^f_-$ we have $\xi^f_-\bigl(x',(y',\beta')\bigr)=(x',\tilde\beta')$, where $\tilde\beta'$ is the unique local boundary component of $X$ at $x'$ such that $(f^{-1}(V),b\circ f)$ is a boundary defining function for $X$ at $(x',\tilde\beta')$. Therefore $\xi^f_-$ is continuous, as $\tilde\beta'$ depends continuously on $x'$. As above $\pi_X:X\times_Y\partial Y\rightarrow X$ is proper and finite, so $i_X\circ\xi^f_-=\pi_X\vert_{\smash{\Xi^f_-}}:\Xi^f_- \rightarrow X$ is proper and finite as $\Xi^f_-$ is closed, and hence $\xi^f_-$ is proper and finite. We show $\xi^f_-$ is a finite covering map by constructing a local inverse $(x',\tilde\beta')\mapsto\bigl(x',(y',\beta')\bigr)$ as for $\xi^f_+$. This proves~(b).
For (c), $\pi_X:\Xi^f_+\rightarrow X$ is $\xi^f_+:\Xi^f_+\rightarrow X$ and $\pi_X:\Xi^f_-\rightarrow X$ is $i_X\circ\xi^f_-:\Xi^f_-\rightarrow X$, so $\pi_X:\Xi^f_\pm\rightarrow X$ are smooth as $\xi^f_\pm$ are covering maps of manifolds, and so smooth. To see $\pi_{\partial Y}:\Xi^f_\pm\rightarrow\partial Y$ are smooth, note that $i_Y\circ\pi_{\partial Y}\equiv f\circ\pi_X$ as maps $\Xi^f_\pm\rightarrow Y$, so $i_Y\circ\pi_{\partial Y}:\Xi^f_\pm\rightarrow Y$ is smooth, and it follows that $\pi_{\partial Y}:\Xi^f_\pm\rightarrow\partial Y$ is smooth as $\pi_{\partial Y}$ is continuous and $i_Y:\partial Y\rightarrow Y$ is an immersion. \end{proof}
Using $\Xi^f_\pm,\xi^f_\pm$ we can define a decomposition~$\partial X=\partial_+^fX\amalg\partial_-^fX$.
\begin{prop} Let\/ $f:X\rightarrow Y$ be a smooth map of manifolds with corners. Define $\partial_-^fX=\xi_-^f(\Xi_-^f)$ and\/ $\partial_+^fX=\partial X\setminus\xi_-^f(\Xi_-^f),$ so that\/ $\partial X=\partial_+^fX\amalg\partial_-^fX$. Then $\partial_\pm^fX$ are open and closed in $\partial X,$ so they are manifolds with corners. \label{mc4prop2} \end{prop}
\begin{proof} As $\xi_-^f:\Xi_-^f\rightarrow\partial X$ is a covering map by Proposition \ref{mc4prop1}(b), $\xi_-^f(\Xi_-^f)$ is open in $\partial X$. Since $\xi_-^f$ is proper and $\Xi_-^f,\partial X$ are Hausdorff, $\xi_-^f(\Xi_-^f)$ is closed in $\partial X$. So $\partial_-^fX$, and hence $\partial_+^fX=\partial X\setminus\partial_-^fX$, are open and closed in~$\partial X$. \end{proof}
We can characterize b-submersive morphisms $f:X\rightarrow Y$ in Definition \ref{mc3def2}(v) in terms of $\Xi^f_-,\xi^f_-$. The proof is an easy exercise.
\begin{lem} Let\/ $f:X\rightarrow Y$ be a smooth map of manifolds with corners. Then $f$ is b-submersive if and only if\/ $\Xi^f_-=X\times_Y\partial Y,$ so that\/ $\Xi^f_+=\emptyset,$ and\/ $\xi^f_-:\Xi^f_-\rightarrow\partial X$ is injective. \label{mc4lem} \end{lem}
We now move on to our second way of describing how $f$ relates $\partial^kX$ and $\partial^lY$. Equation \eq{mc2eq8} showed that if $X$ is a manifold with corners then $X\mapsto\coprod_{i\geqslant 0}C_i(X)$ commutes with products of manifolds. We will explain how to lift a smooth map $f:X\rightarrow Y$ up to a map $C(f):\coprod_{i\geqslant 0}C_i(X)\rightarrow\coprod_{j\geqslant 0}C_j(Y)$ which is (in a generalized sense) smooth, and which is functorial in a very strong sense.
\begin{dfn} Let $X,Y$ be smooth manifolds with corners and $f:X\rightarrow Y$ a smooth map. Define $C(f):\coprod_{i=0}^{\mathop{\rm dim}\nolimits X} C_i(X)\rightarrow \coprod_{j=0}^{\mathop{\rm dim}\nolimits Y}C_j(Y)$ by \e \begin{split} &C(f):\bigl(x,\{\tilde\beta_1,\ldots,\tilde\beta_i\}\bigr)\longmapsto \bigl(y,\{\beta_1,\ldots,\beta_j\}\bigr),\quad\text{where}\\ &\{\beta_1,\ldots,\beta_j\}\!=\!\bigl\{\beta:\bigl(x,(y,\beta)\bigr)\!\in\! \Xi^f_-,\; \xi^f_-\bigl(x,(y,\beta)\bigr)\!=\!(x,\tilde\beta_l),\; l\!=\!1,\ldots,i\bigr\}.\!\!\!\!\! \end{split} \label{mc3eq2} \e \label{mc4def2} \end{dfn}
\begin{dfn} Let $\{X_i:i\in I\}$ and $\{Y_j:j\in J\}$ be families of manifolds, where $I,J$ are indexing sets. We do not assume that all $X_i$ have the same dimension, or that all $Y_j$ have the same dimension, so $\coprod_{i\in I}X_i$ and $\coprod_{j\in J}Y_j$ need not be manifolds. We call a map $f:\coprod_{i\in I}X_i\rightarrow \coprod_{j\in J}Y_j$ {\it smooth\/} if $f$ is continuous, and for all $i\in I$ and $j\in J$ the map \begin{equation*} f\vert_{X_i\cap f^{-1}(Y_j)}:X_i\cap f^{-1}(Y_j)\rightarrow Y_j \end{equation*} is a smooth map of manifolds. Here $Y_j$ is an open and closed subset of the topological space $\coprod_{j\in J}Y_j$, so $X_i\cap f^{-1}(Y_j)$ is an open and closed subset of $X_i$ as $f$ is continuous, and thus $X_i\cap f^{-1}(Y_j)$ is a manifold. \label{mc4def3} \end{dfn}
The next theorem, in part parallel to Theorem \ref{mc3thm}, gives properties of these maps $C(f)$. The proofs are elementary. The theorem basically says that mapping $X\mapsto \coprod_{i\geqslant 0}C_i(X)$ and $f\mapsto C(f)$ yields a {\it functor\/} which preserves smoothness, composition, identities, boundaries $\partial X$, immersions $i_X:\partial X\rightarrow X$, and products and direct products of smooth maps. Theorem \ref{mc6thm2} will also show that the functor preserves strongly transverse fibre products.
\begin{thm} Let\/ $W,X,Y,Z$ be manifolds with corners. \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[{\bf(i)}] If\/ $f:X\rightarrow Y$ is smooth then\/ $C(f):\coprod_{i\geqslant 0}C_i(X)\rightarrow\coprod_{j\geqslant 0}C_j(Y)$ is smooth in the sense of Definition\/~{\rm\ref{mc4def3}}. \item[{\bf(ii)}] If\/ $f:X\rightarrow Y$ and\/ $g:Y\rightarrow Z$ are smooth then $C(g\circ f)=C(g)\circ C(f):\coprod_{i\geqslant 0}C_i(X)\rightarrow\coprod_{k\geqslant 0}C_k(Z)$. \item[{\bf(iii)}] $C(\mathop{\rm id}\nolimits_X)=\mathop{\rm id}\nolimits_{\coprod_{k\geqslant 0}C_k(X)}: \coprod_{k\geqslant 0}C_k(X)\rightarrow\coprod_{k\geqslant 0}C_k(X)$. \item[{\bf(iv)}] The diffeomorphisms $C_k(\partial X)\cong\partial C_k(X)$ in \eq{mc2eq9} identify \begin{align*} &\textstyle C(i_X):\coprod_{k\geqslant 0}C_k(\partial X)\longrightarrow \coprod_{k\geqslant 0}C_k(X) \qquad\text{with}\\ &i_{\coprod_{k\geqslant 0}C_k(X)}:=\textstyle \coprod_{k\geqslant 0}i_{C_k(X)}=\textstyle \coprod_{k\geqslant 0}\partial C_k(X)\longrightarrow \coprod_{k\geqslant 0}C_k(X). \end{align*} \item[{\bf(v)}] Let\/ $f:W\rightarrow Y$ and\/ $g:X\rightarrow Z$ be smooth maps. Then \eq{mc2eq8} gives \ea \textstyle\coprod_{m\geqslant 0}C_m(W\times X)&\textstyle\cong\bigl[\coprod_{i\geqslant 0}C_i(W) \bigr]\times\bigl[\coprod_{j\geqslant 0}C_j(X)\bigr], \nonumber\\ \textstyle\coprod_{n\geqslant 0}C_n(Y\times Z)&\textstyle\cong\bigl[\coprod_{k\geqslant 0}C_k(Y)\bigr]\times\bigl[\coprod_{l\geqslant 0}C_l(Z)\bigr]. \label{mc3eq3} \ea These identify $C(f\times g):\coprod_{m\geqslant 0}C_m(W\times X)\rightarrow\coprod_{n\geqslant 0}C_n(Y\times Z)$ with\/ $C(f)\times C(g)\!:\!\bigl[\coprod_{i\geqslant 0}\!C_i(W) \bigr]\!\times\!\bigl[\coprod_{j\geqslant 0}\!C_j(X) \bigr]\!\rightarrow\! \bigl[\coprod_{k\geqslant 0}\!C_k(Y)\bigr]\!\times\!\bigl[\coprod_{l\geqslant 0}\!C_l(Z)\bigr]$. \item[{\bf(vi)}] Let\/ $f:X\rightarrow Y$ and\/ $g:X\rightarrow Z$ be smooth maps. 
Then \eq{mc3eq3} identifies $C\bigl((f,g)\bigr):\coprod_{j\geqslant 0}C_j(X)\rightarrow\coprod_{n\geqslant 0}C_n(Y\times Z)$ with\/ $\bigl(C(f),C(g)\bigr):\coprod_{j\geqslant 0}C_j(X)\rightarrow\bigl[\coprod_{k\geqslant 0}C_k(Y)\bigr]\times\bigl[\coprod_{l\geqslant 0}C_l(Z)\bigr]$. \item[{\bf(vii)}] Let\/ $f:X\rightarrow Y$ be a b-submersive smooth map. Then $C(f)$ maps $C_i(X)\rightarrow \coprod_{j=0}^iC_j(Y)$ for all\/~$i\geqslant 0$. \end{itemize} \label{mc4thm} \end{thm}
Curiously, there is a second way to define a map $\coprod_{i\geqslant 0}C_i(X)\rightarrow\coprod_{j\geqslant 0}C_j(Y)$ with the same properties. Define $\hat C(f):\coprod_{i\geqslant 0}C_i(X)\rightarrow\coprod_{j\geqslant 0}C_j(Y)$ by \begin{align*} &\hat C(f):\bigl(x,\{\tilde\beta_1,\ldots,\tilde\beta_i\}\bigr)\longmapsto \bigl(y,\{\beta_1,\ldots,\beta_j\}\bigr),\quad\text{where}\quad \{\beta_1,\ldots,\beta_j\}=\\ &\bigl\{\beta:\bigl(x,(y,\beta)\bigr)\!\in\! \Xi^f_-,\; \xi^f_-\bigl(x,(y,\beta)\bigr)\!=\!(x,\tilde\beta_l),\; 1\!\leqslant\nobreak\! l\!\leqslant\nobreak\! i\bigr\}\!\cup\! \bigl\{\beta:\bigl(x,(y,\beta)\bigr)\!\in\! \Xi^f_+\bigr\}. \end{align*} Then the analogues of Theorems \ref{mc4thm} and \ref{mc6thm2} also hold for $\hat C(f),\hat C(g),\ldots$.
\section{Submersions} \label{mc5}
Definition \ref{mc3def2}(iv) defined {\it submersions\/} $f:X\rightarrow Y$ between manifolds with corners $X,Y$. We show that submersions are locally isomorphic to projections.
\begin{prop} Let\/ $X,Y$ be manifolds with corners, $f:X\rightarrow Y$ a submersion, and\/ $x\in X$ with\/ $f(x)=y$. Then there exist open neighbourhoods\/ $X',Y'$ of\/ $x,y$ in\/ $X,Y$ with\/ $f(X')=Y',$ a manifold\/ $Z'$ with corners with\/ $\mathop{\rm dim}\nolimits X=\mathop{\rm dim}\nolimits Y+\mathop{\rm dim}\nolimits Z',$ and a diffeomorphism $X'\cong Y'\times Z',$ such that\/ $f\vert_{X'}:X'\rightarrow Y'$ is identified with\/~$\pi_{Y'}:Y'\times Z'\rightarrow Y'$. \label{mc5prop1} \end{prop}
\begin{proof} Let $x\in X$ and $y=f(x)\in Y$, with $\mathop{\rm dim}\nolimits X=m$, $\mathop{\rm dim}\nolimits Y=n$ and $x\in S^k(X)$, $y\in S^l(Y)$. Choose charts $(U,\phi)$, $(V,\psi)$ on $X,Y$ with $U,V$ open in ${\mathbin{\mathbb R}}^m_k,{\mathbin{\mathbb R}}^n_l$ and $0\in U$, $0\in V$ with $\phi(0)=x$, $\psi(0)=y$ and $f\circ\phi(U)\subseteq\psi(V)$. Write $(x_1,\ldots,x_m)$, $(y_1,\ldots,y_n)$ for the coordinates on $U,V$ respectively. Write $\tilde\beta_i$ for the local boundary component $\phi_*(\{x_i=0\})$ for $i=1,\ldots,k$, and $\beta_j$ for the local boundary component $\psi_*(\{y_j=0\})$ for~$j=1,\ldots,l$.
Lemma \ref{mc4lem} implies that $\bigl(x,(y,\beta_j)\bigr)\in\Xi_-^f$ with $\xi_-^f\bigl(x,(y,\beta_j)\bigr)=(x,\tilde\beta_{i_j})$ for each $j=1,\ldots,l$ and some $i_j=1,\ldots,k$, and $i_1,\ldots,i_l$ are distinct as $\xi_-^f$ is injective. Thus $l\leqslant\nobreak k$, and reordering $x_1,\ldots,x_k$ if necessary we suppose that $i_j=j$. By Proposition \ref{mc2prop6}(d), $\bigl(\psi(V),y_i\circ\psi^{-1} \bigr)$ is a boundary defining function for $Y$ at $(y,\beta_i)$ for $i=1,\ldots,l$, so by Definition \ref{mc3def1} $\bigl(f^{-1}(\psi(V)),y_i\circ\psi^{-1}\circ f\bigr)$ is a boundary defining function for $X$ at $(x,\tilde\beta_i)$ for $i=1,\ldots,l$. But $\bigl(\phi(U),x_i\circ\phi^{-1}\bigr)$ is also a boundary defining function for $X$ at $(x,\tilde\beta_i)$, so by Proposition \ref{mc2prop6}(b), making $U$ smaller if necessary we can suppose that \begin{equation*} y_i\circ\psi^{-1}\circ f\circ\phi\equiv x_i\cdot g_i \end{equation*} on $U$, for some smooth $g_i:U\rightarrow(0,\infty)$ and all $i=1,\ldots,l$.
Combining this with the surjectivity conditions in Definition \ref{mc3def2}(iv), we see that we may choose alternative coordinates $(\tilde x_1,\ldots,\tilde x_m)$ on an open neighbourhood $\tilde U$ of 0 in $U$ taking values in ${\mathbin{\mathbb R}}^m_k$ and zero at 0, such that \begin{equation*} \tilde x_i\equiv \begin{cases} y_i\circ\psi^{-1}\circ f\circ\phi,& i=1,\ldots,l, \\ x_i, & i=l+1,\ldots,k, \\ y_{i-k+l}\circ\psi^{-1}\circ f\circ\phi,& i=k+1,\ldots,n+k-l, \\ \text{some function of $x_{k+1},\ldots,x_m$,} & i=n+k-l+1,\ldots,m. \end{cases} \end{equation*} Choose small $\epsilon>0$ so that $[0,\epsilon)^k\times(-\epsilon,\epsilon)^{m-k} \subseteq \tilde U$ in coordinates $(\tilde x_1,\ldots,\tilde x_m)$. Then defining $X'=\phi\bigl(\{(\tilde x_1,\ldots,\tilde x_m)\in [0,\epsilon)^k\times(-\epsilon,\epsilon)^{m-k}\}\bigr)$, $Y'=\psi\bigl([0,\epsilon)^l\times (-\epsilon,\epsilon)^{n-l}\bigr)$ and $Z'=[0,\epsilon)^{k-l}\times (-\epsilon,\epsilon)^{m-k-n+l}$, the proposition follows. \end{proof}
Submersions $f:X\rightarrow Y$ are nicely compatible with the boundaries~$\partial X,\partial Y$.
\begin{prop} Let\/ $f:X\rightarrow Y$ be a submersion, and\/ $\partial X=\partial_+^fX\amalg\partial_-^fX$ be as in Proposition {\rm\ref{mc4prop2}}. Then $f_+=f\circ i_X\vert_{\partial_+^fX}:\partial_+^fX\rightarrow Y$ is a submersion. There is a natural submersion $\smash{f_-:\partial_-^fX\rightarrow\partial Y}$ with\/~$\smash{f\circ i_X\vert_{\partial_-^fX}\equiv i_Y\circ f_-}$. \label{mc5prop2} \end{prop}
\begin{proof} Lemma \ref{mc4lem} shows that $\xi^f_-:\Xi^f_-= X\times_Y\partial Y\rightarrow\partial X$ is injective. Since $\partial_-^fX=\xi^f_-(\Xi^f_-)$, it follows that $\xi^f_-:X\times_Y\partial Y\rightarrow\partial_-^fX$ is invertible. Define $f_-:\partial_-^fX\rightarrow\partial Y$ by $f_-=\pi_{\partial Y}\circ(\xi^f_-)^{-1}$. Then $i_Y\circ f_-=i_Y\circ\pi_{\partial Y}\circ(\xi^f_-)^{-1} =f\circ\pi_X\circ(\xi^f_-)^{-1}=f\circ i_X\vert_{\smash{\partial_-^fX}}$, since $i_Y\circ\pi_{\partial Y}=f\circ\pi_X:X\times_Y\partial Y\rightarrow Y$ and $\pi_X\circ(\xi^f_-)^{-1}=i_X\vert_{\smash{\partial_-^fX}}: \partial_-^fX\rightarrow X$ as $i_X\circ\xi^f_-\equiv\pi_X\vert_{\smash{\Xi^f_-}}$ by Proposition~\ref{mc4prop1}(b).
It remains to check that the maps $f_\pm$ are submersions. Since being a submersion is a local property, by Proposition \ref{mc5prop1} it is enough to show $f_\pm$ are submersions when $f:X\rightarrow Y$ is a projection $\pi_{Y'}:Y'\times Z'\rightarrow Y'$. We have a natural isomorphism $\partial(Y'\times Z')\cong(\partial Y'\times Z')\amalg(Y'\times\partial Z')$. It is easy to see that $\partial_+^f(Y'\times Z')\cong Y'\times\partial Z'$, so that $f_+$ becomes the projection $Y'\times\partial Z'\rightarrow Y'$ which is a submersion, and that $\partial_-^f(Y'\times Z')\cong\partial Y'\times Z'$, so that $f_-$ becomes the projection $\partial Y'\times Z'\rightarrow\partial Y'$ which is a submersion. \end{proof}
Note that we can iterate this construction to decompose $\partial^kX$, so that \begin{equation*} \partial^2X=\partial_+^{f_+}(\partial_+^fX)\amalg \partial_-^{f_+}(\partial_+^fX)\amalg \partial_+^{f_-}(\partial_-^fX)\amalg \partial_-^{f_-}(\partial_-^fX), \end{equation*} for instance, and $f$ lifts to a submersion on every piece.
\section{Transversality and fibre products of manifolds} \label{mc6}
Let $X,Y,Z$ be manifolds with corners and $f:X\rightarrow Z$, $g:Y\rightarrow Z$ be smooth maps. From category theory, a {\it fibre product\/} $X\times_{f,Z,g}Y$ in the category ${\mathop{\bf Man^c}}$ consists of a manifold with corners $W$ and smooth maps $\pi_X:W\rightarrow X$, $\pi_Y:W\rightarrow Y$ such that $f\circ\pi_X=g\circ\pi_Y:W\rightarrow Z$, satisfying the universal property that if $W'$ is a manifold with corners and $\pi_X':W'\rightarrow X$, $\pi_Y':W'\rightarrow Y$ are smooth maps with $f\circ\pi_X'=g\circ\pi_Y'$, then there exists a unique smooth map $h:W'\rightarrow W$ with $\pi_X'=\pi_X\circ h$ and $\pi_Y'=\pi_Y\circ h$. We now give sufficient conditions for fibre products of manifolds with corners to exist.
\begin{dfn} Let $X,Y,Z$ be manifolds with corners and $f:X\rightarrow Z$, $g:Y\rightarrow Z$ be smooth maps. We call $f,g$ {\it transverse\/} if the following holds. Suppose $x\in X$, $y\in Y$ and $z\in Z$ with $f(x)=z=g(y)$, so that there are induced linear maps of tangent spaces ${\rm d} f\vert_x:T_xX\rightarrow T_zZ$ and ${\rm d} g\vert_y:T_yY\rightarrow T_zZ$. Let $x\in S^j(X)$, $y\in S^k(Y)$ and $z\in S^l(Z)$, so that as in Definition \ref{mc3def2} ${\rm d} f\vert_x$ maps $T_x(S^j(X))\rightarrow T_z(S^l(Z))$ and ${\rm d} g\vert_y$ maps $T_y(S^k(Y))\rightarrow T_z(S^l(Z))$. Then we require that $T_zZ={\rm d} f\vert_x(T_xX)+{\rm d} g\vert_y(T_yY)$ and $T_z(S^l(Z))={\rm d} f\vert_x(T_x(S^j(X)))+{\rm d} g\vert_y(T_y(S^k(Y)))$ for all such $x,y,z$. From Definition \ref{mc3def2}, if one of $f,g$ is a submersion then $f,g$ are automatically transverse. \label{mc6def1} \end{dfn}
\begin{rem} If $X,Y,Z$ are manifolds without boundary then $j=k=l=0$ in Definition \ref{mc6def1}, and both conditions reduce to the usual definition $T_zZ={\rm d} f\vert_x(T_xX)+{\rm d} g\vert_y(T_yY)$ of transverse smooth maps. When $X,Y,Z$ are manifolds with corners we believe this definition of transversality is new, since it depends heavily on our definition of smooth maps which is also new.
Definition \ref{mc6def1} imposes two transversality conditions on $f,g$ at $x,y,z$, the first on the corners $C_0(X)\cong X,C_0(Y)\cong Y,C_0(Z)\cong Z$ of $X,Y,Z$ of largest dimension at $x,y,z$, and the second on the corners $C_j(X)\cong S^j(X)$, $C_k(Y)\cong S^k(Y)$, $C_l(Z)\cong S^l(Z)$ (locally) of $X,Y,Z$ of smallest dimension at~$x,y,z$.
One might think that to prove Theorem \ref{mc6thm1} one would need to impose transversality conditions on corners $C_a(X),C_b(Y),C_c(Z)$ of intermediate dimensions $0\leqslant\nobreak a\leqslant\nobreak j$, $0\leqslant\nobreak b\leqslant\nobreak k$, $0\leqslant\nobreak c\leqslant\nobreak l$ as well. In fact these intermediate conditions are implied by our definition of smooth maps, since the requirement for $f,g$ to pull boundary defining functions back to boundary defining functions is a kind of transversality condition at the boundaries. One of the motivations for our definition of smooth maps of manifolds with corners was to have a simple, not too restrictive condition for the existence of fibre products. \label{mc6rem1} \end{rem}
\begin{rem} Margalef-Roig and Outerelo Dominguez \cite[\S 7.2]{MaOu} also define transversality of smooth maps between manifolds with corners, and prove their own version of Theorem \ref{mc6thm1} below. They work with Banach manifolds and $C^p$ maps for $p=0,1,\ldots,\infty$. For finite-dimensional manifolds, their notion of smooth map (`map of class $\infty$') corresponds to our weakly smooth maps. However, their notion of {\it transversality\/} \cite[Def.~7.2.1]{MaOu} is very restrictive.
In our notation, if $f:X\rightarrow Z$ and $g:Y\rightarrow Z$ are weakly smooth maps, then $f,g$ are transverse in the sense of \cite[Def.~7.2.1]{MaOu} if and only if whenever $x\in X$ and $y\in Y$ with $f(x)=g(y)=z\in Z$ then $z\in Z^\circ$, and $x\in S^j(X)$, $y\in S^k(Y)$ with $T_zZ={\rm d} f\vert_x(T_x(S^j(X)))+{\rm d} g\vert_y(T_y(S^k(Y)))$. In particular, $f(X)$ and $g(Y)$ cannot intersect in the boundary strata $S^l(Z)$ for $l>0$ but only in the interior $Z^\circ$, so in effect Margalef-Roig and Outerelo Dominguez reduce to the case in which $\partial Z=\emptyset$, and then their \cite[Prop.~7.2.7]{MaOu} is a special case of Theorem \ref{mc6thm1}. So, for example, $f,g$ are generally not transverse in the sense of \cite[Def.~7.2.1]{MaOu} if one of $f,g$ is a submersion, or even if~$f=\mathop{\rm id}\nolimits_X:X\rightarrow X=Z$. \label{mc6rem2} \end{rem}
For manifolds without boundary the following theorem is well-known, as in Lang \cite[Prop.~II.4]{Lang}. For manifolds with corners Margalef-Roig and Outerelo Dominguez \cite[Prop.~7.2.7]{MaOu} prove it with a stricter notion of transversality, as above. We believe this version is new. The proof is given in~\S\ref{mc8}.
\begin{thm} Suppose\/ $X,Y,Z$ are manifolds with corners and\/ $f:X\rightarrow Z,$ $g:Y\rightarrow Z$ are transverse smooth maps. Then there exists a fibre product\/ $W=X\times_{f,Z,g}Y$ in the category ${\mathop{\bf Man^c}}$ of manifolds with corners, which is given by an explicit construction, as follows.
As a topological space $W=\{(x,y)\in X\times Y:f(x)=g(y)\},$ with the topology induced by the inclusion $W\subseteq X\times Y,$ and the projections\/ $\pi_X:W\rightarrow X$ and\/ $\pi_Y:W\rightarrow Y$ map $\pi_X:(x,y)\mapsto x,$ $\pi_Y:(x,y)\mapsto y$. Let\/ $n=\mathop{\rm dim}\nolimits X+\mathop{\rm dim}\nolimits Y-\mathop{\rm dim}\nolimits Z$, so that\/ $n\geqslant 0$ if\/ $W\ne\emptyset$. The maximal atlas on\/ $W$ is the set of all charts\/ $(U,\phi),$ where\/ $U\subseteq{\mathbin{\mathbb R}}^n_k$ is open and\/ $\phi:U\rightarrow W$ is a homeomorphism with a nonempty open set\/ $\phi(U)$ in $W,$ such that\/ $\pi_X\circ\phi:U\rightarrow X$ and $\pi_Y\circ\phi:U\rightarrow Y$ are smooth maps, and for all\/ $u\in U$ with\/ $\phi(u)=(x,y),$ the following induced linear map of real vector spaces is injective: \e {\rm d}(\pi_X\circ\phi)\vert_u\oplus{\rm d}(\pi_Y\circ\phi)\vert_u:T_uU={\mathbin{\mathbb R}}^n\longrightarrow T_xX\oplus T_yY. \label{mc6eq1} \e \label{mc6thm1} \end{thm}
We note one important special case of Theorem \ref{mc6thm1}, the intersection of submanifolds. Suppose $X,Y$ are embedded submanifolds of $Z$, with inclusions $i:X\hookrightarrow Z$ and $j:Y\hookrightarrow Z$. Then we say that $X,Y$ {\it intersect transversely\/} if the smooth embeddings $i,j$ are transverse. Then the fibre product $W=X\times_ZY$ is just the intersection $X\cap Y$ in $Z$, and Theorem \ref{mc6thm1} shows that it is also an embedded submanifold of $Z$. If $f,g$ are not transverse, then a fibre product $X\times_{f,Z,g}Y$ may or may not exist in the category ${\mathop{\bf Man^c}}$. Even if one exists, from the point of view of derived differential geometry \cite{Spiv}, it is in some sense the `wrong answer'. Here are some examples.
\begin{ex}{\bf(a)} The inclusion $i:\{0\}\rightarrow{\mathbin{\mathbb R}}$ is not transverse to itself. A fibre product $\{0\}\times_{i,{\mathbin{\mathbb R}},i}\{0\}$ does exist in ${\mathop{\bf Man^c}}$ in this case, the point $\{0\}$. Note however that it does not have the expected dimension: $\{0\}\times_{{\mathbin{\mathbb R}}}\{0\}$ has dimension 0, but Theorem \ref{mc6thm1} predicts the dimension $\mathop{\rm dim}\nolimits\{0\}+\mathop{\rm dim}\nolimits\{0\}-\mathop{\rm dim}\nolimits{\mathbin{\mathbb R}}=-1$.
\noindent{\bf(b)} Consider the smooth maps $f:{\mathbin{\mathbb R}}\rightarrow{\mathbin{\mathbb R}}^2$ and $g:{\mathbin{\mathbb R}}\rightarrow{\mathbin{\mathbb R}}^2$ given by \begin{equation*} f(x)=(x,0)\quad\text{and}\quad g(x)=\begin{cases} (x,e^{-x^2}\sin (\pi/x)), & x\ne 0, \\ (0,0), & x=0. \end{cases} \end{equation*} These are not transverse at $f(0)=g(0)=(0,0)$. The fibre product does not exist in ${\mathop{\bf Man^c}}$. To see this, note that the topological fibre product ${\mathbin{\mathbb R}}\times_{f,{\mathbin{\mathbb R}}^2,g}{\mathbin{\mathbb R}}$ is $\{(1/n,0):0\ne n\in{\mathbin{\mathbb Z}}\}\cup\{(0,0)\}$, which has no manifold structure. \label{mc6ex1} \end{ex}
In the general case of Theorem \ref{mc6thm1}, the description of $\partial W$ in terms of $\partial X,\partial Y,\partial Z$ is rather complicated, as can be seen from the proof in \S\ref{mc8}. Here are three cases in which the expression simplifies. The proofs follow from the proof of Theorem \ref{mc6thm1} in \S\ref{mc8}, or alternatively from equation \eq{mc6eq9} below with $i=1$, since $\partial W\cong C_1(W)$ and $f,g$ are strongly transverse in each case.
\begin{prop} Let\/ $X,Y$ be manifolds with corners, and\/ $f:X\rightarrow Y$ a submersion. Then there is a canonical diffeomorphism \e \partial_-^fX\cong X\times_{f,Y,i_Y}\partial Y, \label{mc6eq2} \e which identifies the submersions $\smash{f_-:\partial_-^fX\rightarrow\partial Y}$ and\/~$\pi_{\partial Y}:X\times_{Y}\partial Y\rightarrow\partial Y$. \label{mc6prop1} \end{prop}
\begin{prop} Let\/ $X,Y$ be manifolds with corners, $Z$ a manifold without boundary, and\/ $f:X\rightarrow Z,$ $g:Y\rightarrow Z$ be transverse smooth maps. Then $f\circ i_X:\partial X\rightarrow Z,$ $g:Y\rightarrow Z$ are transverse, and\/ $f:X\rightarrow Z,$ $g\circ i_Y:\partial Y\rightarrow Z$ are transverse, and there is a canonical diffeomorphism \e \partial\bigl(X\times_{f,Z,g}Y\bigr)\cong \bigl(\partial X\times_{f\circ i_X,Z,g}Y\bigr) \amalg \bigl(X\times_{f,Z,g\circ i_Y}\partial Y\bigr). \label{mc6eq3} \e \label{mc6prop2} \end{prop}
\begin{prop} Let\/ $X,Y,Z$ be manifolds with corners, $f:X\rightarrow Z$ a submersion and\/ $g:Y\rightarrow Z$ smooth. Then there is a canonical diffeomorphism \e \partial\bigl(X\times_{f,Z,g}Y\bigr)\cong \bigl(\partial_+^fX \times_{f_+,Z,g}Y\bigr) \amalg \bigl(X\times_{f,Z,g\circ i_Y}\partial Y\bigr). \label{mc6eq4} \e If both\/ $f,g$ are submersions there is also a canonical diffeomorphism \e \begin{split} \partial\bigl(&X\times_{f,Z,g}Y\bigr)\cong \\ &\bigl(\partial_+^fX \times_{f_+,Z,g}Y\bigr)\amalg \bigl(X \times_{f,Z,g_+} \partial_+^gY\bigr)\amalg\bigl(\partial_-^fX\times_{f_-,\partial Z,g_-}\partial_-^gY\bigr). \end{split} \label{mc6eq5} \e Equation \eq{mc6eq4} also holds if\/ $f,g$ are transverse and $f$ is b-submersive, and\/ \eq{mc6eq5} also holds if\/ $f,g$ are transverse and both are b-submersive. \label{mc6prop3} \end{prop}
We will also discuss a stronger notion of transversality. To introduce it we prove the following lemma.
\begin{lem} Let\/ $X,Y,Z$ be manifolds with corners, $f:X\rightarrow Z$ and\/ $g:Y\rightarrow Z$ be transverse smooth maps, and\/ $C(f),C(g)$ be as in \eq{mc3eq2}. Suppose $(x,\{\beta_1,\ldots,\beta_j\})\in C_j(X)$ and\/ $(y,\{\tilde\beta_1,\ldots,\tilde\beta_k\})\in C_k(Y)$ with\/ $C(f) (x,\{\beta_1,\allowbreak\ldots,\allowbreak\beta_j\})\allowbreak=C(g)(y,\{\tilde\beta_1,\ldots, \tilde\beta_k\})= (z,\{\dot\beta_1,\ldots,\dot\beta_l\})$ in\/ $C_l(Z)$. Then\/~$j+k\geqslant l$. \label{mc6lem} \end{lem}
\begin{proof} Since $C(f)(x,\{\beta_1,\ldots,\beta_j\})= (z,\{\dot\beta_1,\ldots,\dot\beta_l\})$ it follows that ${\rm d} f\vert_x$ maps the vector subspace $T_x\beta_1\cap\cdots\cap T_x\beta_j$ in $T_xX$ to the vector subspace $T_z\dot\beta_1\cap\cdots\cap T_z\dot\beta_l$ in $T_zZ$, as the restriction of ${\rm d} f\vert_x$ to $T_x\beta_1\cap \cdots\cap T_x\beta_j$ is naturally identified with ${\rm d} C(f)\vert_{(x,\{\beta_1,\ldots,\beta_j\})}$. Similarly, ${\rm d} g\vert_y$ maps $T_y\tilde\beta_1\cap\cdots\cap T_y\tilde\beta_k$ in $T_yY$ to $T_z\dot\beta_1\cap\cdots\cap T_z\dot\beta_l$ in $T_zZ$. Since $f,g$ are transverse, we have $T_zZ={\rm d} f\vert_x(T_xX)+{\rm d} g\vert_y(T_yY)$. Passing to the quotients $T_xX/(T_x\beta_1\cap\cdots\cap T_x\beta_j),\allowbreak\ldots,T_zZ/(T_z\dot\beta_1\cap\cdots\cap T_z\dot\beta_l)$ and using the facts above shows that \e \begin{split} ({\rm d} f\vert_x)_*\bigl(T_xX/(T_x\beta_1\cap\cdots\cap T_x\beta_j)\bigr)+ ({\rm d} g\vert_y)_*\bigl(T_yY/(T_y\tilde\beta_1\cap\cdots\cap T_y\tilde\beta_k)\bigr)&\\ = T_zZ/(T_z\dot\beta_1\cap\cdots\cap T_z\dot\beta_l)&. \end{split} \label{mc6eq6} \e As the vector spaces in \eq{mc6eq6} have dimensions $j,k,l$, it follows that~$j+k\geqslant l$. \end{proof}
\begin{dfn} Let $X,Y,Z$ be manifolds with corners and $f:X\rightarrow Z$, $g:Y\rightarrow Z$ be smooth maps. We call $f,g$ {\it strongly transverse\/} if they are transverse, and whenever there are points in $C_j(X),C_k(Y),C_l(Z)$ with \e C(f)(x,\{\beta_1,\ldots,\beta_j\})=C(g)(y,\{\tilde\beta_1,\ldots, \tilde\beta_k\})=(z,\{\dot\beta_1,\ldots,\dot\beta_l\}) \label{mc6eq7} \e we have either $j+k>l$ or $j=k=l=0$. That is, in Lemma \ref{mc6lem}, equality in $j+k\geqslant l$ is allowed only if~$j=k=l=0$.
Suppose $f,g$ are smooth with $f$ a submersion. Then $f,g$ are automatically transverse, as in Definition \ref{mc6def1}, and in \eq{mc6eq7}, Theorem \ref{mc4thm}(v) implies that $j\geqslant l$. Hence if $k>0$ then $j+k>l$. If $k=0$ then $l=0$ as $C(g)$ maps $C_0(Y)\rightarrow C_0(Z)$, so either $j+k>l$ or $j=k=l=0$. So $f,g$ are strongly transverse. Also $f,g$ are strongly transverse if $f,g$ are smooth with $g$ a submersion, or if $f,g$ are transverse and~$\partial Z=\emptyset$. \label{mc6def2} \end{dfn}
In the situation of Theorem \ref{mc6thm1} we have a Cartesian square \e \begin{gathered} \xymatrix@R=10pt{ W \ar[r]_{\pi_Y} \ar[d]^{\pi_X} & Y \ar[d]_g \\ X \ar[r]^f & Z,} \end{gathered} \quad \quad \begin{aligned} &\text{which induces}\\ &\text{a commutative}\\ &\text{square} \end{aligned} \quad \begin{gathered} \xymatrix@R=10pt{ \coprod_{i\geqslant 0}C_i(W) \ar[r]_{C(\pi_Y)} \ar[d]^{C(\pi_X)} & \coprod_{k\geqslant 0}C_k(Y) \ar[d]_{C(g)} \\ \coprod_{j\geqslant 0}C_j(X) \ar[r]^{C(f)} & \coprod_{l\geqslant 0}C_l(Z).} \end{gathered} \label{mc6eq8} \e Since as in Theorem \ref{mc4thm} the transformation $X\mapsto \coprod_{i\geqslant 0}C_i(X)$, $f\mapsto C(f)$ has very good functorial properties, it is natural to wonder whether the right hand square in \eq{mc6eq8} is also Cartesian. The answer is yes if and only if $f,g$ are strongly transverse. The following theorem will be proved in~\S\ref{mc9}.
\begin{thm} Let\/ $X,Y,Z$ be manifolds with corners, and\/ $f:X\rightarrow Z,$ $g:Y\rightarrow Z$ be strongly transverse smooth maps, and write\/ $W$ for the fibre product\/ $X\times_{f,Z,g}Y$ in Theorem\/ {\rm\ref{mc6thm1}}. Then there is a canonical diffeomorphism \e \begin{gathered} C_i(W)\cong \coprod_{\begin{subarray}{l}j,k,l\geqslant 0:\\ i=j+k-l\end{subarray}} \begin{aligned}[t] \bigl(C_j(X)\cap C(f)^{-1}(C_l(Z))\bigr) \times_{C(f),C_l(Z),C(g)}&\\ \bigl(C_k(Y)\cap C(g)^{-1}(C_l(Z))\bigr)& \end{aligned} \end{gathered} \label{mc6eq9} \e for all\/ $i\geqslant 0,$ where the fibre products are all transverse and so exist. Hence \e \textstyle \coprod_{i\geqslant 0}C_i(W)\cong \coprod_{j\geqslant 0}C_j(X)\times_{C(f),\coprod_{l\geqslant 0}C_l(Z),C(g)} \coprod_{k\geqslant 0}C_k(Y). \label{mc6eq10} \e Here the right hand commutative square in\/ \eq{mc6eq8} induces a map from the left hand side of\/ \eq{mc6eq10} to the right hand side, which gives the identification\/~\eq{mc6eq10}. \label{mc6thm2} \end{thm}
Suppose $f:X\rightarrow Z$ and $g:Y\rightarrow Z$ are transverse, but not strongly transverse. Then by Definition \ref{mc6def2} there exist points in $C_j(X),C_k(Y),C_l(Z)$ satisfying \eq{mc6eq7} with $j+k=l$ but $j,k,l$ not all zero. These give a point in the right hand side of \eq{mc6eq9} with $i=0$ which does not lie in the image of $C_0(W)$ under the natural map, since $C_0(W)$ maps to $C_0(X),C_0(Y)$ and so cannot map to $C_j(X),C_k(Y)$ as $j,k$ are not both zero. Thus \eq{mc6eq9} and \eq{mc6eq10} are false if $f,g$ are transverse but not strongly transverse.
Here is an example of $f,g$ which are transverse but not strongly transverse.
\begin{ex} Define smooth maps $f:[0,\infty)\rightarrow[0,\infty)^2$ by $f(x)=(x,2x)$ and $g:[0,\infty)\rightarrow[0,\infty)^2$ by $g(y)=(2y,y)$. Then $f(0)=g(0)=(0,0)$. We have \begin{gather*} {\rm d} f\vert_0\bigl(T_0[0,\infty)\bigr)+{\rm d} g\vert_0\bigl(T_0[0,\infty)\bigr)= \an{(1,2)}_{\mathbin{\mathbb R}}+\an{(2,1)}_{\mathbin{\mathbb R}}={\mathbin{\mathbb R}}^2=T_{(0,0)}[0,\infty)^2,\\ {\rm d} f\vert_0\bigl(T_0\bigl(S^1([0,\infty))\bigr)\bigr) +{\rm d} g\vert_0\bigl(T_0\bigl(S^1([0,\infty))\bigr)\bigr)=\{0\}= T_{(0,0)}\bigl(S^2([0,\infty)^2)\bigr), \end{gather*} so $f,g$ are transverse. However we have \begin{equation*} C(f)\bigl(0,\bigl\{\{x=0\}\bigr\}\bigr)=C(g)\bigl(0,\bigl\{\{y=0\} \bigr\}\bigr)=\bigl((0,0),\bigl\{\{x=0\},\{y=0\}\bigr\}\bigr), \end{equation*} with $j=k=1$ and $l=2$, so $f,g$ are not strongly transverse. The fibre product $W=[0,\infty)\times_{f,[0,\infty)^2,g}[0,\infty)$ is a single point $\{0\}$. In \eq{mc6eq9} when $i=0$ the l.h.s.\ is one point, and the r.h.s.\ is two points, one from $j\!=\!k\!=\!l\!=\!0$ and one from $j\!=\!k\!=\!1$, $l\!=\!2$, so \eq{mc6eq9} does not hold. For $i\!\ne\! 0$, both sides of \eq{mc6eq9} are empty. \label{mc6ex2} \end{ex}
The distinction between transversality and strong transversality will be important in \cite{Joyc}. There we will define a 2-category ${\mathop{\bf dMan^c}}$ of {\it d-manifolds with corners}, a `derived' generalization of manifolds with corners, which contains the 1-category ${\mathop{\bf Man^c}}$ of manifolds with corners as a full discrete 2-subcategory. If $X,Y,Z$ are manifolds with corners and $f:X\rightarrow Z$, $g:Y\rightarrow Z$ are smooth then a 2-category fibre product $(X\times_ZY)_{\mathop{\bf dMan^c}}$ exists in ${\mathop{\bf dMan^c}}$. If $f,g$ are transverse then a 1-category fibre product $(X\times_ZY)_{\mathop{\bf Man^c}}$ also exists in ${\mathop{\bf Man^c}}\subset{\mathop{\bf dMan^c}}$ by Theorem \ref{mc6thm1}. However, $(X\times_ZY)_{\mathop{\bf dMan^c}}$ and $(X\times_ZY)_{\mathop{\bf Man^c}}$ coincide if and only if $f,g$ are strongly transverse.
\section{Orientations and orientation conventions} \label{mc7}
Orientations are discussed in \cite[\S I.1]{KoNo} and~\cite[\S VIII.3]{Lang}.
\begin{dfn} Let $X$ be an $n$-manifold and $E\rightarrow X$ a vector bundle of rank $k$. The {\it frame bundle\/} $F(E)$ is \begin{equation*} F(E)=\bigl\{(x,e_1,\ldots,e_k):\text{$x\in X$, $(e_1,\ldots,e_k)$ is a basis for $E\vert_x\cong{\mathbin{\mathbb R}}^k$}\bigr\}. \end{equation*} It is a manifold of dimension $n+k^2$. Define an action of $\mathop{\rm GL}\nolimits(k,{\mathbin{\mathbb R}})$ on $F(E)$ by $(A_{ij})_{i,j=1}^k:(x,e_1,\ldots,e_k) \mapsto\bigl(x,\sum_{j=1}^kA_{1j}e_j,\ldots,\sum_jA_{kj}e_j\bigr)$. This action is smooth and free, and makes $F(E)$ into a principal $\mathop{\rm GL}\nolimits(k,{\mathbin{\mathbb R}})$-bundle over $X$, with projection $\pi:F(E)\rightarrow X$ given by~$\pi:(x,e_1,\ldots,e_k)\mapsto x$.
Write $\mathop{\rm GL}\nolimits_+(k,{\mathbin{\mathbb R}})$ for the subgroup of $A\in\mathop{\rm GL}\nolimits(k,{\mathbin{\mathbb R}})$ with $\det A>0$. It is a normal subgroup of $\mathop{\rm GL}\nolimits(k,{\mathbin{\mathbb R}})$ of index 2, and we identify the quotient group $\mathop{\rm GL}\nolimits(k,{\mathbin{\mathbb R}})/\mathop{\rm GL}\nolimits_+(k,{\mathbin{\mathbb R}})$ with $\{\pm 1\}$ by $A\mathop{\rm GL}\nolimits_+(k,{\mathbin{\mathbb R}})\mapsto\det A/\md{\det A}$. The {\it orientation bundle\/} $\mathop{\rm Or}(E)$ of $E$ is $F(E)/\mathop{\rm GL}\nolimits_+(k,{\mathbin{\mathbb R}})$. It is a principal $\mathop{\rm GL}\nolimits(k,{\mathbin{\mathbb R}})/\mathop{\rm GL}\nolimits_+(k,{\mathbin{\mathbb R}})=\{\pm 1\}$-bundle over $X$. Points of the fibre of $\mathop{\rm Or}(E)$ over $x\in X$ are equivalence classes of bases $(e_1,\ldots,e_k)$ for $E\vert_x$, where two bases are equivalent if they are related by a $k\times k$ matrix with positive determinant.
An {\it orientation\/ $o_E$ for the fibres of\/} $E$ is a continuous section $o_E:X\rightarrow\mathop{\rm Or}(E)$ of $\mathop{\rm Or}(E)$. The pair $(E,o_E)$ is called an {\it oriented vector bundle\/} on $X$. If $E\rightarrow X$, $F\rightarrow X$ are vector bundles on $X$ of ranks $k,l$ and $o_E,o_F$ are orientations on the fibres of $E,F$, we define the {\it direct sum orientation\/} $o_{E\oplus F}=o_E\oplus o_F$ on the fibres of $E\oplus F$ by saying that if $x\in X$, $(e_1,\ldots,e_k)$ is an oriented basis for $E\vert_x$ and $(f_1,\ldots,f_l)$ is an oriented basis for $F\vert_x$, then $(e_1,\ldots,e_k,f_1,\ldots,f_l)$ is an oriented basis for~$(E\oplus F)\vert_x$.
An {\it orientation\/ $o_X$ for\/} $X$ is an orientation for the fibres of the tangent bundle $TX\rightarrow X$. An {\it oriented manifold\/} $(X,o_X)$ is a manifold $X$ with an orientation $o_X$. Usually we leave $o_X$ implicit, and call $X$ an oriented manifold. If $o_X$ is an orientation on $X$ then the {\it opposite orientation\/} on $X$ is $-o_X$, where $o_X:X\rightarrow\mathop{\rm Or}(TX)$ is a section, $-1:\mathop{\rm Or}(TX)\rightarrow\mathop{\rm Or}(TX)$ comes from the principal $\{\pm 1\}$-action on $\mathop{\rm Or}(TX)$, and $-o_X=-1\circ o_X$ is the composition. When $X$ is an oriented manifold, we write $-X$ for $X$ with the opposite orientation. \label{mc7def1} \end{dfn}
We shall consider issues to do with orientations on manifolds with corners, and orientations on fibre products of manifolds. To do this, we need {\it orientation conventions\/} to say how to orient boundaries $\partial X$ and fibre products $X\times_ZY$ of oriented manifolds $X,Y,Z$. Our conventions generalize those of Fukaya, Oh, Ohta and Ono \cite[Conv.~45.1]{FOOO}, who restrict to $f,g$ submersions.
\begin{conv}{\bf(a)} Let $(X,o_X)$ be an oriented manifold with corners. Define $o_{\partial X}$ to be the unique orientation on $\partial X$ such that \e i_X^*(TX)\cong{\mathbin{\mathbb R}}_{\rm out}\oplus T(\partial X) \label{mc7eq1} \e is an isomorphism of oriented vector bundles over $\partial X$, where $i_X^*(TX),T(\partial X)$ are oriented by $o_X,o_{\partial X}$, and ${\mathbin{\mathbb R}}_{\rm out}$ is oriented by an outward-pointing normal vector to $\partial X$ in $X$, and the r.h.s.\ of \eq{mc7eq1} has the direct sum orientation.
\noindent{\bf(b)} Let $(X,o_X),(Y,o_Y),(Z,o_Z)$ be oriented manifolds with corners, and $f:X\rightarrow Z$, $g:Y\rightarrow Z$ be transverse smooth maps, so that a fibre product $W=X\times_ZY$ exists in ${\mathop{\bf Man^c}}$ by Theorem \ref{mc6thm1}. Then we have an exact sequence of vector bundles over $W$ \e \xymatrix@C=8.5pt{0 \ar[r] & TW \ar[rrr]^(0.35){{\rm d}\pi_X\oplus{\rm d}\pi_Y} &&& \pi_X^*(TX)\oplus \pi_Y^*(TY) \ar[rrrr]^(0.55){\pi_X^*({\rm d} f)-\pi_Y^*({\rm d} g)} &&&& (f\circ\pi_X)^*(TZ) \ar[r] & 0.} \label{mc7eq2} \e Choosing a splitting of \eq{mc7eq2} induces an isomorphism of vector bundles \e TW \oplus (f\circ\pi_X)^*(TZ)\cong \pi_X^*(TX)\oplus \pi_Y^*(TY). \label{mc7eq3} \e Define $o_W$ to be the unique orientation on $W$ such that the direct sum orientations in \eq{mc7eq3} induced by $o_W,o_Z,o_X,o_Y$ differ by a factor~$(-1)^{\mathop{\rm dim}\nolimits Y\mathop{\rm dim}\nolimits Z}$.
Here are two ways to rewrite this convention in special cases. Firstly, suppose $f$ is a submersion. Then ${\rm d} f:TX\rightarrow f^*(TZ)$ is surjective, so by splitting the exact sequence $0\rightarrow\mathop{\rm Ker}{\rm d} f\rightarrow TX\,{\buildrel{\rm d} f\over\longrightarrow}\,f^*(TZ)\rightarrow 0$ we obtain an isomorphism \e TX\cong \mathop{\rm Ker}{\rm d} f\oplus f^*(TZ). \label{mc7eq4} \e Give the vector bundle $\mathop{\rm Ker}{\rm d} f\rightarrow X$ the unique orientation such that \eq{mc7eq4} is an isomorphism of oriented vector bundles, where $TX,f^*(TZ)$ are oriented using $o_X,o_Z$. As $f:X\rightarrow Z$ is a submersion so is $\pi_Y:W\rightarrow Y$, and ${\rm d}\pi_X$ induces an isomorphism $\mathop{\rm Ker}({\rm d}\pi_Y)\rightarrow\pi_X^*(\mathop{\rm Ker}{\rm d} f)$. Thus we have an exact sequence \begin{equation*} \xymatrix@C=15pt{0 \ar[r] & \pi_X^*(\mathop{\rm Ker}{\rm d} f) \ar[rr]^(0.6){({\rm d}\pi_X)^{-1}} && TW \ar[rr]^(0.45){{\rm d}\pi_Y} && \pi_Y^*(TY) \ar[r] & 0.} \end{equation*} Splitting this gives an isomorphism \e TW\cong \pi_X^*(\mathop{\rm Ker}{\rm d} f) \oplus \pi_Y^*(TY). \label{mc7eq5} \e The orientation on $W$ makes \eq{mc7eq5} into an isomorphism of oriented vector bundles, using $o_Y$ and the orientation on $\mathop{\rm Ker}{\rm d} f$ to orient the right hand side.
Secondly, let $g$ be a submersion. Then as for \eq{mc7eq4}--\eq{mc7eq5} we have isomorphisms \e TY\cong g^*(TZ)\oplus\mathop{\rm Ker}{\rm d} g\quad\text{and}\quad TW\cong \pi_X^*(TX)\oplus\pi_Y^*(\mathop{\rm Ker}{\rm d} g). \label{mc7eq6} \e We use the first equation of \eq{mc7eq6} to define an orientation on the fibres of $\mathop{\rm Ker}{\rm d} g$, and the second to define an orientation on~$W$. \label{mc7conv} \end{conv}
If $X$ is an oriented manifold with corners then by induction Convention \ref{mc7conv}(a) gives orientations on $\partial^kX$ for all $k=0,1,2,\ldots$. Now Definition \ref{mc2def6} defined a smooth, free action of $S_k$ on $\partial^kX$ for each $k$. By considering local models ${\mathbin{\mathbb R}}^n_l$ it is easy to see that the action of each $\sigma\in S_k$ multiplies the orientation on $\partial^kX$ by $\mathop{\rm sign}(\sigma)=\pm 1$. Since $C_k(X)\cong\partial^kX/S_k$ by \eq{mc2eq4} and $S_k$ does not preserve orientations for $k\geqslant 2$, we see that $C_k(X)$ {\it does not have a natural orientation for\/} $k\geqslant 2$. We show by example that $C_k(X)$ need not even be orientable.
\begin{ex} Let $X$ be the 4-manifold with corners $\bigl({\cal S}^2\times[0,\infty)^2\bigr)/{\mathbin{\mathbb Z}}_2$, where \begin{equation*} {\cal S}^2\times[0,\infty)^2=\bigl\{(x_1,x_2,x_3,y_1,y_2):\text{$x_j,y_j\in{\mathbin{\mathbb R}}$, $x_1^2+x_2^2+x_3^2=1$, $y_1,y_2\geqslant 0$}\bigr\}, \end{equation*} and ${\mathbin{\mathbb Z}}_2=\an{\sigma}$ acts freely on ${\cal S}^2\times[0,\infty)^2$ by \begin{equation*} \sigma:(x_1,x_2,x_3,y_1,y_2)\mapsto(-x_1,-x_2,-x_3,y_2,y_1). \end{equation*} There is an orientation on ${\cal S}^2\times[0,\infty)^2$ which is invariant under ${\mathbin{\mathbb Z}}_2$, and so descends to $X$. We have diffeomorphisms \begin{equation*} \partial X\cong C_1(X)\cong {\cal S}^2\times[0,\infty),\quad \partial^2X\cong {\cal S}^2, \quad C_2(X)\cong {\mathbin{\mathbb{RP}}}^2, \end{equation*} and $\partial^kX\!=\!C_k(X)\!=\!\emptyset$ for $k\!>\!2$. Thus $X$ is oriented, but $C_2(X)$ is not orientable. \label{mc7ex} \end{ex}
Given any canonical diffeomorphism between expressions involving boundaries and fibre products of oriented manifolds with corners, we can use Convention \ref{mc7conv} to define orientations on each side. These will be related by some sign $\pm 1$, which we can try to compute. Here is how to add signs to~\eq{mc6eq2}--\eq{mc6eq5}.
\begin{prop} In Propositions {\rm\ref{mc6prop1}, \ref{mc6prop2}} and\/ {\rm\ref{mc6prop3},} suppose $X,Y,Z$ are oriented. Then in oriented manifolds, equations \eq{mc6eq2}--\eq{mc6eq5} respectively become \ea \partial_-^fX&\cong (-1)^{\mathop{\rm dim}\nolimits X+\mathop{\rm dim}\nolimits Y}X\times_{f,Y,i_Y}\partial Y, \label{mc7eq7}\\ \partial\bigl(X\times_{f,Z,g}Y\bigr)&\cong \bigl(\partial X\times_{f\circ i_X,Z,g}Y\bigr) \amalg (-1)^{\mathop{\rm dim}\nolimits X+\mathop{\rm dim}\nolimits Z}\bigl(X\times_{f,Z,g\circ i_Y}\partial Y\bigr), \label{mc7eq8}\\ \partial\bigl(X\times_{f,Z,g}Y\bigr)&\cong \bigl(\partial_+^fX \times_{f_+,Z,g}Y\bigr) \amalg (-1)^{\mathop{\rm dim}\nolimits X+\mathop{\rm dim}\nolimits Z}\bigl(X\times_{f,Z,g\circ i_Y}\partial Y\bigr), \label{mc7eq9}\\ \begin{split} \partial\bigl(X\times_{f,Z,g}Y\bigr)&\cong\bigl(\partial_+^fX \times_{f_+,Z,g}Y\bigr) \amalg (-1)^{\mathop{\rm dim}\nolimits X+\mathop{\rm dim}\nolimits Z}\bigl(X\times_{f,Z,g_+}\partial_+^gY\bigr)\\ &\qquad \amalg\bigl(\partial_-^fX\times_{f_-,\partial Z,g_-}\partial_-^gY\bigr). \end{split} \label{mc7eq10} \ea \label{mc7prop1} \end{prop}
Here are some more identities involving only fibre products:
\begin{prop}{\bf(a)} If\/ $f:X\rightarrow Z,$ $g:Y\rightarrow Z$ are transverse smooth maps of oriented manifolds with corners then in oriented manifolds we have \e X\times_{f,Z,g}Y\cong(-1)^{(\mathop{\rm dim}\nolimits X-\mathop{\rm dim}\nolimits Z)(\mathop{\rm dim}\nolimits Y-\mathop{\rm dim}\nolimits Z)}Y\times_{g,Z,f}X. \label{mc7eq11} \e
\noindent{\bf(b)} If\/ $d:V\rightarrow Y,$ $e:W\rightarrow Y,$ $f:W\rightarrow Z,$ $g:X\rightarrow Z$ are smooth maps of oriented manifolds with corners then in oriented manifolds we have \e V\times_{d,Y,e\circ\pi_W}\bigl(W\times_{f,Z,g}X\bigr)\cong \bigl(V\times_{d,Y,e}W\bigr)\times_{f\circ\pi_W,Z,g}X, \label{mc7eq12} \e provided all four fibre products are transverse.
\noindent{\bf(c)} If\/ $d:V\rightarrow Y,$ $e:V\rightarrow Z,$ $f:W\rightarrow Y,$ $g:X\rightarrow Z$ are smooth maps of oriented manifolds with corners then in oriented manifolds we have \e \begin{split} &V\times_{(d,e),Y\times Z,f\times g}(W\times X)\cong \\ &\quad(-1)^{\mathop{\rm dim}\nolimits Z(\mathop{\rm dim}\nolimits Y+\mathop{\rm dim}\nolimits W)} (V\times_{d,Y,f}W)\times_{e\circ\pi_V,Z,g}X, \end{split} \label{mc7eq13} \e provided all three fibre products are transverse. \label{mc7prop2} \end{prop}
\begin{rem}{\bf(i)} Equations \eq{mc7eq8}, \eq{mc7eq12} and \eq{mc7eq13} can be found in Fukaya et al.\ \cite[Lem.~45.3]{FOOO} for the case of Kuranishi spaces.
\noindent{\bf(ii)} The proofs of Propositions \ref{mc7prop1} and \ref{mc7prop2} are elementary calculations starting from Convention \ref{mc7conv}. Here is a way to make these calculations easier. For simplicity, assume all the smooth maps involved are submersions. By Proposition \ref{mc5prop1}, submersions are locally projections. Since identities like \eq{mc7eq7}--\eq{mc7eq13} are local, it is enough to prove the identities for projections.
Let $M,N,Z$ be oriented manifolds with corners, of dimensions $m,n,z$. Set $X=M\times Z$ and $Y=Z\times N$, with the product orientations, and define $f:X\rightarrow Z$, $g:Y\rightarrow Z$ by $f=\pi_Z=g$. Convention \ref{mc7conv}(b) is arranged so that $W\cong M\times Z\times N$ holds in oriented manifolds. Exchanging the order in a product of oriented manifolds yields $X\times Y\cong(-1)^{\mathop{\rm dim}\nolimits X\mathop{\rm dim}\nolimits Y}Y\times X$. Thus to compute the sign in \eq{mc7eq11}, for instance, note that \begin{align*} &X\times_{f,Z,g}Y=(M\times Z)\times_Z(Z\times N)\cong M\times Z\times N,\\ &Y\times_{g,Z,f}X=(Z\times N)\times_Z(M\times Z)\cong(-1)^{(m+n)z} (N\times Z)\times_Z(Z\times M)\\ &\quad\cong(-1)^{(m+n)z}N\times Z\times M\cong(-1)^{(m+n)z}(-1)^{mn+mz+nz}M\times Z\times N, \end{align*} and then substitute in $m=\mathop{\rm dim}\nolimits X-\mathop{\rm dim}\nolimits Z$, $n=\mathop{\rm dim}\nolimits Y-\mathop{\rm dim}\nolimits Z$.
\noindent{\bf(iii)} Ramshaw and Basch \cite{RaBa} prove that there is a {\it unique\/} orientation convention for transverse fibre products of manifolds without boundary satisfying the three conditions: (A) if $X,Y$ are oriented then $X\times_{\{0\}}Y\cong X\times Y$ in oriented manifolds, where $X\times Y$ has the product orientation from $T(X\times Y)\cong \pi_X^*(TX)\oplus \pi_Y^*(TY)$; (B) if $f:X\rightarrow Y$ is a smooth map of oriented manifolds then $X\cong Y\times_{\mathop{\rm id}\nolimits_Y,Y,f}X$ in oriented manifolds; and (C) equation \eq{mc7eq12} holds. Convention \ref{mc7conv}(b) satisfies (A)--(C), and so agrees with that of~\cite{RaBa}. \label{mc7rem} \end{rem}
\section{Proof of Theorem \ref{mc6thm1}} \label{mc8}
Theorem \ref{mc6thm1} follows from the next two propositions. In the proof we assume Theorem \ref{mc6thm1} for manifolds without boundary, since this is well known, as in Lang \cite[Prop.~II.4]{Lang} for instance. The difference between {\it transverse\/} and {\it strongly transverse\/} $f,g$ in \S\ref{mc6} appears in part (C) in the proof below: transverse $f,g$ are strongly transverse if and only if there are no $\approx$-equivalence classes $E$ of type~(b).
\begin{prop} Let\/ $X,Y,Z$ be manifolds with corners, and\/ $f:X\rightarrow Z,$ $g:Y\rightarrow Z$ be transverse smooth maps. Then the construction of Theorem\/ {\rm\ref{mc6thm1}} defines a manifold with corners $W,$ with\/ $\mathop{\rm dim}\nolimits W=\mathop{\rm dim}\nolimits X+\mathop{\rm dim}\nolimits Y-\mathop{\rm dim}\nolimits Z$ if\/ $W\ne\emptyset,$ and the maps $\pi_X:W\rightarrow X,$ $\pi_Y:W\rightarrow Y$ are smooth. \label{mc8prop1} \end{prop}
\begin{proof} If $W=\emptyset$ the proposition is trivial, so suppose $W\ne\emptyset$. Then $n=\mathop{\rm dim}\nolimits X+\mathop{\rm dim}\nolimits Y-\mathop{\rm dim}\nolimits Z\geqslant 0$, since $f,g$ are transverse. Let $(x,y)\in W$, so that $x\in X$ and $y\in Y$ with $f(x)=g(y)=z$ in $Z$. We will first construct a chart $(U,\phi)$ on $W$ satisfying the conditions of Theorem \ref{mc6thm1}, with $U$ open in ${\mathbin{\mathbb R}}^n_d$ and $0\in U$ with~$\phi(0)=(x,y)$.
Choose charts $(R,\theta),(S,\psi),(T,\xi)$ on $X,Y,Z$ respectively with $0\in R,S,T$ and $\theta(0)=x$, $\psi(0)=y$, $\xi(0)=z$, where $R,S,T$ are open in ${\mathbin{\mathbb R}}^k_a,{\mathbin{\mathbb R}}^l_b,{\mathbin{\mathbb R}}^m_c$ with $k=\mathop{\rm dim}\nolimits X$, $l=\mathop{\rm dim}\nolimits Y$, $m=\mathop{\rm dim}\nolimits Z$. Making $R,S$ smaller if necessary suppose $f\circ\theta(R),g\circ\psi(S)\subseteq\xi(T)$. Then $\tilde f=\xi^{-1}\circ f\circ\theta:R\rightarrow T$ and $\tilde g=\xi^{-1}\circ g\circ\psi:S\rightarrow T$ are smooth maps between subsets of ${\mathbin{\mathbb R}}^k,{\mathbin{\mathbb R}}^l,{\mathbin{\mathbb R}}^m$ in the sense of Definition \ref{mc2def1}. So by definition we can choose open subsets $\hat R\subseteq{\mathbin{\mathbb R}}^k$, $\hat S\subseteq{\mathbin{\mathbb R}}^l$, $\hat T\subseteq{\mathbin{\mathbb R}}^m$ with $R=\hat R\cap{\mathbin{\mathbb R}}^k_a$, $S=\hat S\cap{\mathbin{\mathbb R}}^l_b$, $T=\hat T\cap {\mathbin{\mathbb R}}^m_c$ and smooth maps $\hat f:\hat R\rightarrow\hat T$, $\hat g:\hat S\rightarrow\hat T$ with $\hat f\vert_R=\tilde f$, $\hat g\vert_S=\tilde g$.
Now $f,g$ are transverse, so $\tilde f,\tilde g$ are transverse on $R,S$, and as this is an open condition, by making $\hat R,\hat S$ smaller if necessary we can make $\hat f,\hat g$ transverse. Since $\hat f:\hat R\rightarrow\hat T$, $\hat g:\hat S\rightarrow\hat T$ are transverse smooth maps of manifolds without boundary, by \cite[Prop.~II.4]{Lang} the fibre product $\hat V=\hat R\times_{\hat f,\hat T,\hat g}\hat S$ exists as an $n$-manifold without boundary, and it is also easy to show that charts on $\hat V$ are characterized by the injectivity of \eq{mc6eq1}. Define $V=\bigl\{(r,s)\in\hat V:r\in R$, $s\in S\bigr\}$. We will show that near $(0,0)\in V$, the embedding of $V$ in $\hat V$ is modelled on the inclusion of ${\mathbin{\mathbb R}}^n_d$ in ${\mathbin{\mathbb R}}^n$, so that $V$ is a manifold with corners.
The local boundary components of $R\subseteq{\mathbin{\mathbb R}}^k_a$ at 0 are $\{r_i=0\}$ for $i=1,\ldots,a$, where $(r_1,\ldots,r_k)$ are the coordinates on $R$ and $\hat R$. Write $\beta_i=\theta_*(\{r_i=0\})$ for the corresponding local boundary component of $X$ at $x$. Then $\beta_1,\ldots,\beta_a$ are the local boundary components of $X$ at $x$. Similarly, write $(s_1,\ldots,s_l)$ for coordinates on $S,\hat S$ and $\tilde\beta_1,\ldots,\tilde\beta_b$ for the local boundary components of $Y$ at $y$, where $\tilde\beta_i=\psi_*(\{s_i=0\})$, and $(t_1,\ldots,t_m)$ for coordinates on $T,\hat T$ and $\dot\beta_1,\ldots,\dot\beta_c$ for the local boundary components of $Z$ at $z$, where~$\dot\beta_i=\xi_*(\{t_i=0\})$.
Define subsets $P^f,P^g\subseteq\{1,\ldots,c\}$ by $P^f=\bigl\{i:(x,(z,\dot\beta_i))\in\Xi_-^f\bigr\}$ and $P^g=\bigl\{i:(y,(z,\dot\beta_i))\in\Xi_-^g\bigr\}$. Define maps $\Pi^f:P^f\rightarrow\{1,\ldots,a\}$ and $\Pi^g:P^g\rightarrow\{1,\ldots,b\}$ by $\Pi^f(i)=j$ if $\xi_-^f\bigl(x,(z,\dot\beta_i)\bigr)=(x,\beta_j)$ and $\Pi^g(i)=j$ if $\xi_-^g\bigl(y,(z,\dot\beta_i)\bigr)=(y,\tilde\beta_j)$. We can express the maps $C(f),C(g)$ of \eq{mc3eq2} over $x,y,z$ as follows: if $A\subseteq\{1,\ldots,a\}$ and $B\subseteq\{1,\ldots,b\}$ then \e \begin{split} C(f):\bigl(x,\{\beta_i:i\in A\}\bigr)&\longmapsto \bigl(z,\{\dot\beta_j:j\in P^f,\; \Pi^f(j)\in A\}\bigr),\\ C(g):\bigl(y,\{\tilde\beta_i:i\in B\}\bigr)&\longmapsto \bigl(z,\{\dot\beta_j:j\in P^g,\; \Pi^g(j)\in B\}\bigr). \end{split} \label{mc8eq1} \e
Lemma \ref{mc6lem} on $C(f),C(g)$ over $x,y,z$, which uses $f,g$ transverse, then turns out to be equivalent to the following conditions on $P^f,P^g,\Pi^f,\Pi^g$: \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[(A)] $\Pi^f(P^f\cap P^g)\cap \Pi^f(P^f\setminus P^g)=\emptyset$ and $\Pi^g(P^f\cap P^g)\cap \Pi^g(P^g\setminus P^f)=\emptyset$. \item[(B)] $\Pi^f\vert_{P^f\setminus P^g}\!:\!P^f\!\setminus\! P^g\!\rightarrow\!\{1, \ldots,a\}$, $\Pi^g\vert_{P^g\setminus P^f}\!:\!P^g\!\setminus\!P^f\!\rightarrow\! \{1,\ldots,b\}$ are injective. \item[(C)] Let $\approx$ be the equivalence relation on $P^f\cap P^g$ generated by $i\approx j$ if $\Pi^f(i)=\Pi^f(j)$ or $\Pi^g(i)=\Pi^g(j)$. Then for each $\approx$-equivalence class $E\subseteq P^f\cap P^g$ we have either \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[(a)] $\md{\Pi^f(E)}+\md{\Pi^g(E)}=\md{E}+1$, or \item[(b)] $\md{\Pi^f(E)}+\md{\Pi^g(E)}=\md{E}$. \end{itemize} Here it is automatic that $\md{\Pi^f(E)}+\md{\Pi^g(E)}\leqslant\nobreak\md{E}+1$, and Lemma \ref{mc6lem} implies that $\md{\Pi^f(E)}+\md{\Pi^g(E)} \geqslant\md{E}$. The number of equivalence classes of type (a) is~$\md{\Pi^f(P^f\cap P^g)}+ \md{\Pi^g(P^f\cap P^g)}-\md{P^f\cap P^g}$. \end{itemize} Also, if $i\in\{1,\ldots,c\}\setminus (P^f\cup P^g)$ then $\bigl(x,(z,\dot\beta_i)\bigr)\in\Xi^f_+$ and $\bigl(y,(z,\dot\beta_i)\bigr)\in\Xi^g_+$. These imply that ${\rm d} f\vert_x(T_xX),{\rm d} g\vert_y(T_yY)\subseteq T_z\dot\beta_i$, so that ${\rm d} f\vert_x(T_xX)+{\rm d} g\vert_y(T_yY)\subseteq T_z\dot\beta_i\subsetneq T_zZ$, contradicting $f,g$ transverse. This proves: \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[(D)] $P^f\cup P^g=\{1,\ldots,c\}$. \end{itemize}
Now $V$ is cut out in $\hat V$ by $r_i\geqslant 0$, $i\leqslant\nobreak a$, and $s_i\geqslant 0$, $i\leqslant\nobreak b$, that is \e \begin{split} V=\bigl\{\bigl((r_1,\ldots,r_k),(s_1,\ldots,s_l)\bigr)\in\hat V:\,& \text{$r_i\geqslant 0$, $i=1,\ldots,a$, and}\\ &\text{$s_i\geqslant 0$, $i=1,\ldots,b$}\bigr\}. \end{split} \label{mc8eq2} \e We claim that making $R,S,\hat R,\hat S$ smaller if necessary, the following hold: \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[(i)] If $i\in P^f\setminus P^g$ then the inequality $r_{\Pi^f(i)}\geqslant 0$ does not change $V$, and can be omitted in~\eq{mc8eq2}. \item[(ii)] If $i\in P^g\setminus P^f$ then the inequality $s_{\Pi^g(i)}\geqslant 0$ does not change $V$, and can be omitted in~\eq{mc8eq2}. \item[(iii)] If $i,j\in P^f\cap P^g$ with $i\approx j$, then the four inequalities $r_{\Pi^f(i)}\geqslant 0$, $r_{\Pi^f(j)}\geqslant 0$, $s_{\Pi^g(i)}\geqslant 0$, $s_{\Pi^g(j)}\geqslant 0$ have the same effect in $\hat V$. Thus for each $\approx$-equivalence class $E$ in $P^f\cap P^g$, it is sufficient to impose only one of the $2\md{E}$ inequalities $r_{\Pi^f(i)}\geqslant 0$, $s_{\Pi^g(i)}\geqslant 0$ in \eq{mc8eq2} for $i\in E$ to define~$V$. \item[(iv)] If $E$ is an $\approx$-equivalence class of type (b) in (C) above, then $r_{\smash{\Pi^f(i)}}\equiv s_{\Pi^g(i)}\equiv 0$ in $\hat V$ for all $i\in E$. Thus we can omit the inequalities $r_{\Pi^f(i)}\geqslant 0$, $s_{\Pi^g(i)}\geqslant 0$ in \eq{mc8eq2} for $i\in E$ to define~$V$. \end{itemize} Here we mean that all of the inequalities $r_i\geqslant 0$, $s_i\geqslant 0$ which (i)--(iv) allow us to omit may all be omitted simultaneously without changing~$V$.
To prove (i), note that as $\bigl(\xi(T),t_i\circ\xi^{-1}\bigr)$ is a boundary defining function for $Z$ at $(z,\dot\beta_i)$, and $\bigl(\theta(R),r_{\Pi^f(i)}\circ\theta^{-1}\bigr)$ is a boundary defining function for $X$ at $(x,\beta_{\Pi^f(i)})$, and $\xi_-^f\bigl(x,(z,\dot\beta_i)\bigr)=(x,\beta_{\Pi^f(i)})$, Definition \ref{mc3def1} and Proposition \ref{mc2prop6}(b) imply that $t_i\circ\xi^{-1}\circ f\equiv (r_{\Pi^f(i)}\circ\theta^{-1})\cdot G$ on $\theta(T)$ near $x$ for some smooth $G:\theta(T)\rightarrow(0,\infty)$. Hence $t_i\circ\hat f\equiv r_{\Pi^f(i)}\cdot\hat G$ on $\hat R$ near 0 for some smooth $\hat G:\hat R\rightarrow(0,\infty)$ defined near 0. Therefore making $R,\hat R$ smaller if necessary, $r_{\Pi^f(i)}\geqslant 0$ is equivalent to $t_i\circ\hat f\circ\pi_{\hat R}\geqslant 0$ on $\hat V$. But $t_i\circ\hat f\circ\pi_{\hat R}\!=\!t_i\circ\hat g\circ\pi_{\hat S}$, and $t_i\!\geqslant\! 0$ on $T$, so $t_i\circ\hat g\!\geqslant\! 0$ on $S$ as $\hat g$ maps~$S\!\rightarrow\! T$.
Hence the inequality $r_{\smash{\Pi^f(i)}}\geqslant 0$ is unnecessary in \eq{mc8eq2} provided we restrict to $S$ in $\hat S$, that is, provided we impose all the conditions $s_i\geqslant 0$. In fact we need more than this: we must also be able to omit conditions $s_{\Pi^g(j)}\geqslant 0$ when this is allowed by (ii)--(iv). This is possible because the $s_{\Pi^g(j)}\geqslant 0$ omitted in (ii)--(iv) correspond to different conditions $t_j\geqslant 0$ in $\hat T$ than the condition $t_i\geqslant 0$ we are considering, since (i) deals with $i\in P^f\setminus P^g$, (ii) with $j\in P^g\setminus P^f$, and (iii)--(iv) with $j\in P^f\cap P^g$, which are disjoint sets. This proves (i), and also that we can omit $r_{\Pi^f(i)}\geqslant 0$ in (i) independently of other omissions in (i)--(iv). The proof for (ii) is the same.
For (iii), if $i\in P^f\cap P^g$ then by Definition \ref{mc3def1} and Proposition \ref{mc2prop6}(b) as above we see that making $R,\hat R,S,\hat S$ smaller if necessary, $r_{\smash{\Pi^f(i)}}\geqslant 0$ is equivalent to $t_i\circ\hat f\circ\pi_{\hat R}\geqslant 0$ on $\hat V$, which is also equivalent to $s_{\smash{\Pi^g(i)}}\geqslant 0$ on $\hat V$. Suppose $i,j\in P^f\cap P^g$ with $\Pi^f(i)=\Pi^f(j)$. Then the conditions $r_{\smash{\Pi^f(i)}}\geqslant 0$, $r_{\smash{\Pi^f(j)}}\geqslant 0$ are the same, and are equivalent to $s_{\smash{\Pi^g(i)}}\geqslant 0$ and $s_{\smash{\Pi^g(j)}}\geqslant 0$. Similarly, if $\Pi^g(i)=\Pi^g(j)$ then the conditions $r_{\smash{\Pi^f(i)}}\geqslant 0$, $r_{\smash{\Pi^f(j)}}\geqslant 0$, $s_{\smash{\Pi^g(i)}}\geqslant 0$ and $s_{\smash{\Pi^g(j)}}\geqslant 0$ are all equivalent. Since these two cases generate $\approx$, part (iii) follows.
For (iv), let $E$ be an $\approx$-equivalence class of type (b). Define submanifolds $\hat R_E,\hat S_E,\hat T_E$ in $\hat R,\hat S,\hat T$ by \begin{align*} \hat R_E&=\bigl\{(r_1,\ldots,r_k)\in\hat R:r_{\smash{\Pi^f(i)}}=0,\; i\in E\bigr\}, \\ \hat S_E&=\bigl\{(s_1,\ldots,s_l)\in\hat S:s_{\smash{\Pi^g(i)}}=0,\; i\in E\bigr\}, \\ \hat T_E&=\bigl\{(t_1,\ldots,t_m)\in\hat T:t_i=0,\; i\in E\bigr\}. \end{align*} Making $R,S,\hat R,\hat S$ smaller if necessary, $\hat f$ maps $\hat R_E\rightarrow \hat T_E$ and $\hat g$ maps $\hat S_E\rightarrow\hat T_E$. As $\hat f,\hat g$ are transverse, an argument similar to Lemma \ref{mc6lem} shows that $\hat f:\hat R_E\rightarrow\hat T_E$ and $\hat g:\hat S_E\rightarrow\hat T_E$ are transverse, so the fibre product $\hat R_E\times_{\hat T_E}\hat S_E$ exists as a manifold, and is a submanifold of $\hat V=\hat R\times_{\hat T}\hat S$. Now \begin{equation*} \mathop{\rm dim}\nolimits \hat R_E\!\times_{\hat T_E}\hat S_E\!=\!(k\!-\!\md{\Pi^f(E)})\!+\!(l\!-\!\md{\Pi^g(E)})\!-\! (m\!-\!\md{E})=k\!+\!l\!-\!m\!=\!\mathop{\rm dim}\nolimits\hat V, \end{equation*} since $E$ is of type (b). Thus $\hat R_E\times_{\hat T_E}\hat S_E$ is open in $\hat V$, as they are of the same dimension, and contains $(0,0)$. So making $R,S,\hat R,\hat S$ smaller if necessary, we have $\hat R_E\times_{\hat T_E}\hat S_E=\hat V$, proving (iv). There are no further issues about simultaneous omissions in~(i)--(iv).
Choose a subset $Q\subseteq P^f\cap P^g$ such that $Q$ contains exactly one element of each $\approx$-equivalence class of type (a) in $P^f\cap P^g$, and no elements of $\approx$-equivalence classes of type (b). Then \eq{mc8eq2} and (i)--(iv) above imply that \e \begin{split} V=\bigl\{\bigl((r_1,&\ldots,r_k),(s_1,\ldots,s_l)\bigr)\in\hat V: r_i\geqslant 0,\; i\in\{1,\ldots,a\}\setminus\Pi^f(P^f),\\ &s_i\geqslant 0,\; i\in\{1,\ldots,b\}\setminus\Pi^g(P^g), \quad r_{\Pi^f(i)}\geqslant 0,\; i\in Q\bigr\}. \end{split} \label{mc8eq3} \e For the first condition $r_i\geqslant 0$, $i\in\{1,\ldots,a\} \setminus\Pi^f(P^f)$ in \eq{mc8eq3}, there are \begin{equation*} a-\bmd{\Pi^f(P^f)}=a-\bmd{\Pi^f(P^f\cap P^g)}-\bmd{\Pi^f(P^f\setminus P^g)}=a-\bmd{\Pi^f(P^f\cap P^g)}-\bmd{P^f\setminus P^g} \end{equation*} inequalities, using (A) above in the first step and (B) in the second. Similarly, for the second condition $s_i\geqslant 0$, $i\in\{1,\ldots,b\} \setminus\Pi^g(P^g)$ there are $b-\bmd{\Pi^g(P^f\cap P^g)}-\bmd{P^g\setminus P^f}$ inequalities. For the third there are $\md{Q}=\md{\Pi^f(P^f\cap P^g)}+\md{\Pi^g(P^f\cap P^g)}-\md{P^f\cap P^g}$ inequalities by (C) above. Hence in total there are \e \begin{split} \bigl(&a-\bmd{\Pi^f(P^f\cap P^g)}-\bmd{P^f\setminus P^g}\bigr)+ \bigl(b-\bmd{\Pi^g(P^f\cap P^g)}-\bmd{P^g\setminus P^f}\bigr)\\ &\qquad+\bigl(\bmd{\Pi^f(P^f\cap P^g)}+\bmd{\Pi^g(P^f\cap P^g)}-\md{P^f\cap P^g}\bigr)\\ &=\!a\!+\!b\!-\!\md{P^f\setminus P^g}\!-\!\md{P^g\setminus P^f}\!-\!\md{P^f\cap P^g}\!=\!a\!+\!b\!-\!\md{P^f\cup P^g}\!=\!a\!+\!b\!-\!c\!\! \end{split} \label{mc8eq4} \e inequalities $r_i\geqslant 0$, $s_i\geqslant 0$ appearing in \eq{mc8eq3}, using (D) at the last step.
Define a vector subspace $L$ of $T_{(0,0)}\hat V$ by \e \begin{split} L&=\bigl\{\bigl((r_1,\ldots,r_k),(s_1,\ldots,s_l)\bigr)\in T_{(0,0)}\hat V:\text{$r_i=0$, $i\leqslant\nobreak a$, $s_j=0$, $j\leqslant\nobreak b$}\bigr\}. \end{split} \label{mc8eq5} \e That is, we replace each inequality $r_i\geqslant 0$, $s_i\geqslant 0$ in \eq{mc8eq2} by $r_i=0$, $s_i=0$. By the proof of the equivalence of \eq{mc8eq2} and \eq{mc8eq3} using (i)--(iv), we see that \e \begin{split} L&=\bigl\{\bigl((r_1,\ldots,r_k),(s_1,\ldots,s_l)\bigr)\in T_{(0,0)}\hat V: r_{\Pi^f(i)}=0 ,\; i\in Q,\\ &\quad r_i=0,\; i\in\{1,\ldots,a\}\setminus\Pi^f(P^f),\; s_i=0,\; i\in\{1,\ldots,b\}\setminus\Pi^g(P^g)\bigr\}, \end{split} \label{mc8eq6} \e replacing inequalities $r_i\geqslant 0$, $s_i\geqslant 0$ in \eq{mc8eq3} by $r_i=0$, $s_i=0$.
Now $T_z(S^c(Z))={\rm d} f\vert_x(T_x(S^a(X)))+{\rm d} g\vert_y(T_y(S^b(Y)))$ by Definition \ref{mc6def1}. As there is a natural isomorphism \begin{equation*} L\cong \bigl\{u\oplus v\in T_x(S^a(X))\oplus T_y(S^b(Y)): {\rm d} f\vert_x(u)= {\rm d} g\vert_y(v)\bigr\}, \end{equation*} we see that \e \begin{split} \mathop{\rm dim}\nolimits L&=\mathop{\rm dim}\nolimits S^a(X)+\mathop{\rm dim}\nolimits S^b(Y)-\mathop{\rm dim}\nolimits S^c(Z)\\ &=(k-a)+(l-b)-(m-c)=\mathop{\rm dim}\nolimits\hat V-(a+b-c). \end{split} \label{mc8eq7} \e Since by \eq{mc8eq4} there are $a+b-c$ equalities $r_i=0$, $s_i=0$ in \eq{mc8eq6}, equation \eq{mc8eq7} implies that the conditions $r_i=0$, $s_i=0$ in \eq{mc8eq6} are transverse, so the inequalities $r_i\geqslant 0$, $s_i\geqslant 0$ in \eq{mc8eq3} are transverse at $(0,0)$ in $\hat V$. That is, the corresponding 1-forms ${\rm d} r_i\vert_{(0,0)}$, ${\rm d} s_i\vert_{(0,0)}$ are linearly independent in $T^*_{(0,0)}\hat V$. This is an open condition in $\hat V$. Since $\hat V$ is a manifold without boundary of dimension $n$, it follows that $V$ is near $(0,0)$ a manifold with corners of dimension $n$, locally modelled on ${\mathbin{\mathbb R}}^n_{a+b-c}$. Making $R,S$ smaller if necessary, $V$ becomes an $n$-manifold with corners.
Let $(U,\phi')$ be a chart on $V$, with $U$ open in ${\mathbin{\mathbb R}}^n_{a+b-c}$ and $0\in U$ with $\phi'(0)=(0,0)$. Define $\phi:U\rightarrow W=X\times_ZY$ by $\phi=(\theta\times\psi)\circ\phi'$. Then $\phi$ is a homeomorphism with an open set in $W$, since $\phi':U\rightarrow V$ is a homeomorphism with an open set in $V$ and $\theta\times\psi:V\rightarrow W$ is a homeomorphism with an open set in $W$. Also $\phi(0)=(x,y)$ as $\phi'(0)=(0,0)$ and $\theta(0)=x$, $\psi(0)=y$. Thus $(U,\phi)$ is a chart on the topological space $W$ whose image contains~$(x,y)$.
Now $(U,\phi')$ extends to a chart $(\hat U,\hat\phi')$ on $\hat V$. But $\hat V$ comes from Theorem \ref{mc6thm1} for manifolds without boundary, and so \begin{equation*} {\rm d}(\pi_{\hat R}\circ\hat\phi')\vert_{\hat u}\oplus{\rm d}(\pi_{\hat S}\circ\hat \phi')\vert_{\hat u}:T_{\hat u}\hat U\longrightarrow T_{\hat r}\hat R\oplus T_{\hat s}\hat S \end{equation*} is injective for all $\hat u\in\hat U$ with $\hat\phi'(\hat u)=(\hat r,\hat s)$. Restricting to $U$ shows that \e {\rm d}(\pi_R\circ\phi')\vert_u\oplus{\rm d}(\pi_S\circ\phi')\vert_u:T_uU\longrightarrow T_rR \oplus T_sS \label{mc8eq8} \e is injective for all $u\in U$ with $\phi'(u)=(r,s)$. But if $u\in U$ with $\phi'(u)=(r,s)$ and $\theta(r)=x'$, $\psi(s)=y'$ then ${\rm d}(\pi_X\circ\phi)\vert_u={\rm d}\theta\vert_r\circ{\rm d}(\pi_R\circ\phi')\vert_u$ and ${\rm d}(\pi_Y\circ\phi)\vert_u={\rm d}\psi\vert_s\circ{\rm d}(\pi_S\circ\phi') \vert_u$, where ${\rm d}\theta\vert_r:T_rR\rightarrow T_{x'}X$ and ${\rm d}\psi\vert_s:T_sS\rightarrow T_{y'}Y$ are isomorphisms as $(R,\theta),(S,\psi)$ are charts on $X,Y$. So composing \eq{mc8eq8} with ${\rm d}\theta\vert_r\times {\rm d}\psi\vert_s$ shows that \begin{equation*} {\rm d}(\pi_X\circ\phi)\vert_u\oplus{\rm d}(\pi_Y\circ\phi)\vert_u:T_uU\longrightarrow T_{x'}X\oplus T_{y'}Y \end{equation*} is injective, so $(U,\phi)$ satisfies the conditions of Theorem~\ref{mc6thm1}.
We have now shown that $W$ can be covered by charts $(U,\phi)$ satisfying the conditions of Theorem \ref{mc6thm1}. For such $(U,\phi)$, observe that \eq{mc6eq1} actually maps \e \begin{split} {\rm d}(\pi_X\circ\phi)_{u}&\oplus{\rm d}(\pi_Y\circ\phi)_{u}: T_{u}U\cong{\mathbin{\mathbb R}}^n\longrightarrow\\ &\bigl\{(\alpha,\beta)\in T_xX\oplus T_yY:{\rm d} f\vert_x(\alpha)={\rm d} g\vert_y(\beta) \bigr\}. \end{split} \label{mc8eq9} \e Now the r.h.s.\ of \eq{mc8eq9} has dimension $n$ by transversality of $f,g$, and \eq{mc8eq9} is injective, so it is an isomorphism. Let $(U,\phi)$ and $(V,\psi)$ be two such charts, and $u\in U$, $v\in V$ with $\phi(u)=\psi(v)=(x,y)$. Since \eq{mc8eq9} and its analogue for $(V,\psi)$ are isomorphisms, we see that $\psi^{-1} \circ\phi$ is differentiable at $u$ and its derivative is an isomorphism, the composition of \eq{mc8eq9} with the inverse of its analogue for $\psi$. Using the same argument for all $u\in \phi^{-1}(\psi(V))$, we find that $\psi^{-1}\circ\phi:\phi^{-1}(\psi(V))\rightarrow \psi^{-1}(\phi(U))$ is a diffeomorphism, and so $(U,\phi),(V,\psi)$ are automatically compatible.
Therefore the collection of all charts on $W$ satisfying the conditions of Theorem \ref{mc6thm1} is an atlas. But any chart compatible with all charts satisfying Theorem \ref{mc6thm1} also satisfies the conditions of Theorem \ref{mc6thm1}, so this atlas is maximal. Also the topological space $W=X\times_ZY$ is paracompact and Hausdorff, since $X,Y,Z$ are paracompact and Hausdorff as they are manifolds. Hence the construction of Theorem \ref{mc6thm1} does make $W$ into an $n$-manifold with corners.
It remains to show that $\pi_X:W\rightarrow X,$ $\pi_Y:W\rightarrow Y$ are smooth. They are clearly continuous, since $W$ was defined as the topological fibre product. Locally $\pi_X,\pi_Y$ are identified with $\pi_R:V\rightarrow R$ and $\pi_S:V\rightarrow S$ above, which are restrictions to $V$ of $\pi_{\hat R}:\hat V\rightarrow\hat R$ and $\pi_{\hat S}:\hat V\rightarrow\hat S$. But $\hat V$ is a fibre product of manifolds without boundary, so $\pi_{\hat R},\pi_{\hat S}$ are smooth, which implies that $\pi_R,\pi_S$ are weakly smooth. To prove $\pi_R,\pi_S$ are smooth we note that $(R,r_i)$ for $i=1,\ldots,a$ are boundary defining functions on $R$ at $(r,\{r_i=0\})$, and $(S,s_j)$ for $j=1,\ldots,b$ are boundary defining functions on $S$ at $(s,\{s_j=0\})$, and we show using the discussion of (i)--(iv) that the pullbacks to $V$ satisfy the conditions of Definition \ref{mc3def1}. As smoothness is a local condition, $\pi_X,\pi_Y$ are smooth. \end{proof}
\begin{prop} In the situation of Proposition\/ {\rm\ref{mc8prop1},} $W,\pi_X,\pi_Y$ are a fibre product\/ $X\times_{f,Z,g}Y$ in\/~${\mathop{\bf Man^c}}$. \label{mc8prop2} \end{prop}
\begin{proof} By definition $f\circ\pi_X=g\circ\pi_Y$. Suppose $W'$ is a manifold with corners and $\pi_X':W'\rightarrow X$, $\pi_Y':W'\rightarrow Y$ are smooth maps with $f\circ\pi_X'=g\circ\pi_Y'$. We must show there exists a unique smooth map $h:W'\rightarrow W$ with $\pi_X'=\pi_X\circ h$ and $\pi_Y'=\pi_Y\circ h$. Since $W$ is a fibre product at the level of topological spaces, there is a unique continuous map $h:W'\rightarrow W$ given by $h(w')=\bigl(\pi_X'(w'),\pi_Y'(w')\bigr)$ with $\pi_X'=\pi_X\circ h$ and $\pi_Y'=\pi_Y\circ h$. We must show $h$ is smooth.
Let $w'\in W'$, with $\pi'_X(w')=x\in X$ and $\pi'_Y(w')=y\in Y$, so that $f(x)=g(y)=z\in Z$. Let $(R,\theta),(S,\psi),\ldots$ be as in the proof of Proposition \ref{mc8prop1}. Then $\hat V=\hat R\times_{\hat T}\hat S$ is a fibre product of manifolds without boundary, and $V=R\times_TS\subseteq\hat V$ is a manifold with corners, and $\theta\times\psi:V\rightarrow W$ is a diffeomorphism with an open subset of $W$. Let $U'$ be an open neighbourhood of $w'\in W'$ such that $\pi_X'(U')\subseteq\theta(R)$ and $\pi_Y'(U')\subseteq\psi(S)$. Consider the map $\tilde h=(\theta\times\psi)^{-1}\circ h\vert_{U'}:U'\rightarrow V\subseteq\hat V$. We will show $\tilde h$ is smooth. This implies $h\vert_{U'}=(\theta\times\psi)\circ\tilde h$ is smooth, so $h$ is smooth as this is a local condition.
As $\pi_{\hat R}\circ\tilde h=\theta^{-1}\circ\pi'_X$ and $\pi_{\hat S}\circ\tilde h=\psi^{-1}\circ\pi'_Y$ are smooth, and $\hat V=\hat R\times_{\hat T}\hat S$ is a fibre product of manifolds, we see that $\tilde h:U'\rightarrow \hat V$ is smooth, and therefore $\tilde h:U'\rightarrow V$ is weakly smooth. To show $\tilde h:U'\rightarrow V$ is smooth, we must verify the additional condition in Definition \ref{mc3def1}. It is enough to do this at $w'\in W'$ and $(0,0)\in V$. The proof of Proposition \ref{mc8prop1} shows that $V$ is given by \eq{mc8eq3}, and the inequalities $r_i\geqslant 0$, $s_i\geqslant 0$ in \eq{mc8eq3} are transverse. Therefore, if $\beta$ is a local boundary component of $V$ at $(0,0)$, then either (a) $(V,r_i)$ is a boundary defining function for $V$ at $((0,0),\beta)$ for some $r_i\geqslant 0$ appearing in \eq{mc8eq3}, or (b) $(V,s_i)$ is a boundary defining function for $V$ at $((0,0),\beta)$ for some $s_i\geqslant 0$ in~\eq{mc8eq3}.
In case (a), as $\bigl(\theta(R), r_i\circ\theta^{-1}\bigr)$ is a local boundary defining function for $X$ at $(x,\theta_*(\{r_i=0\}))$, and $\pi_X':W'\rightarrow X$ is smooth, by Definition \ref{mc3def1} either $r_i\circ\theta^{-1}\circ\pi_X'\equiv 0$ near $w'$ in $W'$ or $\bigl((\pi_X')^{-1}(\theta(R)), r_i\circ\theta^{-1}\circ\pi_X'\bigr)$ is a boundary defining function for $W'$ at some $(w',\tilde\beta)$. Since $r_i\circ\tilde h=r_i\circ\theta^{-1}\circ\pi'_X\vert_{U'}$ and $U'$ is an open neighbourhood of $w'$ in $W'$ it follows that either $r_i\circ\tilde h\equiv 0$ near $w'$, or $(U',r_i\circ\tilde h)$ is a boundary defining function for $U'$ at some $(w',\tilde\beta)$. This proves the additional condition in Definition \ref{mc3def1} in case (a). The proof for (b) is the same, using $\pi_Y':W'\rightarrow Y$ smooth. Thus $\tilde h$, and hence $h$, is smooth. \end{proof}
\section{Proof of Theorem \ref{mc6thm2}} \label{mc9}
We first construct bijections \eq{mc6eq9}--\eq{mc6eq10}. Let $X,Y,Z$ be manifolds with corners, and $f:X\rightarrow Z,$ $g:Y\rightarrow Z$ be strongly transverse smooth maps, and write $W$ for the fibre product $X\times_{f,Z,g}Y$, which we proved exists as a manifold with corners in \S\ref{mc8}. Let $(x,y)\in W$, so that $x\in X$ and $y\in Y$ with $f(x)=g(y)=z\in Z$. Use all the notation of Proposition \ref{mc8prop1}, so that the local boundary components of $X$ at $x$ are $\beta_1,\ldots,\beta_a$, of $Y$ at $y$ are $\tilde\beta_1,\ldots, \tilde\beta_b$, and of $Z$ at $z$ are $\dot\beta_1,\ldots,\dot\beta_c$. Then over $x,y,z$, the maps $C(f),C(g)$ are given explicitly by \eq{mc8eq1} in terms of $P^f,P^g\subseteq\{1,\ldots,c\}$ and maps $\Pi^f:P^f\rightarrow\{1,\ldots,a\}$ and~$\Pi^g:P^g\rightarrow\{1,\ldots,b\}$.
Properties of these $P^f,P^g,\Pi^f,\Pi^g$ are given in (A)--(D) of the proof of Proposition \ref{mc8prop1}. In addition, as $f,g$ are strongly transverse, there are no $\approx$-equivalence classes $E$ of type (b) in part (C), since if $E$ is such a class then \eq{mc8eq1} gives \e C(f)\bigl(x,\{\beta_i:i\!\in\! \Pi^f(E)\}\bigr)\!=\! C(g)\bigl(y,\{\tilde\beta_i:i\!\in\! \Pi^g(E)\}\bigr)\!=\! \bigl(z,\{\dot\beta_j:j\!\in\! E\}\bigr), \label{mc9eq1} \e and $j=\md{\Pi^f(E)}$, $k=\md{\Pi^g(E)}$, $l=\md{E}$ satisfy $j,k,l>0$ and $j+k=l$, contradicting Definition \ref{mc6def2}. Using \eq{mc8eq1} and these properties of $P^f,P^g,\Pi^f,\Pi^g$ we can describe the points of the r.h.s.\ of \eq{mc6eq9} over $x,y,z$ explicitly when $i=1$. We divide such points into three types: \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item[(i)] $\bigl((x,\{\beta_i\}),(y,\emptyset)\bigr)$ for $i\in\{1,\ldots,a\}\setminus\Pi^f(P^f)$ lies in the term on the r.h.s.\ of \eq{mc6eq9} with $i\!=\!j\!=\!1$ and $k\!=\!l\!=\!0$, as $C(f)(x,\{\beta_i\})=C(g)(y,\emptyset)=(z,\emptyset)$. \item[(ii)] $\bigl((x,\emptyset),(y,\{\tilde\beta_i\})\bigr)$ for $i\in\{1,\ldots,b\}\setminus\Pi^g(P^g)$ lies in the term on the r.h.s.\ of \eq{mc6eq9} with $i\!=\!k\!=\!1$ and $j\!=\!l\!=\!0$, as $C(f)(x,\emptyset)=C(g)(y,\{\tilde\beta_i\})=(z,\emptyset)$. \item[(iii)] $\bigl((x,\{\beta_i:i\in \Pi^f(E)\}),(y,\{\tilde\beta_i:i\in \Pi^g(E)\})\bigr)$ for $E$ a $\approx$-equivalence class of type (a) in part (C) lies in the term on the r.h.s.\ of \eq{mc6eq9} with $i=1$, $j=\md{\Pi^f(E)}$, $k=\md{\Pi^g(E)}$, $l=\md{E}$, since then \eq{mc9eq1} holds, and $i=1=j+k-l$. \end{itemize}
Now $W$ near $(x,y)$ is diffeomorphic to $V$ near $(0,0)$, where $V$ is given in \eq{mc8eq3}, and the inequalities $r_i\geqslant 0$, $s_i\geqslant 0$ in \eq{mc8eq3} are transverse. Thus, the local boundary components of $W$ at $(x,y)$ correspond to the inequalities $r_i\geqslant 0$, $s_i\geqslant 0$ appearing in \eq{mc8eq3}. These in turn correspond to points in the r.h.s.\ of \eq{mc6eq9} with $i=1$ as follows: \begin{itemize} \setlength{\itemsep}{0pt} \setlength{\parsep}{0pt} \item The local boundary component $r_i=0$ for $i\in\{1,\ldots,a\}\setminus\Pi^f(P^f)$ of $V$ at $(0,0)$ corresponds to the point $\bigl((x,\{\beta_i\}),(y,\emptyset)\bigr)$ of type (i). \item The local boundary component $s_i=0$ for $i\in\{1,\ldots,b\}\setminus\Pi^g(P^g)$ of $V$ at $(0,0)$ corresponds to the point $\bigl((x,\emptyset),(y,\{\tilde\beta_i\})\bigr)$ of type (ii). \item The local boundary component $r_{\smash{\Pi^f(i)}}=0$ for $i\in Q$ of $V$ at $(0,0)$ corresponds to the point $\bigl((x,\{\beta_i:i\in \Pi^f(E)\}),(y,\{\tilde\beta_i:i\in \Pi^g(E)\})\bigr)$ of type (iii), where $E$ is the unique $\approx$-equivalence class containing $i$.
(Note that $Q$ contains one element of each $\approx$-equivalence class.) \end{itemize} The natural map $\coprod_{i\geqslant 0}C(W)\rightarrow \coprod_{j\geqslant 0}C(X)\times_{\coprod_{l\geqslant 0}C(Z)}\coprod_{k\geqslant 0}C(Y)$ referred to in the last part of Theorem \ref{mc6thm2} agrees with this correspondence.
This proves that \eq{mc6eq9} is a bijection for $i=1$. For the general case, suppose $(w,\{\hat\beta_1,\ldots,\hat\beta_i\})\in C_i(W)$. Let $(w,\{\hat\beta_a\})$ correspond to $\bigl((x,J_a),(y,K_a)\bigr)$ as above for $a=1,\ldots,i$. Then $C(f)(x,J_a)=C(g)(y,K_a)=(z,L_a)$ for some $L_a$. It is easy to show that as $\hat\beta_1,\ldots,\hat\beta_i$ are distinct, the subsets $J_1,\ldots,J_i$ are disjoint, and $K_1,\ldots,K_i$ are disjoint, and $L_1,\ldots,L_i$ are disjoint. Also $C( f)(x,J_1\amalg\cdots\amalg J_i)=C(g)(y,K_1\amalg\cdots\amalg K_i)=(z,L_1\amalg\cdots\amalg L_i)$. Hence \begin{align*} &\bigl((x,J_1\amalg\cdots\amalg J_i),(y,K_1\amalg\cdots\amalg K_i)\bigr)\in \\ &\bigl(C_j(X)\cap C(f)^{-1}(C_l(Z))\bigr)\times_{C(f),C_l(Z),C(g)}\! \bigl(C_k(Y)\cap C(g)^{-1}(C_l(Z))\bigr), \end{align*} where $j=\md{J_1}+\cdots+\md{J_i}$, $k=\md{K_1}+\cdots+\md{K_i}$ and $l=\md{L_1}+\cdots+\md{L_i}$. As $1=\md{J_a}+\md{K_a}-\md{L_a}$ for $a=1,\ldots,i$, we have $i=j+k-l$. So mapping $(w,\{\hat\beta_1, \ldots,\hat\beta_i\})$ to $\bigl((x,J_1\amalg\cdots\amalg J_i),(y,K_1 \amalg\cdots\amalg K_i)\bigr)$ takes the l.h.s.\ of \eq{mc6eq9} to the r.h.s.\ of \eq{mc6eq9}. Generalizing the argument in the $i=1$ case proves that this map is a bijection, so \eq{mc6eq9} is a bijection, and thus \eq{mc6eq10} is a bijection.
Now let $(w,\{\hat\beta_{a_1},\ldots,\hat\beta_{a_i}\})\in C_i(W)$ with \begin{align*} C(\pi_X):(w,\{\hat\beta_{a_1},\ldots,\hat\beta_{a_i}\})&\longmapsto (x,\{\beta_{b_1},\allowbreak\ldots,\allowbreak\beta_{b_j}\})\in C_j(X),\\ C(\pi_Y):(w,\{\hat\beta_{a_1},\ldots,\hat\beta_{a_i}\})&\longmapsto (y,\{\tilde\beta_{c_1},\ldots,\tilde\beta_{c_k}\})\in C_k(Y),\\ C(f):(x,\{\beta_{b_1},\ldots,\beta_{b_j}\})&\longmapsto (z,\{\dot\beta_{d_1},\ldots,\dot\beta_{d_l}\})\in C_l(Z), \quad\text{and}\\ C(g):\bigl(y,\{\tilde\beta_{c_1},\ldots,\tilde\beta_{c_k}\}\bigr)&\longmapsto (z,\{\dot\beta_{d_1},\ldots,\dot\beta_{d_l}\})\in C_l(Z). \end{align*} Define an immersion $\iota^i_W:C_i(W)\rightarrow W$ by $\iota^i_W:(w,\{\hat\beta_{a_1},\ldots,\hat\beta_{a_i}\})\mapsto w$, and similarly for $X,Y,Z$. Then $i_Z^l\circ C(f)\equiv f\circ i_X^j$ near $(x,\{\beta_{b_1},\ldots,\beta_{b_j}\})$ in $C_j(X)$, and so on, so we have a commutative diagram \e \begin{gathered} \xymatrix@R=5pt@C=17pt{ T_{(w,\{\hat\beta_{a_1},\ldots,\hat\beta_{a_i}\})}C_i(W) \ar[rr]_{{\rm d} C(\pi_Y)} \ar[dd]_{{\rm d} C(\pi_X)} \ar[dr]_(0.65){{\rm d} \iota_W^i} && T_{(y,\{\tilde\beta_{c_1},\ldots,\tilde\beta_{c_k}\})}C_k(Y) \ar[dd]^(0.3){{\rm d} C(g)} \ar[dr]^(0.6){{\rm d}\iota_Y^k} \\ & T_wW \ar[rr]^(0.2){{\rm d}\pi_Y} \ar[dd]^(0.3){{\rm d}\pi_X} && T_yY \ar[dd]_(0.45){{\rm d} g} \\ T_{(x,\{\beta_{b_1},\ldots,\beta_{b_j}\})}C_j(X) \ar[rr]^(0.4){{\rm d} C(f)} \ar[dr]_(0.6){{\rm d}\iota_X^j} && T_{(z,\{\dot\beta_{d_1},\ldots, \dot\beta_{d_l}\})}C_l(Z) \ar[dr]^(0.65){{\rm d}\iota_Z^l} \\ & T_xX \ar[rr]^{{\rm d} f} && T_zZ.} \end{gathered} \label{mc9eq2} \e
Since $W=X\times_ZY$ in ${\mathop{\bf Man^c}}$, in \eq{mc9eq2} we have an isomorphism \e {\rm d}\pi_X\oplus{\rm d}\pi_Y: T_wW\,{\buildrel\cong\over\longrightarrow}\, \mathop{\rm Ker}\bigl(({\rm d} f\oplus -{\rm d} g):T_xX\oplus T_yY\longrightarrow T_zZ\bigr). \label{mc9eq3} \e As $\iota_W^i,\iota_X^j,\iota_Y^k,\iota_Z^l$ are immersions, the diagonal maps ${\rm d}\iota_W^i,{\rm d}\iota_X^j,{\rm d}\iota_Y^k,{\rm d}\iota_Z^l$ in \eq{mc9eq2} are injective. Thus we can identify $T_{(w,\{\hat\beta_{a_1},\ldots, \hat\beta_{a_i}\})}C_i(W)$ with its image in $T_wW$ under ${\rm d}\iota_W^i$, and similarly for $X,Y,Z$. The proof of Proposition \ref{mc8prop1} implies that for each local boundary component $\hat\beta_a$ of $W$ at $w$, the tangent space $T_w\hat\beta_a\subset T_wW$ is the pullback under ${\rm d} f$ or ${\rm d} g$ of $T_x\beta_b$ or $T_y\tilde\beta_c$ for appropriate local boundary components $\beta_b$ of $X$ at $x$ or $\tilde\beta_c$ of $Y$ at $y$. So using that \eq{mc9eq3} is an isomorphism and ${\rm d}\iota_W^i$ is injective, we see that \e \begin{split} &{\rm d} C(\pi_X)\oplus{\rm d} C(\pi_Y):T_{(w,\{\hat\beta_{a_1},\ldots,\hat\beta_{a_i}\})} C_i(W) \longrightarrow\\ &\mathop{\rm Ker}\bigl({\rm d} C(f)\oplus -{\rm d} C(g):T_{(x,\{\beta_{b_1},\ldots,\beta_{b_j} \})}C_j(X)\oplus T_{(y,\{\tilde\beta_{c_1},\ldots,\tilde\beta_{c_k}\})}C_k(Y)\\ &\qquad\qquad\qquad\qquad \longrightarrow T_{(z,\{\dot\beta_{d_1},\ldots, \dot\beta_{d_l}\})}C_l(Z)\bigr) \end{split} \label{mc9eq4} \e is an isomorphism. That is, \eq{mc9eq4} is injective as it is a restriction of \eq{mc9eq3} which is injective, and it is surjective as the equations defining $T_{(w,\{\hat\beta_{a_1},\ldots,\hat\beta_{a_i}\})} C_i(W)$ in $T_wW$ are pullbacks of equations defining $T_{(x,\{\beta_{b_1},\ldots,\beta_{b_j}\})} C_j(X)$ in $T_xX$ or defining $T_{(y,\{\tilde\beta_{c_1},\ldots,\tilde\beta_{c_k}\})}C_k(Y)$ in $T_yY$.
As $W=X\times_ZY$ we have $\mathop{\rm dim}\nolimits W=\mathop{\rm dim}\nolimits X+\mathop{\rm dim}\nolimits Y-\mathop{\rm dim}\nolimits Z$, and $i=j+k-l$ from above. So $\mathop{\rm dim}\nolimits C_i(W)=\mathop{\rm dim}\nolimits C_j(X)+\mathop{\rm dim}\nolimits C_k(Y)-\mathop{\rm dim}\nolimits C_l(Z)$ as $\mathop{\rm dim}\nolimits C_i(W)=\mathop{\rm dim}\nolimits W-i$,\ldots. Together with \eq{mc9eq4} an isomorphism this implies \e \begin{split} &T_{(z,\{\dot\beta_{d_1},\ldots, \dot\beta_{d_l}\})}C_l(Z)= \\ &\qquad{\rm d} C(f)\bigl(T_{(x,\{\beta_{b_1},\ldots,\beta_{b_j} \})}C_j(X) \bigr)+{\rm d} C(g)\bigl(T_{(y,\{\tilde\beta_{c_1},\ldots,\tilde\beta_{c_k}\})}C_k(Y) \bigr). \end{split} \label{mc9eq5} \e
Let $x\in S^p(X)$, $y\in S^q(Y)$ and $z\in S^r(Z)$. Then, as $f$ and $g$ are transverse, we have \e T_z(S^r(Z))={\rm d} f\vert_x(T_x(S^p(X)))+{\rm d} g\vert_y(T_y(S^q(Y))). \label{mc9eq6} \e Clearly $(x,\{\beta_{b_1},\ldots,\beta_{b_j}\})\in S^{p-j}(C_j(X))$ and ${\rm d}\iota^j_X\bigl(T_{(x,\{\beta_{b_1},\ldots,\beta_{b_j}\})} S^{p-j}C_j(X)\bigr)\allowbreak =T_xS^p(X)$. So pulling back \eq{mc9eq6} using $\iota^j_X,\iota^k_Y,\iota^l_Z$ yields \e \begin{split} T_{(z,\{\dot\beta_{d_1},\ldots,\dot\beta_{d_l}\})}&(S^{r-l}(C_l(Z)))=\\ &{\rm d} C(f)\vert_{(x,\{\beta_{b_1},\ldots,\beta_{b_j}\})} (T_{(x,\{\beta_{b_1},\ldots,\beta_{b_j}\})} (S^{p-j}(C_j(X))))+\\ &{\rm d} C(g)\vert_{(y,\{\tilde\beta_{c_1},\ldots,\tilde\beta_{c_k}\})} (T_{(y,\{\tilde\beta_{c_1},\ldots,\tilde\beta_{c_k}\})}(S^{q-k}(C_k(Y)))). \end{split} \label{mc9eq7} \e Equations \eq{mc9eq5} and \eq{mc9eq7} imply that the fibre product in \eq{mc6eq9} is transverse, as we had to prove. So the fibre products in \eq{mc6eq9} exist in ${\mathop{\bf Man^c}}$ by Theorem \ref{mc6thm1}, and the natural map from the left-hand side of \eq{mc6eq9} to the right-hand side is smooth. We have already shown it is a bijection, and the fact that \eq{mc9eq4} is an isomorphism implies that this natural map induces isomorphisms on tangent spaces. Therefore \eq{mc6eq9} is a diffeomorphism. This completes the proof of Theorem~\ref{mc6thm2}.
\noindent{\small\sc The Mathematical Institute, 24-29 St. Giles, Oxford, OX1 3LB, U.K.}
\noindent{\small\sc E-mail: \tt joyce@maths.ox.ac.uk}
\end{document} | arXiv | {
"id": "0910.3518.tex",
"language_detection_score": 0.6218118071556091,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[It\^o's diffusion in multidimensional scattering \ldots] {It\^o's diffusion in multidimensional scattering with sign-indefinite potentials.}
\author[Sergey A. Denisov]{Sergey A. Denisov}
\address{Mathematics Department, University of Wisconsin--Madison, 480 Lincoln Dr., Madison, WI 53706 USA} \email{denissov@math.wisc.edu}
\keywords{Absolutely continuous spectrum, Schr\"odinger operator, It\^o stochastic calculus, Feynman-Kac type formulae.}
\subjclass{Primary: 35P25, Secondary: 31C15, 60J45.}
\begin{abstract} This paper extends some results of \cite{dk} to the case of sign-indefinite potentials by applying methods developed in \cite{d1}. This enables us to prove the presence of the a.c. spectrum for a generic coupling constant. \end{abstract}
\maketitle
\section*{Introduction}\label{s0}
In this paper, we consider the Schr\"odinger operator \begin{equation} H_\lambda=-\Delta+\lambda V, \quad x\in \mathbb{R}^3 \label{s1} \end{equation} where $V$ is a real-valued potential and $\lambda$ is a real parameter usually called a coupling constant. We will study the dependence of the absolutely continuous spectrum of $H$ on the behavior of the potential $V$ by blending the methods of the two papers \cite{dk} and \cite{d1}. This question has attracted much attention recently, which has resulted in many publications (see, e.g. \cite{dk}-\cite{d4}, \cite{s1}-\cite{s2}, \cite{s3}-\cite{s4}), due to new and very fruitful ideas from approximation theory finding their way into multidimensional scattering problems.
In \cite{dk}, we introduced a stochastic differential equation that has random trajectories as its solutions. These trajectories are natural for describing the scattering properties of (\ref{s1}) provided that $\lambda V\geq 0$. The reason why this requirement was made is rooted in the method itself. Indeed, as $\lambda V\geq 0$, we have $\sigma(H_\lambda)\subseteq [0,\infty)$ for the spectrum of $H_\lambda$. Moreover, the Green's function $L(x,y,k)=(-\Delta+\lambda V-k^2)^{-1}(x,y,k)$ can be analytically continued in $k$ to the whole upper half-plane $\mathbb{C}^+$, and methods of complex function theory can then be used. If $V$ is sign-indefinite, negative spectrum might occur, which is hard to control, and this approach breaks down. The paper \cite{d1} (see also \cite{s4}), however, develops a new technique which allows us to overcome this difficulty by complexifying the coupling constant and considering the hyperbolic pencil \[ P_\lambda(k)=-\Delta+k\lambda V-k^2 \] instead. Then, provided that the spatial asymptotics for the new Green's kernel $P^{-1}_\lambda(k)(x,y,k)$ are established, we can conclude that the a.c. spectrum of $H_\lambda$ contains $[0,\infty)$ for a.e. $\lambda$. In the next section, we state the main result of \cite{dk} and explain how it can be generalized to the sign-indefinite case.
We are going to use the following notation. Let $\omega_R(r)$ be an infinitely smooth function on $\mathbb{R}^+$ such that $\omega_R(r)=1$ for $r<R-1$, $\omega_R(r)=0$ for $r>R+1$, and $0\leq \omega_R(r)\leq 1$. The function \[
L^0(x,y,k)=\frac{e^{ik|x-y|}}{4\pi|x-y|} \] denotes the Green's function of the free 3d Schr\"odinger operator, i.e. the kernel of $R_0(k^2)=(-\Delta-k^2)^{-1}$ when $\Im k>0$. The standard symbol $B_t$ stands for the $3$-dimensional Brownian motion and $\mathbb{S}^2$ denotes the two-dimensional unit sphere.
We consider the three-dimensional case only as it makes the writing easy. The method however can be applied for any $d>1$. We will often suppress the dependence on $\lambda$ unless we want to emphasize it. \section{Main result}
We start with stating some results from \cite{dk}. Consider the Lipschitz vector field \[
p(x)=\left(\frac{I'_\nu(|x|)}{I_\nu(|x|)}-\nu |x|^{-1}\right)\cdot
\frac{x}{|x|}, \quad \nu=1/2 \] where $I_\nu$ denotes the modified Bessel function \cite[Sect. 9.6]{as}. Then, fix any point $x^0\in\mathbb{R}^3$ and consider the following stochastic process \begin{equation} dX_t=p(X_t)dt+dB_t, \quad X_0=x^0 \label{stochastic} \end{equation} with the drift given by $p$. The solution to this diffusion process exists and all trajectories are continuous and escape to infinity almost surely. One of the main results in \cite{dk} states (assume here that $\lambda=1$)
\begin{theorem}[\cite{dk}]\label{th2} Let $V$ be any continuous nonnegative function. Assume that $f\in L^2(\mathbb{R}^3)$ is nonnegative and has a compact support. Let $\sigma_f$ be the spectral measure of $f$ with respect to $H_V$ and $\sigma'_f$ be the density of its a.c. part. Then we have \begin{equation} \exp\left[\frac{1}{2\pi}\int_{\mathbb{R}} \frac{\log \sigma'_f(k^2)}{1+k^2}dk\right]\ge C_f\int f(x^0)\mathbb{E}_{x^0} \left[ \exp\left(-\frac 12\int\limits_0^\infty V(X_\tau)
d\tau\right)\right]dx^0 \label{th21} \end{equation} where the constant $C_f>0$ does not depend on $V$. \end{theorem}
The natural corollary of this theorem is a statement that the a.c. spectrum of $H$ fills all of $\mathbb{R}^+$ provided that the potential $V$ is summable along the trajectory $X_t$ with positive probability (which is the same as saying that there are ``sufficiently many" paths over which the potential is summable).
We are going to prove the following
\begin{theorem}Assume that $V$ is bounded and continuous on $\mathbb{R}^3$ and \begin{equation}
\int_0^\infty |V(X_t)|dt<\infty\label{rest} \end{equation} with positive probability. Then $\mathbb{R}^+$ supports the a.c. spectrum of $H_\lambda$ for a.e. $\lambda$.\label{tnew} \end{theorem} {\bf Remark.} The conditions of continuity and boundedness of potential are assumed for simplicity only and can probably be relaxed. The condition of $\lambda$ being generic is perhaps also redundant but this method does not yield any result for a particular value of $\lambda\neq 0$. Under the conditions of the theorem, the a.c. spectrum can be larger than the positive half-line as can be easily seen upon taking $V=-1$ on half-space and $V=0$ on the complement.
We need to start with some preliminary results. They will be mostly concerned with the study of the kernel of the operator $P_R^{-1}(k)=(-\Delta+k\lambda V_R+k^2)^{-1}$ where
$V_R(x)=V(x)\cdot \omega_R(|x|)$. The existence of $P^{-1}(k)$ for $\Im k>0$ as a bounded operator from $L^2(\mathbb{R}^3)$ to $L^2(\mathbb{R}^3)$ was proved in \cite{d1}. Denote the kernel of $P_R^{-1}(x,y,k)$ by $K_R(x,y,k)$ and compare it to the free Green's kernel $L^0(x,y,k)$ in the following way. For $k=i$, we introduce the amplitude \[ b_R(\theta)=\lim_{r\to\infty} \frac{K_R(0,r\theta,i)}{L^0(0,r,i)} \]
where $\theta\in \mathbb{S}^2$. If $L_R(x,y,k)$ is the Green's function for $(-\Delta+|V_R|-k^2)^{-1}$, then similarly \[ a_R(\theta)=\lim_{r\to\infty} \frac{L_R(0,r\theta,i)}{L^0(0,r,i)},\quad a(\theta)=\lim_{R\to\infty} a_R(\theta) \] In \cite{dk}, the following formulas were proved \begin{equation} \int\limits_{\mathbb{S}^2} a(\theta)d\theta=C_1\mathbb{E}_{X_0=0} \left[ \exp\left(-\frac12 \int\limits_0^\infty
|V(X_\tau)|d\tau\right)\right] \label{feynman-kac} \end{equation}
For $\theta\in\mathbb{S}^2$, let \begin{equation} dG_t=\theta dt+dB_t \label{potok} \end{equation}
Then \begin{equation} a(\theta)=C_2\mathbb{E}_{G_0=0} \left[ \exp\left(-\frac12
\int\limits_0^\infty |V(G_\tau)|d\tau\right)\right]\label{fk} \end{equation}
The condition (\ref{rest}) yields \[ \int\limits_{\mathbb{S}^2} a(\theta)d\theta>0 \] and thus $a(\theta)>0$ for $\theta\in \Omega\subseteq \mathbb{S}^2$
and $|\Omega|>0$. In particular, that means
\begin{equation} \mathbb{E}_{G_0=0} \left[\exp\left(-\frac 12\int_0^\infty
|V(G_t)|dt\right)\right]>0 \label{cond1} \end{equation} for any $\theta\in \Omega$. The bound (\ref{cond1}) is exactly what we are going to use in this paper.
The first step is to prove an analog of the formula (\ref{fk}) for the function $b_R(\theta)$. The lemma below holds for any $\lambda$ so we take $\lambda=1$ for the shorthand.
\begin{lemma} If $G_t$ is defined by (\ref{potok}), then \begin{equation} b_R(\theta)=C_2\mathbb{E}_{G_0=0} \left[ \exp\left(-\frac{i}{2} \int\limits_0^\infty V_R(G_\tau)d\tau\right)\right]\label{fk1} \end{equation} for any $R>0$. \end{lemma} \begin{proof} We need the following \begin{proposition}Let $\Sigma_r$ and $B_r$ denote the sphere and closed ball of radius $r$ both centered at the origin. If $V$ is continuous and real-valued in $B_r$ and $F(x)\in C(B_r)$, then $\phi(x)$, the solution to \[ \frac 12 \Delta \phi+\phi_{x_1}-\frac{i}{2}V\phi=-F, \quad \phi
|_{\Sigma_r}=0 \] admits the following representation \begin{equation} \phi(x)=\mathbb{E}_{G_0=x} \left[\int_0^T \exp\left(-\frac{i}{2}\int_0^t V(G_\tau)d\tau\right)F(G_t)dt\right] \label{foc} \end{equation} where $G_t=x+t(1,0,0)+B_t$, so that $G_0=x$, and $T$ is the exit time. \end{proposition} Notice that the solution $\phi$ always exists as the boundary problem can be reduced to inverting the operator $-\Delta+1+iV$ with the Dirichlet boundary condition. This invertibility is a simple corollary of the spectral theory for hyperbolic pencils and it was proved in \cite{d1} in the context of the operators on the whole space. The existence of the expectation on the right-hand side of (\ref{foc}) is guaranteed by the fact that $V$ is real-valued, that all trajectories $\{G_t\}$ are continuous almost surely, and that the exit time distribution has a small tail.
\begin{proof}{\it (proposition 1.1)}
This proof is quite standard for negative potentials (see \cite{af}, p.145 and \cite{oks}, lemma 7.3.2) but we present it here for the reader's convenience. Take any $x$ with $|x|<r$ and consider \[ \Xi_t=\phi(G_t)\exp\left(-\frac{i}{2}\int_0^t V(G_\tau)d\tau\right), \quad G_0=x, \quad t<T \] By It\^o's calculus, \[ d\Xi_t=\exp\left(-\frac{i}{2}\int_0^t V(G_\tau)d\tau\right)d\phi(G_t)-\frac{i}{2} \phi(G_t)V(G_t)\exp\left(-\frac{i}{2}\int_0^t V(G_\tau)d\tau\right)dt \] as \[ d\phi(G_t)=\left(\phi_{x_1}(G_t)+\frac{1}{2}\Delta \phi(G_t)\right)dt+\nabla\phi(G_t)\cdot dB_t \] and \[ d\exp\left(-\frac{i}{2}\int_0^t V(G_\tau)d\tau\right)=-\frac{i}{2}V(G_t)\exp\left(-\frac{i}{2}\int_0^t V(G_\tau)d\tau\right)dt
\]
Since
$\Xi_0=\phi(x)$, we have \[ \mathbb{E}_x(\Xi_T)=\phi(x)+\mathbb{E}_{x}\left[ \int_0^T -F(G_t)\exp\left(-\frac{i}{2}\int_0^t V(G_\tau)d\tau\right)dt \right] \] \[ +\mathbb{E}_x \left[\int_0^T \exp\left(-\frac{i}{2}\int_0^t V(G_\tau)d\tau\right)\nabla\phi(G_t)\cdot dB_t\right] \] and the last term is equal to zero. As the left-hand side is equal to zero as well, due to the Dirichlet boundary conditions imposed, we have the statement of the proposition. \end{proof} Now the proof of the lemma repeats the proof of the formula (2.8) (theorem 2.1, \cite{dk}) word for word. \end{proof}
Assume that $\theta\in \Omega$ so (\ref{cond1}) holds. Consider truncations $V^{(\rho)}(x)=V(x)\cdot (1-\omega_\rho(|x|))$. \begin{lemma}\label{lg} If $\theta\in \Omega$, then \begin{equation} \mathbb{E}_{G_0=0}\left[\exp\left(-\frac 12\int_0^\infty
|V^{(\rho)}(G_t)|dt\right)\right]\to 1 \end{equation} as $\rho\to\infty$. \end{lemma} \begin{proof}
Take any $0<R_1<R_2$ and introduce $t_1$, the random time of hitting the sphere $|x|=R_1$ for the first time. Then, denoting by $\tilde{G}(t)$ the solution to (\ref{potok}) with initial condition $G_{t_1}$, we have elementary inequality \begin{equation} \mathbb{E}_{G_0=0}\left[\exp\left(-\frac 12\int_0^{t_1}
|V_{R_1/2}(G_t)|dt\right)\mathbb{E}_{G_{t_1}}\left[\exp\left(-\frac 12\int_{0}^\infty
|V^{(R_2)}(\tilde{G}_t)|dt\right)\right]\right]\label{odin} \end{equation} \[ \geq\mathbb{E}_{G_0=0}\left[\exp\left(-\frac 12\int_0^\infty
|V(G_t)|dt\right)\right] \] The trajectory $G_t$ is a linear drift plus a 3d Brownian motion oscillation; thus, for fixed $R_1$ we have the decoupling \begin{equation} \lim_{R_2\to\infty}\left(\mathbb{E}_{G_0=0}\left[\exp\left(-\frac 12\int_0^{t_1}
|V_{R_1/2}(G_t)|dt\right)\mathbb{E}_{G_{t_1}}\left[\exp\left(-\frac 12\int_{0}^\infty
|V^{(R_2)}(\tilde{G}_t)|dt\right)\right]\right]\right) \label{dva} \end{equation} \[ = \mathbb{E}_{G_0=0}\left[\exp\left(-\frac 12\int_0^{t_1}
|V_{R_1/2}(G_t)|dt\right)\right]\cdot \gamma \] with \[ \gamma=\lim_{R_2\to\infty} \left(\mathbb{E}_{G_0=0}\left[ \exp\left(-\frac 12\int_0^\infty
|V^{(R_2)}(G_t)|dt\right)\right]\right) \]
On the other hand, \[ \mathbb{E}_{G_0=0}\left[\exp\left(-\frac 12\int_0^{t_1}
|V_{R_1/2}(G_t)|dt\right)\right]\to\mathbb{E}_{G_0=0}\left[\exp\left(-\frac 12\int_0^\infty |V(G_t)|dt\right)\right], \quad R_1\to\infty \] which along with (\ref{odin}) and (\ref{dva}) implies $\gamma=1$. \end{proof}
{\bf Remark.} Let $a^{(\rho)}$ denote an amplitude for the potential $V^{(\rho)}$. The lemma then says that $a^{(\rho)}(\theta) \to 1$ as $\rho\to\infty$ for any $\theta\in \Omega$. Notice also that the lemma is wrong in general if the trajectory $G_t$ is replaced by $X_t$ as can be easily seen by letting $V=1$ on the half-space and $V=0$ on the complement.
Now we are ready for the proof of theorem \ref{tnew}. \begin{proof}{\it (theorem \ref{tnew})}
Notice first that the standard trace-class perturbation argument \cite{rs} implies that $\sigma_{ac}(-\Delta+V_\lambda)=\sigma_{ac}(-\Delta +V_\lambda^{(\rho)})$ for any $\rho$. Fix some large $c>0$ and take $\lambda\in [-c,c]$. Then, by lemma \ref{lg}, we can make $\rho$ large enough so that for any $\theta\in \Omega_1\subseteq \Omega$, we have \[ \mathbb{E}\left[\exp\left(-\frac{c}{2}\int_0^\infty
|V^{(\rho)}(G_t)|dt\right)\right]>0.99 \] Then, due to (\ref{fk1}), \begin{equation}
|b_R^{(\rho)}(\theta)|>1/2\label{oc1} \end{equation}
for any $\theta\in \Omega_1$ and any $R>\rho$. Now, we need to recall several results from \cite{d1} and repeat a couple of arguments from this paper. Let $f=\chi_{|x|<1}$. Denote the spectral measure of $f$ with respect to $-\Delta+\lambda V^{(\rho)}_R$ by $\sigma_{\rho,R}(E,\lambda)$. For $k\in \mathbb{C}^+$, consider \[ J_{\rho,R}(k,\theta,\lambda)=\lim_{r\to\infty} \frac{\left((-\Delta+\lambda kV^{(\rho)}_R+k^2)^{-1}f\right)(r\theta)}{r^{-1}e^{ikr}} \] Then, the formula (38) from \cite{d1} says \begin{equation}
\sigma'_{\rho,R}(k^2,k\lambda)=k\pi^{-1}\|J_{\rho,R}(k,\theta,\lambda)\|_{L^2(\mathbb{S}^2)}^2 \label{fz} \end{equation} where $k\neq 0$ is real and on the right-hand side the limiting value of $J_{\rho,R}$ as $\Im k\to +0$ is taken. This function $J_{\rho,R}(k,\theta,\lambda)$ is continuous on $\overline{\mathbb{C}^+}\backslash \{0\}$ as seen from the limiting absorption principle (\cite{rs4}, chapter 13, section 8 or \cite{d1}). Around zero, we have an estimate \begin{equation}
|J_{\rho,R}(k,\theta,\lambda)|<C(\rho,R)|k|^{-1}\label{estg} \end{equation} that can be deduced from the representation \[ P^{-1}(k)f=R_0(k^2)f-k\lambda R_0(k^2)V_R^{(\rho)}P^{-1}(k)f\quad k\in \mathbb{C}^+ \] and an estimate \[
\|P^{-1}(k)\|\leq (\Im k)^{-2} \] (see (37), \cite{d1}).
For large $|k|$, we have the following uniform estimate \begin{equation}
\int_{\mathbb{S}^2} |J_{\rho,R}(k,\theta,\lambda)|^2 d\theta <C
\frac{1+|k|\Im k}{[\Im k]^4} \|f(x)\|_2 \|f(x)e^{2\Im k|x|}\|_2\label{estg1} \end{equation} (take $r\to\infty$ in (48), \cite{d1}).
Now, consider the function \[ g(k)=\ln
\|ke^{2ik}J_{\rho,R}(k,\theta,\lambda)\|_{L^2(\mathbb{S}^2)} \] This function is subharmonic in $\mathbb{C}^+$ and the estimates (\ref{estg}) and (\ref{estg1}) enable us to apply the mean-value inequality with the reference point $k=i$.
This, along with the identity (\ref{fz}), gives \[ \int_\mathbb{R} \frac{\ln \sigma'_{\rho,R}(k^2,k\lambda)}{k^2+1}dk> C_1+C_2\ln \int_{\mathbb{S}^2}
|J_{\rho,R}(i,\theta,\lambda)|^2d\theta, \quad C_2>0 \] Now, notice that the choice of $f$ guarantees that \[
|J_{\rho,R}(i,\theta,\lambda)|\sim
|b_{R}^{(\rho)}(i,\theta,\lambda)| \] and (\ref{oc1}) implies that \[ C_1 +C_2\ln \int_{\mathbb{S}^2}
|J_{\rho,R}(i,\theta,\lambda)|^2d\theta>C_3 \] uniformly in $R$ and $\lambda\in [-c,c]$. Thus, we have an estimate \[ \int_\mathbb{R} \frac{\ln \sigma'_{\rho,R}(k^2,k\lambda)}{k^2+1}dk>-C \] uniformly in $R>\rho$ and $\lambda\in [-c,c]$ with any fixed $c$. Now, taking any interval $(a,b)\subset (0,\infty)$, we have \[ \int_a^b dE \int _{-c}^c \ln \sigma_{\rho,R}'(E,\lambda)d\lambda>-C \] uniformly in $R$, so taking $R\to\infty$ and using the lower semicontinuity of the entropy (see \cite{ks} and \cite{d1}, p.21 and Lemma 3.4), we get \[ \int_a^b dE \int _{-c}^c \ln \sigma_{\rho}'(E,\lambda)d\lambda>-C \] The Fubini-Tonelli theorem now gives \[ \int_a^b \ln \sigma_{\rho}'(E,\lambda)dE>-\infty \] for a.e. $\lambda$, so $[a,b]\subseteq \sigma_{ac}(H^{(\rho)}_\lambda)$ for a.e. $\lambda$. As was mentioned already, the a.c. spectrum is stable under changing the potential on any compact set. Thus, $[a,b]\subseteq \sigma_{ac}(H_\lambda)$ for a.e. $\lambda$. Since $[a,b]$ was taken arbitrarily, we have the statement of the theorem. \end{proof}
{\bf Remark.} In the paper \cite{dk}, we studied the case when the potential $V\geq 0$ and is supported on the set $E$ (a good example to think about is a countable collection of balls) of any geometric structure. The special modified capacity and the harmonic measure were introduced and studied which allowed the effective estimation of probabilities in the natural geometric terms. The same results, e.g. the estimates in terms of anisotropic Hausdorff content and the size of the spherical projection, are true in the current setting when the potential is not assumed to be positive. The statements however are true only generically in $\lambda$.
\noindent {\bf Acknowledgement.} \rm We acknowledge the support of an Alfred P. Sloan Research Fellowship and the NSF grant DMS-0758239. Thanks go to Stas Kupin and the University of Provence, where part of this work was done.
\end{document} | arXiv | {
"id": "1106.2155.tex",
"language_detection_score": 0.688413679599762,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{A splitting approach\\ for the magnetic Schr\"odinger equation} \author{M.~Caliari\corref{cor1}} \ead{marco.caliari@univr.it} \address{Dipartimento di Informatica, Universit\`a di Verona, Italy} \author{A.~Ostermann\corref{}} \ead{alexander.ostermann@uibk.ac.at} \author{C.~Piazzola\corref{}} \ead{chiara.piazzola@uibk.ac.at} \address{Institut f\"ur Mathematik, Universit\"at Innsbruck, Austria} \cortext[cor1]{Corresponding author}
\begin{abstract} The Schr\"odinger equation in the presence of an external electromagnetic field is an important problem in computational quantum mechanics. It also provides a nice example of a differential equation whose flow can be split with benefit into three parts. After presenting a splitting approach for three operators with two of them being unbounded, we exemplarily prove first-order convergence of Lie splitting in this framework. The result is then applied to the magnetic Schr\"odinger equation, which is split into its potential, kinetic and advective parts. The latter requires special treatment in order not to lose the conservation properties of the scheme. We discuss several options. Numerical examples in one, two and three space dimensions show that the method of characteristics coupled with a nonequispaced fast Fourier transform (NFFT) provides a fast and reliable technique for achieving mass conservation at the discrete level. \end{abstract}
\begin{keyword} magnetic Schr\"odinger equation \sep exponential splitting methods \sep convergence \sep Fourier techniques \sep nonequispaced fast Fourier transform \end{keyword} \maketitle
\section{Introduction}
In quantum mechanics a lot of phenomena occur under the influence of an external electromagnetic field. Typical examples include the Zeeman effect, Landau levels and superconductivity. So, quite a few problems in computational solid state physics and quantum chemistry require the solution of the Schr\"{o}dinger equation in the presence of an electromagnetic field \begin{equation}\label{original} \begin{aligned} \mathrm{i}\varepsilon \partial_t u &= \frac{1}{2} (\mathrm{i}\varepsilon\nabla+A)^2u+Vu,\quad t\ge 0, \ x \in \mathbb{R}^d, \\ u(0,x)&= u_0(x). \end{aligned} \end{equation} Here, the unknown $u=u(t,x) \in \mathbb{C}$ is the quantum mechanical wave function, $V(t,x) \in \mathbb{R}$ is the scalar potential and $A(t,x) = (A_1(t,x),\dots, A_d(t,x))^{\sf T} \in \mathbb{R}^d$ is the vector potential. In addition $\varepsilon \in (0,1]$ denotes the small semi-classical parameter which is the scaled Planck constant. The equation is considered subject to vanishing boundary conditions, i.e., $\lim_{\lvert x \rvert \to \infty}u(t,x)=0$. We recall that mass is a conserved quantity of this equation.
Exponential splitting schemes constitute a well-established class of methods for the numerical solution of Schr\"odinger equations (see, e.g., \cite{BJM02, F12, JL00, JMS11, L08}). In this approach, the kinetic part is solved in Fourier space, which gives spectral accuracy in space, whereas the multiplicative potential is integrated pointwise in physical space. The transformation between Fourier and physical space is carried out using the fast Fourier transform, which results in an overall fast algorithm. In our situation, however, when the vector potential depends on the position, we get an additional advection term, which cannot be handled efficiently with Fourier techniques.
Thus, the structure of problem \eqref{original} suggests to split the equation into three subproblems: a potential step which collects the scalar terms of the potentials (which are pointwise multiplications), a kinetic step which involves the Laplacian, and an advection step which results from the vector potential. For carrying out a time step, each of these steps is solved separately and their solutions are recombined to define the numerical approximation. This is the underlying idea of exponential splitting schemes (see \cite{S68, HLW00, MQ02}). In this paper we analyse a first-order method, the so-called Lie splitting. Note, however, that higher-order methods can be analysed in exactly the same way, if the underlying problem has enough spatial smoothness, see~\cite{HO09,JL00}.
Splitting the magnetic Schr\"odinger equation for the purpose of its numerical solution into three subproblems is not a new idea. In their recent paper \cite{JZ13}, Jin and Zhou proposed such a scheme. For the solution of the advection step, they considered a semi-Lagrangian approach. Such an approach has been used in many other fields as well (see, e.g., \cite{SRBG99, EO14, EO15}).
Our present paper differs from~\cite{JZ13} mainly in the following aspects: we give a framework for carrying out an abstract convergence proof for exponential splitting methods applied to \eqref{original}, and we give a detailed error analysis for the Lie splitting scheme by identifying the required smoothness assumptions on the data. Moreover, we address conservation properties of the scheme and identify an alternative to Lagrange interpolation, as the latter does not conserve mass.
The outline of this paper is as follows. We start in section \ref{Convergence} with an abstract convergence result for splitting into three subproblems. Guided by the properties of the magnetic Schr\"odinger equation, we present an analytic framework that allows us to prove convergence for exponential splitting schemes. We exemplify this by proving that Lie splitting applied to \eqref{original} has order of convergence one, as expected.
In section \ref{description} we apply a gauge transformation to the magnetic Schr\"odinger equation to obtain the equivalent formulation \eqref{main} with a divergence-free vector potential. This formulation is used in \eqref{eq:steps} to define the employed splitting. In the following section we show how to compute the solution of the kinetic step in spectral space and that of the potential step in physical space. For the advection step we use the method of characteristics. However, since the characteristic curves do not cut the previous time horizon at grid points, in general, special care has to be taken. We compare three different possibilities, namely discrete Fourier series evaluation, local polynomial interpolation and Fourier series evaluation by a nonequispaced fast Fourier transform (NFFT), see~\cite{KKP09}. The latter allows us to evaluate a Fourier series at an arbitrary set of points in a fast way. To our knowledge, this transform was not yet applied in the present context.
In section \ref{Numerical} we present some numerical results. Our main goal is the comparison of the different approximations used in the advection step. In particular, we study how well the considered numerical algorithms preserve mass, and how they compare in terms of computational efficiency.
\section{Splitting into three operators}\label{Convergence}
For the numerical solution of \eqref{original}, we propose a splitting approach. Motivated by the particular form of the vector field, which is the sum of a kinetic, a potential and an advective part, we consider a splitting into three terms. For this purpose, we formulate \eqref{original} as an abstract initial value problem \begin{equation}\label{prb} \begin{aligned} \partial_t u &= (\mathcal{A}+\mathcal{B}+\mathcal{C})u, \quad 0\le t \le T,\\ u(0)&=u_0 \end{aligned} \end{equation}
in a Banach space $X$ with norm $\| \cdot \|$. Below, we will state an analytic framework for these operators $\mathcal{A}$, $\mathcal{B}$ and $\mathcal{C}$ that, on the one hand, is sufficiently general to include the magnetic Schr\"odinger equation as an example and, on the other hand, allows us to carry out an abstract convergence proof for (exponential) splitting methods. We will illustrate our approach by analysing in detail the Lie splitting scheme\footnote{Throughout the paper $\mathrm{e}^{\tau\mathcal{L}}u_0$ will denote the exact solution at time $\tau$ of the abstract (linear) differential equation $\partial_t u = \mathcal{L}u$ with initial value $u(0)=u_0$.} \begin{equation}\label{eq:lie} u_{n+1}=\mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}} u_n, \end{equation} where $\tau$ denotes the step size and $u_n$ is the numerical approximation to the true solution $u(t)=\mathrm{e}^{t(\mathcal{A}+\mathcal{B}+\mathcal{C})} u(0)$ at time $t=t_n=n\tau$. We will show below that the Lie splitting scheme is first-order convergent. Let us stress, however, that exactly the same ideas can be used to analyse exponential splitting methods of higher order.
In a first step, we will study the local error $\|\mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}} u(t) - u(t+\tau)\|$ of Lie splitting along the exact solution. For this purpose, we employ the following assumption.
\begin{hp} \label{hp} Let $\mathcal{B}$ be a bounded operator, and let $\mathcal{A}$, $\mathcal{C}$, and $\mathcal{A}+\mathcal{C}$ generate strongly continuous semigroups $\mathrm{e}^{t\mathcal{A}}$, $\mathrm{e}^{t\mathcal{C}}$, and $\mathrm{e}^{t(\mathcal{A}+\mathcal{C})}$ on $X$. We assume that the following bounds hold for $0\le t\le T$ along the exact solution \begin{subequations} \begin{align} \Vert [\mathcal{A},\mathcal{C}]\mathrm{e}^{s\mathcal{A}} u(t) \Vert &\leq c_1,\label{hp1}\\ \Vert \mathcal{C} \mathrm{e}^{s\mathcal{A}} \mathcal{B} u(t) \Vert &\leq c_2,\label{hp5}\\ \Vert \mathcal{C}^2 \mathrm{e}^{s\mathcal{A}} u(t) \Vert &\leq c_3,\label{hp2}\\ \Vert \mathcal{C}\mathrm{e}^{\sigma\mathcal{A}}\mathcal{C}\mathrm{e}^{s(\mathcal{A}+\mathcal{C})} u(t) \Vert &\leq c_4,\label{hp3}\\ \Vert [\mathcal{A}+\mathcal{C},\mathcal{B}] \mathrm{e}^{s(\mathcal{A}+\mathcal{C})} u(t)\Vert &\leq c_5\label{hp4} \end{align} \end{subequations} with some constants $c_1$, $c_2$, $c_3$, $c_4$, and $c_5$ that do not depend on $0\le \sigma,s\le T$. \end{hp}
Next, we recall the definition of the $\varphi_k$ functions, which play some role in our analysis. For complex $z$ and integer $k\ge 1$, we set \begin{equation}\label{eq:phi} \varphi_k(z) = \int_{0}^{1}\mathrm{e}^{(1-\theta)z}\frac{\theta^{k-1}}{(k-1)!}\,\mathrm{d}\theta. \end{equation} These functions are uniformly bounded in the complex half-plane $\text{Re\,} z \le 0$ and analytic in $\mathbb{C}$. Let $\mathcal{E}$ be the generator of a strongly continuous semigroup. Then, for all $k \ge 1$, the following identity holds in the domain of $\mathcal{E}^k$ \begin{equation}\label{eq:taylor-phi} \mathrm{e}^{\tau \mathcal{E}} = \sum_{j=0}^{k-1} \frac{\tau^j}{j!}\mathcal{E}^j+\tau^k\mathcal{E}^k\varphi_k(\tau \mathcal{E}). \end{equation} We are now in the position to state the local error bound. \begin{thm}[Local error bound] Under Assumption \ref{hp}, the following bound for the local error holds \begin{equation} \label{err} \Vert \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}} u(t)-u(t+\tau) \Vert \leq C \tau^2, \qquad t \in [0,T-\tau] \end{equation} with a constant $C$ that does not depend on $t$ and $\tau$. \end{thm}
\begin{proof} Our proof uses ideas developed in \cite{JL00}. Since $\mathcal{B}$ is bounded, the numerical solution can be expanded in the following way \begin{equation} \label{LocErr} \begin{aligned} \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}} u(t) & = \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}}\left( I + \tau \mathcal{B}+ \mathcal{O}(\tau^2)\right)u(t)\\ & = \underbrace{\mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}}u(t)}_{P_1}+\underbrace{\tau \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathcal{B} u(t)}_{Q_1} + \mathcal{O}(\tau^2). \end{aligned} \end{equation} The exact solution, on the other hand, is expanded with the help of the variation-of-constants formula. Applying this formula twice yields the representation \begin{equation} \label{exact} \begin{aligned} \mathrm{e}^{\tau (\mathcal{A}+\mathcal{B}+\mathcal{C})}u(t) &= \mathrm{e}^{\tau(\mathcal{A}+\mathcal{C})}u(t)+\int_0^{\tau}\mathrm{e}^{s(\mathcal{A}+\mathcal{C})}\mathcal{B}\mathrm{e}^{(\tau-s) (\mathcal{A}+\mathcal{B}+\mathcal{C})}u(t)\mathrm{d} s\\ & = \underbrace{\mathrm{e}^{\tau(\mathcal{A}+\mathcal{C})}u(t)}_{P_2} +\underbrace{\int_0^{\tau}\mathrm{e}^{s(\mathcal{A}+\mathcal{C})}\mathcal{B}\mathrm{e}^{(\tau-s) (\mathcal{A}+\mathcal{C})}u(t)\mathrm{d} s}_{Q_2}+ \mathcal{O}(\tau^2). \end{aligned} \end{equation} Collecting all the terms, we can rewrite the local error as \begin{equation}\label{eq:le} \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}} u(t)- u(t+\tau) = P+Q+ \mathcal{O}(\tau^2), \end{equation} where $P = P_1-P_2$ and $Q = Q_1-Q_2$.
For expanding $P_1$ we employ the $\varphi_2$ function (see \eqref{eq:taylor-phi}) to get \[ \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}}u(t) = \mathrm{e}^{\tau \mathcal{A}}u(t)+ \tau \mathcal{C} \mathrm{e}^{\tau \mathcal{A}}u(t) +\tau^2 \mathcal{C}^2 \varphi_2(\tau\mathcal{C})\mathrm{e}^{\tau \mathcal{A}}u(t). \] Using the variation-of-constants formula twice, we can rewrite $P_2$ as \[ \begin{aligned} \mathrm{e}^{\tau(\mathcal{A}+\mathcal{C})}u(t) &= \mathrm{e}^{\tau\mathcal{A}}u(t)+\int_0^{\tau}\mathrm{e}^{s\mathcal{A}}\mathcal{C}\mathrm{e}^{(\tau-s) \mathcal{A}}u(t)\,\mathrm{d} s\\ &\quad +\int_0^{\tau}\mathrm{e}^{s\mathcal{A}}\mathcal{C}\int_0^{\tau-s}\mathrm{e}^{\sigma\mathcal{A}}\mathcal{C}\mathrm{e}^{(\tau-s-\sigma) (\mathcal{A}+\mathcal{C})}u(t) \,\mathrm{d} \sigma \mathrm{d} s. \end{aligned} \] Thus, to bound $P$, we need first to estimate \begin{equation} \label{1P} \tau \mathcal{C} \mathrm{e}^{\tau \mathcal{A}}u(t)-\int_0^{\tau}\mathrm{e}^{s\mathcal{A}}\mathcal{C}\mathrm{e}^{(\tau-s)\mathcal{A}}u(t)\mathrm{d} s, \end{equation} and then to bound the remaining terms. Let $f(s)=\mathrm{e}^{s\mathcal{A}}\mathcal{C}\mathrm{e}^{(\tau-s) \mathcal{A}}u(t)$. Then, the expression \eqref{1P} becomes \[ \tau f(0)-\int_0^{\tau} f(s) \mathrm{d} s= \tau f(0)- \int_0^{\tau} \left(f(0) + \int_0^s f'(\sigma) \mathrm{d} \sigma \right) \mathrm{d} s = -\int_0^{\tau}\!\!\! \int_0^s f'(\sigma) \mathrm{d} \sigma \mathrm{d} s, \] and can be bounded with assumption \eqref{hp1} \begin{equation} \label{T1} \bigg\Vert \int_0^{\tau}\!\!\! \int_0^s \mathrm{e}^{\sigma\mathcal{A}}[\mathcal{A},\mathcal{C}]\mathrm{e}^{(\tau-\sigma)\mathcal{A}} u(t) \mathrm{d} \sigma \mathrm{d} s \bigg\Vert \leq c\tau^2. 
\end{equation} Furthermore, by employing assumptions \eqref{hp2} and \eqref{hp3}, the remaining terms in $P$ can be estimated as \begin{equation} \label{T2} \lVert \tau^2 \mathcal{C}^2 \varphi_2(\tau\mathcal{C})\mathrm{e}^{\tau \mathcal{A}}u(t) \Vert = \Vert \tau^2 \varphi_2(\tau\mathcal{C})\mathcal{C}^2 \mathrm{e}^{\tau \mathcal{A}}u(t) \rVert \leq c\tau^2 \end{equation} and \begin{equation} \label{T3} \bigg\lVert \int_0^{\tau}\mathrm{e}^{s\mathcal{A}}\mathcal{C}\int_0^{\tau-s}\mathrm{e}^{\sigma\mathcal{A}}\mathcal{C}\mathrm{e}^{(\tau-s-\sigma) (\mathcal{A}+\mathcal{C})}u(t) \mathrm{d} \sigma \mathrm{d} s \bigg\rVert \leq c\tau^2. \end{equation} Taking all together, we have shown that $P=\mathcal{O}(\tau^2)$.
As regards $Q$, by setting $g(s)=\mathrm{e}^{s(\mathcal{A}+\mathcal{C})}\mathcal{B}\mathrm{e}^{(\tau-s) (\mathcal{A}+\mathcal{C})}u(t)$ and proceeding in the same way as for \eqref{1P} we obtain \[ \begin{aligned} Q &= \tau \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathcal{B} u(t) - \int_{0}^{\tau} g(s) \,\mathrm{d} s\\ &=\tau \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathcal{B} u(t) - \tau g(\tau) - \int_{0}^{\tau} \!\!\!\int_{\tau}^{s}g'(\sigma) \,\mathrm{d} \sigma \mathrm{d} s\\ &=\tau \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathcal{B} u(t)-\tau \mathrm{e}^{\tau(\mathcal{A}+\mathcal{C})}\mathcal{B} u(t) \\ &\quad - \int_0^{\tau}\!\!\!\int_{\tau}^{s} \mathrm{e}^{\sigma(\mathcal{A}+\mathcal{C})}[\mathcal{A}+\mathcal{C},\mathcal{B}] \mathrm{e}^{(\tau-\sigma)(\mathcal{A}+\mathcal{C})}u(t)\,\mathrm{d} \sigma \mathrm{d} s. \end{aligned} \] The double integral is bounded with the help of assumption \eqref{hp4} by $c\tau^2$. For the remaining two terms, we use that $$ \mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathcal{B} u(t) = \mathrm{e}^{\tau \mathcal{A}} \mathcal{B} u(t) + \tau \mathcal{C}\varphi_1(\tau \mathcal{C}) \mathrm{e}^{\tau \mathcal{A}} \mathcal{B} u(t) $$ and employ once more the variation-of-constants formula $$ \mathrm{e}^{\tau(\mathcal{A}+\mathcal{C})}\mathcal{B} u(t) = \mathrm{e}^{\tau \mathcal{A}} \mathcal{B} u(t) + \int_0^{\tau} \mathrm{e}^{s\mathcal{A}}\mathcal{C}\mathrm{e}^{(\tau-s)(\mathcal{A}+\mathcal{C})}\mathcal{B} u(t)\,\mathrm{d} s. $$ Assumption \eqref{hp5} shows that their difference is again bounded by $c\tau^2$. From this we conclude the assertion.\qed \end{proof}
Assumption~\ref{hp} guarantees that the semigroups generated by $\mathcal{A}$, $\mathcal{B}$, and $\mathcal{C}$ satisfy the bounds \[ \Vert \mathrm{e}^{t \mathcal{A}}\Vert \leq M_1 \mathrm{e}^{t\omega_1}, \quad \Vert \mathrm{e}^{t \mathcal{B}}\Vert \leq \mathrm{e}^{t\omega_2}, \quad \Vert \mathrm{e}^{t \mathcal{C}}\Vert \leq M_3\mathrm{e}^{t\omega_3},\qquad t\ge 0 \]
for some constants $M_1\ge 1$, $M_3\ge 1$, $\omega_1$, $\omega_2$, and $\omega_3$. Moreover, it is possible to choose an equivalent norm $\|\cdot\|_*$ on $X$ such that $\Vert \mathrm{e}^{t \mathcal{A}}\Vert_* \leq \mathrm{e}^{t\omega_{\mathcal{A}}}$. Unfortunately, this is still not enough to prove stability, in general. Therefore, we impose an additional assumption.
\begin{hp} \label{hpStab} There is a constant $\omega_{\mathcal{C}}$ such that $\Vert \mathrm{e}^{t \mathcal{C}}\Vert_* \leq \mathrm{e}^{t\omega_{\mathcal{C}}}$ for all $t\ge 0$. \end{hp}
Under this additional assumption, it is easy to show stability.
\begin{thm} [Stability] Under Assumptions \ref{hp} and \ref{hpStab}, Lie splitting is stable, i.e., there is a constant $C$ such that \begin{equation} \label{stab} \left\Vert \left(\mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}} \right)^j\right\Vert \leq C \end{equation} for all $j\in \mathbb{N}$ and $\tau\ge 0$ satisfying $0\le j\tau \le T$. \end{thm}
\begin{proof}
Our assumptions imply that $\left\|\mathrm{e}^{\tau \mathcal{C}}\mathrm{e}^{\tau \mathcal{A}}\mathrm{e}^{\tau \mathcal{B}}\right\|_* \le \mathrm{e}^{\tau(\omega_{\mathcal{A}} + \omega_{\mathcal{B}} + \omega_{\mathcal{C}})}$, where $\omega_{\mathcal{B}} = \Vert \mathcal{B}\Vert_*$, from which the assertion follows by the equivalence of the norms $\|\cdot\|_*$ and $\|\cdot\|$. \qed \end{proof}
From consistency and stability, convergence follows in a standard way.
\begin{thm}[Global error bound]\label{thm:convergence} Under Assumptions \ref{hp} and \ref{hpStab}, the Lie splitting discretization \eqref{eq:lie} of the initial value problem \eqref{prb} is convergent of order~1, i.e., there exists a constant $C$ such that $$ \Vert u_n-u(t_n) \Vert \leq C \tau, $$ for all $n\in\mathbb{N}$ and $\tau> 0$ satisfying $0\le n\tau = t_n \le T$. \end{thm}
\begin{proof} We express the global error with the help of a telescopic sum \[ \begin{aligned} u_n-u(t_n)& =\left(\left(\mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}} \right)^n -\mathrm{e}^{n\tau(\mathcal{A}+\mathcal{B}+\mathcal{C})}\right)u(0) \\ & =\sum_{j=0}^{n-1} \left(\mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}} \right)^{n-j-1} \left(\mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\tau \mathcal{A}} \mathrm{e}^{\tau \mathcal{B}}u(t_j)-u(t_{j+1})\right) \end{aligned} \] and use the estimates \eqref{err} and \eqref{stab}.\qed \end{proof}
\section{Example: the magnetic Schr\"{o}dinger equation} \label{description}
The electromagnetic field in $\mathbb{R}^3$ is the combination of the electric field $E$ and the magnetic field $B$. Both fields depend on time and space, in general. Mathematically, they can be expressed in terms of a scalar potential $V$ and a vector potential $A$ as \[ E =- \nabla V-\frac{\partial A}{\partial t}, \qquad B= \nabla \times A. \] Making use of the fact that we can impose conditions on the potentials as long as we do not affect the resulting fields, we will apply the following transformations \begin{equation} \label{transform} \begin{aligned} \tilde{u}(t,x) & = u(t,x) \,\mathrm{e}^{\mathrm{i}\lambda(t,x)}, \\ \tilde{A}(t,x) & = A(t,x)+\varepsilon\nabla\lambda(t,x), \\ \tilde{V}(t,x) & = V(t,x)-\varepsilon\partial_t \lambda(t,x). \end{aligned} \end{equation} One natural choice is to impose a so-called Coulomb gauge, i.e., to select $\lambda$ in such a way that $\nabla \cdot \tilde{A}=0$. Consequently, this gauge $\lambda$ has to satisfy the Poisson equation $\varepsilon \Delta \lambda = -\nabla\cdot A$.
Applying \eqref{transform} to the Schr\"odinger equation \eqref{original} and dropping right away the tildes, we obtain the following problem \begin{equation} \label{main} \begin{aligned} \mathrm{i}\varepsilon \partial_t u &= -\frac{\varepsilon^2}{2}\Delta u+\mathrm{i}\varepsilon A \cdot \nabla u+ \frac{1}{2}\lvert A \rvert ^2 u +Vu, \quad t\in [0,T],\\ u(0,x)&=u_0(x) \end{aligned} \end{equation} with a divergence-free vector potential $A$.
We are now in the position to give a precise formulation of the three subproblems that are used in our splitting. Henceforth, they will be called potential, kinetic and advection step, respectively: \begin{subequations}\label{eq:steps} \begin{align} \partial_t u & =\mathcal{B} u = -\frac{\mathrm{i}}{\varepsilon}\left(\frac12\lvert A \rvert^2 +V\right)u, \label{potential}\\ \partial_t u &= \mathcal{A} u = \frac{\mathrm{i}\varepsilon}{2} \Delta u, \label{kinetic}\\ \partial_t u &= \mathcal{C} u= A \cdot \nabla u,\qquad \nabla\cdot A=0. \label{convection} \end{align} \end{subequations} The kinetic step \eqref{kinetic} can be handled analytically in Fourier space, whereas the potential step \eqref{potential} is easily performed in physical space. For the advection step \eqref{convection} we will present three modifications of a semi-Lagrangian method in section \ref{sec:adv} below.
An important feature of \eqref{original} and \eqref{main} is the conservation of mass $m = \Vert u (t,\cdot)\Vert_{L^2}^2$, i.e., $\frac{\partial}{\partial t} \Vert u(t,\cdot) \Vert_{L^2}^2 = 0$. The split step solution based on \eqref{eq:steps} is mass conserving as well. Indeed, the kinetic step preserves the $L^2$ norm due to Parseval's identity. The modulus of the solution of the potential step is preserved, and we are also able to show that the advection step conserves the mass. This is seen by multiplying \eqref{convection} by $\overline u$ \[ \overline{u}\,\partial_t u-\overline{u}\, A\cdot \nabla u= 0 \] and adding this equation to its complex conjugate, which results in \[ \partial_t \lvert u \rvert^2 = A \cdot \nabla \lvert u \rvert^2. \] Integrating this last equation by parts shows \[ \partial_t\Vert u \Vert_{L^2}^2 = \int \partial_t \lvert u \rvert^2 \,\mathrm{d} x = \int A \cdot \nabla \lvert u \rvert ^2 \,\mathrm{d} x = -\int \lvert u \rvert ^2 \nabla \cdot A \,\mathrm{d} x =0, \] where the last identity follows from the Coulomb gauge.
Henceforth, we consider \eqref{main} and \eqref{eq:steps} on the hyperrectangle $\Omega=\prod_{i=1}^d[a_i,b_i)$, subject to periodic boundary conditions. In particular, the potentials $V$ and $A$ are assumed to be periodic functions on $\Omega$. Then, all that has been said in this section remains valid.
We finally remark that our splitting approach also works if $A$ is not divergence-free. In this case the potential operator is given by \[ \mathcal{B}=-\frac{\mathrm{i}}{\varepsilon}\left(\frac{1}{2}\lvert A \rvert^2+V\right)+\frac{1}{2}\nabla \cdot A, \] whereas the other two operators stay the same. However, in this case, we will lose the conservation of mass for the potential step.
\section{Space discretization, potential and kinetic step}
We discretize the hyperrectangle $\Omega=\prod_{i=1}^d [a_i,b_i)$ by a regular grid. For $1\le i \le d$, let $N_i\ge 2$ be an even integer and let \begin{equation}\label{eq:indexset} I_N=\mathbb{Z}^d\cap \prod_{i=1}^d\left[-\tfrac{N_i}{2},\tfrac{N_i}{2}\right). \end{equation} For $j=(j_1,\ldots,j_d)\in I_N$ we consider the grid points $x^j$ with components $$ x^j_i = \frac{a_i+b_i}2 +\frac{j_i}{N_i}(b_i-a_i),\qquad 1\le i\le d. $$ For performing the potential step, we solve the ordinary differential equation~\eqref{potential} at each grid point $x^j$. More precisely, starting with an initial value $v$ at time $t_n$, we solve $$
\dot w(s) = -\frac{\mathrm{i}}{\varepsilon}\left(\frac12\left| A(t_n+s,x^j) \right|^2 +V(t_n+s,x^j)\right)w(s), \quad w(0) = v(x^j) $$ to obtain $$ \left(\mathrm{e}^{\tau\cal B}v\right)(x^j) = w(\tau). $$ If the potentials $A$ and $V$ are time-independent, the analytic solution is readily available. Otherwise, a quadrature method (up to machine precision) can be employed.
The kinetic step is approximated in Fourier space. For a given function \begin{equation*} v\colon \prod_{i=1}^d [a_i,b_i)\to \mathbb{C}, \end{equation*} let $\hat v_k$ denote its Fourier coefficients, i.e., $$ v(x)=\sum_{k\in I_N} \hat v_k E_k(x), \qquad E_k(x)=\prod_{i=1}^d\frac{\mathrm{e}^{2\pi\mathrm{i} k_i(x_i-a_i)/(b_i-a_i)}}{\sqrt{b_i-a_i}}, $$ where $x=(x_1,\ldots,x_d)$. Further, let $$ \lambda_i = -\frac{\mathrm{i} \varepsilon}2\left(\frac{2\pi k_i}{b_i-a_i}\right)^2, $$ so that $\mathcal{A}E_k = (\lambda_1+\cdots+\lambda_d)E_k$. The Fourier coefficients of $\mathrm{e}^{\tau\cal A}v$ are then given by $\mathrm{e}^{\tau(\lambda_1+\cdots+\lambda_d)} \hat v_{k}$. The transformation between physical and Fourier space is usually carried out with the fast Fourier transform.
\section{Advection step}\label{sec:adv}
In this section we describe the solution of the advection step \begin{equation}\label{eq:adv} \left\{ \begin{aligned} \partial_t v(t,x)&=A(x)\cdot \nabla v(t,x),&t&\in[0,\tau],\\ v(0,x)&=v_0(x). \end{aligned}\right. \end{equation} Our approach is based on the method of characteristics, i.e., we make use of the curves $s\mapsto x(s)\in \mathbb{R}^d$ satisfying the $d$-dimensional system of ordinary differential equations \begin{equation*} \dot x(s)=-A(x(s)). \end{equation*} Since the solution of the advection equation \eqref{eq:adv} is constant along characteristics, we have $v(\tau,x^j)=v(0,x^j(0))$ for each grid point $x^j$, $j\in I_N$, where $x^j(0)$ denotes the solution of \begin{equation}\label{eq:ODE} \left\{ \begin{aligned} \dot x^j(s)&=-A(x^j(s)),&s&\in[0,\tau],\\ x^j(\tau)&=x^j \end{aligned} \right. \end{equation} at $s=0$. This system can be solved once and for all for each grid point with an explicit method at high precision, if the time step $\tau$ is kept constant. However, since $x^j(0)$ is not a grid point, in general, the value $v(0,x^j(0))=v_0(x^j(0))$ has to be recovered. We describe here three different procedures for achieving the evaluation of $v_0(x^{j}(0))$ at the set of $\prod_iN_i$ points $\{x^{j}(0)\}_j$. For the sake of simplicity, we only describe the one-dimensional case in detail. However, we also report the overall computational cost for the general $d$-dimensional case.
We remark that the same approach can be used for time dependent potentials $A(t,x)$. Instead of \eqref{eq:ODE} one has to solve the non-autonomous problem \begin{equation} \left\{ \begin{aligned} \dot x^j(s)&=-A(t_n+s, x^j(s)),&s&\in[0,\tau],\\ x^j(\tau)&=x^j. \end{aligned} \right. \end{equation} Its numerical solution at $s=0$ is again used to define the sought-after approximation $v(\tau,x^j) = v(0,x^j(0))$.
\subsection{Direct Fourier series evaluation}
Since the initial value $v_0(x)$ of the advection step is the result of the solution of the kinetic step, the function $v_0$ is known through its Fourier coefficients $\{\hat v_k\}_k$. It is therefore possible to directly evaluate \begin{equation}\label{eq:DFS} v_0(x^j(0))= \sum_{k\in I_N}\hat v_{k}E_k(x^j(0)). \end{equation} In the $d$-dimensional case, the $\prod_i N_i^2$ values $E_k(x^j(0))$ can be precomputed once and for all, if the time step $\tau$ is constant. The evaluation cost of~\eqref{eq:DFS} at the point set $\{x^j(0)\}_j$ is then $\mathcal{O}(\prod_i N_i^2)$ at each time step.
\subsection{Local polynomial interpolation}
Another possibility (see, for instance, \cite{SRBG99,JZ13,EO15}) is local polynomial interpolation. It is possible to evaluate $v_0(x)$ at the grid points $\{x^j\}_j$ with an inverse fast Fourier transform of cost $\mathcal{O}\left(N_1\cdot\ldots\cdot N_d\cdot(\log N_1+\ldots+\log N_d)\right)$. An approximation of the values $v_0(x^j(0))$ can then be obtained by local polynomial interpolation \begin{equation}\label{eq:PE} v_0(x^j(0))\approx \sum_{k\in I_p}v_0(x^{j+k})L_{j+k}(x^j(0)). \end{equation} Here $\{x^{j+k}\}_k$ is the set of the $p$ grid points, $p$ even, satisfying \begin{equation*} x^{j-p/2}<\ldots<x^{j-1}\le x^j(0)<x^j<\ldots<x^{j+p/2-1}, \end{equation*} and $L_{j+k}$ denotes the elementary Lagrange polynomial of degree $p-1$ that takes the value one at $x^{j+k}$ and zero at all the other $p-1$ points. Of course, the points $x^{j+k}$ and the corresponding values $v_0(x^{j+k})$ have to be taken by periodicity if necessary.
In the $d$-dimensional case, for a constant time step $\tau$ it is possible to precompute once and for all the elementary Lagrange polynomials at the points $\{x^j(0)\}_j$ (for a total amount of $p^d\prod_i N_i$ values). Then, the evaluation of \eqref{eq:PE} at each time step requires $\mathcal{O}(p^d\prod_i N_i)$ operations.
\subsection{Fourier series evaluation by NFFT}
The third explored possibility is the evaluation of~\eqref{eq:DFS} by means of an approximate fast Fourier transform. Among others, we tested the nonequi\-spaced fast Fourier transform (NFFT) by Keiner, Kunis and Potts~\cite{KKP09}. The computational cost of such an approach is $\mathcal{O}\bigl(N_1\cdot\ldots\cdot N_d\cdot(\log N_1+\ldots+\log N_d+ \abs{\log\epsilon}^d)\bigr)$, where $\epsilon$ is the desired accuracy.
For the readers' convenience, we briefly sketch the NFFT algorithm in one dimension, using the original notation of~\cite{KKP09}. Given some coefficients $\{\hat f_k\}_{k\in I_N}$, $N$ even, and a set of \emph{arbitrary} points $\{x^j\}_j\subset\left[-\frac12,\frac12\right)$, the aim is a fast evaluation of the one-periodic trigonometric polynomial \begin{equation}\label{eq:f} f(x)=\sum_{k\in I_N}\hat f_k \mathrm{e}^{-2\pi\mathrm{i} kx} \end{equation} at the points $\{x^j\}_j$. In the first step, $f(x)$ is replaced with the ansatz \begin{equation*} s_1(x)=\sum_{\ell\in I_{n}}g_\ell\,\tilde\varphi\left(x-\tfrac{\ell}{n}\right),\quad \sigma\ge 2,\quad \text{$n=\sigma N$ even}, \end{equation*} where $\{g_\ell\}_\ell$ are some coefficients to be defined later and \begin{equation*} \tilde \varphi(x)=\sum_{r\in\mathbb{Z}}\varphi(x+r) \end{equation*} is the one-periodic version of a \emph{window} function $\varphi\colon\mathbb{R}\to\mathbb{R}$. The window function $\varphi$ is chosen in such a way that $\tilde \varphi$ has a uniformly convergent Fourier series \begin{equation*} \tilde\varphi(x)=\sum_{k\in\mathbb{Z}}c_k(\tilde \varphi)\mathrm{e}^{-2\pi\mathrm{i} kx}. \end{equation*} The default window function used by NFFT is the so-called \emph{Kaiser--Bessel function} \begin{equation*} \varphi(x)=\frac{1}{\pi}\left\{ \begin{aligned} &\frac{\sinh(\beta\sqrt{m^2-n^2x^2})}{\sqrt{m^2-n^2x^2}}&&\text{for $\abs{x}<\frac{m}{n}$},\\ &\frac{\sin(\beta\sqrt{n^2x^2-m^2})}{\sqrt{n^2x^2-m^2}}&&\text{for $\abs{x}>\frac{m}{n}$},\\ &\beta&&\text{for $\abs{x}=\frac{m}{n}$} \end{aligned} \right. \end{equation*} with the \emph{shape} parameter $\beta=\pi(2-1/\sigma)$. The value of $m$ depends on the desired accuracy $\epsilon$ and is chosen as $m=8$ for double precision. The \emph{oversampling} factor $\sigma$ is defined by \begin{equation*} \sigma=\frac{2^{\lceil\log_2 2N\rceil}}{N}. \end{equation*} That is, $n=\sigma N$ is the smallest power of two with $2\le \sigma<4$. 
Now we plug the Fourier series expansion of $\tilde\varphi(x)$ into $s_1(x)$ in order to get \begin{equation*} \begin{aligned} s_1(x)&=\sum_{\ell\in I_{n}}g_\ell\,\tilde \varphi\left(x-\tfrac{\ell}{n}\right)\\ &= \sum_{\ell\in I_{n}}g_\ell\sum_{k\in\mathbb{Z}}c_k(\tilde\varphi)\mathrm{e}^{-2\pi\mathrm{i} k\left(x-\frac{\ell}{n}\right)}\\ &=\sum_{k\in\mathbb{Z}}\left(\sum_{\ell\in I_{n}}g_\ell\mathrm{e}^{2\pi\mathrm{i} k\frac{\ell}{n}}\right)c_k(\tilde\varphi)\mathrm{e}^{-2\pi\mathrm{i} kx} \end{aligned} \end{equation*} and apply a cutoff in the frequency domain \begin{equation}\label{eq:s1cutoff} s_1(x)\approx \sum_{k\in I_{n}}\left(\sum_{\ell\in I_{n}}g_\ell\mathrm{e}^{2\pi\mathrm{i} k\frac{\ell}{n}}\right)c_k(\tilde\varphi)\mathrm{e}^{-2\pi\mathrm{i} kx} =\sum_{k\in I_{n}}\hat g_kc_k(\tilde\varphi)\mathrm{e}^{-2\pi\mathrm{i} k x}. \end{equation} Comparing now equations~\eqref{eq:f} and \eqref{eq:s1cutoff}, we see that the coefficients $\{\hat g_k\}_k$ are simply given by \begin{equation*} \hat g_k=\left\{ \begin{aligned} &\frac{\hat f_k}{c_k(\tilde \varphi)},&&k\in I_N,\\ &0,&&k\in I_n\setminus I_N, \end{aligned} \right. \end{equation*} and the values $\{g_\ell\}_\ell$ can be recovered by a fast Fourier transform of length $n$. The parameter $m$ is then used as a \emph{cutoff} to approximate in practice the window function $\varphi(x)$ with \begin{equation*} \psi(x)=\varphi(x)\chi_{\left[-\frac{m}{n},\frac{m}{n}\right]}(x). \end{equation*} In this way, $s_1(x)$ is further approximated by \begin{equation*} s_1(x)\approx s(x)=\sum_{\ell\in I_{n}}g_\ell\,\tilde \psi\left(x-\tfrac{\ell}{n}\right). \end{equation*} Now we use that $\tilde \psi$ vanishes outside of $-\frac{m}{n}\le x-\frac{\ell}{n}\le \frac{m}{n}$. Thus, for fixed $x^j$, the above sum contains at most $2m+1$ terms different from zero. Finally, $s(x)$ is evaluated at the set $\{x^j\}_j$, providing the desired approximation of $\{f(x^j)\}_j$.
\section{Application to the magnetic Schr\"odinger equation} \label{Verification}
In this section we exemplify the assumptions of Theorem~\ref{thm:convergence} for the magnetic Schr\"{o}dinger equation \eqref{main}. For this purpose, we choose $X=L^2(\Omega)$ with $\Omega = \prod_{i=1}^d [a_i,b_i)$ and assume that the potentials $A$ and $V$ are sufficiently smooth. Note that the potential operator $\mathcal{B}$ is bounded, whereas the kinetic operator $\mathcal{A}$ and the advection operator $\mathcal{C}$ are both unbounded. We start with the verification of Assumption \ref{hp}. \begin{itemize} \item[$\diamond$] Condition \eqref{hp1}: Since $\mathrm{e}^{t\mathcal{A}}w$ is the exact solution of the problem $\partial_t u=\mathcal{A} u$, $u(0)=w$, it preserves the smoothness of the initial data. Further, the commutator $[\mathcal{A},\mathcal{C}]$ is a second-order differential operator \begin{align*} [\mathcal{A},\mathcal{C}] u &= \frac{\mathrm{i} \varepsilon}{2}[\Delta, A\cdot \nabla]u\\ &= \frac{\mathrm{i}\varepsilon}{2} \Bigl(\Delta(A\cdot \nabla u)-A\cdot \nabla(\Delta u) \Bigr). \end{align*} So, we need to assume that the initial data are twice differentiable. \item[$\diamond$] Conditions \eqref{hp5}, \eqref{hp2}, and \eqref{hp3}: As $\mathcal{C}$ is a first-order differential operator, it is again sufficient to require that the initial data are twice differentiable. \item[$\diamond$] Condition \eqref{hp4}: The commutator is a second-order differential operator \[ [\mathcal{A}+\mathcal{C},\mathcal{B}]u = \left[\frac{\mathrm{i}\varepsilon}{2} \Delta + A \cdot \nabla, -\frac{\mathrm{i}}{\varepsilon}\left(\frac{1}{2}\lvert A \rvert^2+V\right)\right]u, \] so the same smoothness as before is required. \end{itemize}
Stability is easily verified. From the conservation of mass discussed at the end of section \ref{description}, we get $\lVert \mathrm{e}^{\tau\mathcal{A}} \rVert_{L^2} = 1$ and $\lVert \mathrm{e}^{\tau\mathcal{C}} \rVert_{L^2} = 1$.
Note that the above bound for the advection semigroup only holds in the Coulomb gauge setting. However, by the method of characteristics, the solution of the advection step is of the form $u(t,x(t))=u_0(x(0))$, where $x(t)=x(0)-tA(x(0))+\mathcal{O}(t^2),$ since the characteristics satisfy $\dot x = -A(x)$. Setting $\xi=x(0)$, we have \[
\Vert u \Vert_{L^2}^2= \int_{\Omega} \vert u(x) \vert^2 \mathrm{d} x= \int_{\Omega} \vert u_0(\xi) \vert^2 \mathrm{d} x = \int_{\Omega} \vert u_0(\xi) \vert^2 \left|\det\bigl(I-tA'(\xi)+\mathcal O(t^2)\bigr)\right|\mathrm{d}\xi. \] Under the assumption that the partial derivatives of $A$ are bounded, we have \[ \Vert u \Vert_{L^2}^2 \leq \Vert u_0 \Vert^2_{L^2}+ C t \Vert u_0 \Vert^2_{L^2}\leq (1+Ct)\Vert u_0 \Vert^2_{L^2} \le \mathrm{e}^{2t\omega_{\mathcal{C}}}\Vert u_0 \Vert^2_{L^2}, \] which is exactly the weaker bound required in Assumption \ref{hpStab}.
\section{Numerical experiments} \label{Numerical}
The first numerical example is a variation of~\cite[Example 2]{JZ13}. The vector potential is chosen as $A(x)=\sin(2\pi x)/5+1/5$ and the scalar potential as $V(x)=\cos(2\pi x)/5+4/5$. The initial value is $u_0(x)=\sqrt{\rho_0(x)}\exp(\mathrm{i} S_0(x)/\varepsilon)$, where \begin{equation*} \rho_0(x)=\mathrm{e}^{-50\left(x-\frac{1}{2}\right)^2},\quad S_0(x)=-\frac{\log\bigl(\mathrm{e}^{5(x-\frac{1}{2})}+\mathrm{e}^{-5(x-\frac{1}{2})}\bigr)}{5},\quad \varepsilon=\frac{1}{128}. \end{equation*} Note that this initial value is not periodic. However, due to the exponential decay of $\rho_0(x)$, the problem can be solved numerically up to time $T=0.42$ in the space interval $[0,1]$ by assuming periodic boundary conditions. The Coulomb gauge transformation yields \begin{equation*} \lambda(x)=\frac{\cos(2\pi x)}{10\pi\varepsilon}. \end{equation*} \begin{figure}
\caption{Temporal errors (stars, circles) for the Lie and Strang splitting methods and reference orders (lines) for the one-dimensional example.}
\label{fig:order1d}
\end{figure} In Figure~\ref{fig:order1d} we plot the global errors of Lie splitting at the final time $T=0.42$ for various time steps and $N=2048$ spatial discretization points. The reference solution was obtained with 512 time steps. We include in this figure the error behavior of Strang splitting, defined by \begin{equation}\label{eq:strang} u_{n+1}=\mathrm{e}^{\frac{\tau}{2} \mathcal{B}}\mathrm{e}^{\frac{\tau}{2} \mathcal{A}}\mathrm{e}^{\tau \mathcal{C}} \mathrm{e}^{\frac{\tau}{2} \mathcal{A}} \mathrm{e}^{\frac{\tau}{2} \mathcal{B}}u_n. \end{equation} In this double logarithmic diagram, the errors of a method lie on a straight line of slope $q$, where $q$ denotes the order of the method. Both Lie and Strang splitting show their expected orders of convergence. Lie splitting has order one, as proved in Theorem~\ref{thm:convergence}, whereas Strang splitting converges with order two.
Note that the computationally most expensive task in the employed splitting approach is the advection step. Therefore, we order the steps in \eqref{eq:strang} in such a way that the advection equation is solved only once in each time step. In this way, Strang splitting provides much more accuracy without being significantly more expensive than Lie splitting.
Next, we compare the three different numerical realizations of the advection step, namely by local interpolation, by direct Fourier series evaluation (DFT) and by NFFT. In Table~\ref{tab:massconservation1d} we report the error in mass conservation and the required CPU time for various values of $N$. The number of time steps is fixed to $n=128$. The error in mass conservation is measured as the maximum deviation from the initial mass on the discrete level ($l^2$ in space and $l^\infty$ in time).
\begin{table}[!ht] \centering \renewcommand{\arraystretch}{1.2}
\begin{tabular}{|*{8}{c|}} \hline
& & \multicolumn{4}{c|}{interpolation} & \multicolumn{2}{c|}{Fourier}\\ \cline{3-8} $N$ & & $p=2$ & $p=4$ & $p=6$ & $p=8$ & DFT & NFFT\\ \hline 128 & mass & 1.4e-01 & 1.8e-02 & 2.1e-03 & 2.8e-04 & 2.8e-15 & 8.6e-14 \\ \cline{2-8}
& CPU & 0.13 & 0.12 & 0.12 & 0.12 & 0.10 & 0.16 \\ \hline 256 & mass & 9.4e-02 & 2.7e-03 & 7.2e-05 & 2.5e-06 & 2.0e-15 & 1.0e-14 \\ \cline{2-8}
& CPU & 0.13 & 0.13 & 0.13 & 0.14 & 0.19 & 0.17 \\ \hline 512 & mass & 5.2e-02 & 2.9e-04 & 2.0e-06 & 1.8e-08 & 3.6e-15 & 1.7e-14 \\ \cline{2-8}
& CPU & 0.16 & 0.19 & 0.17 & 0.16 & 0.27 & 0.19 \\ \hline 1024 & mass & 1.6e-02 & 1.8e-05 & 3.0e-08 & 9.6e-11 & 4.0e-15 & 5.5e-14 \\ \cline{2-8}
& CPU & 0.22 & 0.23 & 0.23 & 0.24 & 0.56 & 0.23 \\ \hline 2048 & mass & 4.2e-03 & 1.1e-06 & 4.9e-10 & 3.8e-12 & 3.3e-15 & 1.3e-14 \\ \cline{2-8}
& CPU & 0.36 & 0.37 & 0.37 & 0.37 & 1.42 & 0.33 \\ \hline \end{tabular} \caption{Error in mass conservation and CPU time (in seconds) for the one-dimensional numerical example.} \label{tab:massconservation1d} \end{table} Due to the compressive behavior of $S'_0(x)$, which acts as an initial velocity, the evolution develops caustics and the numerical solution requires a sufficiently large number $N$ of Fourier modes in order to reproduce accurate physical observables. While DFT and NFFT always preserve the mass almost up to machine precision, the polynomial methods become comparable only with the largest tested value of $N$ and at polynomial degree 7. For this degree, they are slightly more expensive than the NFFT approach.
The second numerical experiment is set in the two-dimensional domain $[-5,5]^2$ with \begin{equation*} \begin{aligned} A_1(x,y)&=-3\sin\left(\tfrac{2\pi(y+5)}{10}\right),\\ A_2(x,y)&=3\sin\left(\tfrac{2\pi(x+5)}{10}\right),\\ V(x,y)&= 20\cos\left(\tfrac{2\pi(x+5)}{10}\right)+ 20\cos\left(\tfrac{2\pi(y+5)}{10}\right)+40, \end{aligned} \end{equation*} and initial value \begin{equation*} u_0(x,y)=\sqrt{\tfrac{\sqrt{10}}{\pi}}\exp\left(-\tfrac{\sqrt{10}}{2}\left((x-1)^2+y^2\right)\right). \end{equation*} The semi-classical parameter is chosen $\varepsilon=1$, the final time $T=50$ and the number of time steps $n=1000$. In Table~\ref{tab:massconservation2d} we compare the three methods that only differ in the treatment of the advection step. In particular, we compare the behavior of tensor interpolation at $4\times 4$ and $6\times 6$ points with direct Fourier series evaluation and NFFT with the default value $m=8$ and the smaller values $m=6$ and $m=4$, respectively.
\begin{table}[!ht] \centering \renewcommand{\arraystretch}{1.2}
\begin{tabular}{|*{8}{c|}} \hline
& & \multicolumn{2}{c|}{} & \multicolumn{4}{c|}{Fourier}\\ \cline{5-8}
& & \multicolumn{2}{c|}{interpolation} & & \multicolumn{3}{c|}{NFFT}\\ \cline{3-4}\cline{6-8} $N_1=N_2$ & & $p=4$ & $p=6$ & DFT & $m=8$ & $m=6$ & $m=4$\\ \hline 128 & mass & 1.0e-01 & 2.5e-03 & 9.9e-11 & 1.0e-10 & 2.4e-10 & 2.2e-07\\ \cline{2-8} & CPU & 25.2 & 33.5 & 174.3 & 23.7 & 22.8 & 20.6\\ \hline 256 & mass & 6.9e-03 & 3.9e-05 & 1.3e-08 & 1.3e-08 & 2.0e-08 & 2.5e-02\\ \cline{2-8} & CPU & 101.7 & 117.9 & 2254 & 99.6 & 85.6 & 87.7\\ \hline 512 & mass & 4.3e-04 & 6.2e-07 & * & 9.7e-11 & 2.5e-10 & 2.0e-07\\ \cline{2-8} & CPU & 412.7 & 506.8 & * & 435.7 & 401.4 & 400.4\\ \hline 1024 & mass & 2.7e-05 & 9.6e-09 & * & 9.7e-11 & 2.5e-10 & 1.9e-07\\ \cline{2-8} & CPU & 1796 & 2139 & * & 1948 & 1840 & 1709\\ \hline \end{tabular} \caption{Error in mass conservation and CPU time (in seconds) for the two-dimensional numerical example.} \label{tab:massconservation2d} \end{table} We observe that, for this long-term simulation, the mass is always well conserved by the direct Fourier series evaluation and by NFFT with the default value $m=8$. On the other hand, if $m$ is halved, there is a significant degradation, especially with $N_1=N_2=256$. The direct Fourier series evaluation is much more expensive than the other methods, being impracticable for $N_1=N_2\ge 512$. The interpolation methods roughly cost as much as the NFFT approach, but their mass preservation is by far worse.
The final numerical example is a three-dimensional variation of the previous one. In the domain $[-5,5]^3$, with $\varepsilon=1$, we chose \begin{equation*} \begin{aligned} A_1(x,y,z)&=\sin\left(\tfrac{2\pi(y+5)}{10}\right)+ \sin\left(\tfrac{2\pi(z+5)}{10}\right),\\ A_2(x,y,z)&=\sin\left(\tfrac{2\pi(x+5)}{10}\right)+ \sin\left(\tfrac{2\pi(z+5)}{10}\right),\\ A_3(x,y,z)&=\sin\left(\tfrac{2\pi(x+5)}{10}\right)+ \sin\left(\tfrac{2\pi(y+5)}{10}\right),\\ V(x,y,z)&= 20\cos\left(\tfrac{2\pi(x+5)}{10}\right)+ 20\cos\left(\tfrac{2\pi(y+5)}{10}\right)+20\cos\left(\tfrac{2\pi(z+5)}{10}\right)+ 60, \end{aligned} \end{equation*} and the initial value \begin{equation*} u_0(x,y,z)=\tfrac{2^{3/8}}{\pi^{3/2}}\exp\left(-\tfrac{\sqrt{2}}{2}\left((x-1)^2+y^2+z^2\right)\right). \end{equation*} \begin{table}[!ht] \centering \renewcommand{\arraystretch}{1.2}
\begin{tabular}{|c|c|c|c|} \hline
& & \multicolumn{2}{c|}{NFFT}\\ \cline{3-4} $N_1=N_2=N_3$ & & \verb+PRE_PSI+ & \verb+PRE_FULL_PSI+\\ \hline 16 & mass & 6.1e-13 & 6.1e-13\\ \cline{2-4}
& CPU & 5.6 & 6.5\\ \hline 32 & mass & 8.2e-14 & 8.2e-14\\ \cline{2-4}
& CPU & 37.7 & 51.7\\ \hline 64 & mass & 7.1e-13 & *\\ \cline{2-4}
& CPU & 396.5 & *\\ \hline 128 & mass & 7.9e-09 & *\\ \cline{2-4}
& CPU & 2976 & *\\ \hline \end{tabular} \caption{Error in mass conservation and CPU time (in seconds) for the three-dimensional example.} \label{tab:massconservation3d} \end{table} With this example, we also tested the option \verb+PRE_FULL_PSI+ of NFFT (see~\cite{KKP09}). At the price of a full precomputation of the window functions, which requires a storage of $(2m+1)^3\prod_i N_i$ double precision numbers, this option should allow an overall faster execution. In Table~\ref{tab:massconservation3d} we display the error of mass conservation and the CPU time for simulations up to $T=5$ with 100 time steps. As expected, there is no difference in the mass conservation property between the two schemes. However, we never succeeded in getting the \verb+PRE_FULL_PSI+ version faster than the default one (named \verb+PRE_PSI+). For $N_1=N_2=N_3\ge64$, it was even not possible to store the precomputed values in the RAM (8 GB). Nevertheless, the default implementation of NFFT, which requires a storage of $3(2m+1)\prod_i N_i$ for the window functions, works without any problem.
\section{Conclusions}
In this paper we considered the numerical solution of the linear Schr\"odinger equation with a vector potential. The structure of the problem suggested the use of a splitting method involving three different parts, namely a multiplicative term coming from scalar potentials, the Laplacian, and the advective term due to the vector potential. After establishing convergence of Lie splitting for an abstract problem, we analysed the required assumptions in the specific case of the magnetic Schr\"odinger equation. For the advection step, the solution along the characteristic curves was approximated by a nonequispaced fast Fourier transform. It turned out to be as fast as local polynomial interpolation and as accurate as direct Fourier series evaluation in the mass conservation at the discrete level. Therefore, it can be considered as a competitive tool in the solution of advection equations with the method of characteristics.
\section*{\refname}
\end{document} | arXiv | {
"id": "1604.08044.tex",
"language_detection_score": 0.6556428074836731,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\preprint{}
\title{Hilbert-Schmidt Geometry of $n$-Level Jak\'obczyk-Siennicki Two-Dimensional Quantum Systems}
\author{Paul B. Slater} \email{slater@kitp.ucsb.edu} \affiliation{ ISBER, University of California, Santa Barbara, CA 93106\\ } \date{\today}
\begin{abstract} Jak\'obczyk and Siennicki studied {\it two}-dimensional sections of a set of (generalized) Bloch vectors corresponding to $n \times n$ density matrices of two-qubit systems (that is, the case $n=4$). They found essentially five different types of (nontrivial) separability regimes. We compute the Euclidean/Hilbert-Schmidt (HS) separability {\it probabilities} assigned to these regimes, and conduct parallel {\it two}-dimensional sectional analyses for the {\it higher}-level cases $n=6,8,9$ and 10. Making use of the newly-introduced capability for integration over {\it implicitly} defined regions of version 5.1 of Mathematica --- as we have also fruitfully done in the $n=4$ {\it three}-parameter entropy-maximization-based study quant-ph/0507203 --- we obtain a wide-ranging variety of {\it exact} HS-probabilities. For $n>6$, the probabilities are those of having a {\it positive partial transpose} (PPT). For the $n=6$ case, we also obtain {\it biseparability} probabilities; in the $n=8,9$ instances, bi-PPT probabilities; and for $n=8$, {\it tri}-PPT probabilities. By far, the most frequently recorded probability for $n>4$ is $\frac{\pi}{4} \approx 0.785398$. We also conduct a number of related analyses, pertaining to the (one-dimensional) {\it boundaries} (both exterior and interior) of the separability and PPT domains, and attempt (with quite limited success) some
{\it exact} calculations pertaining to the {\it 9}-dimensional (real) and {\it 15}-dimensional (complex) convex sets of two-qubit density matrices --- for which exact HS-separability probabilities have been conjectured, but not yet verified. \end{abstract}
\pacs{02.40.Dr, 02.40.Ft, 03.67.-a}
\keywords{separability probabilities, Hilbert-Schmidt metric, density matrices, Bloch vectors, positive partial transpose, two-dimensional sections}
\maketitle \section{Introduction} There has been considerable recent interest \cite{kk,kimura,byrd} in understanding how one can, from the spherical coordinate point of view,
generalize to $n$-level quantum systems ($n \ge 2$) the familiar Bloch ball representation of the two-level quantum systems ($n=2$) --- in which the pure states form the bounding spherical surface (``Bloch sphere'') of the unit ball in three-dimensional Euclidean space. Kimura and Kossakowski have expressed the generalized Bloch representation of an $n \times n$ density matrix in the form \cite[eq. (3)]{kk} \begin{equation} \label{expansion} \rho = \frac{\mbox{tr} \rho}{n} I_{n} +\frac{1}{2} \Sigma_{i=1}^{n^2-1} (\mbox{tr} \rho \lambda_{i}) \lambda_{i}, \end{equation} where $I_{n}$ is the identity operator, and the $\lambda_{i}$'s are the $(n^2-1)$ orthogonal generators of $SU(n)$, forming a basis of the set of all the linear operators with respect to the Hilbert-Schmidt inner product.
An interesting application of these concepts was made by Jak\'obczyk and Siennicki (JS) \cite{jak}. They examined all those two-qubit ($n=4$) systems describable as {\it two}-dimensional sections of sets of (generalized) Bloch (coherence \cite{byrd}) vectors. (The {\it totality} of $4 \times 4$ density matrices, on the other hand,
comprises a {\it fifteen}-dimensional convex set --- the $n \times n$ density matrices being $(n^2-1)$-dimensional in nature --- so thirteen of the fifteen $SU(4)$ orthogonal generators [Gell-Mann matrices] are assigned null weight in the JS $n = 4$ analyses. That is, thirteen of the fifteen coefficients, $(\mbox{tr} \rho \lambda_{i})$, in the expansion (\ref{expansion}) are zero.)
Since there were only two parameters involved in each of their scenarios, JS were able to present {\it planar} diagrams depicting the feasible regions, as well as those subsets of these regions composed of separable states. In their Fig. 1, JS exhibited thirteen possible types of parameter domains. Further, in their Fig. 2, they showed six different (nontrivial) separability scenarios (two of which --- labelled ``EF)'' and ``FE)'' by JS -- are simply geometric reflections of one another).
We will, firstly (sec.~\ref{secN=4}), in this study, evaluate the sizes (areas) of these six (two-dimensional) domains and nontrivial subdomains, in terms of the {\it Hilbert-Schmidt} (HS) metric --- a task JS did not explicitly address. (The HS-distance between two density operators $\rho_{1},\rho_{2}$ is defined as $\sqrt{\mbox{Tr}(\rho_{1}-\rho_{2})^2}$ \cite[eq. (2.3)]{hilb2}.) Then, we extend the JS analyses to the cases $n=6$ (sec.~\ref{secN=6}), 8 (sec.~\ref{secN=8}), 9 (sec.~\ref{secN=9}) and 10 (sec.~\ref{secN=10}), in which various {\it multi}partite --- as opposed to simply {\it bi}partite scenarios can arise. For all these instances, except $n=10$, we additionally obtain the HS-lengths of the (one-dimensional) boundary states (that is, those with {\it degenerate} spectra) and the HS-probabilities that states lying on this boundary are separable. (Motivated by our extensive numerical results given in \cite{slaterPRA} and \cite{ten}, Szarek, Bengtsson and \.Zyczkowski have recently
proved ``that the probability to find a random state to be separable equals 2 times the probability to find a random boundary state to be separable, provided the random states are generated uniformly with respect to the Hilbert-Schmidt (Euclidean) distance'' \cite{sbz}). Also, we compute in certain cases, the HS-lengths of the (interior) boundaries dividing one domain of interest from another. (The interior states generically have {\it nondegenerate} spectra.) Then (sec.~\ref{finalSEC}), we undertake some analyses involving {\it three} (rather than two) parameters. These prove to be much more {\it problematical} in nature (cf. \cite{slaterjpanew}).
We also report, at the end (sec.~\ref{final}), some initial steps in an attempt to determine
exact {\it upper} bounds for the HS-volumes of the {\it separable} {\it 9}-dimensional real and {\it 15}-dimensional complex $4 \times 4$ density matrices. (Only in these computations --- in order to compare our formulas with known HS-volumes of separable and nonseparable states \cite{hilb2} --- do
we not take the HS-volume element to be {\it unity}.)
Our computations in this paper were {\it greatly} facilitated by a new feature of the programming language Mathematica (version 5.1) --- the capacity to integrate over {\it implicitly} defined regions. (This feature was also employed by us in \cite{slaterjpanew}, in a somewhat related two-qubit context, in which the Jaynes maximum-entropy principle was employed.) We, first, found explicit forms for the $n$ eigenvalues of the various $n \times n$ matrices {\it and} for their partial transposes. Then we required, in the several integrations, using the new feature (thus, saving us from the laborious task of having to specify large numbers of particular integration limits and do corresponding detailed bookkeeping), simply that these eigenvalues be {\it nonnegative}. This ensured that we either had, in fact, the requisite {\it density} matrices and/or {\it positive partial transposes} (PPT) of density matrices.
\section{The qubit-qubit case $n=4$ of Jak\'obczyk and Siennicki} \label{secN=4}
For the (geometrically-reflected) scenarios that JS labeled
``EF)'' and ``FE)'', we have found (Table~\ref{tab:tabJS1}) that the Hilbert-Schmidt volume (cf. \cite{hilb2}) of separable {\it and} nonseparable states is $\frac{2 \sqrt{2}}{3}$ and of the separable states {\it alone} is $\frac{2}{3}$. So the corresponding separability {\it probability} (taking ratios) is --- elegantly --- $\frac{1}{\sqrt{2}} \approx 0.707107$.
For the scenario ``CK)'', possessing a triangular separability domain, the total volume is $\frac{4 \sqrt{\frac{2}{3}}}{3}$ and the separability probability is $\frac{1}{24} (9 +2 \sqrt{3} \pi) \approx 0.828450$. For ``GH')'', the total volume is
$\frac{9}{32} \sqrt{\frac{3}{2}} \pi$ and the separability probability, $\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2 \sqrt{2}\right)}{27
\pi } \approx 0.825312$. For ``KC)'', the total volume is $\frac{\sqrt{2} \pi }{3}$ and the separability probability is (the smallest) $\frac{1}{3} +\frac{\sqrt{3}}{2 \pi} \approx 0.608998$. For ``HG')'', the total HS-volume is $\frac{3}{2 \sqrt{2}}$ and the HS-separability probability is (the largest of the five) $\frac{52+27 \sqrt{2} \sec ^{-1}(3)}{48 \sqrt{6}} \approx 0.842035$.
\begin{table}[ht] \caption{\label{tab:tabJS1}} \begin{ruledtabular} \begin{tabular}{rrrrr} \hline JS scenario & HS total vol. & HS separable vol. & HS sep. prob. & num. approx. \\ \hline EF) and FE) & $ \frac{2 \sqrt{2}}{3}$ & $ \frac{2}{3}$ & $\frac{1}{{\sqrt{2}}}$ & 0.707107 \\ CK) & $\frac{4 \sqrt{\frac{2}{3}}}{3} $ & $\frac{1}{18} \left(3 \sqrt{6}+2 \sqrt{2} \pi \right) $ & $\frac{9 + 2\, {\sqrt{3}}\, \pi}{24} $ & 0.828450 \\ GH') & $\frac{9}{32} \sqrt{\frac{3}{2}} \pi$ & $ \frac{1}{192} \left(52 \sqrt{3}+27 \sqrt{6} \sin
^{-1}\left(\frac{2 \sqrt{2}}{3}\right)\right)$ & $ \frac{26 \sqrt{2}+27 \tan ^{-1}\left(2 \sqrt{2}\right)}{27
\pi }$ &
0.825312 \\ HG') & $\frac{3}{2 \sqrt{2}}$ &
$ \frac{1}{192} \left(52 \sqrt{3}+27 \sqrt{6} \sec
^{-1}(3)\right)$ &
$ \frac{52+27 \sqrt{2} \sec ^{-1}(3)}{48 \sqrt{6}}$ & 0.842035 \\ KC) & $\frac{\sqrt{2} \pi }{3}$ & $ \frac{1}{18} \left(3 \sqrt{6}+2 \sqrt{2} \pi \right)$ & $ \frac{1}{3}+\frac{\sqrt{3}}{2 \pi }$ & 0.608998 \\ \end{tabular} \end{ruledtabular} \end{table}
Now, let us present again most of these results (Table~\ref{tab:tabJS1}) in the form of the array (\ref{n=4case}). We do so because we will also present all the results of our subsequent analyses below (for $n>4$) in this manner (which we have found to be the most convenient for directly incorporating
our large-scale Mathematica computer-generated analyses into this report).
In the first column of (\ref{n=4case}) are given the identifying numbers of a {\it pair} of Gell-Mann matrices (generators of $SU(4)$) --- which, in fact, can be seen to fully agree with the numbering (and associated scenario-labelling) of JS \cite[p. 389]{jak}. (Here and further, we will {\it always} adhere to the conventional/standard numbering \cite[sec. III]{todd} of the Lie generators of $SU(n)$, so that our results should be reproducible/verifiable to others. We list the pairs in lexographic order, using the first pair as the representative for its equivalence class.) In the second column of (\ref{n=4case}) are shown the {\it number} of distinct unordered pairs of $SU(4)$ generators which share the same total (separable and nonseparable) HS volume, as well as the same separable HS volume, and consequently, identical HS separability probabilities. The third column gives us these HS total volumes, the fourth column, the HS separability probabilities and the last (fifth) column, numerical approximations to the exact probabilities (which, of course, we see --- being probabilities --- do not exceed the value 1).
(Due to space/page width constraints, we were unable to generally present in these data arrays the HS separable volumes too, though they can, of course, be deduced from the total volume and the separability probability.) \begin{equation} \label{n=4case} \left( \begin{array}{lllll}
\{3,6\} & 4 & \frac{2 \sqrt{2}}{3} & \frac{1}{\sqrt{2}} &
0.707107 \\
\{6,8\} & 2 & \frac{9}{32} \sqrt{\frac{3}{2}} \pi &
\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2
\sqrt{2}\right)}{27 \pi } & 0.825312 \\
\{6,15\} & 2 & \frac{4 \sqrt{\frac{2}{3}}}{3} &
\frac{1}{24} \left(9+2 \sqrt{3} \pi \right) & 0.828450
\\
\{8,9\} & 2 & \frac{3}{2 \sqrt{2}} & \frac{52+27 \sqrt{2}
\sec ^{-1}(3)}{48 \sqrt{6}} & 0.842035 \\
\{9,15\} & 2 & \frac{\sqrt{2} \pi }{3} &
\frac{1}{3}+\frac{\sqrt{3}}{2 \pi } & 0.608998 \end{array} \right). \end{equation} Thus, twelve of the $210 =15 \cdot 14$ possible unordered pairs of Gell-Mann matrices are associated with nontrivial ($ < 1$) separability probabilities \cite[p. 389]{jak}.
\subsection{Boundary states} For the scenario associated with the pair of Gell-Mann matrices $\{3,6 \}$, the HS-length of the (one-dimensional) {\it boundary} states (that is, those with {\it degenerate} spectra) is $\frac{3}{2}$, and of the bounding states which are separable, $\frac{1}{2}$. For the pair $\{6,8 \}$, the analogous results are $\frac{3}{2 \sqrt{2}} \approx 1.06066$ and 1; for $\{6,15\}$, they are $\frac{2}{3}$
and $\frac{1}{2}$; and for $\{8,9\}$, $\frac{3 \sqrt{3}}{4}$ and $\frac{\sqrt{3}}{2}$, for a separability probability of boundary states of $\frac{2}{3}$. For the last $\{9,15\}$ of the five scenarios, we have $\frac{2}{\sqrt{3}}$ and 1. Let us now present these results in the following array form (which we will adopt for our more extensive results further below): \begin{equation} \label{boundary1} \left( \begin{array}{lllll}
\{3,6\} & \frac{3}{2} & \frac{1}{2} & \frac{1}{3} &
0.333333 \\
\{6,8\} & \frac{3}{2 \sqrt{2}} & 1 & \frac{2 \sqrt{2}}{3}
& 0.942809 \\
\{6,15\} & \frac{2}{3} & \frac{1}{2} & \frac{3}{4} & 0.75
\\
\{8,9\} & \frac{3 \sqrt{3}}{4} & \frac{\sqrt{3}}{2} &
\frac{2}{3} & 0.666667 \\
\{9,15\} & \frac{2}{\sqrt{3}} & 1 & \frac{\sqrt{3}}{2} &
0.866025 \end{array} \right). \end{equation} \subsection{Length of Separability-Nonseparability {\it Interior} Boundary} In the following array, we present the HS-length of the common border separating the nonseparable (entangled) states from the separable ones. The states lying along this interior border generically have {\it nondegenerate} spectra. \begin{equation} \left( \begin{array}{lllll}
\{3,6\} & \{6,8\} & \{6,15\} & \{8,9\} & \{9,15\} \\
\frac{1}{2} & 1 & 1 & \frac{\sqrt{3}}{4} & \frac{1}{2} \end{array} \right). \end{equation} \section{The qubit-qutrit case $n=6$} \label{secN=6} \subsection{$3 \times 2$ Decomposition} \label{n=6a} Moving on from the $n=4$ case specifically studied by Jak\'obczyk and
Siennicki to $n=6$ (cf. \cite{slaterPRA}), we compute the partial transposes of the $6 \times 6$ density matrices, corresponding to two-dimensional sections of the set of Bloch vectors. We, first, transpose in place the ($2^2$) four $3 \times 3$ blocks of the density matrices. By the Peres-Horodecki criterion, such density matrices with {\it positive} partial transposes must be {\it separable}.
We obtained the following results, presented in the same manner as (\ref{n=4case}). \begin{equation} \label{n=6case1} \left( \begin{array}{lllll}
\{1,13\} & 48 & \frac{4}{9} & \frac{\pi }{4} & 0.785398
\\
\{3,11\} & 4 & \frac{8 \sqrt{2}}{27} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{3,13\} & 4 & \frac{4}{9} & \frac{5}{6} & 0.833333 \\
\{3,25\} & 4 & \frac{8 \sqrt{2}}{27} & \frac{5}{4
\sqrt{2}} & 0.883883 \\
\{8,13\} & 4 & \frac{2}{3} & \frac{1}{\sqrt{3}} &
0.577350 \\
\{8,25\} & 4 & \frac{\sqrt{2}}{3} & \sqrt{\frac{2}{3}} &
0.816497 \\
\{11,15\} & 4 & \frac{4 \sqrt{2} \pi }{27} &
\frac{1}{3}+\frac{3 \sqrt{3}}{4 \pi } & 0.746830 \\
\{11,24\} & 2 & \frac{25 \sqrt{\frac{5}{2}}}{72} &
\frac{2}{5}+\frac{1}{2} \sin
^{-1}\left(\frac{4}{5}\right) & 0.863648 \\
\{13,24\} & 2 & \frac{25 \sqrt{\frac{5}{2}}}{72} &
\frac{8}{75} \left(-2+5 \sqrt{5}\right) & 0.979236 \\
\{13,35\} & 4 & \frac{4 \sqrt{\frac{3}{5}}}{5} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838
\\
\{15,16\} & 4 & \frac{32 \sqrt{2}}{81} & \frac{1}{32}
\left(9 \sqrt{3}+4 \pi \right) & 0.879838 \\
\{16,24\} & 2 & \frac{25}{144} \sqrt{\frac{5}{2}} \pi &
\frac{4+5 \sin ^{-1}\left(\frac{4}{5}\right)}{5 \pi } &
0.549815 \\
\{20,24\} & 2 & \frac{25}{144} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{24,25\} & 2 & \frac{25}{27 \sqrt{2}} & 1-\frac{2}{5
\sqrt{5}} & 0.821115 \\
\{24,27\} & 2 & \frac{25}{27 \sqrt{2}} & \frac{92+75 \cos
^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} & 0.903076
\\
\{25,35\} & 4 & \frac{\sqrt{3} \pi }{5} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \end{array} \right). \end{equation} \subsubsection{Boundary states} In the following array, we list, first the scenario pair, then, the HS-length of the boundary states (those with degenerate spectra), then, the HS-length of those boundary states which are separable, then, the separability probability and a numerical approximation to it. \begin{equation} \left( \begin{array}{lllll}
\{1,13\} & \frac{2}{3} & 0 & 0 & 0. \\
\{3,11\} & 1 & \frac{1}{3} & \frac{1}{3} & 0.333333 \\
\{3,13\} & \frac{2}{3} & \frac{1}{3} & \frac{1}{2} & 0.5
\\
\{3,25\} & 1 & \frac{1}{3} & \frac{1}{3} & 0.333333 \\
\{8,13\} & \frac{2}{\sqrt{3}} & \frac{1}{2 \sqrt{3}} &
\frac{1}{4} & 0.25 \\
\{8,25\} & \frac{\sqrt{3}}{2} & \frac{1}{\sqrt{3}} &
\frac{2}{3} & 0.666667 \\
\{11,15\} & \frac{4}{3 \sqrt{3}} & \frac{2}{3} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{11,24\} & \frac{5}{12} & \frac{1}{3} & \frac{4}{5} &
0.8 \\
\{13,24\} & \frac{5}{6} & \frac{\sqrt{5}}{3} &
\frac{2}{\sqrt{5}} & 0.894427 \\
\{13,35\} & \frac{2}{5} & \frac{1}{3} & \frac{5}{6} &
0.833333 \\
\{15,16\} & \frac{4 \sqrt{\frac{2}{3}}}{3} &
\sqrt{\frac{2}{3}} & \frac{3}{4} & 0.75 \\
\{16,24\} & \frac{5}{6} & \frac{2}{3} & \frac{4}{5} & 0.8
\\
\{20,24\} & \frac{5}{6} & \frac{2}{3} & \frac{4}{5} & 0.8
\\
\{24,25\} & \frac{5 \sqrt{\frac{5}{2}}}{6} &
\frac{\sqrt{\frac{5}{2}}}{6} & \frac{1}{5} & 0.2 \\
\{24,27\} & \frac{5 \sqrt{\frac{5}{2}}}{6} &
\frac{\sqrt{10}}{3} & \frac{4}{5} & 0.8 \\
\{25,35\} & \frac{2}{\sqrt{5}} & \frac{2}{3} &
\frac{\sqrt{5}}{3} & 0.745356 \end{array} \right). \end{equation} \subsubsection{Length of Separability-Nonseparability {\it Interior} Boundary} In the following arrays, we present the HS-length of the common border separating the nonseparable (entangled) states from the separable ones for each specific scenario. \begin{equation} \left( \begin{array}{llllllll}
\{1,13\} & \{3,11\} & \{3,13\} & \{3,25\} & \{8,13\} &
\{8,25\} & \{11,15\} & \{11,24\} \\
\frac{2}{3} & \frac{1}{3} & \frac{1}{3} & \frac{1}{3} &
\frac{1}{\sqrt{3}} & \frac{1}{2 \sqrt{3}} & \frac{2}{3}
& \frac{2}{3} \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{llllllll}
\{13,24\} & \{13,35\} & \{15,16\} & \{16,24\} & \{20,24\}
& \{24,25\} & \{24,27\} & \{25,35\} \\
\frac{\sqrt{5}}{3} & \frac{2}{3} &
\frac{\sqrt{\frac{2}{3}}}{3} & \frac{1}{3} &
\frac{2}{3} & \frac{\sqrt{10}}{3} &
\frac{\sqrt{\frac{5}{2}}}{6} & \frac{1}{3} \end{array} \right). \end{displaymath}
\subsection{$2 \times 3$ Decomposition} \label{n=6b} Here, we compute the partial transposes of the same collection of $6 \times 6$ density matrices, corresponding to two-dimensional sections of the set of Bloch vectors, by transposing in place the ($3^2$)
nine $2 \times 2$ blocks of the density matrices --- rather than the four ($2^2$) $3 \times 3$ blocks as previously (sec.~\ref{n=6a}).
We obtained the following results.
\begin{equation} \left( \begin{array}{lllll}
\{3,6\} & 8 & \frac{8 \sqrt{2}}{27} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{4,18\} & 48 & \frac{4}{9} & \frac{\pi }{4} & 0.785398
\\
\{6,8\} & 2 & \frac{1}{8} \sqrt{\frac{3}{2}} \pi &
\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2
\sqrt{2}\right)}{27 \pi } & 0.825312 \\
\{6,15\} & 2 & \frac{16 \sqrt{\frac{2}{3}}}{27} &
\frac{1}{24} \left(9+2 \sqrt{3} \pi \right) & 0.828450
\\
\{8,9\} & 2 & \frac{\sqrt{2}}{3} & \frac{52+27 \sqrt{2}
\sec ^{-1}(3)}{48 \sqrt{6}} & 0.842035 \\
\{8,22\} & 2 & \frac{1}{\sqrt{3}} & \frac{8}{9} &
0.888889 \\
\{8,29\} & 2 & \frac{2}{3} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{9,15\} & 2 & \frac{4 \sqrt{2} \pi }{27} &
\frac{1}{3}+\frac{\sqrt{3}}{2 \pi } & 0.608998 \\
\{15,22\} & 2 & \frac{32 \sqrt{\frac{2}{3}}}{27} &
\frac{1}{2} & 0.500000 \\
\{15,29\} & 2 & \frac{32 \sqrt{2}}{81} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{18,24\} & 4 & \frac{25}{144} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{18,35\} & 4 & \frac{4 \sqrt{\frac{3}{5}}}{5} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838
\\
\{24,25\} & 4 & \frac{25}{27 \sqrt{2}} & \frac{92+75 \cos
^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} & 0.903076
\\
\{25,35\} & 4 & \frac{\sqrt{3} \pi }{5} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \end{array} \right). \end{equation} There are now only fourteen rows, while in the preceding qubit-qutrit analysis (\ref{n=6case1}) there were sixteen. In both analyses, though, there are 48 unordered pairs of Lie generators which yield HS separability probabilities equal to $\frac{\pi}{4}$.
\subsubsection{Boundary states} Here, we again present the results, restricting consideration to the boundary (degenerate spectra) states, in the same form as previously (\ref{boundary1}). \begin{equation} \left( \begin{array}{lllll}
\{3,6\} & 1 & \frac{1}{3} & \frac{1}{3} & 0.333333 \\
\{4,18\} & \frac{2}{3} & 0 & 0 & 0. \\
\{6,8\} & \frac{1}{\sqrt{2}} & \frac{2}{3} & \frac{2
\sqrt{2}}{3} & 0.942809 \\
\{6,15\} & \frac{4}{9} & \frac{1}{3} & \frac{3}{4} & 0.75
\\
\{8,9\} & \frac{\sqrt{3}}{2} & \frac{1}{\sqrt{3}} &
\frac{2}{3} & 0.666667 \\
\{8,22\} & \frac{\sqrt{3}}{2} & \frac{1}{\sqrt{3}} &
\frac{2}{3} & 0.666667 \\
\{8,29\} & \frac{2}{\sqrt{3}} & \frac{1}{2 \sqrt{3}} &
\frac{1}{4} & 0.25 \\
\{9,15\} & \frac{4}{3 \sqrt{3}} & \frac{2}{3} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{15,22\} & \frac{5 \sqrt{\frac{2}{3}}}{3} &
\frac{\sqrt{\frac{2}{3}}}{3} & \frac{1}{5} & 0.2 \\
\{15,29\} & \frac{4 \sqrt{\frac{2}{3}}}{3} &
\sqrt{\frac{2}{3}} & \frac{3}{4} & 0.75 \\
\{18,24\} & \frac{5}{6} & \frac{2}{3} & \frac{4}{5} & 0.8
\\
\{18,35\} & \frac{2}{5} & \frac{1}{3} & \frac{5}{6} &
0.833333 \\
\{24,25\} & \frac{5 \sqrt{\frac{5}{2}}}{6} &
\frac{\sqrt{10}}{3} & \frac{4}{5} & 0.8 \\
\{25,35\} & \frac{2}{\sqrt{5}} & \frac{2}{3} &
\frac{\sqrt{5}}{3} & 0.745356 \end{array} \right) \end{equation} \subsubsection{Length of Separability-Nonseparability {\it Interior} Boundary} In the following arrays, we present the HS-length of the common border separating the nonseparable (entangled) states from the separable ones for each specific scenario. \begin{equation} \left( \begin{array}{lllllll}
\{3,6\} & \{4,18\} & \{6,8\} & \{6,15\} & \{8,9\} &
\{8,22\} & \{8,29\} \\
\frac{1}{3} & \frac{2}{3} & \frac{2}{3} & \frac{2}{3} &
\frac{1}{2 \sqrt{3}} & \frac{1}{2 \sqrt{3}} &
\frac{1}{\sqrt{3}} \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllllll}
\{9,15\} & \{15,22\} & \{15,29\} & \{18,24\} & \{18,35\}
& \{24,25\} & \{25,35\} \\
\frac{1}{3} & \sqrt{\frac{2}{3}} &
\frac{\sqrt{\frac{2}{3}}}{3} & \frac{2}{3} &
\frac{2}{3} & \frac{\sqrt{\frac{5}{2}}}{6} &
\frac{1}{3} \end{array} \right). \end{displaymath} \subsection{Biseparable HS probabilities} Now, we determine which of the two-dimensional set of $6 \times 6$ density matrices have positive partial transposes for {\it both} forms of partial transposition used in secs.~\ref{n=6a} and~\ref{n=6b}. The results we obtained were: \begin{equation} \left( \begin{array}{lllll}
\{1,13\} & 88 & \frac{4}{9} & \frac{\pi }{4} & 0.785398
\\
\{3,6\} & 12 & \frac{8 \sqrt{2}}{27} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{3,13\} & 4 & \frac{4}{9} & \frac{5}{6} & 0.833333 \\
\{3,27\} & 2 & \frac{8 \sqrt{2}}{27} & \frac{5}{4
\sqrt{2}} & 0.883883 \\
\{6,8\} & 2 & \frac{1}{8} \sqrt{\frac{3}{2}} \pi &
\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2
\sqrt{2}\right)}{27 \pi } & 0.825312 \\
\{6,15\} & 2 & \frac{16 \sqrt{\frac{2}{3}}}{27} &
\frac{1}{24} \left(9+2 \sqrt{3} \pi \right) & 0.828450
\\
\{8,9\} & 2 & \frac{\sqrt{2}}{3} & \frac{52+27 \sqrt{2}
\sec ^{-1}(3)}{48 \sqrt{6}} & 0.842035 \\
\{8,13\} & 4 & \frac{2}{3} & \frac{1}{\sqrt{3}} &
0.577350 \\
\{8,22\} & 2 & \frac{1}{\sqrt{3}} & \frac{8}{9} &
0.888889 \\
\{8,25\} & 4 & \frac{\sqrt{2}}{3} & \sqrt{\frac{2}{3}} &
0.816497 \\
\{8,29\} & 2 & \frac{2}{3} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{9,15\} & 2 & \frac{4 \sqrt{2} \pi }{27} &
\frac{1}{3}+\frac{\sqrt{3}}{2 \pi } & 0.608998 \\
\{11,15\} & 4 & \frac{4 \sqrt{2} \pi }{27} &
\frac{1}{3}+\frac{3 \sqrt{3}}{4 \pi } & 0.746830 \\
\{11,24\} & 2 & \frac{25 \sqrt{\frac{5}{2}}}{72} &
\frac{2}{5}+\frac{1}{2} \sin
^{-1}\left(\frac{4}{5}\right) & 0.863648 \\
\{13,24\} & 2 & \frac{25 \sqrt{\frac{5}{2}}}{72} &
\frac{8}{75} \left(-2+5 \sqrt{5}\right) & 0.979236 \\
\{13,35\} & 8 & \frac{4 \sqrt{\frac{3}{5}}}{5} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838
\\
\{15,16\} & 4 & \frac{32 \sqrt{2}}{81} & \frac{1}{32}
\left(9 \sqrt{3}+4 \pi \right) & 0.879838 \\
\{15,22\} & 2 & \frac{32 \sqrt{\frac{2}{3}}}{27} &
\frac{1}{2} & 0.500000 \\
\{15,29\} & 2 & \frac{32 \sqrt{2}}{81} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{16,24\} & 2 & \frac{25}{144} \sqrt{\frac{5}{2}} \pi &
\frac{4+5 \sin ^{-1}\left(\frac{4}{5}\right)}{5 \pi } &
0.549815 \\
\{18,24\} & 6 & \frac{25}{144} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{24,25\} & 2 & \frac{25}{27 \sqrt{2}} & \frac{3
\left(4+5 \cos ^{-1}\left(\frac{3}{5}\right)\right)}{16
\sqrt{5}} & 0.724191 \\
\{24,27\} & 4 & \frac{25}{27 \sqrt{2}} & \frac{92+75 \cos
^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} & 0.903076
\\
\{25,35\} & 6 & \frac{\sqrt{3} \pi }{5} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \end{array} \right). \end{equation} \subsubsection{Boundary States} Concerning the corresponding one-dimensional (exterior) boundary (degenerate spectra) states we found: \begin{equation} \left( \begin{array}{lllll}
\{1,13\} & \frac{2}{3} & 0 & 0 & 0. \\
\{3,6\} & 1 & \frac{1}{3} & \frac{1}{3} & 0.333333 \\
\{3,13\} & \frac{2}{3} & \frac{1}{3} & \frac{1}{2} & 0.5
\\
\{3,27\} & 1 & \frac{1}{3} & \frac{1}{3} & 0.333333 \\
\{6,8\} & \frac{1}{\sqrt{2}} & \frac{2}{3}
& \frac{2 \sqrt{2}}{3} & 0.942809 \\
\{6,15\} & \frac{4}{9} & \frac{1}{3} & \frac{3}{4} & 0.75
\\
\{8,9\} & \frac{\sqrt{3}}{2} & \frac{1}{\sqrt{3}} &
\frac{2}{3} & 0.666667 \\
\{8,13\} & \frac{2}{\sqrt{3}} & \frac{1}{2 \sqrt{3}} &
\frac{1}{4} & 0.25 \\
\{8,22\} & \frac{\sqrt{3}}{2} & \frac{1}{\sqrt{3}} &
\frac{2}{3} & 0.666667 \\
\{8,25\} & \frac{\sqrt{3}}{2} & \frac{1}{\sqrt{3}} &
\frac{2}{3} & 0.666667 \\
\{8,29\} & \frac{2}{\sqrt{3}} & \frac{1}{2 \sqrt{3}} &
\frac{1}{4} & 0.25 \\
\{9,15\} & \frac{4}{3 \sqrt{3}} & \frac{2}{3} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{11,15\} & \frac{4}{3 \sqrt{3}} & \frac{2}{3} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{11,24\} & \frac{5}{12} & \frac{1}{3} & \frac{4}{5} &
0.8 \\
\{13,24\} & \frac{5}{6} & \frac{\sqrt{5}}{3} &
\frac{2}{\sqrt{5}} & 0.894427 \\
\{13,35\} & \frac{2}{5} & \frac{1}{3} & \frac{5}{6} &
0.833333 \\
\{15,16\} & \frac{4 \sqrt{\frac{2}{3}}}{3} &
\sqrt{\frac{2}{3}} & \frac{3}{4} & 0.75 \\
\{15,22\} & \frac{5 \sqrt{\frac{2}{3}}}{3} &
\frac{\sqrt{\frac{2}{3}}}{3} & \frac{1}{5} & 0.2 \\
\{15,29\} & \frac{4 \sqrt{\frac{2}{3}}}{3} &
\sqrt{\frac{2}{3}} & \frac{3}{4} & 0.75 \\
\{16,24\} & \frac{5}{6} & \frac{2}{3} & \frac{4}{5} & 0.8
\\
\{18,24\} & \frac{5}{6} & \frac{2}{3} & \frac{4}{5} & 0.8
\\
\{24,25\} & \frac{5 \sqrt{\frac{5}{2}}}{6} & 0 & 0 & 0.
\\
\{24,27\} & \frac{5 \sqrt{\frac{5}{2}}}{6} &
\frac{\sqrt{10}}{3} & \frac{4}{5} & 0.8 \\
\{25,35\} & \frac{2}{\sqrt{5}} & \frac{2}{3} &
\frac{\sqrt{5}}{3} & 0.745356 \end{array} \right). \end{equation} \subsubsection{Length of Biseparability-Nonbiseparability {\it Interior} Boundary} In the following array, we present the HS-length of the common border separating the (generically nondegenerate) biseparable states from the non-biseparable ones for each specific scenario. \begin{equation} \left( \begin{array}{ll}
\{1,13\} & \frac{2}{3} \\
\{3,6\} & \frac{2}{3} \\
\{3,13\} & \frac{2}{3} \\
\{3,27\} & \frac{2}{3} \\
\{6,8\} & \frac{4}{3} \\
\{6,15\} & 1 \\
\{8,9\} & \frac{\sqrt{3}}{2} \\
\{8,13\} & \frac{\sqrt{3}}{2} \\
\{8,22\} & \frac{\sqrt{3}}{2} \\
\{8,25\} & \frac{\sqrt{3}}{2} \\
\{8,29\} & \frac{\sqrt{3}}{2} \\
\{9,15\} & 1 \\
\{11,15\} & \frac{4}{3} \\
\{11,24\} & 1 \\
\{13,24\} & \frac{2 \sqrt{5}}{3} \\
\{13,35\} & 1 \\
\{15,16\} & \frac{4 \sqrt{\frac{2}{3}}}{3} \\
\{15,22\} & \frac{4 \sqrt{\frac{2}{3}}}{3} \\
\{15,29\} & \frac{4 \sqrt{\frac{2}{3}}}{3} \\
\{16,24\} & 1 \\
\{18,24\} & \frac{4}{3} \\
\{24,25\} &
\frac{\sqrt{\frac{5}{2}}}{6}+\frac{\sqrt{10}}{3} \\
\{24,27\} &
\frac{\sqrt{\frac{5}{2}}}{6}+\frac{\sqrt{10}}{3} \\
\{25,35\} & \frac{2}{3} \end{array} \right). \end{equation} \section{The case $n=8$} \label{secN=8} \subsection{$4 \times 2$ Decomposition} Here, we compute the partial transposes of the $8 \times 8$ density matrices, corresponding to two-dimensional sections of the set of Bloch vectors, by, first, transposing in place the ($2^2$) four $4 \times 4$ blocks of the density matrices. The results are \begin{equation} \label{eight4x2} \left( \begin{array}{lllll}
\{1,20\} & 192 & \frac{1}{4} & \frac{\pi }{4} & 0.785398
\\
\{3,18\} & 4 & \frac{1}{3 \sqrt{2}} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{3,20\} & 8 & \frac{1}{4} & \frac{5}{6} & 0.833333 \\
\{3,36\} & 8 & \frac{1}{3 \sqrt{2}} & \frac{5}{4
\sqrt{2}} & 0.883883 \\
\{8,20\} & 4 & \frac{3}{8} & \frac{1}{\sqrt{3}} &
0.577350 \\
\{8,22\} & 4 & \frac{3 \sqrt{3}}{16} & \frac{7}{9} &
0.777778 \\
\{8,36\} & 4 & \frac{3}{8 \sqrt{2}} & \sqrt{\frac{2}{3}}
& 0.816497 \\
\{8,42\} & 2 & \frac{3 \sqrt{3}}{16} & \frac{8}{9} &
0.888889 \\
\{8,49\} & 4 & \frac{3}{8 \sqrt{2}} & \frac{7}{3
\sqrt{6}} & 0.952579 \\
\{8,53\} & 2 & \frac{3}{8} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{15,22\} & 6 & \frac{2 \sqrt{\frac{2}{3}}}{3} &
\frac{1}{2} & 0.500000 \\
\{15,49\} & 6 & \frac{2 \sqrt{2}}{9} & \frac{\sqrt{3}}{2}
& 0.866025 \\
\{18,24\} & 6 & \frac{25}{256} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{18,35\} & 2 & \frac{9 \sqrt{\frac{3}{5}}}{20} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838
\\
\{20,35\} & 4 & \frac{9 \sqrt{\frac{3}{5}}}{20} &
\frac{1}{108} \left(-25+24 \sqrt{30}\right) & 0.985680
\\
\{20,48\} & 4 & \frac{49 \sqrt{\frac{7}{3}}}{192} &
\frac{1}{28} \left(12+7 \sqrt{6} \sin
^{-1}\left(\frac{2 \sqrt{6}}{7}\right)\right) &
0.903278 \\
\{22,48\} & 4 & \frac{49 \sqrt{\frac{7}{3}}}{192} &
\frac{4}{147} \left(-9+7 \sqrt{42}\right) & 0.989529 \\
\{22,63\} & 6 & \frac{8}{7 \sqrt{7}} & \frac{1}{16}
\left(7+4 \sqrt{7} \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)\right) & 0.915544
\\
\{24,25\} & 6 & \frac{25}{48 \sqrt{2}} & \frac{92+75 \cos
^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} & 0.903076
\\
\{25,35\} & 2 & \frac{9 \sqrt{3} \pi }{80} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \\
\{29,35\} & 4 & \frac{9 \sqrt{3} \pi }{80} & \frac{14
\sqrt{5}+27 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{27 \pi } &
0.636783 \\
\{35,36\} & 4 & \frac{3}{5 \sqrt{2}} & 1-\frac{5
\sqrt{\frac{5}{6}}}{24} & 0.809819 \\
\{35,38\} & 4 & \frac{3}{5 \sqrt{2}} & \frac{14
\sqrt{5}+27 \cos ^{-1}\left(\frac{2}{3}\right)}{24
\sqrt{6}} & 0.918793 \\
\{36,48\} & 4 & \frac{49}{384} \sqrt{\frac{7}{2}} \pi &
\frac{2 \sqrt{6}+7 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{7 \pi } & 0.469522 \\
\{42,48\} & 2 & \frac{49}{384} \sqrt{\frac{7}{2}} \pi &
\frac{22 \sqrt{6}+49 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{49 \pi } & 0.596820 \\
\{48,49\} & 4 & \frac{49}{72 \sqrt{2}} & 1-\frac{3
\sqrt{\frac{3}{14}}}{7} & 0.801610 \\
\{48,53\} & 2 & \frac{49}{72 \sqrt{2}} & \frac{3 \left(22
\sqrt{6}+49 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{112 \sqrt{7}} &
0.930129 \\
\{49,63\} & 6 & \frac{2 \pi }{7} & \frac{\sqrt{7}+4 \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)}{4 \pi } &
0.440596 \end{array} \right). \end{equation} \subsubsection{Boundary states} The results pertaining to the {\it one}-dimensional (exterior) boundary (generically degenerate) states were: \begin{equation} \left( \begin{array}{lllll}
\{1,20\} & \frac{1}{2} & 0 & 0 & 0. \\
\{3,18\} & \frac{3}{4} & \frac{1}{4} & \frac{1}{3} &
0.333333 \\
\{3,20\} & \frac{1}{2} & \frac{1}{4} & \frac{1}{2} & 0.5
\\
\{3,36\} & \frac{3}{4} & \frac{1}{4} & \frac{1}{3} &
0.333333 \\
\{8,20\} & \frac{\sqrt{3}}{2} & \frac{\sqrt{3}}{8} &
\frac{1}{4} & 0.25 \\
\{8,22\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{8} &
\frac{1}{3} & 0.333333 \\
\{8,36\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,42\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,49\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,53\} & \frac{\sqrt{3}}{2} & \frac{\sqrt{3}}{8} &
\frac{1}{4} & 0.25 \\
\{15,22\} & \frac{5}{2 \sqrt{6}} & \frac{1}{2 \sqrt{6}} &
\frac{1}{5} & 0.2 \\
\{15,49\} & \sqrt{\frac{2}{3}} &
\frac{\sqrt{\frac{3}{2}}}{2} & \frac{3}{4} & 0.75 \\
\{18,24\} & \frac{5}{8} & \frac{1}{2} & \frac{4}{5} & 0.8
\\
\{18,35\} & \frac{3}{10} & \frac{1}{4} & \frac{5}{6} &
0.833333 \\
\{20,35\} & \frac{3}{5} & \sqrt{\frac{3}{10}} &
\sqrt{\frac{5}{6}} & 0.912871 \\
\{20,48\} & \frac{7}{24} & \frac{1}{4} & \frac{6}{7} &
0.857143 \\
\{22,48\} & \frac{7}{12} & \frac{\sqrt{\frac{7}{6}}}{2} &
\sqrt{\frac{6}{7}} & 0.92582 \\
\{22,63\} & \frac{2}{7} & \frac{1}{4} & \frac{7}{8} &
0.875 \\
\{24,25\} & \frac{5 \sqrt{\frac{5}{2}}}{8} &
\frac{\sqrt{\frac{5}{2}}}{2} & \frac{4}{5} & 0.8 \\
\{25,35\} & \frac{3}{2 \sqrt{5}} & \frac{1}{2} &
\frac{\sqrt{5}}{3} & 0.745356 \\
\{29,35\} & \frac{3}{2 \sqrt{5}} & \frac{1}{2} &
\frac{\sqrt{5}}{3} & 0.745356 \\
\{35,36\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{\frac{3}{5}}}{4} & \frac{1}{6} & 0.166667
\\
\{35,38\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{15}}{4} & \frac{5}{6} & 0.833333 \\
\{36,48\} & \frac{7}{4 \sqrt{6}} & \frac{1}{2} & \frac{2
\sqrt{6}}{7} & 0.699854 \\
\{42,48\} & \frac{7}{4 \sqrt{6}} & \frac{1}{2} & \frac{2
\sqrt{6}}{7} & 0.699854 \\
\{48,49\} & \frac{7 \sqrt{\frac{7}{3}}}{8} &
\frac{\sqrt{\frac{7}{3}}}{8} & \frac{1}{7} & 0.142857
\\
\{48,53\} & \frac{7 \sqrt{\frac{7}{3}}}{8} &
\frac{\sqrt{21}}{4} & \frac{6}{7} & 0.857143 \\
\{49,63\} & \frac{2}{\sqrt{7}} & \frac{1}{2} &
\frac{\sqrt{7}}{4} & 0.661438 \end{array} \right). \end{equation} Here, we see the appearance of (fully entangled) domains that have no separable component at all. \subsection{$2 \times 4$ Decomposition} Now, we compute the partial transposes of the $8 \times 8$ density matrices, corresponding to two-dimensional sections of the set of Bloch vectors, by transposing in place the ($4^2$) sixteen $2 \times 2$ blocks of the density matrices. We obtained the following results. \begin{equation} \label{eight2x4} \left( \begin{array}{lllll}
\{3,6\} & 12 & \frac{1}{3 \sqrt{2}} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{4,18\} & 192 & \frac{1}{4} & \frac{\pi }{4} & 0.785398
\\
\{6,8\} & 2 & \frac{9}{128} \sqrt{\frac{3}{2}} \pi &
\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2
\sqrt{2}\right)}{27 \pi } & 0.825312 \\
\{6,15\} & 2 & \frac{\sqrt{\frac{2}{3}}}{3} &
\frac{1}{24} \left(9+2 \sqrt{3} \pi \right) & 0.828450
\\
\{8,9\} & 2 & \frac{3}{8 \sqrt{2}} & \frac{52+27 \sqrt{2}
\sec ^{-1}(3)}{48 \sqrt{6}} & 0.842035 \\
\{8,22\} & 4 & \frac{3 \sqrt{3}}{16} & \frac{8}{9} &
0.888889 \\
\{8,29\} & 4 & \frac{3}{8} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{9,15\} & 2 & \frac{\pi }{6 \sqrt{2}} &
\frac{1}{3}+\frac{\sqrt{3}}{2 \pi } & 0.608998 \\
\{15,22\} & 4 & \frac{2 \sqrt{\frac{2}{3}}}{3} &
\frac{1}{2} & 0.500000 \\
\{15,29\} & 4 & \frac{2 \sqrt{2}}{9} & \frac{\sqrt{3}}{2}
& 0.866025 \\
\{18,24\} & 4 & \frac{25}{256} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{18,35\} & 4 & \frac{9 \sqrt{\frac{3}{5}}}{20} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838
\\
\{24,25\} & 4 & \frac{25}{48 \sqrt{2}} & \frac{92+75 \cos
^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} & 0.903076
\\
\{24,46\} & 2 & \frac{5 \sqrt{\frac{5}{2}}}{16} &
\frac{14}{15} & 0.933333 \\
\{24,57\} & 2 & \frac{25}{24 \sqrt{2}} & \frac{7}{5
\sqrt{5}} & 0.626099 \\
\{25,35\} & 4 & \frac{9 \sqrt{3} \pi }{80} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \\
\{35,46\} & 2 & \frac{3}{\sqrt{10}} & \frac{1}{\sqrt{6}}
& 0.408248 \\
\{35,57\} & 2 & \frac{3}{5 \sqrt{2}} & \sqrt{\frac{5}{6}}
& 0.912871 \\
\{38,48\} & 6 & \frac{49}{384} \sqrt{\frac{7}{2}} \pi &
\frac{22 \sqrt{6}+49 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{49 \pi } & 0.596820 \\
\{38,63\} & 6 & \frac{8}{7 \sqrt{7}} & \frac{1}{16}
\left(7+4 \sqrt{7} \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)\right) & 0.915544
\\
\{48,49\} & 6 & \frac{49}{72 \sqrt{2}} & \frac{3 \left(22
\sqrt{6}+49 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{112 \sqrt{7}} &
0.930129 \\
\{49,63\} & 6 & \frac{2 \pi }{7} & \frac{\sqrt{7}+4 \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)}{4 \pi } &
0.440596 \end{array} \right). \end{equation} We see that there are fewer rows in (\ref{eight2x4}) than in (\ref{eight4x2}), obtained by the alternative form of partial transposition. \subsubsection{Boundary states} Our analysis of the HS-{\it lengths} of the corresponding boundary states yielded: \begin{equation} \left( \begin{array}{lllll}
\{3,6\} & \frac{3}{4} & \frac{1}{4} & \frac{1}{3} &
0.333333 \\
\{4,18\} & \frac{1}{2} & 0 & 0 & 0. \\
\{6,8\} & \frac{3}{4 \sqrt{2}} & \frac{1}{2} & \frac{2
\sqrt{2}}{3} & 0.942809 \\
\{6,15\} & \frac{1}{3} & \frac{1}{4} & \frac{3}{4} & 0.75
\\
\{8,9\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4}
& \frac{2}{3} & 0.666667 \\
\{8,22\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,29\} & \frac{\sqrt{3}}{2} & \frac{\sqrt{3}}{8} &
\frac{1}{4} & 0.25 \\
\{9,15\} & \frac{1}{\sqrt{3}} & \frac{1}{2} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{15,22\} & \frac{5}{2 \sqrt{6}} & \frac{1}{2 \sqrt{6}} &
\frac{1}{5} & 0.2 \\
\{15,29\} & \sqrt{\frac{2}{3}} &
\frac{\sqrt{\frac{3}{2}}}{2} & \frac{3}{4} & 0.75 \\
\{18,24\} & \frac{5}{8} & \frac{1}{2} & \frac{4}{5} & 0.8
\\
\{18,35\} & \frac{3}{10} & \frac{1}{4} & \frac{5}{6} &
0.833333 \\
\{24,25\} & \frac{5 \sqrt{\frac{5}{2}}}{8} &
\frac{\sqrt{\frac{5}{2}}}{2} & \frac{4}{5} & 0.8 \\
\{24,46\} & \frac{5 \sqrt{\frac{5}{2}}}{8} &
\frac{\sqrt{\frac{5}{2}}}{2} & \frac{4}{5} & 0.8 \\
\{24,57\} & \frac{3 \sqrt{\frac{5}{2}}}{4} &
\frac{\sqrt{\frac{5}{2}}}{8} & \frac{1}{6} & 0.166667
\\
\{25,35\} & \frac{3}{2 \sqrt{5}} & \frac{1}{2} &
\frac{\sqrt{5}}{3} & 0.745356 \\
\{35,46\} & \frac{7 \sqrt{\frac{3}{5}}}{4} &
\frac{\sqrt{\frac{3}{5}}}{4} & \frac{1}{7} & 0.142857
\\
\{35,57\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{15}}{4} & \frac{5}{6} & 0.833333 \\
\{38,48\} & \frac{7}{4 \sqrt{6}} & \frac{1}{2} & \frac{2
\sqrt{6}}{7} & 0.699854 \\
\{38,63\} & \frac{2}{7} & \frac{1}{4} & \frac{7}{8} &
0.875 \\
\{48,49\} & \frac{7 \sqrt{\frac{7}{3}}}{8} &
\frac{\sqrt{21}}{4} & \frac{6}{7} & 0.857143 \\
\{49,63\} & \frac{2}{\sqrt{7}} & \frac{1}{2} &
\frac{\sqrt{7}}{4} & 0.661438 \end{array} \right). \end{equation} \subsection{Bi-PPT} \label{secBi} Here, we obtain the probabilities that an $8 \times 8$ density matrix will have a positive partial transpose, under {\it both} forms of partial transposition employed immediately above. Our results were:
\begin{equation} \left( \begin{array}{lllll}
\{1,20\} & 288 & \frac{1}{4} & \frac{\pi }{4} & 0.785398
\\
\{3,6\} & 12 & \frac{1}{3 \sqrt{2}} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{3,20\} & 8 & \frac{1}{4} & \frac{5}{6} & 0.833333 \\
\{3,36\} & 4 & \frac{1}{3 \sqrt{2}} & \frac{5}{4
\sqrt{2}} & 0.883883 \\
\{6,8\} & 2 & \frac{9}{128} \sqrt{\frac{3}{2}} \pi &
\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2
\sqrt{2}\right)}{27 \pi } & 0.825312 \\
\{6,15\} & 2 & \frac{\sqrt{\frac{2}{3}}}{3} &
\frac{1}{24} \left(9+2 \sqrt{3} \pi \right) & 0.828450
\\
\{8,9\} & 2 & \frac{3}{8 \sqrt{2}} & \frac{52+27 \sqrt{2}
\sec ^{-1}(3)}{48 \sqrt{6}} & 0.842035 \\
\{8,20\} & 4 & \frac{3}{8} & \frac{1}{\sqrt{3}} &
0.577350 \\
\{8,22\} & 2 & \frac{3 \sqrt{3}}{16} & \frac{2}{3} &
0.666667 \\
\{8,31\} & 2 & \frac{3 \sqrt{3}}{16} & \frac{7}{9} &
0.777778 \\
\{8,36\} & 4 & \frac{3}{8 \sqrt{2}} & \sqrt{\frac{2}{3}}
& 0.816497 \\
\{8,42\} & 2 & \frac{3 \sqrt{3}}{16} & \frac{8}{9} &
0.888889 \\
\{8,49\} & 4 & \frac{3}{8 \sqrt{2}} & \frac{7}{3
\sqrt{6}} & 0.952579 \\
\{8,53\} & 2 & \frac{3}{8} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{9,15\} & 2 & \frac{\pi }{6 \sqrt{2}} &
\frac{1}{3}+\frac{\sqrt{3}}{2 \pi } & 0.608998 \\
\{15,22\} & 6 & \frac{2 \sqrt{\frac{2}{3}}}{3} &
\frac{1}{2} & 0.500000 \\
\{15,29\} & 8 & \frac{2 \sqrt{2}}{9} & \frac{\sqrt{3}}{2}
& 0.866025 \\
\{18,24\} & 6 & \frac{25}{256} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{18,35\} & 4 & \frac{9 \sqrt{\frac{3}{5}}}{20} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{20,35\} & 2 & \frac{9 \sqrt{\frac{3}{5}}}{20} &
\frac{1}{108} \left(-25+24 \sqrt{30}\right) & 0.985680
\\
\{20,48\} & 4 & \frac{49 \sqrt{\frac{7}{3}}}{192} &
\frac{1}{28} \left(12+7 \sqrt{6} \sin
^{-1}\left(\frac{2 \sqrt{6}}{7}\right)\right) &
0.903278 \\
\{22,48\} & 4 & \frac{49 \sqrt{\frac{7}{3}}}{192} &
\frac{4}{147} \left(-9+7 \sqrt{42}\right) & 0.989529 \\
\{22,63\} & 10 & \frac{8}{7 \sqrt{7}} & \frac{1}{16}
\left(7+4 \sqrt{7} \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)\right) & 0.915544
\\
\{24,25\} & 8 & \frac{25}{48 \sqrt{2}} & \frac{92+75 \cos
^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} & 0.903076
\\
\{24,46\} & 2 & \frac{5 \sqrt{\frac{5}{2}}}{16} &
\frac{14}{15} & 0.933333 \\
\{24,57\} & 2 & \frac{25}{24 \sqrt{2}} & \frac{7}{5
\sqrt{5}} & 0.626099 \\
\{25,35\} & 4 & \frac{9 \sqrt{3} \pi }{80} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \\
\{31,35\} & 2 & \frac{9 \sqrt{3} \pi }{80} & \frac{14
\sqrt{5}+27 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{27 \pi } &
0.636783 \\
\{35,36\} & 4 & \frac{3}{5 \sqrt{2}} & 1-\frac{5
\sqrt{\frac{5}{6}}}{24} & 0.809819 \\
\{35,38\} & 4 & \frac{3}{5 \sqrt{2}} & \frac{14
\sqrt{5}+27 \cos ^{-1}\left(\frac{2}{3}\right)}{24
\sqrt{6}} & 0.918793 \\
\{35,46\} & 2 & \frac{3}{\sqrt{10}} & \frac{1}{\sqrt{6}}
& 0.408248 \\
\{35,57\} & 2 & \frac{3}{5 \sqrt{2}} & \sqrt{\frac{5}{6}}
& 0.912871 \\
\{36,48\} & 4 & \frac{49}{384} \sqrt{\frac{7}{2}} \pi &
\frac{2 \sqrt{6}+7 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{7 \pi } & 0.469522 \\
\{42,48\} & 4 & \frac{49}{384} \sqrt{\frac{7}{2}} \pi &
\frac{22 \sqrt{6}+49 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{49 \pi } & 0.596820 \\
\{48,49\} & 2 & \frac{49}{72 \sqrt{2}} & \frac{3 \left(2
\sqrt{6}+7 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{16 \sqrt{7}} &
0.731739 \\
\{48,51\} & 2 & \frac{49}{72 \sqrt{2}} & 1-\frac{3
\sqrt{\frac{3}{14}}}{7} & 0.801610 \\
\{48,53\} & 4 & \frac{49}{72 \sqrt{2}} & \frac{3 \left(22
\sqrt{6}+49 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{112 \sqrt{7}} &
0.930129 \\
\{49,63\} & 8 & \frac{2 \pi }{7} & \frac{\sqrt{7}+4 \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)}{4 \pi } &
0.440596 \end{array} \right). \end{displaymath} \subsubsection{Boundary states} The lengths, separable lengths and separability probabilities of the corresponding (exterior/degenerate spectra) boundary states are given in the following array: \begin{equation} \left( \begin{array}{lllll}
\{1,20\} & \frac{1}{2} & 0 & 0 & 0. \\
\{3,6\} & \frac{3}{4} & \frac{1}{4} & \frac{1}{3} &
0.333333 \\
\{3,20\} & \frac{1}{2} & \frac{1}{4} & \frac{1}{2} & 0.5
\\
\{3,36\} & \frac{3}{4} & \frac{1}{4} & \frac{1}{3} &
0.333333 \\
\{6,8\} & \frac{3}{4 \sqrt{2}} & \frac{1}{2} & \frac{2
\sqrt{2}}{3} & 0.942809 \\
\{6,15\} & \frac{1}{3} & \frac{1}{4} & \frac{3}{4} & 0.75
\\
\{8,9\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,20\} & \frac{\sqrt{3}}{2} & \frac{\sqrt{3}}{8} &
\frac{1}{4} & 0.25 \\
\{8,22\} & \frac{3 \sqrt{3}}{8} & 0 & 0 & 0. \\
\{8,31\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{8} &
\frac{1}{3} & 0.333333 \\
\{8,36\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,42\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,49\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,53\} & \frac{\sqrt{3}}{2} & \frac{\sqrt{3}}{8} &
\frac{1}{4} & 0.25 \\
\{9,15\} & \frac{1}{\sqrt{3}} & \frac{1}{2} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{15,22\} & \frac{5}{2 \sqrt{6}} & \frac{1}{2 \sqrt{6}} &
\frac{1}{5} & 0.2 \\
\{15,29\} & \sqrt{\frac{2}{3}} &
\frac{\sqrt{\frac{3}{2}}}{2} & \frac{3}{4} & 0.75 \\
\{18,24\} & \frac{5}{8} & \frac{1}{2} & \frac{4}{5} & 0.8
\\
\{18,35\} & \frac{3}{10} & \frac{1}{4} & \frac{5}{6} &
0.833333 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{20,35\} & \frac{3}{5} & \sqrt{\frac{3}{10}} &
\sqrt{\frac{5}{6}} & 0.912871 \\
\{20,48\} & \frac{7}{24} & \frac{1}{4} & \frac{6}{7} &
0.857143 \\
\{22,48\} & \frac{7}{12} & \frac{\sqrt{\frac{7}{6}}}{2} &
\sqrt{\frac{6}{7}} & 0.92582 \\
\{22,63\} & \frac{2}{7} & \frac{1}{4} & \frac{7}{8} &
0.875 \\
\{24,25\} & \frac{5 \sqrt{\frac{5}{2}}}{8} &
\frac{\sqrt{\frac{5}{2}}}{2} & \frac{4}{5} & 0.8 \\
\{24,46\} & \frac{5 \sqrt{\frac{5}{2}}}{8} &
\frac{\sqrt{\frac{5}{2}}}{2} & \frac{4}{5} & 0.8 \\
\{24,57\} & \frac{3 \sqrt{\frac{5}{2}}}{4} &
\frac{\sqrt{\frac{5}{2}}}{8} & \frac{1}{6} & 0.166667
\\
\{25,35\} & \frac{3}{2 \sqrt{5}} & \frac{1}{2} &
\frac{\sqrt{5}}{3} & 0.745356 \\
\{31,35\} & \frac{3}{2 \sqrt{5}} & \frac{1}{2} &
\frac{\sqrt{5}}{3} & 0.745356 \\
\{35,36\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{\frac{3}{5}}}{4} & \frac{1}{6} & 0.166667
\\
\{35,38\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{15}}{4} & \frac{5}{6} & 0.833333 \\
\{35,46\} & \frac{7 \sqrt{\frac{3}{5}}}{4} &
\frac{\sqrt{\frac{3}{5}}}{4} & \frac{1}{7} & 0.142857
\\
\{35,57\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{15}}{4} & \frac{5}{6} & 0.833333 \\
\{36,48\} & \frac{7}{4 \sqrt{6}} & \frac{1}{2} & \frac{2
\sqrt{6}}{7} & 0.699854 \\
\{42,48\} & \frac{7}{4 \sqrt{6}} & \frac{1}{2} & \frac{2
\sqrt{6}}{7} & 0.699854 \\
\{48,49\} & \frac{7 \sqrt{\frac{7}{3}}}{8} & 0 & 0 & 0.
\\
\{48,51\} & \frac{7 \sqrt{\frac{7}{3}}}{8} &
\frac{\sqrt{\frac{7}{3}}}{8} & \frac{1}{7} & 0.142857
\\
\{48,53\} & \frac{7 \sqrt{\frac{7}{3}}}{8} &
\frac{\sqrt{21}}{4} & \frac{6}{7} & 0.857143 \\
\{49,63\} & \frac{2}{\sqrt{7}} & \frac{1}{2} &
\frac{\sqrt{7}}{4} & 0.661438 \end{array} \right). \end{displaymath} \subsection{Tri-PPT} Now, we derive the probabilities that an $8 \times 8$ density matrix will have a positive partial transpose, not only under {\it both} forms of partial transposition previously employed, as in sec.~\ref{secBi}, but also under a {\it third} (independent) form obtained by, first, applying a certain $8 \times 8$ {\it permutation} matrix (\cite[eq. (3)]{zhong}) to the original $8 \times 8$ density matrix, {\it then} transposing in place the resultant four $4 \times 4$ blocks. We obtained the following results.
\begin{equation} \label{triseparabilitycase} \left( \begin{array}{lllll}
\{1,20\} & 288 & \frac{1}{4} & \frac{\pi }{4} & 0.785398
\\
\{3,6\} & 12 & \frac{1}{3 \sqrt{2}} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{3,20\} & 4 & \frac{1}{4} & \frac{5}{6} & 0.833333 \\
\{3,22\} & 4 & \frac{1}{4} & \frac{2}{3} & 0.666667 \\
\{3,36\} & 4 & \frac{1}{3 \sqrt{2}} & \frac{5}{4
\sqrt{2}} & 0.883883 \\
\{6,8\} & 2 & \frac{9}{128} \sqrt{\frac{3}{2}} \pi &
\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2
\sqrt{2}\right)}{27 \pi } & 0.825312 \\
\{6,15\} & 2 & \frac{\sqrt{\frac{2}{3}}}{3} &
\frac{1}{24} \left(9+2 \sqrt{3} \pi \right) & 0.828450
\\
\{8,9\} & 2 & \frac{3}{8 \sqrt{2}} & \frac{52+27 \sqrt{2}
\sec ^{-1}(3)}{48 \sqrt{6}} & 0.842035 \\
\{8,20\} & 4 & \frac{3}{8} & \frac{1}{\sqrt{3}} &
0.577350 \\
\{8,22\} & 2 & \frac{3 \sqrt{3}}{16} & \frac{2}{3} &
0.666667 \\
\{8,31\} & 2 & \frac{3 \sqrt{3}}{16} & \frac{7}{9} &
0.777778 \\
\{8,36\} & 6 & \frac{3}{8 \sqrt{2}} & \sqrt{\frac{2}{3}}
& 0.816497 \\
\{8,42\} & 2 & \frac{3 \sqrt{3}}{16} & \frac{8}{9} &
0.888889 \\
\{8,51\} & 2 & \frac{3}{8 \sqrt{2}} & \frac{7}{3
\sqrt{6}} & 0.952579 \\
\{8,53\} & 2 & \frac{3}{8} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{9,15\} & 2 & \frac{\pi }{6 \sqrt{2}} &
\frac{1}{3}+\frac{\sqrt{3}}{2 \pi } & 0.608998 \\
\{15,22\} & 6 & \frac{2 \sqrt{\frac{2}{3}}}{3} &
\frac{1}{2} & 0.500000 \\
\{15,29\} & 10 & \frac{2 \sqrt{2}}{9} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{18,24\} & 6 & \frac{25}{256} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{18,35\} & 4 & \frac{9 \sqrt{\frac{3}{5}}}{20} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{20,35\} & 2 & \frac{9 \sqrt{\frac{3}{5}}}{20} &
\frac{1}{108} \left(-25+24 \sqrt{30}\right) & 0.985680
\\
\{20,48\} & 6 & \frac{49 \sqrt{\frac{7}{3}}}{192} &
\frac{1}{28} \left(12+7 \sqrt{6} \sin
^{-1}\left(\frac{2 \sqrt{6}}{7}\right)\right) &
0.903278 \\
\{22,63\} & 12 & \frac{8}{7 \sqrt{7}} & \frac{1}{16}
\left(7+4 \sqrt{7} \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)\right) & 0.915544
\\
\{24,25\} & 10 & \frac{25}{48 \sqrt{2}} & \frac{92+75
\cos ^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} &
0.903076 \\
\{24,46\} & 2 & \frac{5 \sqrt{\frac{5}{2}}}{16} &
\frac{14}{15} & 0.933333 \\
\{24,57\} & 2 & \frac{25}{24 \sqrt{2}} & \frac{7}{5
\sqrt{5}} & 0.626099 \\
\{25,35\} & 4 & \frac{9 \sqrt{3} \pi }{80} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \\
\{31,35\} & 2 & \frac{9 \sqrt{3} \pi }{80} & \frac{14
\sqrt{5}+27 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{27 \pi } &
0.636783 \\
\{31,48\} & 2 & \frac{49 \sqrt{\frac{7}{3}}}{192} &
\frac{4}{147} \left(-9+7 \sqrt{42}\right) & 0.989529 \\
\{35,36\} & 2 & \frac{3}{5 \sqrt{2}} & 1-\frac{5
\sqrt{\frac{5}{6}}}{24} & 0.809819 \\
\{35,38\} & 4 & \frac{3}{5 \sqrt{2}} & \frac{1}{8}
\sqrt{\frac{3}{2}} \left(\sqrt{5}+3 \cos
^{-1}\left(\frac{2}{3}\right)\right) & 0.728612 \\
\{35,46\} & 2 & \frac{3}{\sqrt{10}} & \frac{1}{\sqrt{6}}
& 0.408248 \\
\{35,51\} & 2 & \frac{3}{5 \sqrt{2}} & \frac{14
\sqrt{5}+27 \cos ^{-1}\left(\frac{2}{3}\right)}{24
\sqrt{6}} & 0.918793 \\
\{35,57\} & 2 & \frac{3}{5 \sqrt{2}} & \sqrt{\frac{5}{6}}
& 0.912871 \\
\{36,48\} & 4 & \frac{49}{384} \sqrt{\frac{7}{2}} \pi &
\frac{2 \sqrt{6}+7 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{7 \pi } & 0.469522 \\
\{42,48\} & 4 & \frac{49}{384} \sqrt{\frac{7}{2}} \pi &
\frac{22 \sqrt{6}+49 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{49 \pi } & 0.596820 \\
\{48,49\} & 2 & \frac{49}{72 \sqrt{2}} & \frac{3 \left(2
\sqrt{6}+7 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{16 \sqrt{7}} &
0.731739 \\
\{48,51\} & 2 & \frac{49}{72 \sqrt{2}} & 1-\frac{3
\sqrt{\frac{3}{14}}}{7} & 0.801610 \\
\{48,53\} & 4 & \frac{49}{72 \sqrt{2}} & \frac{3 \left(22
\sqrt{6}+49 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{112 \sqrt{7}} &
0.930129 \\
\{49,63\} & 8 & \frac{2 \pi }{7} & \frac{\sqrt{7}+4 \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)}{4 \pi } &
0.440596 \end{array} \right). \end{displaymath} Here there are only four generator pairs yielding the probability $\frac{5}{6}$, while there were eight in simply the ``Bi-PPT'' case (sec.~\ref{secBi}). \subsubsection{Boundary States} Now, we obtained from the analysis of the one-dimensional (exterior) boundary states the results: \begin{equation} \left( \begin{array}{lllll}
\{1,20\} & \frac{1}{2} & 0 & 0 & 0. \\
\{3,6\} & \frac{3}{4} & \frac{1}{4} & \frac{1}{3} &
0.333333 \\
\{3,20\} & \frac{1}{2} & \frac{1}{4} & \frac{1}{2} & 0.5
\\
\{3,22\} & \frac{1}{2} & 0 & 0 & 0. \\
\{3,36\} & \frac{3}{4} & \frac{1}{4} & \frac{1}{3} &
0.333333 \\
\{6,8\} & \frac{3}{4 \sqrt{2}} & \frac{1}{2} & \frac{2
\sqrt{2}}{3} & 0.942809 \\
\{6,15\} & \frac{1}{3} & \frac{1}{4} & \frac{3}{4} & 0.75
\\
\{8,9\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,20\} & \frac{\sqrt{3}}{2} & \frac{\sqrt{3}}{8} &
\frac{1}{4} & 0.25 \\
\{8,22\} & \frac{3 \sqrt{3}}{8} & 0 & 0 & 0. \\
\{8,31\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{8} &
\frac{1}{3} & 0.333333 \\
\{8,36\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,42\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,51\} & \frac{3 \sqrt{3}}{8} & \frac{\sqrt{3}}{4} &
\frac{2}{3} & 0.666667 \\
\{8,53\} & \frac{\sqrt{3}}{2} & \frac{\sqrt{3}}{8} &
\frac{1}{4} & 0.25 \\
\{9,15\} & \frac{1}{\sqrt{3}} & \frac{1}{2} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{15,22\} & \frac{5}{2 \sqrt{6}} & \frac{1}{2 \sqrt{6}} &
\frac{1}{5} & 0.2 \\
\{15,29\} & \sqrt{\frac{2}{3}} &
\frac{\sqrt{\frac{3}{2}}}{2} & \frac{3}{4} & 0.75 \\
\{18,24\} & \frac{5}{8} & \frac{1}{2} & \frac{4}{5} & 0.8
\\
\{18,35\} & \frac{3}{10} & \frac{1}{4} & \frac{5}{6} &
0.833333 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{20,35\} & \frac{3}{5} & \sqrt{\frac{3}{10}} &
\sqrt{\frac{5}{6}} & 0.912871 \\
\{20,48\} & \frac{7}{24} & \frac{1}{4} & \frac{6}{7} &
0.857143 \\
\{22,63\} & \frac{2}{7} & \frac{1}{4} & \frac{7}{8} &
0.875 \\
\{24,25\} & \frac{5 \sqrt{\frac{5}{2}}}{8} &
\frac{\sqrt{\frac{5}{2}}}{2} & \frac{4}{5} & 0.8 \\
\{24,46\} & \frac{5 \sqrt{\frac{5}{2}}}{8} &
\frac{\sqrt{\frac{5}{2}}}{2} & \frac{4}{5} & 0.8 \\
\{24,57\} & \frac{3 \sqrt{\frac{5}{2}}}{4} &
\frac{\sqrt{\frac{5}{2}}}{8} & \frac{1}{6} & 0.166667
\\
\{25,35\} & \frac{3}{2 \sqrt{5}} & \frac{1}{2} &
\frac{\sqrt{5}}{3} & 0.745356 \\
\{31,35\} & \frac{3}{2 \sqrt{5}} & \frac{1}{2} &
\frac{\sqrt{5}}{3} & 0.745356 \\
\{31,48\} & \frac{7}{12} & \frac{\sqrt{\frac{7}{6}}}{2} &
\sqrt{\frac{6}{7}} & 0.92582 \\
\{35,36\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{\frac{3}{5}}}{4} & \frac{1}{6} & 0.166667
\\
\{35,38\} & \frac{3 \sqrt{\frac{3}{5}}}{2} & 0 & 0 & 0.
\\
\{35,46\} & \frac{7 \sqrt{\frac{3}{5}}}{4} &
\frac{\sqrt{\frac{3}{5}}}{4} & \frac{1}{7} & 0.142857
\\
\{35,51\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{15}}{4} & \frac{5}{6} & 0.833333 \\
\{35,57\} & \frac{3 \sqrt{\frac{3}{5}}}{2} &
\frac{\sqrt{15}}{4} & \frac{5}{6} & 0.833333 \\
\{36,48\} & \frac{7}{4 \sqrt{6}} & \frac{1}{2} & \frac{2
\sqrt{6}}{7} & 0.699854 \\
\{42,48\} & \frac{7}{4 \sqrt{6}} & \frac{1}{2} & \frac{2
\sqrt{6}}{7} & 0.699854 \\
\{48,49\} & \frac{7 \sqrt{\frac{7}{3}}}{8} & 0 & 0 & 0.
\\
\{48,51\} & \frac{7 \sqrt{\frac{7}{3}}}{8} &
\frac{\sqrt{\frac{7}{3}}}{8} & \frac{1}{7} & 0.142857
\\
\{48,53\} & \frac{7 \sqrt{\frac{7}{3}}}{8} &
\frac{\sqrt{21}}{4} & \frac{6}{7} & 0.857143 \\
\{49,63\} & \frac{2}{\sqrt{7}} & \frac{1}{2} &
\frac{\sqrt{7}}{4} & 0.661438 \end{array} \right). \end{displaymath} \subsubsection{Length of Triseparability-Nontriseparability {\it Interior} Boundary} In the following array, we present the HS-length of the common (interior) border separating the triseparable states from the non-triseparable ones for each specific scenario. \begin{equation} \left( \begin{array}{ll}
\{1,20\} & 1 \\
\{3,6\} & \frac{3}{4} \\
\{3,20\} & \frac{3}{4} \\
\{3,22\} & \frac{1}{2} \\
\{3,36\} & \frac{3}{4} \\
\{6,8\} & \frac{3}{2} \\
\{6,15\} & \frac{5}{4} \\
\{8,9\} & \frac{\sqrt{3}}{2} \\
\{8,20\} & \frac{5 \sqrt{3}}{8} \\
\{8,22\} & \frac{5 \sqrt{3}}{8} \\
\{8,31\} & \frac{5 \sqrt{3}}{8} \\
\{8,36\} & \frac{\sqrt{3}}{2} \\
\{8,42\} & \frac{\sqrt{3}}{2} \\
\{8,51\} & \frac{\sqrt{3}}{2} \\
\{8,53\} & \frac{5 \sqrt{3}}{8} \\
\{9,15\} & 1 \\
\{15,22\} & \frac{3 \sqrt{\frac{3}{2}}}{2} \\
\{15,29\} & \frac{7}{2 \sqrt{6}} \\
\{18,24\} & \frac{3}{2} \\
\{18,35\} & \frac{5}{4} \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{ll}
\{20,35\} & 3 \sqrt{\frac{3}{10}} \\
\{20,48\} & \frac{5}{4} \\
\{22,63\} & 1 \\
\{24,25\} & \frac{3 \sqrt{\frac{5}{2}}}{4} \\
\{24,46\} & \frac{3 \sqrt{\frac{5}{2}}}{4} \\
\{24,57\} & \frac{9 \sqrt{\frac{5}{2}}}{8} \\
\{25,35\} & 1 \\
\{31,35\} & \frac{3}{2} \\
\{31,48\} & \frac{\sqrt{\frac{21}{2}}}{2} \\
\{35,36\} & \frac{11 \sqrt{\frac{3}{5}}}{4} \\
\{35,38\} & \frac{3 \sqrt{\frac{3}{5}}}{2} \\
\{35,46\} & \frac{11 \sqrt{\frac{3}{5}}}{4} \\
\{35,51\} & \frac{7 \sqrt{\frac{3}{5}}}{4} \\
\{35,57\} & \frac{7 \sqrt{\frac{3}{5}}}{4} \\
\{36,48\} & 1 \\
\{42,48\} & \frac{3}{2} \\
\{48,49\} & \frac{13 \sqrt{\frac{7}{3}}}{8} \\
\{48,51\} & \frac{13 \sqrt{\frac{7}{3}}}{8} \\
\{48,53\} & \sqrt{\frac{7}{3}} \\
\{49,63\} & \frac{3}{4} \end{array} \right). \end{displaymath}
\section{The qutrit-qutrit case $n=9$} \label{secN=9} Here we only have --- since $9=3^2$ --- {\it one} option available for computing the partial transpose, that is transposing in place the nine $3 \times 3$ blocks of the $9 \times 9$ density matrices. We obtained the results: \begin{equation} \label{n=9case} \left( \begin{array}{lllll}
\{1,13\} & 360 & \frac{16}{81} & \frac{\pi }{4} &
0.785398 \\
\{3,11\} & 8 & \frac{32 \sqrt{2}}{243} &
\frac{1}{\sqrt{2}} & 0.707107 \\
\{3,13\} & 8 & \frac{16}{81} & \frac{5}{6} & 0.833333 \\
\{3,25\} & 8 & \frac{32 \sqrt{2}}{243} & \frac{5}{4
\sqrt{2}} & 0.883883 \\
\{8,13\} & 8 & \frac{8}{27} & \frac{1}{\sqrt{3}} &
0.577350 \\
\{8,25\} & 8 & \frac{4 \sqrt{2}}{27} & \sqrt{\frac{2}{3}}
& 0.816497 \\
\{11,15\} & 4 & \frac{16 \sqrt{2} \pi }{243} &
\frac{1}{3}+\frac{3 \sqrt{3}}{4 \pi } & 0.746830 \\
\{11,24\} & 2 & \frac{25 \sqrt{\frac{5}{2}}}{162} &
\frac{2}{5}+\frac{1}{2} \sin
^{-1}\left(\frac{4}{5}\right) & 0.863648 \\
\{13,24\} & 2 & \frac{25 \sqrt{\frac{5}{2}}}{162} &
\frac{8}{75} \left(-2+5 \sqrt{5}\right) & 0.979236 \\
\{13,35\} & 4 & \frac{16}{15 \sqrt{15}} & \frac{1}{12}
\left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838
\\
\{15,16\} & 4 & \frac{128 \sqrt{2}}{729} & \frac{1}{32}
\left(9 \sqrt{3}+4 \pi \right) & 0.879838 \\
\{15,44\} & 4 & \frac{32 \sqrt{\frac{2}{3}}}{81} &
\frac{11}{12} & 0.916667 \\
\{15,55\} & 4 & \frac{128 \sqrt{\frac{2}{3}}}{243} &
\frac{11}{16} & 0.687500 \\
\{16,24\} & 2 & \frac{25}{324} \sqrt{\frac{5}{2}} \pi &
\frac{4+5 \sin ^{-1}\left(\frac{4}{5}\right)}{5 \pi } &
0.549815 \\
\{20,24\} & 2 & \frac{25}{324} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{24,25\} & 2 & \frac{50 \sqrt{2}}{243} & 1-\frac{2}{5
\sqrt{5}} & 0.821115 \\
\{24,27\} & 2 & \frac{50 \sqrt{2}}{243} & \frac{92+75
\cos ^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} &
0.903076 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{24,44\} & 2 & \frac{100 \sqrt{2}}{243} &
\frac{1}{\sqrt{5}} & 0.447214 \\
\{24,46\} & 2 & \frac{10 \sqrt{10}}{81} & \frac{11}{15} &
0.733333 \\
\{24,55\} & 2 & \frac{50 \sqrt{2}}{243} &
\frac{2}{\sqrt{5}} & 0.894427 \\
\{24,59\} & 2 & \frac{10 \sqrt{10}}{81} & \frac{14}{15} &
0.933333 \\
\{24,70\} & 2 & \frac{50 \sqrt{2}}{243} & \frac{11}{5
\sqrt{5}} & 0.983870 \\
\{24,72\} & 2 & \frac{100 \sqrt{2}}{243} & \frac{7}{5
\sqrt{5}} & 0.626099 \\
\{25,35\} & 4 & \frac{4 \pi }{15 \sqrt{3}} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \\
\{35,46\} & 4 & \frac{32 \sqrt{\frac{2}{5}}}{27} &
\frac{1}{\sqrt{6}} & 0.408248 \\
\{35,70\} & 4 & \frac{32 \sqrt{2}}{135} &
\sqrt{\frac{5}{6}} & 0.912871 \\
\{38,48\} & 8 & \frac{49}{486} \sqrt{\frac{7}{2}} \pi &
\frac{22 \sqrt{6}+49 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{49 \pi } & 0.596820 \\
\{38,63\} & 8 & \frac{512}{567 \sqrt{7}} & \frac{1}{16}
\left(7+4 \sqrt{7} \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)\right) & 0.915544
\\
\{40,63\} & 8 & \frac{512}{567 \sqrt{7}} &
-\frac{49}{192}+\frac{\sqrt{14}}{3} & 0.992011 \\
\{48,49\} & 8 & \frac{196 \sqrt{2}}{729} & \frac{3
\left(22 \sqrt{6}+49 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{112 \sqrt{7}} &
0.930129 \\
\{49,63\} & 8 & \frac{128 \pi }{567} & \frac{\sqrt{7}+4
\csc ^{-1}\left(\frac{4}{\sqrt{7}}\right)}{4 \pi } &
0.440596 \\
\{53,63\} & 8 & \frac{128 \pi }{567} & \frac{19
\sqrt{7}+48 \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)}{48 \pi } &
0.563412 \\
\{63,64\} & 8 & \frac{512 \sqrt{2}}{1701} & 1-\frac{7
\sqrt{\frac{7}{2}}}{64} & 0.795378 \\
\{63,66\} & 8 & \frac{512 \sqrt{2}}{1701} & \frac{19
\sqrt{7}+48 \cos ^{-1}\left(\frac{3}{4}\right)}{64
\sqrt{2}} & 0.938690 \end{array} \right). \end{displaymath} \subsection{Boundary States} Here, we have for the one-dimensional sets of (exterior) boundary states the results \begin{equation} \left( \begin{array}{lllll}
\{1,13\} & \frac{4}{9} & 0 & 0 & 0. \\
\{3,11\} & \frac{2}{3} & \frac{2}{9} & \frac{1}{3} &
0.333333 \\
\{3,13\} & \frac{4}{9} & \frac{2}{9} & \frac{1}{2} & 0.5
\\
\{3,25\} & \frac{2}{3} & \frac{2}{9} & \frac{1}{3} &
0.333333 \\
\{8,13\} & \frac{4}{3 \sqrt{3}} & \frac{1}{3 \sqrt{3}} &
\frac{1}{4} & 0.25 \\
\{8,25\} & \frac{1}{\sqrt{3}} & \frac{2}{3 \sqrt{3}} &
\frac{2}{3} & 0.666667 \\
\{11,15\} & \frac{8}{9 \sqrt{3}} & \frac{4}{9} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{11,24\} & \frac{5}{18} & \frac{2}{9} & \frac{4}{5} &
0.8 \\
\{13,24\} & \frac{5}{9} & \frac{2 \sqrt{5}}{9} &
\frac{2}{\sqrt{5}} & 0.894427 \\
\{13,35\} & \frac{4}{15} & \frac{2}{9} & \frac{5}{6} &
0.833333 \\
\{15,16\} & \frac{8 \sqrt{\frac{2}{3}}}{9} & \frac{2
\sqrt{\frac{2}{3}}}{3} & \frac{3}{4} & 0.75 \\
\{15,44\} & \frac{8 \sqrt{\frac{2}{3}}}{9} & \frac{2
\sqrt{\frac{2}{3}}}{3} & \frac{3}{4} & 0.75 \\
\{15,55\} & \frac{10 \sqrt{\frac{2}{3}}}{9} & \frac{2
\sqrt{\frac{2}{3}}}{9} & \frac{1}{5} & 0.2 \\
\{16,24\} & \frac{5}{9} & \frac{4}{9} & \frac{4}{5} & 0.8
\\
\{20,24\} & \frac{5}{9} & \frac{4}{9} & \frac{4}{5} & 0.8
\\
\{24,25\} & \frac{5 \sqrt{\frac{5}{2}}}{9} &
\frac{\sqrt{\frac{5}{2}}}{9} & \frac{1}{5} & 0.2 \\
\{24,27\} & \frac{5 \sqrt{\frac{5}{2}}}{9} & \frac{2
\sqrt{10}}{9} & \frac{4}{5} & 0.8 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{24,44\} & \frac{\sqrt{10}}{3} &
\frac{\sqrt{\frac{5}{2}}}{9} & \frac{1}{6} & 0.166667
\\
\{24,46\} & \frac{5 \sqrt{\frac{5}{2}}}{9} &
\frac{\sqrt{\frac{5}{2}}}{9} & \frac{1}{5} & 0.2 \\
\{24,55\} & \frac{5 \sqrt{\frac{5}{2}}}{9} & \frac{2
\sqrt{10}}{9} & \frac{4}{5} & 0.8 \\
\{24,59\} & \frac{5 \sqrt{\frac{5}{2}}}{9} & \frac{2
\sqrt{10}}{9} & \frac{4}{5} & 0.8 \\
\{24,70\} & \frac{5 \sqrt{\frac{5}{2}}}{9} & \frac{2
\sqrt{10}}{9} & \frac{4}{5} & 0.8 \\
\{24,72\} & \frac{\sqrt{10}}{3} &
\frac{\sqrt{\frac{5}{2}}}{9} & \frac{1}{6} & 0.166667
\\
\{25,35\} & \frac{4}{3 \sqrt{5}} & \frac{4}{9} &
\frac{\sqrt{5}}{3} & 0.745356 \\
\{35,46\} & \frac{14}{3 \sqrt{15}} & \frac{2}{3
\sqrt{15}} & \frac{1}{7} & 0.142857 \\
\{35,70\} & \frac{4}{\sqrt{15}} & \frac{2
\sqrt{\frac{5}{3}}}{3} & \frac{5}{6} & 0.833333 \\
\{38,48\} & \frac{7 \sqrt{\frac{2}{3}}}{9} & \frac{4}{9}
& \frac{2 \sqrt{6}}{7} & 0.699854 \\
\{38,63\} & \frac{16}{63} & \frac{2}{9} & \frac{7}{8} &
0.875 \\
\{40,63\} & \frac{32}{63} & \frac{8
\sqrt{\frac{2}{7}}}{9} & \frac{\sqrt{\frac{7}{2}}}{2} &
0.935414 \\
\{48,49\} & \frac{7 \sqrt{\frac{7}{3}}}{9} & \frac{2
\sqrt{\frac{7}{3}}}{3} & \frac{6}{7} & 0.857143 \\
\{49,63\} & \frac{16}{9 \sqrt{7}} & \frac{4}{9} &
\frac{\sqrt{7}}{4} & 0.661438 \\
\{53,63\} & \frac{16}{9 \sqrt{7}} & \frac{4}{9} &
\frac{\sqrt{7}}{4} & 0.661438 \\
\{63,64\} & \frac{32}{9 \sqrt{7}} & \frac{4}{9 \sqrt{7}}
& \frac{1}{8} & 0.125 \\
\{63,66\} & \frac{32}{9 \sqrt{7}} & \frac{4 \sqrt{7}}{9}
& \frac{7}{8} & 0.875 \end{array} \right). \end{displaymath} \subsection{Length of Separability-Nonseparability {\it Interior} Boundary} In the following arrays, we present the HS-length (and now a numerical approximation to it) of the common border separating the states lacking a PPT from those that possess a PPT for each specific scenario. \begin{equation} \left( \begin{array}{lll}
\{1,13\} & \frac{4}{9} & 0.444444 \\
\{3,11\} & \frac{4}{9 \sqrt{3}} & 0.2566 \\
\{3,13\} & \frac{4}{9} & 0.444444 \\
\{3,25\} & \frac{4}{9 \sqrt{3}} & 0.2566 \\
\{8,13\} & \frac{2}{27} \left(-2 \sqrt{3}+3
\sqrt{6}\right) & 0.287731 \\
\{8,25\} & \frac{1}{297} \left(11 \sqrt{3}+12
\sqrt{33}\right) & 0.296254 \\
\{11,15\} & \frac{4}{9} & 0.444444 \\
\{11,24\} & \frac{4}{11} & 0.363636 \\
\{13,24\} & \frac{4}{11} & 0.363636 \\
\{13,35\} & \frac{7}{18} & 0.388889 \\
\{15,16\} & \frac{1}{621} \left(23 \sqrt{6}+12
\sqrt{138}\right) & 0.317724 \\
\{15,44\} & \frac{2}{27} \left(3+\sqrt{6}\right) &
0.403666 \\
\{15,55\} & \frac{1}{45} \left(-5 \sqrt{6}+8
\sqrt{10}\right) & 0.290017 \\
\{16,24\} & \frac{4}{9} & 0.444444 \\
\{20,24\} & \frac{4}{9} & 0.444444 \\
\{24,25\} & \frac{117 \sqrt{10}+40 \sqrt{390}}{3510} &
0.330462 \\
\{24,27\} & \frac{117 \sqrt{10}+40 \sqrt{390}}{3510} &
0.330462 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lll}
\{24,44\} & -\frac{4}{135} \left(3 \sqrt{10}-5
\sqrt{15}\right) & 0.292684 \\
\{24,46\} & \frac{1}{18} \left(4+\sqrt{10}\right) &
0.397904 \\
\{24,55\} & \frac{117 \sqrt{10}+40 \sqrt{390}}{3510} &
0.330462 \\
\{24,59\} & \frac{1}{18} \left(4+\sqrt{10}\right) &
0.397904 \\
\{24,70\} & \frac{117 \sqrt{10}+40 \sqrt{390}}{3510} &
0.330462 \\
\{24,72\} & -\frac{4}{135} \left(3 \sqrt{10}-5
\sqrt{15}\right) & 0.292684 \\
\{25,35\} & \frac{4}{9} & 0.444444 \\
\{35,46\} & -\frac{2}{189} \left(7 \sqrt{15}-12
\sqrt{21}\right) & 0.295027 \\
\{35,70\} & \frac{4 \left(59 \sqrt{15}+15
\sqrt{885}\right)}{7965} & 0.338853 \\
\{38,48\} & \frac{4}{9} & 0.444444 \\
\{38,63\} & \frac{12}{29} & 0.413793 \\
\{40,63\} & \frac{12}{29} & 0.413793 \\
\{48,49\} & \frac{415 \sqrt{21}+84 \sqrt{1743}}{15687} &
0.344789 \\
\{49,63\} & \frac{4}{9} & 0.444444 \\
\{53,63\} & \frac{4}{9} & 0.444444 \\
\{63,64\} & \frac{333 \sqrt{7}+56 \sqrt{777}}{6993} &
0.349209 \\
\{63,66\} & \frac{333 \sqrt{7}+56 \sqrt{777}}{6993} &
0.349209 \end{array} \right). \end{displaymath} \section{The case $n=10$} \label{secN=10} \subsection{$5 \times 2$ Decomposition} We first compute the partial transpose by transposing in place the ($2^2$) four $5 \times 5$ blocks of our set of two-dimensional $10 \times 10$ density matrices. Our analysis yielded for the HS-total volumes and PPT-probabilities \begin{equation} \label{first10} \left( \begin{array}{lllll}
\{1,29\} & 480 & \frac{4}{25} & \frac{\pi }{4} & 0.785398
\\
\{3,27\} & 4 & \frac{8 \sqrt{2}}{75} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{3,29\} & 12 & \frac{4}{25} & \frac{5}{6} & 0.833333 \\
\{3,49\} & 12 & \frac{8 \sqrt{2}}{75} & \frac{5}{4
\sqrt{2}} & 0.883883 \\
\{8,29\} & 4 & \frac{6}{25} & \frac{1}{\sqrt{3}} &
0.577350 \\
\{8,31\} & 8 & \frac{3 \sqrt{3}}{25} & \frac{7}{9} &
0.777778 \\
\{8,49\} & 4 & \frac{3 \sqrt{2}}{25} & \sqrt{\frac{2}{3}}
& 0.816497 \\
\{8,55\} & 4 & \frac{3 \sqrt{3}}{25} & \frac{8}{9} &
0.888889 \\
\{8,64\} & 8 & \frac{3 \sqrt{2}}{25} & \frac{7}{3
\sqrt{6}} & 0.952579 \\
\{8,68\} & 4 & \frac{6}{25} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{15,31\} & 6 & \frac{32 \sqrt{\frac{2}{3}}}{75} &
\frac{1}{2} & 0.500000 \\
\{15,33\} & 6 & \frac{8 \sqrt{\frac{2}{3}}}{25} &
\frac{3}{4} & 0.750000 \\
\{15,64\} & 6 & \frac{32 \sqrt{2}}{225} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{15,72\} & 2 & \frac{8 \sqrt{\frac{2}{3}}}{25} &
\frac{11}{12} & 0.916667 \\
\{15,81\} & 6 & \frac{32 \sqrt{2}}{225} & \frac{9
\sqrt{3}}{16} & 0.974279 \\
\{15,87\} & 2 & \frac{32 \sqrt{\frac{2}{3}}}{75} &
\frac{11}{16} & 0.687500 \\
\{24,33\} & 8 & \frac{\sqrt{2}}{3} & \frac{1}{\sqrt{5}} &
0.447214 \\
\{24,81\} & 8 & \frac{1}{3 \sqrt{2}} & \frac{2}{\sqrt{5}}
& 0.894427 \\
\{27,35\} & 8 & \frac{9 \sqrt{3} \pi }{125} & \frac{14
\sqrt{5}+27 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{27 \pi } &
0.636783 \\
\{27,48\} & 2 & \frac{49 \sqrt{\frac{7}{3}}}{300} &
\frac{1}{28} \left(12+7 \sqrt{6} \sin
^{-1}\left(\frac{2 \sqrt{6}}{7}\right)\right) &
0.903278 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{29,48\} & 6 & \frac{49 \sqrt{\frac{7}{3}}}{300} &
\frac{4}{147} \left(-9+7 \sqrt{42}\right) & 0.989529 \\
\{29,63\} & 4 & \frac{128}{175 \sqrt{7}} & \frac{1}{16}
\left(7+4 \sqrt{7} \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)\right) & 0.915544
\\
\{31,63\} & 8 & \frac{128}{175 \sqrt{7}} &
-\frac{49}{192}+\frac{\sqrt{14}}{3} & 0.992011 \\
\{31,80\} & 6 & \frac{243}{800} & \frac{4}{9}+\frac{\sin
^{-1}\left(\frac{4 \sqrt{2}}{9}\right)}{\sqrt{2}} &
0.925046 \\
\{33,80\} & 6 & \frac{243}{800} & \frac{8}{243}
\left(-8+27 \sqrt{2}\right) & 0.993704 \\
\{33,99\} & 8 & \frac{4 \sqrt{5}}{27} & \frac{3}{20}
\left(3+5 \sin ^{-1}\left(\frac{3}{5}\right)\right) &
0.932626 \\
\{35,36\} & 8 & \frac{24 \sqrt{2}}{125} & \frac{14
\sqrt{5}+27 \cos ^{-1}\left(\frac{2}{3}\right)}{24
\sqrt{6}} & 0.918793 \\
\{36,48\} & 2 & \frac{49}{600} \sqrt{\frac{7}{2}} \pi &
\frac{2 \sqrt{6}+7 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{7 \pi } & 0.469522 \\
\{40,48\} & 6 & \frac{49}{600} \sqrt{\frac{7}{2}} \pi &
\frac{22 \sqrt{6}+49 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{49 \pi } & 0.596820 \\
\{48,49\} & 6 & \frac{49 \sqrt{2}}{225} & 1-\frac{3
\sqrt{\frac{3}{14}}}{7} & 0.801610 \\
\{48,51\} & 6 & \frac{49 \sqrt{2}}{225} & \frac{3
\left(22 \sqrt{6}+49 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{112 \sqrt{7}} &
0.930129 \\
\{49,63\} & 4 & \frac{32 \pi }{175} & \frac{\sqrt{7}+4
\csc ^{-1}\left(\frac{4}{\sqrt{7}}\right)}{4 \pi } &
0.440596 \\
\{55,63\} & 4 & \frac{32 \pi }{175} & \frac{19
\sqrt{7}+48 \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)}{48 \pi } &
0.563412 \\
\{63,64\} & 8 & \frac{128 \sqrt{2}}{525} & 1-\frac{7
\sqrt{\frac{7}{2}}}{64} & 0.795378 \\
\{63,68\} & 4 & \frac{128 \sqrt{2}}{525} & \frac{19
\sqrt{7}+48 \cos ^{-1}\left(\frac{3}{4}\right)}{64
\sqrt{2}} & 0.938690 \\
\{64,80\} & 6 & \frac{243 \pi }{800 \sqrt{2}} & \frac{4
\sqrt{2}+9 \sin ^{-1}\left(\frac{4
\sqrt{2}}{9}\right)}{9 \pi } & 0.416417 \\
\{72,80\} & 2 & \frac{243 \pi }{800 \sqrt{2}} &
\frac{\frac{172 \sqrt{2}}{243}+\sin ^{-1}\left(\frac{4
\sqrt{2}}{9}\right)}{\pi } & 0.534977 \\
\{80,81\} & 6 & \frac{27}{50 \sqrt{2}} & 1-\frac{4
\sqrt{2}}{27} & 0.790487 \\
\{80,87\} & 2 & \frac{27}{50 \sqrt{2}} & \frac{43}{54
\sqrt{2}}+\frac{9}{16} \cos
^{-1}\left(\frac{7}{9}\right) & 0.945383 \\
\{81,99\} & 8 & \frac{\sqrt{5} \pi }{9} & \frac{3+5 \sin
^{-1}\left(\frac{3}{5}\right)}{5 \pi } & 0.395819 \end{array} \right). \end{displaymath} \subsection{$2 \times 5$ Decomposition} Now, we compute the partial transpose by transposing in place the ($5^2$) twenty-five $2 \times 2$ blocks of the $10 \times 10$ density matrices. \begin{equation} \left( \begin{array}{lllll}
\{3,6\} & 16 & \frac{8 \sqrt{2}}{75} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{4,18\} & 480 & \frac{4}{25} & \frac{\pi }{4} & 0.785398
\\
\{6,8\} & 2 & \frac{9}{200} \sqrt{\frac{3}{2}} \pi &
\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2
\sqrt{2}\right)}{27 \pi } & 0.825312 \\
\{6,15\} & 2 & \frac{16 \sqrt{\frac{2}{3}}}{75} &
\frac{1}{24} \left(9+2 \sqrt{3} \pi \right) & 0.828450
\\
\{8,9\} & 2 & \frac{3 \sqrt{2}}{25} & \frac{52+27
\sqrt{2} \sec ^{-1}(3)}{48 \sqrt{6}} & 0.842035 \\
\{8,22\} & 6 & \frac{3 \sqrt{3}}{25} & \frac{8}{9} &
0.888889 \\
\{8,29\} & 6 & \frac{6}{25} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{9,15\} & 2 & \frac{4 \sqrt{2} \pi }{75} &
\frac{1}{3}+\frac{\sqrt{3}}{2 \pi } & 0.608998 \\
\{15,22\} & 6 & \frac{32 \sqrt{\frac{2}{3}}}{75} &
\frac{1}{2} & 0.500000 \\
\{15,29\} & 6 & \frac{32 \sqrt{2}}{225} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{18,24\} & 4 & \frac{1}{16} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{18,35\} & 4 & \frac{36 \sqrt{\frac{3}{5}}}{125} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838
\\
\{24,25\} & 4 & \frac{1}{3 \sqrt{2}} & \frac{92+75 \cos
^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} & 0.903076
\\
\{24,46\} & 4 & \frac{1}{\sqrt{10}} & \frac{14}{15} &
0.933333 \\
\{24,57\} & 4 & \frac{\sqrt{2}}{3} & \frac{7}{5 \sqrt{5}}
& 0.626099 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{25,35\} & 4 & \frac{9 \sqrt{3} \pi }{125} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \\
\{35,46\} & 4 & \frac{24 \sqrt{\frac{2}{5}}}{25} &
\frac{1}{\sqrt{6}} & 0.408248 \\
\{35,57\} & 4 & \frac{24 \sqrt{2}}{125} &
\sqrt{\frac{5}{6}} & 0.912871 \\
\{38,48\} & 6 & \frac{49}{600} \sqrt{\frac{7}{2}} \pi &
\frac{22 \sqrt{6}+49 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{49 \pi } & 0.596820 \\
\{38,63\} & 6 & \frac{128}{175 \sqrt{7}} & \frac{1}{16}
\left(7+4 \sqrt{7} \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)\right) & 0.915544
\\
\{48,49\} & 6 & \frac{49 \sqrt{2}}{225} & \frac{3
\left(22 \sqrt{6}+49 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{112 \sqrt{7}} &
0.930129 \\
\{48,78\} & 2 & \frac{7 \sqrt{\frac{7}{3}}}{25} &
\frac{20}{21} & 0.952381 \\
\{48,93\} & 2 & \frac{98}{75 \sqrt{3}} & \frac{10}{7
\sqrt{7}} & 0.539949 \\
\{49,63\} & 6 & \frac{32 \pi }{175} & \frac{\sqrt{7}+4
\csc ^{-1}\left(\frac{4}{\sqrt{7}}\right)}{4 \pi } &
0.440596 \\
\{63,78\} & 2 & \frac{128 \sqrt{\frac{2}{7}}}{75} &
\frac{1}{2 \sqrt{2}} & 0.353553 \\
\{63,93\} & 2 & \frac{128 \sqrt{2}}{525} &
\frac{\sqrt{\frac{7}{2}}}{2} & 0.935414 \\
\{66,80\} & 8 & \frac{243 \pi }{800 \sqrt{2}} &
\frac{\frac{172 \sqrt{2}}{243}+\sin ^{-1}\left(\frac{4
\sqrt{2}}{9}\right)}{\pi } & 0.534977 \\
\{66,99\} & 8 & \frac{4 \sqrt{5}}{27} & \frac{3}{20}
\left(3+5 \sin ^{-1}\left(\frac{3}{5}\right)\right) &
0.932626 \\
\{80,81\} & 8 & \frac{27}{50 \sqrt{2}} & \frac{43}{54
\sqrt{2}}+\frac{9}{16} \cos
^{-1}\left(\frac{7}{9}\right) & 0.945383 \\
\{81,99\} & 8 & \frac{\sqrt{5} \pi }{9} & \frac{3+5 \sin
^{-1}\left(\frac{3}{5}\right)}{5 \pi } & 0.395819 \end{array} \right). \end{displaymath} \subsection{Bi-PPT} Now, we require that the $10 \times 10$ density matrices be positive under {\it both} the forms of partial transposition employed immediately above. We obtained the (extensive) results \begin{equation} \left( \begin{array}{lllll}
\{1,29\} & 904 & \frac{4}{25} & \frac{\pi }{4} & 0.785398
\\
\{3,6\} & 20 & \frac{8 \sqrt{2}}{75} & \frac{1}{\sqrt{2}}
& 0.707107 \\
\{3,29\} & 12 & \frac{4}{25} & \frac{5}{6} & 0.833333 \\
\{3,51\} & 6 & \frac{8 \sqrt{2}}{75} & \frac{5}{4
\sqrt{2}} & 0.883883 \\
\{6,8\} & 2 & \frac{9}{200} \sqrt{\frac{3}{2}} \pi &
\frac{26 \sqrt{2}+27 \tan ^{-1}\left(2
\sqrt{2}\right)}{27 \pi } & 0.825312 \\
\{6,15\} & 2 & \frac{16 \sqrt{\frac{2}{3}}}{75} &
\frac{1}{24} \left(9+2 \sqrt{3} \pi \right) & 0.828450
\\
\{8,9\} & 2 & \frac{3 \sqrt{2}}{25} & \frac{52+27
\sqrt{2} \sec ^{-1}(3)}{48 \sqrt{6}} & 0.842035 \\
\{8,22\} & 8 & \frac{3 \sqrt{3}}{25} & \frac{8}{9} &
0.888889 \\
\{8,29\} & 4 & \frac{6}{25} & \frac{1}{\sqrt{3}} &
0.577350 \\
\{8,31\} & 6 & \frac{3 \sqrt{3}}{25} & \frac{7}{9} &
0.777778 \\
\{8,42\} & 2 & \frac{3 \sqrt{3}}{25} & \frac{2}{3} &
0.666667 \\
\{8,49\} & 4 & \frac{3 \sqrt{2}}{25} & \sqrt{\frac{2}{3}}
& 0.816497 \\
\{8,53\} & 6 & \frac{6}{25} & \frac{4}{3 \sqrt{3}} &
0.769800 \\
\{8,64\} & 8 & \frac{3 \sqrt{2}}{25} & \frac{7}{3
\sqrt{6}} & 0.952579 \\
\{9,15\} & 2 & \frac{4 \sqrt{2} \pi }{75} &
\frac{1}{3}+\frac{\sqrt{3}}{2 \pi } & 0.608998 \\
\{15,22\} & 10 & \frac{32 \sqrt{\frac{2}{3}}}{75} &
\frac{1}{2} & 0.500000 \\
\{15,29\} & 12 & \frac{32 \sqrt{2}}{225} &
\frac{\sqrt{3}}{2} & 0.866025 \\
\{15,33\} & 6 & \frac{8 \sqrt{\frac{2}{3}}}{25} &
\frac{3}{4} & 0.750000 \\
\{15,72\} & 2 & \frac{8 \sqrt{\frac{2}{3}}}{25} &
\frac{11}{12} & 0.916667 \\
\{15,81\} & 4 & \frac{32 \sqrt{2}}{225} & \frac{9
\sqrt{3}}{16} & 0.974279 \end{array} \right) \end{equation} \begin{displaymath} \left( \begin{array}{lllll}
\{15,87\} & 2 & \frac{32 \sqrt{\frac{2}{3}}}{75} &
\frac{11}{16} & 0.687500 \\
\{18,24\} & 4 & \frac{1}{16} \sqrt{\frac{5}{2}} \pi &
\frac{92+75 \sin ^{-1}\left(\frac{4}{5}\right)}{75 \pi
} & 0.685627 \\
\{18,35\} & 4 & \frac{36 \sqrt{\frac{3}{5}}}{125} &
\frac{1}{12} \left(5+3 \sqrt{5} \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)\right) & 0.886838
\\
\{24,25\} & 4 & \frac{1}{3 \sqrt{2}} & \frac{92+75 \cos
^{-1}\left(\frac{3}{5}\right)}{80 \sqrt{5}} & 0.903076
\\
\{24,33\} & 8 & \frac{\sqrt{2}}{3} & \frac{1}{\sqrt{5}} &
0.447214 \\
\{24,46\} & 4 & \frac{1}{\sqrt{10}} & \frac{14}{15} &
0.933333 \\
\{24,81\} & 8 & \frac{1}{3 \sqrt{2}} & \frac{2}{\sqrt{5}}
& 0.894427 \\
\{24,89\} & 2 & \frac{\sqrt{2}}{3} & \frac{7}{5 \sqrt{5}}
& 0.626099 \\
\{25,35\} & 4 & \frac{9 \sqrt{3} \pi }{125} &
\frac{\sqrt{5}+3 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{3 \pi } &
0.504975 \\
\{27,35\} & 6 & \frac{9 \sqrt{3} \pi }{125} & \frac{14
\sqrt{5}+27 \csc
^{-1}\left(\frac{3}{\sqrt{5}}\right)}{27 \pi } &
0.636783 \\
\{27,48\} & 2 & \frac{49 \sqrt{\frac{7}{3}}}{300} &
\frac{1}{28} \left(12+7 \sqrt{6} \sin
^{-1}\left(\frac{2 \sqrt{6}}{7}\right)\right) &
0.903278 \\
\{29,48\} & 6 & \frac{49 \sqrt{\frac{7}{3}}}{300} &
\frac{4}{147} \left(-9+7 \sqrt{42}\right) & 0.989529 \\
\{29,63\} & 10 & \frac{128}{175 \sqrt{7}} & \frac{1}{16}
\left(7+4 \sqrt{7} \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)\right) & 0.915544
\\
\{31,63\} & 6 & \frac{128}{175 \sqrt{7}} &
-\frac{49}{192}+\frac{\sqrt{14}}{3} & 0.992011 \\
\{31,80\} & 6 & \frac{243}{800} & \frac{4}{9}+\frac{\sin
^{-1}\left(\frac{4 \sqrt{2}}{9}\right)}{\sqrt{2}} &
0.925046 \\
\{33,80\} & 6 & \frac{243}{800} & \frac{8}{243}
\left(-8+27 \sqrt{2}\right) & 0.993704 \\
\{33,99\} & 16 & \frac{4 \sqrt{5}}{27} & \frac{3}{20}
\left(3+5 \sin ^{-1}\left(\frac{3}{5}\right)\right) &
0.932626 \\
\{35,36\} & 8 & \frac{24 \sqrt{2}}{125} & \frac{14
\sqrt{5}+27 \cos ^{-1}\left(\frac{2}{3}\right)}{24
\sqrt{6}} & 0.918793 \\
\{35,46\} & 4 & \frac{24 \sqrt{\frac{2}{5}}}{25} &
\frac{1}{\sqrt{6}} & 0.408248 \\
\{35,57\} & 4 & \frac{24 \sqrt{2}}{125} &
\sqrt{\frac{5}{6}} & 0.912871 \end{array} \right) \end{displaymath} \begin{displaymath} \left( \begin{array}{lllll}
\{36,48\} & 2 & \frac{49}{600} \sqrt{\frac{7}{2}} \pi &
\frac{2 \sqrt{6}+7 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{7 \pi } & 0.469522 \\
\{38,48\} & 10 & \frac{49}{600} \sqrt{\frac{7}{2}} \pi &
\frac{22 \sqrt{6}+49 \sin ^{-1}\left(\frac{2
\sqrt{6}}{7}\right)}{49 \pi } & 0.596820 \\
\{48,49\} & 2 & \frac{49 \sqrt{2}}{225} & \frac{3 \left(2
\sqrt{6}+7 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{16 \sqrt{7}} &
0.731739 \\
\{48,51\} & 10 & \frac{49 \sqrt{2}}{225} & \frac{3
\left(22 \sqrt{6}+49 \cos
^{-1}\left(\frac{5}{7}\right)\right)}{112 \sqrt{7}} &
0.930129 \\
\{48,64\} & 4 & \frac{49 \sqrt{2}}{225} & 1-\frac{3
\sqrt{\frac{3}{14}}}{7} & 0.801610 \\
\{48,78\} & 2 & \frac{7 \sqrt{\frac{7}{3}}}{25} &
\frac{20}{21} & 0.952381 \\
\{48,93\} & 2 & \frac{98}{75 \sqrt{3}} & \frac{10}{7
\sqrt{7}} & 0.539949 \\
\{49,63\} & 8 & \frac{32 \pi }{175} & \frac{\sqrt{7}+4
\csc ^{-1}\left(\frac{4}{\sqrt{7}}\right)}{4 \pi } &
0.440596 \\
\{55,63\} & 2 & \frac{32 \pi }{175} & \frac{19
\sqrt{7}+48 \csc
^{-1}\left(\frac{4}{\sqrt{7}}\right)}{48 \pi } &
0.563412 \\
\{63,64\} & 8 & \frac{128 \sqrt{2}}{525} & 1-\frac{7
\sqrt{\frac{7}{2}}}{64} & 0.795378 \\
\{63,68\} & 4 & \frac{128 \sqrt{2}}{525} & \frac{19
\sqrt{7}+48 \cos ^{-1}\left(\frac{3}{4}\right)}{64
\sqrt{2}} & 0.938690 \\
\{63,78\} & 2 & \frac{128 \sqrt{\frac{2}{7}}}{75} &
\frac{1}{2 \sqrt{2}} & 0.353553 \\
\{63,93\} & 2 & \frac{128 \sqrt{2}}{525} &
\frac{\sqrt{\frac{7}{2}}}{2} & 0.935414 \\
\{64,80\} & 6 & \frac{243 \pi }{800 \sqrt{2}} & \frac{4
\sqrt{2}+9 \sin ^{-1}\left(\frac{4
\sqrt{2}}{9}\right)}{9 \pi } & 0.416417 \\
\{70,80\} & 8 & \frac{243 \pi }{800 \sqrt{2}} &
\frac{\frac{172 \sqrt{2}}{243}+\sin ^{-1}\left(\frac{4
\sqrt{2}}{9}\right)}{\pi } & 0.534977 \\
\{80,81\} & 4 & \frac{27}{50 \sqrt{2}} & \frac{1}{16}
\left(4 \sqrt{2}+9 \cos
^{-1}\left(\frac{7}{9}\right)\right) & 0.735870 \\
\{80,83\} & 2 & \frac{27}{50 \sqrt{2}} & 1-\frac{4
\sqrt{2}}{27} & 0.790487 \\
\{80,87\} & 6 & \frac{27}{50 \sqrt{2}} & \frac{43}{54
\sqrt{2}}+\frac{9}{16} \cos
^{-1}\left(\frac{7}{9}\right) & 0.945383 \\
\{81,99\} & 12 & \frac{\sqrt{5} \pi }{9} & \frac{3+5 \sin
^{-1}\left(\frac{3}{5}\right)}{5 \pi } & 0.395819 \end{array} \right). \end{displaymath} The probability 0.993704 (corresponding to the pair of Lie generators numbered $\{33,80 \}$) is the largest of any recorded in all our results above. (This also occurs in (\ref{first10}).) \section{Analyses of Scenarios with {\it More} than Two Parameters} \label{finalSEC} We found it considerably simpler to extend the Jak\'obczyk-Siennicki model \cite{jak} from two-qubit systems ($n=4$) to higher $n$ --- as illustrated above --- than to extend it from two-dimensional sections of Bloch vectors to $m$-dimensional sections ($m \ge 3$), even just for the case $n=4$. (However, we were able to obtain a highly interesting set of {\it exact} HS separability probabilities for certain $m=3,n=4$ systems, using the Jaynes maximum-entropy principle in conjunction with the new integration over implicitly defined regions feature of Mathematica, in \cite[Fig.~11]{slaterjpanew}.) {\it A fortiori}, it appears that the determination of the HS separable volume of the {\it fifteen}-dimensional convex set of $4 \times 4$ density matrices --- conjectured on the basis of an extensive quasi-Monte Carlo analysis to be $(3^3 5^7 \sqrt{3})^{-1} \approx 2.73707 \cdot 10^{-7} $ \cite[eq. (41)]{slaterPRA} --- would have to proceed in some quite different analytic fashion to that pursued here. (Based on our experience in the above-reported analyses, it appears to be a necessary condition for obtaining exact HS separability/PPT-probabilities that explicit formulas be available for the eigenvectors of both the class of density matrices under consideration {\it and} of their partial transposes.) 
\subsection{$m=3,n=4$} We have been able to find, up to this point in time, that for the {\it three}-dimensional two-qubit ($m=3, n=4$) scenarios generated by the four {\it triads} of Gell-Mann matrices $\{1,4,6\}$, $\{1,5,7\}$, $\{2,4,7\}$ and $\{2,5,6\}$, the volume of separable states is --- having to resort to numerical methods --- 0.478512 and of all the (separable and nonseparable/entangled) states, 0.61685, for a separability probability of 0.775734. For the scenario $\{10,12,13\}$, the separable volume remains the same, but the total volume is {\it exactly} $\frac{\pi}{6} \approx 0.523599$ for an HS separability probability of 0.913891. \subsection{Two-Dimensional Boundaries of $m=3,n=4$ Systems} Of course, if we restrict attention to the generic boundary states of the three-dimensional scenarios, we only have to perform two-dimensional computations. Thus, we were able to find that for the {\it triadic} scenarios $\{1,3,6\},\{1,3,7\}, \{1,3,9\}, \{1,3,10\}, \{1,4,9\}$ and $\{1,5,10\}$, amongst others, the HS-area of the states with degenerate spectra is $\frac{\pi}{8}$ and that of the separable component of this area, one-half that value. For the triadic scenarios $\{1,4,6\}$ and $\{1,5,7\}$, the separable component of the boundary states has area $\frac{\pi}{4}$ and the total area is $\frac{1}{2} \Big(\sqrt{5} + 6 \sin^{-1}(\frac{1}{\sqrt{6}})\Big)$ for a separability probability of 0.165025. Also, for several scenarios (for instance, $\{3,4,9\}$), we have a total area of $\frac{2 \sqrt{2}}{3}$, a separable area of $\frac{1}{3}$, giving a separability probability of $\frac{1}{2 \sqrt{2}} \approx 0.353553$.
Now, we present all our results of this type (two-dimensional exterior boundaries of three-dimensional scenarios) in the following array. The first column gives the corresponding {\it triad} of Gell-Mann matrices, the second column shows the total HS-area of the boundary states, the third column gives the exact separability probability, and the last, a numerical approximation to the probability. (There may exist additional nontrivial scenarios, as we were not readily able to fully analyze all $2730 = 13 \cdot 14 \cdot 15$ possible triads of $4 \times 4$ Gell-Mann matrices.) \begin{equation} \left( \begin{array}{llll}
\{1,3,6\} & \frac{\pi }{4} & \frac{1}{2} & 0.500000 \\
\{1,4,6\} & \frac{1}{2} \left(\sqrt{5}+6 \sin
^{-1}\left(\frac{1}{\sqrt{6}}\right)\right) & \frac{\pi
}{4 \sqrt{5}+24 \csc ^{-1}\left(\sqrt{6}\right)} &
0.165025 \\
\{3,4,6\} & \frac{1}{2} \left(\sqrt{5}+6 \sin
^{-1}\left(\frac{1}{\sqrt{6}}\right)\right) &
\frac{1}{2}+\frac{4 \left(-1+\sqrt{2}\right)}{3
\left(\sqrt{5}+6 \csc
^{-1}\left(\sqrt{6}\right)\right)} & 0.616044 \\
\{3,4,9\} & \frac{2 \sqrt{2}}{3} & \frac{1}{2 \sqrt{2}} &
0.353553 \\
\{3,6,7\} & \frac{1}{2} \left(\sqrt{5}+6 \sin
^{-1}\left(\frac{1}{\sqrt{6}}\right)\right) &
\frac{1}{2} & 0.500000 \\
\{3,6,8\} & \frac{3 \pi }{2} & -\frac{\sqrt{15}-8 \pi +8
\tan ^{-1}\left(\sqrt{\frac{3}{5}}\right)}{8 \pi } &
0.636114 \\
\{6,9,15\} & \frac{3 \pi }{4} & \frac{4-2 \sqrt{5}+3 \pi
-12 \csc ^{-1}\left(\sqrt{6}\right)}{6 \pi } & 0.207232
\\
\{8,9,10\} & \frac{3}{16} \left(4+\sqrt{7}+2 \pi +8 \cot
^{-1}\left(\sqrt{7}\right)\right) & \frac{2 (2+\pi
)}{4+\sqrt{7}+2 \pi +8 \cot ^{-1}\left(\sqrt{7}\right)}
& 0.650017 \\
\{9,11,13\} & \frac{3 \pi }{2} & \frac{1}{12} & 0.0833333 \end{array} \right). \end{equation} \subsection{9- and 15-Parameter Analyses ($m=9,15,n=4$)} \label{final} Now, we sought to make some progress in obtaining the (conjecturally exact) Hilbert-Schmidt volume of the separable $4 \times 4$ density matrices, in both the 9-dimensional case of real density matrices and the 15-dimensional case of (fully general) complex density matrices. In both cases, we dispensed with the Bloch vector parameterization \cite{kk,kimura,byrd} used in the above analyses (neither did we employ the integration over implicitly defined regions capabilities of Mathematica version 5.1), and adopted a simple, naive parameterization, in which the four diagonal elements of the density matrices were denoted $a,b,c,1-a-b-c$ and the off-diagonal (upper triangular) elements, $\alpha_{ij}+ i \beta_{ij}$ (in the real $m =9$ case, of course, all $\beta$'s equal zero). (In order to compare our results here with the HS-volume formulas of \.Zyczkowski and Sommers \cite{hilb2}, the volumes we do report below are our initial volumes multiplied by factors of $2^7$ in the complex case, and $2^4$ in the real case. In all our earlier analyses above, we have simply taken the HS-volume element to equal 1.)
In both ($m=9,15$) of these cases, we pursued the same analytical strategy. We required that the six principal $2 \times 2$ minors of the density matrices and/or their partial transposes have nonnegative determinants. This is (only) one of the requirements for nonnegative-definiteness (cf. \cite[eq. (12)]{bloore}). Ideally, we would also have required that the leading principal $3 \times 3$ minor have nonnegative determinant and also that the determinant of the matrix be nonnegative. But these last two requirements were too computationally onerous to impose (at least in our first round of efforts). So, our analytical strategy should yield {\it upper} bounds on the Hilbert-Schmidt volumes in these cases. \subsubsection{9-Dimensional Real Case} When we only required that the six principal $2 \times 2$ minors have nonnegative determinants, we obtained for the volume the result $\frac{\pi^2}{1120} \approx 0.00881215$. (We can reduce [improve] this to $\frac{\pi^2 (16+\pi^2)}{35840} \approx 0.00712396$ by modifying [narrowing], to begin with, the integration limits over a {\it single} off-diagonal variable, so that in addition, to its corresponding $2 \times 2$ minor, a corresponding $3 \times 3$ minor also has a nonnegative determinant. If we narrow similarly a second set of integration limits, this is further reduced to $\frac{\pi^4}{26880} \approx 0.00362385$. An attempt to add a third set of similar integration limits --- corresponding to a $3 \times 3$ minor --- did not succeed computationally.) Applying formula (7.7) of the \.Zyczkowski-Sommers study \cite{hilb2}, we obtain for the HS-volume of the $4 \times 4$ real density matrices, $\frac{\pi^4}{60480} \approx 0.0016106$. This is $\frac{\pi^2}{54} \approx 0.18277$ times {\it smaller} than our first, principal calculation ($\frac{\pi^2}{1120}$), so we have a considerable overestimation.
If we {\it additionally} imposed the condition that the six principal minors of the partial transpose also have nonnegative determinants (only two of them being actually different from the original six), the result was $\frac{544}{99225} \approx 0.00548249$. (Note that $1120 = 2^5 \cdot 5 \cdot 7$ and $99225 = 3^4 \cdot 5^2 \cdot 7^2$.) So, taking the ratio of this to $\frac{\pi^2}{1120}$, we obtain a crude estimate of 0.622151 for the HS-separability probability of the real density matrices.
Unfortunately, our upper bound (0.00548249) on the HS-volume of the separable real two-qubit states is {\it larger} than the (known) HS-volume (0.0016106) of the (separable {\it and} nonseparable) two-qubit states, so we have not yet succeeded in deriving a nontrivial upper bound on the separable volume. (The same will be the case in the immediate next analysis.) \subsubsection{15-Dimensional Complex Case} Now, when we again required that the six principal $2 \times2$ minors have nonnegative determinants, we obtained for the corresponding Hilbert-Schmidt volume $\frac{\pi^6}{7882875} \approx 0.000121959$. (Note that $7882875 = 3^2 \cdot 5^3 \cdot 7^2 \cdot 11 \cdot 13$.) Formula (4.5) of \cite{hilb2} gives us for the HS-volume of the 15-dimensional convex set of two-qubit density matrices, the value $\frac{\pi^6}{85130500} \approx 1.12925 \cdot 10^{-6}$. (The ratio of these two volumes --- the measure of our overestimation --- is $\frac{7484}{693} \approx 10.7994$.) Imposing (just as we did in the {\it real} 9-dimensional case) the {\it further} requirements that the six $2 \times 2$ minors of the partial transpose all have nonnegative determinants, we obtain a HS-volume of $\frac{1964 \pi^6}{30435780375} \approx 0.0000620378$. (Observe that $30435780375 = 3^5 \cdot 5^3 \cdot 7^2 \cdot 11^2 \cdot 13^2$.) So, our crude separability {\it probability} estimate (less than in the 9-dimensional real case --- as conforms with our intuition) is $\frac{1964}{3861} \approx 0.508677$. Based on certain numerical and theoretical considerations, the actual value of this (15-dimensional) separability probability has been conjectured to be \cite[eq. (43)]{slaterPRA} \begin{equation} \frac{2^2 \cdot 3 \cdot 7^2 \cdot 11 \cdot 13 \sqrt{3}}{5^3 \pi^6} \approx 0.242379. 
\end{equation} \section{Concluding Remarks} We have found the newly-introduced capability of Mathematica (version 5.1) for integration over implicitly defined regions particularly useful for obtaining a very wide variety of separability (and positive-PPT) probabilities, particularly for low-dimensional ($m=2,3$) cases, essentially independently of the sizes ($n$) of the corresponding $n \times n$ density matrices analyzed. The use of such methods for cases $m \geq 4$ --- such as the two-qubit ($n=4$) scenarios for the real ($m=9$) and complex ($m=15$) cases --- appears, however, to be particularly challenging.
Eggeling and Werner \cite{tilo} studied the separability properties in a five-dimensional set of states of quantum systems composed of {\it three} subsystems of equal but arbitrary finite Hilbert space dimension. They are the states that commute with unitaries of the form $U \otimes U \otimes U$. In \cite{slatertilo}, we evaluated the probabilities of an Eggeling-Werner state being biseparable, triseparable or having a positive partial transpose with respect to certain partitions. However, the Hilbert-Schmidt measure was not employed, but rather the {\it Bures} one \cite{hanskarol}.
\begin{acknowledgments} I wish to express gratitude to the Kavli Institute for Theoretical Physics (KITP) for computational support in this research and to Michael Trott of Wolfram Research Inc. for his generous willingness/expertise in assisting with Mathematica computations. \end{acknowledgments}
\end{document} | arXiv | {
"id": "0508227.tex",
"language_detection_score": 0.4021002948284149,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\author{Hanno von Bodecker\footnote{Fakult{\"a}t f{\"u}r Mathematik, Ruhr-Universit{\"a}t Bochum, 44780 Bochum, Germany}}
\title{The beta family at the prime two and modular forms of level three}
\date{}
\maketitle
\begin{abstract}{We use the orientation underlying the Hirzebruch genus of level three to map the beta family at the prime $p=2$ into the ring of divided congruences. This procedure, which may be thought of as the elliptic greek letter beta construction, yields the $f$--invariants of this family.} \end{abstract}
\section{Introduction and statement of the results}
One of the most fundamental problems in pure mathematics is to understand the structure of the stable homotopy groups of the sphere $\pi_*S^0$, and the Adams--Novikov spectral sequence (ANSS) serves as a powerful tool to attack this problem: Working locally at a fixed prime $p$, we have $$\textrm{E}_{2}^{s,t}=\textrm{Ext}^{s,t}_{BP_*BP}\left(BP_*,BP_*\right)\Rightarrow\left(\pi_{t-s}S^0\right)_{\left(p\right)},$$ and much insight can be gained by resolving its $E_2$--term into $v_n$--periodic components \cite{Ravenel:2004xh}. In their seminal paper propagating this chromatic approach, Miller, Ravenel, and Wilson introduced the so-called greek letter map, and computed the 1--line (for all primes) and the 2--line (for odd primes), generated by the alpha and beta families, respectively \cite{Miller:1977ya}. The computation of the 2--line for $p=2$ is due to Shimomura \cite{Shimomura:1981oy}: Let us concentrate on the beta elements at $p=2$ (there are also products of $\alpha$'s): Starting from certain elements $x_i\in v_2^{-1}BP_*$, $y_i\in v_1^{-1}BP_*$, put $$a_0=1,\ a_1=2,\ a_k=3\cdot2^{k-1}\ k\geq2;$$ then, for $n\geq0$, odd $s\geq1$, $j\geq1$, $i\geq0$, subject to the conditions
$$n\geq i,\ 2^i|j,\ j\leq a_{n-i},\textrm{ and }j\leq2^n\textrm{ if }s=1 {\textrm{ and }} i=0,$$ the simplest beta elements are given by \begin{equation}\label{simple_betas} \beta_{s\cdot2^n/j,i+1}=\eta\left({x_n^s}/{2^{i+1}v_1^j}\right), \end{equation} where $\eta$ is the universal greek letter map. In fact, it is sometimes possible to improve divisibility, viz.\ for $n$, $s$, $j$, and $i$ as above with the additional conditions that $$n\geq i+1\geq2, j=2\mbox{ and }s\geq3\mbox{ if }n=2,\mbox{ and }j\leq a_{n-i-1}\mbox{ if }n\geq3,$$ Shimomura defines \begin{equation}\label{higher_beta} \beta_{s\cdot2^n/j,i+2}=\eta\left({x_n^s}/{2^{i+2}y_i^m}\right)\quad\mbox{where } m=j/2^i, \end{equation} and shows the following relations between the betas in \eqref{simple_betas} and \eqref{higher_beta}: \begin{itemize}
\item[(i)]{$\beta_{s\cdot2^n/j,i+2}=\beta_{s\cdot2^n/j,\left(i+1\right)+1}$ if $2^{i+1}|j$,} \item[(ii)]{$2\beta_{s\cdot2^n/j,i+2}=\beta_{s\cdot2^n/j,i+1}$}. \end{itemize}
There are striking number-theoretical patterns lurking in the stable stems which become visible from the chromatic point of view, e.g.\ the (nowadays) well-known relation between the 1--line and the (denominators of the) Bernoulli numbers. Concerning the 2--line, Behrens has established a precise relation between the beta family for primes $p\geq5$ and the existence of modular forms satisfying appropriate congruences \cite{Behrens:2009dz}. On the other hand, using an injection of the 2--line into the ring of divided congruences (tensored with $\mathbb{Q/Z}$), Laures introduced the $f$--invariant as a higher analog of the $e$--invariant \cite{Laures:1999sh}. Subsequent work has shown how these approaches can be merged and used to derive the $f$--invariant of the beta family, albeit still only for $p\geq5$ \cite{Behrens:2008fp}. A different route has been taken in \cite{Hornbostel:2007ss}, where the $f$--invariant is represented using Artin--Schreier theory; however, although no longer limited to primes $p\geq5$, the calculations actually carried out in that reference only take care of two subfamilies (viz.\ $\beta_t$ for $p\nmid t$ and $\beta_{s2^n/2^n}$).
Since there has been some progress on our geometrical understanding of the $f$--invariant through analytical techniques (to an extent where explicit calculations can be done, cf.\ e.g.\ \cite{Bodecker:2008pi}) it is desirable to have some sort of `comparison table'; to this end, we compute the $f$--invariant of the beta family\footnote{The situation of products of permanent alpha elements has been studied in \cite{Bodecker:2009kx}.} at the prime $p=2$. More precisely, we take a look at the following diagram (at the level $N=3$, i.e.\ $\Gamma=\Gamma_1(3)$): \begin{equation}\label{the_diagram} \xymatrix{ \textrm{Ext}^0\left(BP_*,BP_*/\left(p^{\infty},v_1^{\infty}\right)[v_2^{-1}]\right)\ar[rr]\ar[d]& &\textrm{Ext}^{2,*}\left(BP_*,BP_*\right)\ar[d]\\ \textrm{Ext}^0\left(E^{\Gamma}_*,E^{\Gamma}_*/\left(p^{\infty},v_1^{\infty}\right)\right)\ar[rr]\ar@{.>}[drr]& &\textrm{Ext}^{2,*}\left(E^{\Gamma}_*,E^{\Gamma}_*\right)\ar[d]\\
& &{\underline{\underline{D}}_{*+1}^{\Gamma}\otimes\mathbb{Q/Z}}\\}
\end{equation} The composition of the vertical arrows on the RHS (which are injective by the results of \cite{Laures:1999sh}) accounts for the algebraic portion of the $f$--invariant, while the upper horizontal arrow produces the beta family. So, in order to compute the $f$--invariant of a member of this family, we chase its preimage through the composition of the vertical arrow on the LHS and the dotted arrow; put differently, we carry out (a sufficiently large portion of) the elliptic greek letter construction explicitly. The result can be summarized as follows (where, as usual, we abbreviate $\beta_{k/j}=\beta_{k/j,1}$ and $\beta_k=\beta_{k/1}$):
\begin{thm}\label{order-two} The $f$--invariants of the beta elements of order two are \begin{itemize} \item[\textup{(i)}] for odd $s\geq3$: $$f\left(\beta_{s}\right)\equiv\frac{1}{2}\left(\frac{E_1^2-1}{4}\right)^{s}\mod\underline{\underline{D}}_{3s-1}^{\Gamma}$$ \item[\textup{(ii)}] for odd $s\geq1$:
$$f\left(\beta_{2s/j}\right)\equiv\frac{1}{2}\left(\frac{E_1^2-1}{4}\right)^{2s}\mod\underline{\underline{D}}_{6s-j}^{\Gamma}$$ \item[\textup{(iii)}] for $l\geq0$ and odd $s\geq1$: \begin{equation*} \begin{split} f\left(\beta_{4s\cdot2^l/j}\right)&\equiv\frac{1}{2}\left(\frac{E_1^2-1}{4}\right)^{4s\cdot2^l}+\frac{1}{2}\left(\frac{E_1^2-1}{4}\right)^{\left(4s-1\right)2^l}\mod\underline{\underline{D}}^{\Gamma}_{12s\cdot2^l-j}\\ &\equiv\frac{1}{2}\left(\frac{E_1^2-1}{4}\right)^{4s\cdot2^l}\textrm{{\em\ if }}j\leq3\cdot2^l\\ \end{split} \end{equation*} \end{itemize} \end{thm}
\begin{thm}\label{higher-order} The $f$--invariants of the beta elements of higher order are \begin{itemize} \item[\textup{(i)}] for odd $s\geq1$: $$f\left(\beta_{4s/2,2}\right)\equiv\frac{1}{4}\left(\frac{E_1^2-1}{4}\right)^{4s}\mod\underline{\underline{D}}_{12s-2}^{\Gamma}$$
\item[\textup{(ii)}] for $l\geq0$, $i\geq1$, $j=m\cdot2^i\leq a_{l+2}$, odd $s\geq1$, and mod $\underline{\underline{D}}_{3s\cdot2^{l+i+2}-j}^{\Gamma}$: \begin{align*}
f\left(\beta_{s\cdot2^{l+i+2}/j,i+1}\right)&\equiv\frac{1}{2^{i+1}}\left(\frac{E_1^2-1}{4}\right)^{s\cdot2^{l+i+2}}+\frac{1}{2}\left(\frac{E_1^2-1}{4}\right)^{\left(s\cdot2^{i+2}-1\right)2^{l}}\\ &\equiv\frac{1}{2^{i+1}}\left(\frac{E_1^2-1}{4}\right)^{s\cdot2^{l+i+2}}\textrm{{\em\ if }}j\leq3\cdot2^l
\end{align*}
\item[\textup{(iii)}] for $k\geq2$: $$f\left(\beta_{4k/2,3}\right)\equiv\frac{1+4k}{8}\left(\frac{E_1^2-1}{4}\right)^{4k}\mod\underline{\underline{D}}_{12k-2}^{\Gamma}$$
\item[\textup{(iv)}] for $l\geq0$, $i\geq1$, $j=m\cdot2^i\leq a_{l+2}$, odd $s\geq1$, and mod $\underline{\underline{D}}_{3s\cdot2^{l+i+3}-j}^{\Gamma}$: \begin{equation*} \begin{split} f\left(\beta_{s\cdot2^{l+i+3}/j,i+2}\right)&\equiv\frac{1}{2^{i+2}}\left(\frac{E_1^2-1}{4}\right)^{s\cdot2^{l+i+3}}+\frac{1}{2}\left(\frac{E_1^2-1}{4}\right)^{\left(s\cdot2^{i+3}-1\right)2^l}\\ &\equiv\frac{1}{2^{i+2}}\left(\frac{E_1^2-1}{4}\right)^{s\cdot2^{l+i+3}}\textrm{{\em\ if }}j\leq3\cdot2^l\\ \end{split} \end{equation*} \end{itemize} \end{thm}
The proof presented in the following section turns out to be a pretty much straightforward calculation: After a brief recollection of the relevant definitions, we study the image (under the orientation underlying the Hirzebruch genus) of the elements $x_i$ and $y_i$ occurring in the definition of the beta elements. Then, we are going to sketch our approach to the argument given in \cite[section 4]{Behrens:2008fp}, i.e.\ we explain how to carry out the greek letter map on the level of (holomorphic) modular forms. The final step consists of performing this computation explicitly.
\section{Proof of the Theorems}
\subsection{Preliminaries}
Following \cite{Laures:1999sh}, we consider the congruence subgroup $\Gamma=\Gamma_1(N)$ for a fixed level $N>1$, set $\mathbb{Z}^{\Gamma}=\mathbb{Z}[\zeta_N,1/N]$ and denote by $M^{\Gamma}_*$ the graded ring of modular forms w.r.t.~$\Gamma$ which expand integrally, i.e.~which lie in $\mathbb{Z}^{\Gamma}[\![q]\!]$. The ring of {\em divided congruences} $D^{\Gamma}$ consists of those rational combinations of modular forms which expand integrally; this ring can be filtered by setting
$$D_k^{\Gamma}=\left\{\left.f={\textstyle{\sum_{i=0}^{k}}}f_i\ \right| f_i\in M_i^{\Gamma}\otimes\mathbb{Q},\ f\in\mathbb{Z}^{\Gamma}[\![q]\!]\right\}.$$ Finally, we introduce $$\underline{\underline{D}}^{\Gamma}_{k}=D^{\Gamma}_k+M_0^{\Gamma}\otimes\mathbb{Q}+M_k^{\Gamma}\otimes\mathbb{Q}.$$
Now, if $Ell^{\Gamma}$ denotes the complex oriented elliptic cohomology theory associated to the universal curve over the ring of modular forms w.r.t.\ $\Gamma$, the composite $$\textrm{E}_2^{2,2k+2}[MU]\rightarrow \textrm{E}_2^{2,2k+2}[Ell^{\Gamma}]\rightarrow\underline{\underline{D}}^{\Gamma}_{k+1}\otimes{\mathbb{Q/Z}}$$ is injective (away from primes dividing the level $N$) \cite{Laures:1999sh}. Henceforth, we fix $p=2$ and $N=3$. Thus we have $$M^{\Gamma}_*=\mathbb{Z}^{\Gamma}[E_1,E_3],$$ where \begin{equation*} \begin{split}
E_1&=1+6\sum_{n=1}^{\infty}\sum_{d|n}(\frac{d}{3})\ q^n,\\
E_3&=1-9\sum_{n=1}^{\infty}\sum_{d|n}(\frac{d}{3})d^2\ q^n\\ \end{split} \end{equation*} are the odd Eisenstein series of the indicated weight at the level $N=3$ (and $(\frac{d}{3})$ denotes the Legendre symbol). Furthermore, the following basic congruence can be read off of the $q$-expansions: \begin{equation}\label{the_basic_congruence}
E_3-1\equiv \frac{E_1^2-1}{4}\mod 2D_3^{\Gamma}. \end{equation}
\subsection{The image under the orientation}\label{computing_the_image}
Recall that the power series associated to the Hirzebruch elliptic genus of level three may be expressed as (see e.g.\ \cite{Bodecker:2008pi}) $$Q\left(x\right)=\exp\left(3\sum_{n=1}^{\infty}\frac{x^{2n}}{(2n)!}G_{2n}^*(\tau)-2\sum_{k=0}^{\infty}\frac{x^{2k+1}}{(2k+1)!}G_{2k+1}^{(-\omega)}(\tau)\right)\in M_*^{\Gamma}\otimes{\mathbb{Q}}[\![x]\!]$$ where $\omega=2\pi i/3$ and \begin{equation*} \begin{split} G_{2n}^*(\tau)&=G_{2n}(\tau)-3^{2n-1}G_{2n}(3\tau),\\ G_{2k+1}^{(-\omega)}(\tau)&=\frac{e^{\omega}-e^{-\omega}}{2}3^{2k}\frac{B_{2k+1}(1/3)}{2k+1}E_{2k+1}^{\Gamma_1(3)}(\tau).\\ \end{split} \end{equation*} The first few terms of this power series, when expressed in terms of $E_1$ and $E_3$, i.e.\ the generators of $M_*^{\Gamma}$, read \begin{equation}\label{expandedgenus} \begin{split} Ell^{\Gamma_1(3)}(x) & = 1+\frac{iE_1}{2\sqrt{3}}x+\frac{E_1^2}{12}x^2+\frac{iE_1^3-iE_3}{18\sqrt{3}}x^3+\frac{13E_1^4-16E_1E_3}{2160}x^4\\ &\quad+\frac{iE_1^2(E_1^3-E_3)}{216\sqrt{3}}x^5+\frac{121E_1^6-152E_1^3E_3+40E_3^2}{272160}x^6\\ &\quad+\frac{iE_1}{\sqrt{3}}\frac{7E_1^6-11E_1^3E_3+4E_3^2}{19440}x^7+O(x^8)\\ \end{split} \end{equation} The genus of the following complex projective spaces is readily evaluated: \begin{align*} w_1&=\phi({\mathbb{CP}}^1)=\frac{i}{\sqrt3}E_1,\\ w_3&=\phi({\mathbb{CP}}^3)=\frac{i}{\sqrt3}\frac{5E_1^3-2E_3}{9},\\ w_7&=\phi({\mathbb{CP}}^7)=\frac{i}{\sqrt3}\frac{70E_1^4E_3-14E_1E_3^2-65E_1^7}{243}. 
\end{align*} As is well-known, underlying this genus is the complex orientation of the cohomology theory $Ell^{\Gamma}$, i.e.\ $$\phi:MU\rightarrow Ell^{\Gamma}$$ and we can compute the images of the Hazewinkel generators \cite[Appendix A2]{Ravenel:2004xh} at the prime $p=2$, which we still denote by $v_i$: $$v_1=w_1=\frac{i}{\sqrt3}E_1$$ $$v_2=\frac{w_3-w_1^3}{2}=\frac{i}{\sqrt{3}}\frac{4E_1^3-E_3}{9},$$ $$v_3=\frac{w_7}{4}-\frac{w_1^7+w_1w_3^2}{8}=\frac{iE_1}{\sqrt{3}}\frac{5E_1^3E_3-E_3^2-4E_1^6}{81}.$$ In particular, we see that $v_3$ is decomposable: \begin{equation}\label{decomp_v_3} \begin{split} v_3&=\frac{iE_1}{\sqrt{3}}\left(\frac{4E_1^3E_3-E_3^2}{81}-\frac{4E_1^6-E_1^3E_3}{81}\right)\\ &=\frac{iE_1}{\sqrt{3}}\left(\frac{i}{\sqrt{3}}\frac{4E_1^3-E_3}{9}\right)\left(-\frac{i}{3\sqrt{3}}\left(E_3-E_1^3\right)\right)\\ &=3v_1v_2\left(v_2+v_1^3\right)\\ \end{split} \end{equation} Plugging \eqref{decomp_v_3} into the definitions of the $x_i$ (considered in $v_2^{-1}M_*^{\Gamma}$) yields \begin{equation}\label{the_x_i} \begin{split} x_0&=v_2\\ x_1&=v_2^2-v_1^2v_2^{-1}v_3=v_2^2-3v_1^3\left(v_2+v_1^3\right)\\ x_2&=x_1^2-v_1^3v_2^3-v_1^5v_3=v_2^4-7v_1^3v_2^3+15v_1^9v_2+9v_1^{12}\\ x_i&=x_{i-1}^2\quad i\geq3,\\ \end{split} \end{equation} showing that the $x_i$ are actually holomorphic. On the other hand, unless $i=0$, this is not true for the $y_i\in v_1^{-1}M_*^{\Gamma}$, which read: \begin{equation}\label{the_y_i} \begin{split} y_0&=v_1\\ y_1&=v_1^2-4v_1^{-1}v_2\\ y_i&=y_{i-1}^2\quad i\geq2.\\ \end{split} \end{equation} However, for $i\geq1$ and $m\geq1$, we may introduce \begin{equation}\label{the_z_i} \begin{split} z_{i,m}&=v_1^{m\cdot2^i}-m\cdot2^{i+1}v_1^{m\cdot2^i-3}v_2,\\ \end{split} \end{equation} which are holomorphic for $m\cdot2^i\geq4$ and satisfy \begin{align*} z_{i,m}&\equiv y_i^m \mod2^{i+2}v_1^{-1}M_*^{\Gamma}\\
&\equiv 1 \ \ \mod2^{i+2}\mathbb{Z}^{\Gamma}[\![q]\!], \end{align*} the second line being an immediate consequence of \eqref{the_basic_congruence}.
\subsection{Determining `elliptic' beta elements}
Requiring $p>3$ and working with the full modular group, Behrens and Laures have shown in \cite[section 4]{Behrens:2008fp} how an element in $H^0\left(M_*/\left(p^{\infty},E_{p-1}^{\infty}\right)\right)$ gives rise to an element in $D\otimes{\mathbb{Q}}/D[\frac{1}{6}]+M_k\otimes\mathbb{Q}+\mathbb{Q}$; clearly, the other primes can be treated analogously by working with a smaller congruence subgroup. Let us rephrase their argument in a language closer to the original formulation of the greek letter construction:
Still working at the prime $p=2$ and the level $N=3$, we choose a (holomorphic) modular form $\mu \in M^{\Gamma}_{|\mu|}$ and a pair of positive integers $\left(i_0, i_1\right)$ such that
\begin{equation}
\mu^{i_1}\equiv1\mod 2^{i_0}D_{i_1|\mu|}^{\Gamma};
\end{equation} in particular, this ensures that $\left(2^{i_0},\mu^{i_1}\right)$ is regular on $M_*^{\Gamma}$.
Now, given a modular form $\tilde\varphi_t\in M_t^{\Gamma}$, we can use the natural inclusion
$$M_t^{\Gamma}\hookrightarrow D_t^{\Gamma}$$ and ask whether $\tilde\varphi_t$ satisfies \begin{equation}\label{invariant_mod}
\tilde\varphi_t\equiv\mu^{i_1}\varphi_{t/i_1|\mu|,i_0} \mod 2^{i_0}D^{\Gamma}_t \end{equation} for some
$$\varphi_{t/i_1|\mu|,i_0}\in D^{\Gamma}_{t-i_1|\mu|}/2^{i_0}D^{\Gamma}_{t-i_1|\mu|}$$ Let us call a modular form satisfying \eqref{invariant_mod} {\em invariant mod} $\left(2^{i_0},\mu^{i_1}\right)$. Moreover, we have the obvious composition $$\underline{\underline{\left(\ \cdot\ \right)}}\ \colon D_k^{\Gamma}/2^{i_0}D^{\Gamma}_k\cong{D^{\Gamma}_k}\otimes \mathbb{Z}/2^{i_0} \rightarrow D^{\Gamma}_k\otimes \mathbb{Q/Z} \rightarrow \underline{\underline{D}}_k^{\Gamma}\otimes \mathbb{Q/Z},$$ $$\quad\varphi_k\mapsto\underline{\underline{\varphi}}_k$$ Then it is easy to see that, for an invariant modular form $\tilde\varphi_t$, the assignment
$$\tilde\varphi_t\mapsto\underline{\underline{\varphi}}_{t/i_1|\mu|,i_0}$$ depends only on the reduction $\varphi_t\equiv\tilde\varphi_t\mod\left(2^{i_0},\mu^{i_1}\right)$, hence descends to a well-defined map \begin{equation}\label{elliptic_beta_map}
\ker\left(M_t^{\Gamma}/\left(2^{i_0},\mu^{i_1}\right)\rightarrow D_t^{\Gamma}/\left(2^{i_0},\mu^{i_1}\right)\right)\longrightarrow \underline{\underline{D}}_{t-i_1|\mu|}^{\Gamma}\otimes \mathbb{Q/Z} \end{equation} which we may think of as the {\em `elliptic' greek letter beta map} and which corresponds to the dotted arrow in \eqref{the_diagram}.
\begin{rmk} By removing the constant term of the $q$-expansion, we obtain another map $$d\colon M_t^{\Gamma}\rightarrow D^{\Gamma}_t,\quad d\left(\tilde\varphi_t\right)=\tilde\varphi_t-q^0\left(\tilde\varphi_t\right)$$
that might look like a more natural choice w.r.t.\ which invariance should be defined (cf.\ \cite{Behrens:2008fp}). However, we have $q^0(\varphi)\equiv \mu^{i_1}q^0(\varphi)\mod 2^{i_0}D^{\Gamma}_t$, hence both choices are equivalent (up to the shift of $\varphi_{t/i_1|\mu|,i_0}$ by the constant $q^0\left(\tilde\varphi_t\right)$, which maps to zero in $\underline{\underline{D}}^{\Gamma}_{k}\otimes\mathbb{Q/Z}$). \end{rmk}
\subsection{Explicit computations}
In this subsection, we are going to compute the effect of the map \eqref{elliptic_beta_map} on the preimage of Shimomura's beta elements; the ones defined by \eqref{simple_betas} are dealt with easily, since $\left(2^{i+1},v_1^{j}\right)$ is regular on $M^{\Gamma}_*$ provided that $j=m\cdot2^i$; moreover, for $k\geq0$ this implies: \begin{equation}\label{multi_by_v_1^j} \left(\frac{E_1^2-1}{4}\right)^k\equiv v_1^j\left(\frac{E_1^2-1}{4}\right)^k\mod2^{i+1}D^{\Gamma}_{2k+j} \end{equation} Furthermore, the following two results are useful: \begin{lem}\label{E_3_congruences} For $i\geq0$, $l\geq0$, $m\cdot2^i=j\leq6\cdot2^l$ we have: \begin{equation*} \begin{split} E_3^{s\cdot2^{l+i+2}}&\equiv\left(\frac{E_1^2-1}{4}\right)^{s\cdot2^{l+i+2}}\mod2^{i+1}D^{\Gamma}_{12s\cdot2^{l+i}}+v_1^j\cdot M_{12s\cdot2^{l+i}-j}^{\Gamma}\\ \end{split} \end{equation*} \end{lem} \begin{proof} It is easy to see that for $l\geq0$ and $i\geq0$, we have \begin{equation}\label{dunno} E_3^{2^{l+i+2}}\equiv\left(E_3-v_1^3\right)^{2^{l+i+2}}+2^{i+1}\left(v_1^6E_3^2\right)^{2^l}E_3^{2^{l+2}\left(2^i-1\right)}\mod\left(2^{i+2},v_1^{12\cdot2^l}\right), \end{equation} and the basic congruence \eqref{the_basic_congruence} implies \begin{equation} \left(E_3-v_1^3\right)^{2^k}\equiv\left(\frac{E_1^2-1}{4}\right)^{2^k}\mod2^{k+1}D^{\Gamma}_{3\cdot2^k}\qedhere \end{equation} \end{proof}
\begin{lem}\label{removal_of_second_summand}
For $i\geq0$, $l\geq0$, $1\leq j\leq6\cdot2^l$ we have: \begin{align*} E_3^{\left(s\cdot2^{i+2}-1\right)2^l}&\equiv\left(\frac{E_1^2-1}{4}\right)^{\left(s\cdot2^{i+2}-1\right)2^l}& &\mod2D^{\Gamma}_{12s\cdot2^{l+i}}+v_1^j\cdot M^{\Gamma}_{12s\cdot2^{l+i}-j}\\ &\equiv0& &\textrm{\quad{\em if }}j\leq3\cdot2^l \end{align*} \end{lem} \begin{proof} This immediately follows from \eqref{the_basic_congruence}\end{proof}
\noindent Now let us treat the beta elements of order two, i.e.\ those with $i=0$ in \eqref{simple_betas}:
\begin{proof}[\bf{Proof of Theorem \ref{order-two}:}]\ \\ For part (i), we observe that: \begin{align*} x_0^s&=v_2^s\\ &\equiv E_3^s& &\mod 2D^{\Gamma}_{3s}\\ &\equiv \left(E_3-E_1^3\right)^s & &\mod2D^{\Gamma}_{3s}+v_1\cdot M^{\Gamma}_{3s-1}\\ &\equiv \left(\frac{E_1^2-1}{4}\right)^s & &\mod2D^{\Gamma}_{3s}+v_1\cdot M^{\Gamma}_{3s-1} \end{align*} Similarly, for part (ii) we have: \begin{align*} x_1^s&\equiv v_2^s & &\mod v_1^j\\ &\equiv E_3^{2s}& &\mod 2D^{\Gamma}_{6s}+v_1^j\cdot M^{\Gamma}_{6s-j}\\ &\equiv \left(E_3-E_1^3\right)^{2s}& &\mod2D^{\Gamma}_{6s}+v_1^j\cdot M^{\Gamma}_{6s-j}\\ &\equiv \left(\frac{E_1^2-1}{4}\right)^{2s}& &\mod2D^{\Gamma}_{6s}+v_1^j\cdot M^{\Gamma}_{6s-j} \end{align*} and since $j\leq a_{l+2}=6\cdot2^l$ (and $j\leq 2^{l+2}$ if $s=1$), for part (iii) we conclude: \begin{align*} x_{2+l}^s&\equiv v_2^{4s\cdot2^l}+v_1^{3\cdot2^l}v_2^{\left(4s-1\right)2^l}& &\mod\left(2,v_1^{a_{l+2}}\right)\\ &\equiv E_3^{4s\cdot2^l}+E_3^{\left(4s-1\right)2^l}& &\mod2D^{\Gamma}_{12s\cdot2^l}+v_1^j\cdot M^{\Gamma}_{12s\cdot2^l-j}\\ &\equiv \left(\frac{E_1^2-1}{4}\right)^{4s\cdot2^l}+\left(\frac{E_1^2-1}{4}\right)^{\left(4s-1\right)2^l}& &\mod2D^{\Gamma}_{12s\cdot2^l}+v_1^j\cdot M^{\Gamma}_{12s\cdot2^l-j} \end{align*} In view of \eqref{multi_by_v_1^j}, this completes the proof. \end{proof}
\begin{rmk} Since $x_0=v_2$ is sent to zero under the map \eqref{elliptic_beta_map} w.r.t.\ $\left(2,v_1\right)$, we see that in order to obtain something interesting, we have to impose $s\geq3$ in part (i). In a similar vein, the condition $j\leq 2^{l+2}$ if $s=1$ in part (iii) is needed to ensure that $D^{\Gamma}_{8s\cdot2^l+j}\subset D^{\Gamma}_{12s\cdot2^l}$ when using \eqref{multi_by_v_1^j}. \end{rmk}
Next, we turn our attention to the elements $\beta_{4s\cdot2^l/j,i+1}$ for $i\geq1$:
\begin{lem}\label{reduction_of_x_n} For $l\geq0$ and $i\geq0$, we have \begin{equation*} x_{l+i+3}\equiv v_2^{2^{l+i+3}}+2^{i+1}v_1^{3\cdot2^{l}}v_2^{\left(2^{i+3}-1\right)2^{l}}\mod\left(2^{i+2},v_1^{a_{l+2}}\right) \end{equation*} \end{lem}
\begin{proof} Since $(a+b)^{2^{l+1}}\equiv a^{2^{l+1}}+b^{2^{l+1}}+2(ab)^{2^{l}}\mod4$ for $l\geq0$, we compute \begin{equation*} x_{l+3}=x_2^{2^{l+1}}\equiv v_2^{8\cdot2^l}+2\left(v_1^{3}v_2\right)^{2^{l}}v_2^{6\cdot2^l}\mod\left(4,v_1^{a_{l+2}}\right) \end{equation*} and use the binomial theorem. \end{proof}
\begin{proof}[{\bf Proof of Theorem \ref{higher-order}, part (i):}]\ \\ The choice $n=2$ and $i=1$ in \eqref{simple_betas} dictates $j=2$, hence we compute \begin{align*} x_2^s&\equiv v_2^{4s}& &\mod \left(4,v_1^2\right)\\ &\equiv E_3^{4s} & &\mod 4D^{\Gamma}_{12s}+v_1^2\cdot M_{12s-2}^{\Gamma}\\ &\equiv \left(\frac{E_1^2-1}{4}\right)^{4s}& &\mod 4D^{\Gamma}_{12s}+v_1^2\cdot M_{12s-2}^{\Gamma} \end{align*} Combined with \eqref{multi_by_v_1^j}, this yields the claim. \end{proof}
\begin{proof}[{\bf Proof of Theorem \ref{higher-order}, part (ii):}]\ \\ In order to treat the remaining cases of our computation of $x_n^s$ mod $\left(2^{i+1},v_1^j\right)$, we notice that since \eqref{simple_betas} requires $j=m\cdot2^i\leq a_{n-i}$, and since all cases with $i=0$ and the case $i=1$ for $n=2$ have already been taken care of, it suffices to consider $n=l+i+2$ where $l\geq0$ and $i\geq1$; now, for odd $s\geq1$ we have (by Lemma \ref{reduction_of_x_n} in a reindexed form) \begin{align*} x_{l+i+2}^s&\equiv v_2^{s\cdot2^{l+i+2}}+2^iv_1^{3\cdot2^l}v_2^{s\cdot2^{l+i+2}-2^l}& &\mod\left(2^{i+1},v_1^{a_{l+2}}\right)\\ &\equiv E_3^{s\cdot2^{l+i+2}}+2^iE_3^{s\cdot2^{l+i+2}-2^l}& &\mod2^{i+1}D^{\Gamma}_{12s\cdot2^{l+i}}+v_1^j\cdot M_{12s\cdot2^{l+i}-j}^{\Gamma} \end{align*} from which the desired result follows. \end{proof}
Finally, we treat the beta elements defined by \eqref{higher_beta}:
\begin{proof}[{\bf Proof of Theorem \ref{higher-order}, part (iii):}]\ \\ In order to compute the $f$--invariant of $\beta_{4k/2,3}$, we are going to show that, although $z_{1,1}=v_1^2-4v_1^{-1}v_2$ is not holomorphic, we can still make sense out of the map \eqref{elliptic_beta_map} w.r.t.\ $\left(8, z_{1,1}\right)$ if $t=12k\geq24$. To this end, we observe $$v_1^6=z_{1,1}v_1^4+4v_1^3v_2=z_{1,1}\left(v_1^4+4v_1v_2\right)+16v_2^2,$$ hence we compute \begin{align*} x_2^k&\equiv v_2^{4k}+kv_1^3v_2^{4k-1}& &\mod\left(8,v_1^6\right)\\ &\equiv \left(1+4k\right)v_2^{4k}& &\mod\left(8,z_{1,1}\right)\\ &\equiv \left(1+4k\right)E_3^{4k}& &\mod 8D^{\Gamma}_{12k}+z_{1,1}M^{\Gamma}_{12k-2} \end{align*} where $z_{1,1} M_{12k-2}^{\Gamma}\subset M_{12k}^{\Gamma}$ for dimensional reasons; finally, we note that \begin{align*} E_3^{4k}&\equiv\left(\frac{E_1^2-1}{4}\right)^{4k}& &\mod 8D^{\Gamma}_{12k}+z_{1,1}M^{\Gamma}_{12k-2}\\ &\equiv \left(\frac{E_1^2-1}{4}\right)^{4k}v_1^4z_{1,1} & &\mod 8D^{\Gamma}_{12k}+z_{1,1}M^{\Gamma}_{12k-2}\quad{\textrm{if\ }}k\geq2 \end{align*} which completes the proof.\end{proof}
\begin{proof}[{\bf Proof of Theorem \ref{higher-order}, part (iv):}]\ \\ Recall that in the definition \eqref{higher_beta} we have to impose $j=m\cdot2^i\leq a_{n-i-1}$ for $n\geq3$; since the situation $m=i=1$ has already been dealt with in the previous part (iii), it is sufficient to consider the case $n=l+i+3$, $4\leq m\cdot 2^i=j\leq a_{l+2}$, where $l\geq0$, $i\geq1$. In order to compute the $f$--invariants, we calculate the effect of the map \eqref{elliptic_beta_map} w.r.t.\ $\left(2^{i+2},z_{i,m}\right)$: Since \begin{equation}\label{mod_higher_power} \begin{split} v_1^{6\cdot2^l}&=z_{i,m}v_1^{6\cdot 2^l-j}+2jv_1^{6\cdot2^l-3}v_2\\ v_1^{9\cdot2^l}&=z_{i,m}\left(v_1^{9\cdot2^l-j}+2jv_1^{9\cdot2^l-j-3}v_2\right)+4j^2v_1^{9\cdot2^l-6}v_2^2 \end{split} \end{equation} we calculate for $l\geq0$, $i\geq1$, and odd $s\geq1$: \begin{align*} x_{l+i+3}^s&\equiv v_2^{s\cdot2^{l+i+3}}+2^{i+1}v_1^{3\cdot2^l}v_2^{\left(s2^{i+3}-1\right)2^l}+\\ &\quad+3s\cdot2^iv_1^{6\cdot2^l}v_2^{\left(s2^{i+3}-2\right)2^l}& &\mod\left(2^{i+2}, v_1^{9\cdot2^l}\right)\\ &\equiv v_2^{s\cdot2^{l+i+3}}+2^{i+1}v_1^{3\cdot2^l}v_2^{\left(s2^{i+3}-1\right)2^l}& &\mod\left(2^{i+2},z_{i,m}\right) \end{align*} hence \begin{align*} x^s_{l+i+3}&\equiv E_3^{s\cdot2^{l+i+3}}+2^{i+1}E_3^{\left(s\cdot2^{i+3}-1\right)2^l}\mod2^{i+2}D^{\Gamma}_{24s\cdot2^{l+i}}+z_{i,m}\cdot M^{\Gamma}_{24s\cdot2^{l+i}-j} \end{align*} and due to \eqref{mod_higher_power}, application of Lemma \ref{E_3_congruences} and Lemma \ref{removal_of_second_summand} yields the claim. \end{proof}
\end{document} | arXiv | {
"id": "0912.3082.tex",
"language_detection_score": 0.5232210755348206,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\makeatletter \renewenvironment{proof}[1][\proofname]{\par
\pushQED{\qed}
\normalfont \topsep6\p@\@plus6\p@\relax
\trivlist
\item[\hskip\labelsep
\scshape
#1\@addpunct{.}]\ignorespaces }{
\popQED\endtrivlist\@endpefalse } \makeatother
\numberwithin{equation}{section} \theoremstyle{remark}\newtheorem{obs}{Remark} \theoremstyle{plain}\newtheorem{teo}{Theorem}[section] \theoremstyle{plain}\newtheorem{prop}[teo]{Proposition} \theoremstyle{plain}\newtheorem{lema}[teo]{Lemma} \theoremstyle{plain}\newtheorem{cor}[teo]{Corollary} \theoremstyle{definition}\newtheorem{df}[teo]{Definition} \theoremstyle{remark}\newtheorem{rem}[teo]{Remark} \swapnumbers \theoremstyle{definition}\newtheorem{example}[teo]{Example}
\begin{abstract} We give an algebraic characterization of the possible characteristic tensors of an infinitesimally homogeneous affine manifold with $G$-structure. Such concepts were introduced in \cite{PiTausk}. \end{abstract}
\maketitle
\begin{section}{Introduction}
The concept of {\em infinitesimally homogeneous\/} affine manifold with $G$-structure was introduced in the recent article \cite{PiTausk} with the aim to find a unifying language for several isometric immersion (Bonnet type) theorems that appear in the classical literature \cite{Dajczer} (immersions into Riemannian manifolds with constant sectional curvature, immersions into K\"ahler manifolds of constant holomorphic curvature), and also some more recent results (see for instance \cite{Benoit1, Benoit}) concerning the existence of isometric immersions in more general Riemannian manifolds. By an affine manifold with $G$-structure we mean a triple $(M,\nabla,P)$, with $M$ an $n$-dimensional diffe\-rentiable manifold, $\nabla$ a connection on $M$ and $P$ a $G$-structure on $M$, i.e., $G$ is a Lie subgroup of ${{\rm{GL}}}(n)$ and $P$ is a $G$-principal subbundle of the frame bundle of $M$. We denote by $R$ and $T$, respectively, the curvature and torsion tensors of $\nabla$. In order to handle the case in which $P$ is not compatible with $\nabla$, the concept of {\em inner torsion\/} was introduced in \cite{PiTausk}: it is a tensor ${{\mathfrak{I}}}^P$ that plays the role of a covariant derivative of the $G$-structure $P$ and it vanishes if and only if $\nabla$ is compatible with $P$. The concept of infinitesimal homogeneity plays the same role in the theory of affine manifolds with $G$-structure as the concept of constant sectional curvature plays in Riemannian geometry; in fact, Riemannian manifolds with constant sectional curvature are precisely the infinitesimally homogeneous triples $(M,\nabla,P)$ in which $P$ is the $\mathrm O(n)$-principal bundle of orthonormal frames and both the torsion and the inner torsion vanish. 
Notice that Riemannian manifolds with constant sectional curvature are those in which the (four indexed) matrix representing the curvature tensor with respect to orthonormal frames is independent of the orthonormal frame and of the point on the manifold. While it does not make sense to require that a tensor field on a manifold be constant, we can define, for manifolds endowed with a $G$-structure, the notion of {\em $G$-constant\/} tensor field: that is a tensor field whose matrix with respect to frames that belong to the $G$-structure is independent of the frame and of the point of the manifold. An affine manifold with $G$-structure $(M,\nabla,P)$ is said to be {\em infinitesimally homogeneous\/} if the tensor fields $R$, $T$ and ${{\mathfrak{I}}}^P$ are all $G$-constant. When $M$ is simply connected and $\nabla$ is geodesically complete then this condition implies that the group of all affine $G$-structure preserving diffeomorphisms of $M$ acts transitively on the frames that belong to $P$ and in that case we say that the triple $(M,\nabla,P)$ is {\em homogeneous\/} \cite{PiTausk}.
The $G$-constant tensor fields $R$ and $T$ of an infinitesimally homogeneous triple $(M,\nabla,P)$ are represented, with respect to an arbitrary frame belonging to $P$, by multilinear maps $R_0:\ensuremath{\mathbb{R}}^n\times\ensuremath{\mathbb{R}}^n\times\ensuremath{\mathbb{R}}^n\to\ensuremath{\mathbb{R}}^n$ and $T_0:\ensuremath{\mathbb{R}}^n\times\ensuremath{\mathbb{R}}^n\to\ensuremath{\mathbb{R}}^n$, respectively; moreover, the $G$-constant inner torsion ${{\mathfrak{I}}}^P$ is represented (with respect to an arbitrary frame belonging to $P$) by a linear map ${{\mathfrak{I}}}^P_0:\ensuremath{\mathbb{R}}^n\to{\mathfrak{gl}(n)}/\mathfrak g$, where $\mathfrak g$ denotes the Lie algebra of $G$. We call $R_0$, $T_0$, ${{\mathfrak{I}}}^P_0$ the {\em characteristic tensors\/} of $(M,\nabla,P)$. The characteristic tensors $R_0$, $T_0$, ${{\mathfrak{I}}}^P_0$ characterize locally an infinitesimally homogeneous triple $(M,\nabla,P)$, in the sense that two infinitesimally homogeneous triples having the same characteristic tensors are locally equivalent (by means of affine $G$-structure preserving diffeomorphisms). It is then very natural to ask what are the necessary and sufficient conditions for maps $R_0$, $T_0$, ${{\mathfrak{I}}}^P_0$ to be the characteristic tensors of an infinitesimally homogeneous triple $(M,\nabla,P)$. This paper answers such question.
The main result of this paper can be seen as part of a program of reducing a problem of classification of certain geometric objects to a problem of classification of certain algebraic objects. Other examples of such reductions are: (i) the result that two Lie groups having the same Lie algebra are locally isomorphic and every Lie algebra is the Lie algebra of a Lie group; (ii) the result that two Riemannian symmetric spaces having the same orthogonal involutive Lie algebra (oil algebra) are locally isometric and every oil algebra is the oil algebra of a Riemannian symmetric space (see \cite{Helgason}).
It would be natural to ask what are the necessary and sufficient conditions for $R_0$, $T_0$, ${{\mathfrak{I}}}^P_0$ to be the characteristic tensors of a (globally) homogeneous triple $(M,\nabla,P)$. Is it true that if $R_0$, $T_0$, ${{\mathfrak{I}}}^P_0$ are the characteristic tensors of an infinitesimally homogeneous triple then they are also the characteristic tensors of some (globally) homogeneous triple? While we do not know the answer to that question, a partial answer will be given in a forthcoming paper.
\end{section}
\begin{section}{Notation and Preliminaries}
\begin{subsection}{Vector spaces}Let $V$ be a real finite-dimensional vector space. We denote by ${{\rm{GL}}}(V)$ the general linear group of $V$ and by $\mathfrak{gl}(V)$ its Lie algebra. If $W$ is another real finite-dimensional vector space, $\mathrm{Lin}_k(V;W)$ denotes the space of $k$-linear maps from $V$ to $W$. Given multilinear maps $T\in \mathrm{Lin}_k(V;V)$, $S\in \mathrm{Lin}_k(W;W)$ and a (not necessarily invertible) linear map $\sigma:V\to W$ then $T$ is said to be {\em $\sigma$-related\/} with $S$ if: \[S\big(\sigma (v_1),\dots,\sigma(v_k)\big)=\sigma\big(T(v_1,\dots,v_k)\big),\] for all $v_1,\dots,v_k \in V$. If $p:V\to W$ is a linear isomorphism we denote by $\mathcal I_p:{{\rm{GL}}}(V)\to{{\rm{GL}}}(W)$ the Lie group isomorphism given by conjugation with $p$ and $\mathrm{Ad}_p={\rm d} \mathcal I_p(\mathrm{Id}):\mathfrak{gl}(V)\to\mathfrak{gl}(W)$ denotes the Lie algebra isomorphism given by conjugation with $p$. \end{subsection}
\begin{subsection}{$\mathbf G$-structures on manifolds}
If $G$ is a Lie subgroup of ${\rm GL}(n)$, by a {\em $G$-structure\/} on an $n$-dimensional real vector space $V$ we mean a $G$-orbit of the action given by right composition of ${\rm GL}(n)$ on the set of all linear isomorphisms $p:\ensuremath{\mathbb{R}}^n\to V$. By a $G$-structure on an $n$-dimensional differentiable manifold $M$ we mean a $G$-principal subbundle $P$ of $ {{\rm{FR}}}(TM)$, such that for each $x\in M$, $P_x$ is a $G$-structure on the vector space $T_xM$. Let $M$ and $M'$ be $n$-dimensional diffe\-rentiable manifolds endowed with $G$-structures $P$ and $P'$, respectively. A smooth map $f:M\to M'$ is said to be {\em $G$-structure preserving\/} if for each $x\in M$, the linear map ${\rm d} f_x:T_xM\to T_{f(x)}M'$ sends frames of $P_x$ to frames that belong to $P'_{f(x)}$.
\begin{obs}\label{thm:tensorG-invariante} If $G$ is a Lie subgroup of ${\rm GL}(n)$ a multilinear map $\tau_0 \in \mathrm{Lin}_k(\ensuremath{\mathbb{R}}^n;\ensuremath{\mathbb{R}}^n)$ is said to be {\em $G$-invariant\/}, if for each $g \in G$, $\tau_0$ is $g$-related with itself. Clearly, given a $G$-invariant tensor $\tau_0 \in \mathrm{Lin}_k(\ensuremath{\mathbb{R}}^n;\ensuremath{\mathbb{R}}^n)$ one can induce a version of $\tau_0$ on every vector space endowed with a $G$-structure. More precisely, let $V$ be a real $n$-dimensional vector space endowed with a $G$-structure $P$. Given any $p\in P$ let $\tau_V\in\mathrm{Lin}_k(V;V)$ be the tensor which is $p$-related with $\tau_0$. The $G$-invariance of $\tau_0$ implies that $\tau_V$ does not depend on the choice of $p \in P$. In particular, when $M$ is an $n$-dimensional differentiable manifold endowed with a $G$-structure $P$ and $\tau_0 \in \mathrm{Lin}_k(\ensuremath{\mathbb{R}}^n;\ensuremath{\mathbb{R}}^n)$ is $G$-invariant, by using frames that belong to $P$ it is possible to define a tensor field $\tau$ on $M$ such that for each $x\in M$, the map $\tau_x\in\mathrm{Lin}_k(T_xM;T_xM)$ is the version of $\tau_0$ in $T_xM$. \end{obs} \end{subsection} \begin{subsection}{Connections on vector bundles}
Let $E$ be a vector bundle over a differentiable manifold $M$ with typical fiber $E_0$. We denote by $\mathbf{\Gamma}(E)$ the set of all smooth sections of $E$ and by ${{\rm{FR}}}_{E_0}(E)$ the ${{\rm{GL}}}(E_0)$-principal bundle over $M$ formed by all $E_0$-frames of $E$. When $E_0=\ensuremath{\mathbb{R}}^n$ we write ${{\rm{FR}}}(E)$ instead of ${{\rm{FR}}}_{E_0}(E)$. If $\epsilon:U\to E$ is a local section of the vector bundle $E$ and $s:U\to {{\rm{FR}}}_{E_0}(E)$ is a smooth local frame for $E$ then the {\em representation\/} of the section $\epsilon$ with respect to the smooth local frame $s$ is a map $\tilde{\epsilon}:U\to E_0$ defined by: $\tilde\epsilon(x)=s(x)^{-1}\big(\epsilon(x)\big)$, for all $x\in U$.
A smooth local frame $s:U\to {{\rm{FR}}}_{E_0}(E)$ defines, in a natural way, a connection ${\mathrm{d\!I}}^s$ in $E\vert_U$, which corresponds via the trivialization of $E\vert_U$ defined by $s$ to the standard derivative. More explicitly, we set: \[{\mathrm{d\!I}}^s_v\epsilon=s(x)\big({\rm d}\tilde\epsilon_x(v)\big),\] for all $x\in U$, $v\in T_xM$ and all $\epsilon\in\mathbf{\Gamma}(E\vert_U)$, where $\tilde\epsilon:U\to E_0$ denotes the representation of $\epsilon$ with respect to the local frame $s$.
If $\nabla$ is a connection in $E$, the {\em Christoffel tensor\/} of $\nabla$ with respect to the smooth local frame $s$ is the smooth tensor $\Gamma=\nabla-{\mathrm{d\!I}}^s \in \mathbf{\Gamma}(TM^*\otimes E^*\otimes E)$ such that: \[\nabla_v\epsilon={\mathrm{d\!I}}^s_v\epsilon+\Gamma_x\big(v,\epsilon(x)\big),\] for all $x\in U$, $v\in T_xM$ and all $\epsilon\in\mathbf{\Gamma}(E\vert_U)$. Denoting by $\omega$ the smooth $\mathfrak{gl}(E_0)$-valued connection form on ${{\rm{FR}}}_{E_0}(E)$ associated to $\nabla$, we have the following: \begin{equation}\label{eq:tensorcriseforma} \Gamma_x(v)=s(x)\circ\bar\omega_x(v)\circ s(x)^{-1}\in\mathfrak{gl}(E_x), \end{equation} for all $x\in U$, $v\in T_xM$, where $\bar\omega=s^*\omega$ denotes the pullback by $s$ of the connection form $\omega$.
\begin{obs}\label{thm:diferenciadeconexiones} If $\nabla$ is a (symmetric) connection on $TM$ and ${\mathfrak{t}}:\mathbf{\Gamma}(TM)\times\mathbf{\Gamma}(TM)\to\mathbf{\Gamma}(TM)$ is an arbitrary $C^\infty(M)$-bilinear (symmetric) map, $\nabla'=\nabla+{\mathfrak{t}}$ is also a (symmetric) connection on $TM$ and a simple calculation shows that, (see \cite{T}): \begin{align} \label{eq:curvaturas} R'(X,Y)Z &= R(X,Y)Z+(\nabla_X{\mathfrak{t}})(Y,Z)-(\nabla_Y{\mathfrak{t}})(X,Z) + [{\mathfrak{t}}(X),{\mathfrak{t}}(Y)]Z\\ \label{eq:torcoes} T'(X,Y)&= T(X,Y)+{\mathfrak{t}}(X)Y-{\mathfrak{t}}(Y)X, \end{align} for each $X,Y,Z\in\mathbf{\Gamma}(TM)$. Where $R'$ and $T'$ denote the curvature and torsion tensors of $\nabla'$, respectively; $R$ and $T$ denote the curvature and torsion tensors of $\nabla$, respectively. \end{obs}
\end{subsection} \end{section}
\begin{section}{Infinitesimally homogeneous manifolds\/}\label{sectioninfinitesimally}
Let ${(M,\nabla,P)}$ be an $n$-dimensional affine manifold with $G$-structure $P$. The inner torsion of $P$ with respect to the connection $\nabla$ was introduced in \cite{T}; this notion gives rise to a tensor field ${{\mathfrak{I}}}^P$ on $M$ that measures the lack of compatibility of the connection $\nabla$ with $P$. Since this notion plays an important role in this work, we briefly present its definition below.
For each $x\in M$, we denote by $G_x$ the Lie subgroup of ${{\rm{GL}}}(T_xM)$ consisting of {\em $G$-structure preserving\/} endomorphisms of $T_xM$. Clearly $G_x = \mathcal{I}_p(G)$, for all $p \in P_x$, so that $G_x$ is a Lie subgroup of ${{\rm{GL}}}(T_xM).$ We denote by $\mathfrak{g}_x\subset {\mathfrak{gl}(T_xM)}$ the Lie algebra of $G_x$. It is clear that $\mathrm{Ad}_p(\mathfrak{g})=\mathfrak{g}_x$, for all $p\in P_x$, where $\mathfrak{g}\subset {\mathfrak{gl}(n)}$ denotes the Lie algebra of $G$. Since $\mathrm{Ad}_p:{\mathfrak{gl}(n)} \to {\mathfrak{gl}(T_xM)}$ carries $\mathfrak{g}$ onto ${\mathfrak{g}_x}$; therefore, it induces an isomorphism: \[\overline{\rm{Ad}}_p :{\mathfrak{gl}(n)}/\mathfrak{g} \to {\mathfrak{gl}(T_xM)}/{\mathfrak{g}_x}.\] Let $s:U\subset M \to P$ be a smooth local section of $P$, with $x\in U$ and set $s(x)=p$. If $\omega$ denotes the ${\mathfrak{gl}(n)}$-valued connection form on ${{\rm{FR}}}(TM)$ associated with $\nabla$ and $\overline{\omega}=s^*{\omega}$. The map \begin{equation}\label{eq:e2}\xymatrix{ T_xM \ar@.@/_1.5pc/[rrr]_{{{\mathfrak{I}}} ^P_x}\ar[r]^-{\overline{\omega}_x}& \mathfrak{gl(n)} \ar[r]^-{\mathfrak q}& \mathfrak{gl(n)}/\mathfrak{g} \ar[r]^-{\overline{{\rm{Ad}}}_{p}}& {\mathfrak{gl}(T_xM)}/{\mathfrak{g}_x} } \end{equation} does not depend on the choice of the local section $s$. The linear map ${{\mathfrak{I}}}^P_x$ defined by \eqref{eq:e2} is called the {\em inner torsion\/} of the $G$-structure $P$ at the point $x$ with respect to the connection $\nabla$. It follows from \eqref{eq:tensorcriseforma}, that if $s:U\to P$ is a smooth local section with $x\in U$ and $\Gamma$ denotes the Christoffel tensor of $\nabla$ with respect to $s$ then the inner torsion ${{\mathfrak{I}}}^P_x$ is precisely the composition of the $\Gamma_x :T_xM \to {\mathfrak{gl}(T_xM)}$ with the quotient map ${\mathfrak{gl}(T_xM)}\to {\mathfrak{gl}(T_xM)}/{\mathfrak{g}_x}$. 
This observation gives a simple way of computing inner torsions (see \cite{T}).
The geometry of an affine manifold with $G$-structure $(M,\nabla,P)$ is described by three tensors of $M$: the torsion $T$ of $\nabla$, the curvature $R$ of $\nabla$ and the inner torsion ${{\mathfrak{I}}}^P$. An important class of examples of affine manifolds with $G$-structure is defined by the property that these three tensors $T$, $R$ and ${{\mathfrak{I}}}^P$ be {\em constant\/} when written in frames of the $G$-structure $P$. When this is the case, $(M,\nabla,P)$ is said to be {\em infinitesimally homogeneous\/}. This statement is made more precise in the following definition.
\begin{df}\label{thm:tensoresconstantes} An $n$-dimensional affine manifold with $G$-structure, ${(M,\nabla,P)}$ is said to be {\em infinitesimally homogeneous\/} if there exist maps $R_0\in\mathrm{Lin}_3(\ensuremath{\mathbb{R}}^n,\ensuremath{\mathbb{R}}^n)$, $T_0\in\mathrm{Lin}_2(\ensuremath{\mathbb{R}}^n,\ensuremath{\mathbb{R}}^n)$ and a linear map ${{\mathfrak{I}}}_0:\ensuremath{\mathbb{R}}^n\to{\mathfrak{gl}(n)}/\mathfrak g$ such that: for every $x\in M$, every $p\in P_x$ relates $T_0$ with $T_x$, $R_0$ with $R_x$ and $\overline\mathrm{Ad}_p\circ{{\mathfrak{I}}}_0={{\mathfrak{I}}}^P_x\circ p$. \end{df} The maps $T_0$, $R_0$, ${{\mathfrak{I}}}_0$ as referred to above are called the {\em cha\-racteristic tensors\/} of the infinitesimally homogeneous manifold $(M,\nabla,P)$.
Clearly, the charac\-teristic tensors $T_0$, $R_0$, ${{\mathfrak{I}}}_0$ of an infinitesimally homogeneous manifold $(M,\nabla,P)$ are invariant by the action of the {\em structural group\/} $G$. Therefore, it follows from the $G$-invariance condition that the following relations hold: \begin{gather} \label{eq:R0} R_0(u,v)=\mathrm{Ad}_g\cdot R_0(g^{-1}\cdot u,g^{-1}\cdot v);\\ \label{eq:T0} T_0(u,v)=g\cdot T_0(g^{-1}\cdot u,g^{-1}\cdot v);\\ \label{eq:e6} \mathrm{Ad}_g\big(\lambda(g^{-1}\cdot u)\big)-\lambda(u)\in {\mathfrak{g}}, \end{gather} for all $g\in G$, all $u, v\in \ensuremath{\mathbb{R}}^n$. Where $\lambda: \ensuremath{\mathbb{R}}^n\to{\mathfrak{gl}(n)}$ is an arbitrary lifting of ${{\mathfrak{I}}}_0$. Notice that relation \eqref{eq:e6} does not depend on $\lambda$. In fact, let $\lambda, \delta$ be liftings of ${{\mathfrak{I}}}_0$. Write $\lambda = \delta+L$, where $L$ is a ${\mathfrak{g}}$-valued linear map defined in $\ensuremath{\mathbb{R}}^n$. An easy computation shows that: \[{\mathfrak{g}} \ni \mathrm{Ad}_g\big(\lambda(g^{-1}\cdot u)\big)-\lambda(u) = \mathrm{Ad}_g\big(\delta(g^{-1}\cdot u)\big)-\delta(u) + \underbrace{\mathrm{Ad}_g\big(L(g^{-1}\cdot u)\big)-L(u)}_{\in {\mathfrak{g}}},\] for all $g\in G$, $u\in\ensuremath{\mathbb{R}}^n$.
By differentiating \eqref{eq:R0}, \eqref{eq:T0}, and \eqref{eq:e6} we obtain the following: \begin{lema}\label{thm:g-invariantes1} Let $\lambda: \ensuremath{\mathbb{R}}^n\to{\mathfrak{gl}(n)}$ be an arbitrary lifting of ${{\mathfrak{I}}}_0$. Then for all $L \in {\mathfrak{g}}$ and all $u,v \in \ensuremath{\mathbb{R}}^n$, the following conditions hold: \begin{enumerate} \item $[L, R_0(u,v)]-R_0(L\cdot u,v)-R_0(u,L\cdot v) =0;$ \item $L\circ T_0(u,v)-T_0(L\cdot u,v)-T_0(u,L\cdot v)=0;$ \item $[L,\lambda(u)]-\lambda(L\cdot u)\in {\mathfrak{g}}$. \end{enumerate} \end{lema} \end{section}
\begin{section}{ Algebraic relation between the characteristic tensors\/} It is a natural question to ask whether one can give a (local) {\em classification\/} of infinitesimally homogeneous manifolds with prescribed group $G$ and prescribed characteristic tensors $T_0$, $R_0$, ${{\mathfrak{I}}}_0$. We solve this question in this paper by giving ne\-cessary and sufficient conditions for maps $T_0$, $R_0$, ${{\mathfrak{I}}}_0$ to be the characteristic tensors of an infinitesimally homogeneous manifold. Our plan for developing the necessary condition is the following: we show that to give a classification of infinite\-simally homogeneous manifolds with prescribed group $G$ is equivalent to finding an infinitesimally homogeneous manifold without torsion whose structural group is $G$, and to give a classification of the $G$-invariant maps ${\mathfrak{t}}_0\in \mathrm{Lin}_2(\ensuremath{\mathbb{R}}^n,\ensuremath{\mathbb{R}}^n)$. Once, this is done, in order to obtain the aimed condition, it will suffice to consider the case of symmetric connections (equivalently $T_0=0$). This is the purpose of this section, and the sufficient conditions will be developed in the following section.
\subsection{Covariant derivative for $G$-constant tensors\/}\label{thm:derivadatensorgconstante}
Let ${(M,\nabla,P)}$ be a homogeneous affine manifold with $G$-structure $P$. If ${{\mathfrak{I}}}^P=0$, i.e., the covariant deri\-vative of $P$ is zero, it follows that every $G$-constant tensor is parallel with respect to $\nabla$. On the other hand, if $\nabla$ is not compatible with $P$, i.e., the covariant deri\-vative of $P$ is not zero, this is not true. In what follows we will show a simple way to calculate the covariant derivative for $G$-constant tensors in this case, i.e., when ${{\mathfrak{I}}}^P\ne 0$.
Denoting by $\Vect{}$ the category whose objects are real finite-dimensional vector spaces and whose morphisms are linear isomorphisms. Given a smooth functor $\Cat F:\Vect{} \to\Vect{}$ and any object $V$ of $\Vect{}$, $\Cat F$ induces a Lie group homomorphism $\Cat F:{{\rm{GL}}}(V)\longrightarrow{{\rm{GL}}}\big(\Cat F(V)\big),$ whose differential at the identity is a Lie algebra homomorphism that will be denoted by $ {\Cat f}:\mathfrak{gl}(V)\longrightarrow\mathfrak{gl}\big(\Cat F(V)\big). $
Let $E$ be a vector bundle with typical fiber $E_0$ over $M$. Given a smooth functor $\Cat F:\Vect{}\to\Vect{}$ we denote by $\Cat F(E)=\bigcup_{x\in M}\Cat F(E_x)$, the vector bundle with typical fiber $\Cat F(E_0)$ obtained from $E$ by using $\Cat F$.
Given a smooth funtor $\Cat F :\Vect {}\to \Vect {}$ we have the following: \begin{lema}\label{thm:derivadaconstantes} Let ${\mathfrak{t}}$ be a smooth $G$-constant section of $\Cat F(TM)$. Then \begin{equation}\label{eq:derivadaGconstante}\nabla_v{\mathfrak{t}} = \Cat f(L)\cdot{\mathfrak{t}}_x, \end{equation} for all $x\in M$, $v \in T_xM$, where $L\in {\mathfrak{gl}(T_xM)}$ is such that $\mathfrak I_x^P(v)=L+{\mathfrak{g}_x}$. \end{lema} \begin{proof} Clearly ${\mathfrak{t}}$ can be thought of as an ${{\rm{FR}}}(TM)$-valued $0$-form on $M$, which is associated to a $0$-form $\phi:{{\rm{FR}}}(TM)\to \Cat F(\ensuremath{\mathbb{R}}^n)$ such that: $\phi(p)=\Cat F(p)^{-1}({\mathfrak{t}}_x)$ for all $x\in M$, $p \in {{\rm{FR}}}(TM)$. Moreover the covariant exterior differential $\mathrm D \phi$ is associated to the covariant exterior differential $\mathrm D {\mathfrak{t}}$ of ${\mathfrak{t}}$ \cite{T}. More explicitly, we have: \begin{equation}\label{eq:derivadaconstante1} {\rm d} \phi_p(\zeta) =\mathrm D\phi_p(\zeta)=\Cat F(p)^{-1}(\mathrm D{\mathfrak{t}})_x\cdot v =\Cat F(p)^{-1}\nabla_v{\mathfrak{t}}, \end{equation} for all $x \in M$, $p\in P_x$, $v \in T_xM$ and $\zeta$ a horizontal vector such that ${\rm d} {\Pi}_p(\zeta) =v$, where ${\Pi}:{{\rm{FR}}}(TM)\to M$ denotes the canonical projection. To obtain the desired result, we must to calculate ${\rm d} \phi_p(\zeta)$. If $X\in {\mathfrak{gl}(n)}$ is such that $\overline{\mathrm{Ad}}_p(X+{\mathfrak{g}})=\mathfrak I_x^P(v)$ then \[\zeta =({\rm d} {\Pi}_p,\omega_p)^{-1}(v,X)-({\rm d} {\Pi}_p,\omega_p)^{-1}(0,X)=\underbrace{({\rm d} {\Pi}_p,\omega_p)^{-1}(v,X)}_{\in T_pP}-{\rm d} \beta_p(1)\cdot X,\] where $\beta_p$ denotes the map given by the action of ${\rm GL}(n)$ on $p$. Since $\phi\mid_P$ is constant, we have: \begin{equation}\label{eq:derivadaconstante2}{\rm d} \phi_p(\zeta) = -{\rm d} \phi_p\big({\rm d} \beta_p(1)\cdot X\big)=\Cat f(X)\cdot {\mathfrak{t}}_0. 
\end{equation} But \eqref{eq:derivadaGconstante} follows directly from equalities \eqref{eq:derivadaconstante1}, \eqref{eq:derivadaconstante2}. \end{proof}
\begin{example} Let $\Cat F:\Vect {}\to \Vect {}$ be the funtor defined by: \[\Cat F(V) =\mathrm{Lin}_k(V;\mathrm{Lin}(V))\] for each object $V$ of $\Vect {}$. Let ${(M,\nabla,P)}$ be an $n$-dimensional affine manifold with $G$-structure. If ${\mathfrak{t}}_0\in \mathrm{Lin}_k\big(\ensuremath{\mathbb{R}}^n;{\mathfrak{gl}(n)}\big)$ is a $G$-constant tensor, denoting by ${\mathfrak{t}}_x$ the induced version of ${\mathfrak{t}}_0$ on $T_xM$, by using \eqref{thm:derivadaconstantes} we have: \[\nabla_v{\mathfrak{t}}=[L,{\mathfrak{t}}_x(\cdot,\dots,\cdot)]-{\mathfrak{t}}_x(L\cdot,\cdot,\dots,\cdot)-\cdots -{\mathfrak{t}}_x(\cdot,\cdot,\dots,L\cdot), \] where $L \in {\mathfrak{gl}(T_xM)}$ is such that $\mathfrak I_x^P(v) =L+{\mathfrak{g}_x}$. On the other hand, it is clear that an arbitrary lifting $\lambda: \ensuremath{\mathbb{R}}^n\to \mathfrak{gl}(n)$ of $\mathfrak I_0$, induces for all $X \in \ensuremath{\mathbb{R}}^n$, a derivation $\mathcal{D}_{\lambda(X)}$ on the tensor algebra over the vector space $\ensuremath{\mathbb{R}}^n$, an easy computation shows that: \[\big(\mathcal D_{\lambda(X)}{\mathfrak{t}}_0\big)=\Cat f\big(\lambda(X)\big)\cdot {\mathfrak{t}}_0\] Therefore, if $\lambda$ is an arbitrary lifting of ${{\mathfrak{I}}}_0$, given $x\in M$, $p \in P_x$ and $X\in \ensuremath{\mathbb{R}}^n$ such that $v=p(X)$ and $\mathrm{Ad}_p(\lambda(X)) =L$ we have: \[\mathrm{Ad}_p(\mathcal D_{\lambda(X)}{\mathfrak{t}}_0)= (\nabla_v{\mathfrak{t}})\circ (p,\dots,p).\] \end{example}
\subsection{Infinitesimally homogeneous manifolds without torsion\/} Let ${(M,\nabla,P)}$ be an $n$-dimensional affine manifold with $G$-structure and assume that $\nabla$ is a symmetric connection. Let ${\mathfrak{t}}_0 \in \mathrm{Lin}_2(\ensuremath{\mathbb{R}}^n,\ensuremath{\mathbb{R}}^n)$ be a $G$-invariant skew-symmetric tensor. For each $x\in M$, we denote by ${\mathfrak{t}}_x$ the induced version of ${\mathfrak{t}}_0$ on $T_xM$. In view of remark \ref{thm:diferenciadeconexiones}, it is clear that $\nabla' = \nabla +\frac 12{\mathfrak{t}}$ defines a connection on $M$ whose torsion is ${\mathfrak{t}}$. We devote this section to prove the following. \begin{lema}\label{thm:lemasintorsion} With the same notation as above, if $(M,\nabla,P)$ is an infinitesimally homogeneous manifold then the triple $(M,\nabla',P)$ is also infinitesimally homogeneous. \end{lema} \begin{proof}It is enough to prove that there exists tensors $T_0'$, $R_0'$, ${{\mathfrak{I}}}_0'$ as in \ref{thm:tensoresconstantes}. We take $T'_0={\mathfrak{t}}_0$. On the other hand, ${\mathfrak{t}}$ can be identified with a smooth $\mathrm{Lin}(TM)$-valued covariant $1$-tensor field on $M$. Let $s:U\to P$ be a smooth local section of $P$. We denote by $\Gamma'$ and $\Gamma$, respectively, the Christoffel tensor of $\nabla'$ and $\nabla$ with respect to $s$. Given $x\in U$, it is clear that $\Gamma'_x =\Gamma_x+{\mathfrak{t}}_x$, by composing this with the canonical projection $\mathfrak{gl}(T_xM)\to \mathfrak{gl}(T_xM)/{\mathfrak{g}_x}$ we obtain: \[\mathfrak I_x'^P = \mathfrak I_x^P + \mathfrak q\circ {\mathfrak{t}}_x.\] Therefore, we can take $\mathfrak I'_0 = \mathfrak I_0 + \mathfrak q\circ {\mathfrak{t}}_0.$ On the other hand, we denote by $R'$ and $R$, respectively, the curvature tensor of $\nabla'$ and $\nabla$. Let $\lambda$ be an arbitrary lifting of ${{\mathfrak{I}}}_0$, $x\in U$ and set $s(x)=p$. 
From \eqref{eq:curvaturas} and by using lemma ~\ref{thm:derivadaconstantes} we have that the following holds: \begin{eqnarray} R'_x(p\cdot,p\cdot)&=&R_x(p\cdot,p\cdot )+(\mathrm D{\mathfrak{t}})_x(p\cdot,p\cdot)+[{\mathfrak{t}}_x(p\cdot),{\mathfrak{t}}_x(p\cdot)] \nonumber \\ &=&\mathrm{Ad}_p\circ\big( R_0(\cdot,\cdot) +\mathrm{Alt}\big(\mathcal D_{\lambda(\cdot)}{\mathfrak{t}}_0\big)\cdot+[{\mathfrak{t}}_0(\cdot),{\mathfrak{t}}_0(\cdot)]\big).\nonumber\end{eqnarray}
Therefore, in order to obtain the desired result we can take \[R'_0 = R_0 + \mathcal D {\mathfrak{t}}_0+[{\mathfrak{t}}_0,{\mathfrak{t}}_0].\]
\end{proof}
\subsection{The necessary conditions\/}\label{relationalgebric}
We are now ready to give necessary conditions which must be satisfied by the characteristic tensors of an infinitesimally homogeneous manifold. To do this, throughout the subsection we consider a fixed $n$-dimensional infinitesimally homogeneous manifold ${(M,\nabla,P)}$ with structural group $G$. From lemma \ref{thm:lemasintorsion} it fo\-llows that we may assume without loss of genera\-lity that $\nabla$ is a symmetric connection with curvature $R$. We denote by $R_0, {{\mathfrak{I}}}_0$ the characteristic tensors of ${(M,\nabla,P)}$. Clearly, a necessary condition is that $R_0, {{\mathfrak{I}}}_0$ are $G$-invariant.
Let $\omega$ be the $\mathfrak{gl}(n)$-valued connection form on ${{\rm{FR}}}(TM)$ associated with $\nabla$, let $\Omega$ be its curvature form and let $\theta$ be the canonical form of ${{\rm{FR}}} (TM)$. Given a smooth local frame $s:U\to P$ then, setting $\overline \omega =s^*(\omega)$, $\overline{\Omega}=s^*\Omega$, $\overline{\theta}=s^*\theta$, we have: \[\overline \Omega = {\rm d} \overline \omega + \overline \omega \wedge \overline \omega,\;\;\;{\rm d} \overline{\theta}=-\overline{\omega}\wedge \overline{\theta}.\] Moreover, the infinitesimal homogenity implies that: \[\overline \Omega_x(X,Y)= s(x)\circ R_x(X,Y) \circ s(x)^{-1}=R_0\big(s(x)^{-1}X,s(x)^{-1}Y\big),\] \[ \mathfrak q \circ \overline{\omega}_x = \overline{\mathrm{Ad}}_{s(x)^{-1}}\circ {{\mathfrak{I}}}_x^P={{\mathfrak{I}}}_0 \circ \overline{\theta},\] for all $x\in U$, $X, Y \in T_xM$, where $\mathfrak{q}:\mathfrak{gl}(n)\to \mathfrak{gl}(n)/\mathfrak{g}$ denotes the canonical projection and $\mathfrak{g}$ denotes the Lie algebra of $G$.
Clearly when the linear map ${{\mathfrak{I}}}^P$ vanishes, $\overline \Omega$ is a $\mathfrak{g}$-valued $2$-form on $M$. Under the previous conditios, in order to handle the general case in which $P$ is not compatible with $\nabla$ we get: \begin{eqnarray} \mathfrak q\circ \overline{\Omega} &=& {\rm d} (\mathfrak q \circ \overline{\omega})+ \mathfrak q \circ \overline{\omega}\wedge \overline{\omega} \nonumber \\ &=& {\rm d} ({{\mathfrak{I}}}_0 \circ \overline{\theta})+ \mathfrak q \circ \overline{\omega}\wedge \overline{\omega} \nonumber \\ &=& {{\mathfrak{I}}}_0\circ {\rm d} \overline{\theta} +\mathfrak q \circ \overline{\omega} \wedge \overline{\omega} \nonumber\\ &=& \label{eq:proyecciondelaforma}-{{\mathfrak{I}}}_0 \circ (\overline{\omega} \wedge \overline{\theta}) + \mathfrak q \circ\overline{\omega} \wedge \overline{\omega}. \end{eqnarray} Given $x\in U$, let $\widetilde{\Gamma}: \ensuremath{\mathbb{R}}^n \to {\mathfrak{gl}(n)}$ be the map defined by requiring the diagram \[ \xymatrix{T_xM \ar[r]^-{\Gamma_x} \ar[rd]_{\overline{\omega}_x}& \mathfrak{gl}(T_xM)\\ \ensuremath{\mathbb{R}}^n \ar[r]_-{\widetilde{\Gamma}} \ar[u]^{s(x)} & \mathfrak{gl}(n) \ar[u]_{\mathrm{Ad}_{s(x)}} } \] to be commutative. Therefore, ${{\mathfrak{I}}}_0 =\mathfrak q \circ \widetilde{\Gamma}$ and substituting in \eqref{eq:proyecciondelaforma} we obtain the following relation: \begin{equation*} \overline{\Omega}_x+\widetilde{\Gamma} \circ (\overline{\omega}_x\wedge\overline{\theta}_x)-\overline{\omega}_x\wedge \overline{\omega}_x \in {\mathfrak{g}}.\end{equation*}
Thus, given vectors $u, v \in \ensuremath{\mathbb{R}}^n$ the relation above can be written as: \begin{equation}\label{eq:relentreReI} R_0(u,v)-[\widetilde{\Gamma}(u),\widetilde{\Gamma}(v)]+\widetilde{\Gamma}\big(\widetilde{\Gamma}(u)v-\widetilde{\Gamma}(v)u\big) \in {\mathfrak{g}}.\end{equation} This relation does not depend on the choice of $\tilde{\Gamma}$. Namely, let $\lambda$ be an arbitrary lifting of ${{\mathfrak{I}}}_0$ and $\delta$ be a ${\mathfrak{g}}$-valued linear map in $\ensuremath{\mathbb{R}}^n$ such that $\widetilde{\Gamma}= \lambda+\delta$. Substituting this into \eqref{eq:relentreReI}, we obtain \begin{equation}\label{eq:relationRandI} {\mathfrak{g}} \ni R_0(u,v)-[\lambda(u),\lambda(v)]+\lambda\big(\lambda(u)v-\lambda(v)u\big) + \mathcal A(\delta) + \mathcal B(\delta),\end{equation} where
\begin{align*} \mathcal A(\delta)&=\big([\delta(v),\lambda(u)]-\lambda(\delta(v)\cdot u)\big)- \big([\delta(u),\lambda(v)]-\lambda(\delta(u)\cdot v)\big),\\ \mathcal B(\delta)&=\delta\big(\widetilde{\Gamma}(u)v - \widetilde{\Gamma}(v)u\big)-[\delta(u),\delta(v)].\\ \end{align*}
Lemma~\ref{thm:g-invariantes1} thus guarantees that $\mathcal A(\delta) \in {\mathfrak{g}}$; moreover, $\mathcal B(\delta) \in {\mathfrak{g}}$ because $\delta$ is a ${\mathfrak{g}}$-valued linear map. Therefore for an arbitrary lifting $\lambda$ of ${{\mathfrak{I}}}_0$ the following relation holds: \[R_0(u,v)-[\lambda(u),\lambda(v)]+\lambda\big(\lambda(u)v-\lambda(v)u\big)\in {\mathfrak{g}};\] this shows the independence of the lifting; hence we have proved the following:
\begin{teo}\label{thm:relentreReI} Let $M$ be an $n$--dimensional differentiable manifold, $G$ a Lie subgroup of ${{\rm{GL}}}(n)$ with Lie algebra $\mathfrak{g}$ and assume that $M$ is endowed with a symme\-tric connection $\nabla$ and a $G$--structure $P\subset {{\rm{FR}}}(T M )$. Assume that ${(M,\nabla,P)}$ is an infinitesimally homogeneous manifold with characteristic tensors $R_0$, ${{\mathfrak{I}}}_0$. Then given an arbitrary lifting $\lambda$ of ${{\mathfrak{I}}}_0$, the following relation holds: \[R_0(u,v)-[\lambda(u),\lambda(v)]+\lambda\big(\lambda(u)v-\lambda(v)u\big)\in {\mathfrak{g}},\] for all $u,v \in \ensuremath{\mathbb{R}}^n$. \end{teo} \end{section}
\begin{section}{Infinitesimally homogeneous manifolds with prescribed group and prescribed characteristic tensors}\label{inverseproblem}
We devote this section to obtain sufficient conditions for maps $T_0$, $R_0$, ${{\mathfrak{I}}}_0$ to be the characteristic tensors of an infinitesimally homogeneous manifold. Therefore, in this section we will consider fixed a real finite-dimensional vector space ${\mathfrak{m}}$, a Lie subgroup $H \subset {{\rm{GL}}}({\mathfrak{m}})$ with Lie algebra ${\mathfrak{h}} \subset \mathfrak{gl}({\mathfrak{m}})$ and $H$--invariant maps $R_0 \in \mathrm{Lin}_2\big({\mathfrak{m}},\mathfrak{gl}({\mathfrak{m}})\big)$, ${{\mathfrak{I}}}_0:{\mathfrak{m}}\to \mathfrak{gl}({\mathfrak{m}})/{\mathfrak{h}}$. As we said above, our goal is to obtain conditions for the maps $R_0$, ${{\mathfrak{I}}}_0$ to be the characteristic tensors of an infinitesimally homogeneous manifold $(M,\nabla,P)$.
Let $\lambda:{\mathfrak{m}}\to \mathfrak{gl}({\mathfrak{m}})$ be an arbitrary lifting of ${{\mathfrak{I}}}_0$. As in Section \ref{sectioninfinitesimally}, by using the $H$--invariance of ${{\mathfrak{I}}}_0$ we conclude that the following relation holds: \begin{equation} \label{eq:inverso2}[L,\lambda(X)]-\lambda(L\cdot X)\in {\mathfrak{h}}, \end{equation} for all $L\in {\mathfrak{h}}$ and all $X \in {\mathfrak{m}}$. An analogous relation to \eqref{eq:relentreReI} is: \begin{equation}\label{eq:inverso3}R_0(X,Y)-[\lambda(X),\lambda(Y)]+\lambda\big(\lambda(X)Y-\lambda(Y)X\big) \in {\mathfrak{h}}\end{equation} for all $X,Y \in {\mathfrak{m}}$. Neither relation \eqref{eq:inverso2} nor relation \eqref{eq:inverso3} depends on the choice of $\lambda$.
Assuming that \eqref{eq:inverso3} holds, we have the following: \begin{df}\label{thm:colchete} Setting ${\mathfrak{a}} = {\mathfrak{h}} \oplus {\mathfrak{m}}$. We endow ${\mathfrak{a}}$ with a bracket operation which is defined below. For each $X,Y \in {\mathfrak{m}}$, each $L, T \in {\mathfrak{h}}$ we set: \begin{enumerate} \item $[X,Y]^{{\mathfrak{m}}} = \lambda(X)\cdot Y-\lambda(Y)\cdot X$;
\item $[X,Y]^{{\mathfrak{h}}} =R_0(X,Y)+\lambda\big(\lambda(X)\cdot Y-\lambda(Y)\cdot X\big)- [\lambda(X),\lambda(Y)]$;
\item $[L,X]^{{\mathfrak{m}}}=L\cdot X$;
\item $[L,X]^{{\mathfrak{h}}}=[L,\lambda(X)]-\lambda(L\cdot X)$;
\item $[L,T]$ is the Lie bracket of ${\mathfrak{h}}$;
\item $[L,X]=-[X,L]$.
\end{enumerate} \end{df}
We will prove that the vector space ${\mathfrak{a}}$ endowed with the bracket operation as above is a Lie algebra. Before we proceed, we will present some algebraic preli\-minaries.
\begin{df} We say that the map $R_0$ satisfies the {\em Bianchi identities} if the fo\-llowing equalities hold: \begin{itemize} \item[$(B_1)$] $\mathfrak{S}R_0(X,Y)\cdot Z =0$; \item[$(B_2)$] $\mathfrak{S}\big(\mathcal D_{\lambda(X)}R_0\big)(Y,Z) =0$. \end{itemize} Here, for $X\in {\mathfrak{m}}$, $\mathcal{D}_{\lambda(X)}$ denotes the derivation on the tensor algebra over the vector space ${\mathfrak{m}}$ induced by $\lambda(X)$, and $\mathfrak{S}$ denotes the sum over all cyclic permutations of $X, Y, Z$. \end{df}
\begin{obs}\label{rem:4} For $X, Y, Z \in {\mathfrak{m}}$ and $L\in {\mathfrak{h}}$ we will use the next notation: \begin{align*} \mathcal S_{[L,X,Y]}&=[L,\lambda(X)]\cdot Y -\lambda(Y)\cdot (L\cdot X). \\ \mathcal T_{[X,Y,Z]}&=[\lambda(X),\lambda(Y)]\cdot Z-\lambda(Z)\cdot[X,Y]^{{\mathfrak{m}}}. \end{align*} Thus, it is not difficult to see that: \begin{eqnarray} \mathcal S_{[L,X,Y]}-\mathcal S_{[L,Y,X]} \label{eq:exprssaoS}&=& L\big([X,Y]^{{\mathfrak{m}}}\big). \end{eqnarray} We can also easily see that: \begin{equation}\label{eq:expressaoT} \mathfrak{S} \mathcal T_{[X,Y,Z]}=0. \end{equation} \end{obs}
\begin{obs} For $X, Y, Z \in {\mathfrak{m}}$ by using the Bianchi identities we obtain: \begin{equation}\label{eq:sumaparaR} \mathfrak{S}\Big([\lambda(Z),R_0(X,Y)]-R_0\big([X,Y]^{{\mathfrak{m}}},Z\big)\Big)=0. \end{equation} \end{obs}
\begin{lema}\label{thm:algebradeLie} Using the same notations and terminology as above, suppose that the $H$--invariant maps $R_0, {{\mathfrak{I}}}_0$ satisfy the following conditions \begin{enumerate} \item $R_0$ is skew-symmetric; \item given an arbitrary lifting $\lambda:{\mathfrak{m}}\to \mathfrak{gl}({\mathfrak{m}})$ of ${{\mathfrak{I}}}_0$, the map $R_0$ satisfies the Bianchi identities and the relation \eqref{eq:inverso3} holds. \end{enumerate} Then the vector space ${\mathfrak{a}}= {\mathfrak{h}}\oplus {\mathfrak{m}}$ endowed with the bracket operation $[\cdot,\cdot]$, defined as in \eqref{thm:colchete}, is a Lie algebra. \end{lema}
\begin{proof}[Proof of Lemma \ref{thm:algebradeLie}] Since $[\cdot,\cdot]$ is skew-symmetric, it is enough to show that it satisfies the Jacobi identity. To do that, we divide the proof into three cases. First we consider the case $L,T \in {\mathfrak{h}}$, $X\in {\mathfrak{m}}$. It follows from Definition~\ref{thm:colchete} that:
\begin{equation}\label{eq:inverso8}\big[[X,L],T\big] = -\big[[L,\lambda(X)],T\big] -\lambda\big(T(L\cdot X)\big)+T(L\cdot X). \end{equation}
Interchanging $T$ and $L$ in \eqref{eq:inverso8} we get:
\begin{equation}\label{eq:inverso9} \big[[T,X],L\big]=\big[[T,\lambda(X)],L\big] +\lambda\big(L(T\cdot X)\big)-L(T\cdot X). \end{equation} On the other hand, it follows from definition \ref{thm:colchete} that:
\begin{equation}\label{eq:inverso12} \big[[L,T],X\big] = \big[[L,T],\lambda(X)\big]-\lambda\big([L,T]\cdot X\big) + [L,T]\cdot X. \end{equation} The conclusion follows from \eqref{eq:inverso8}, \eqref{eq:inverso9} and \eqref{eq:inverso12} by applying the Jacobi identity in $\mathfrak{gl}({\mathfrak{m}})$.
Now we consider the case that $X, Y \in {\mathfrak{m}}$, $L\in {\mathfrak{h}}$. In this case, we get:
\begin{align} \label{eq:casom1}\big[[X,Y],L\big]^{{\mathfrak{m}}}&=-L\big([X,Y]^{{\mathfrak{m}}}\big)\\ \label{eq:casoh1}\big[[X,Y],L\big]^{{\mathfrak{h}}}&=\big[[\lambda(X),\lambda(Y)],L\big]+\lambda\big(L\cdot[X,Y]^{{\mathfrak{m}}}\big)-[R_0(X,Y),L], \end{align} and using remark~\ref{rem:4} we obtain:
\begin{eqnarray} \label{eq:casom2} \big[[Y,L],X]^{{\mathfrak{m}}} &=& -\mathcal S_{[L,Y,X]}\\ \label{eq:casoh2} \big[[Y,L],X\big]^{{\mathfrak{h}}} &=&\big[[\lambda(Y),L],\lambda(X)\big] +\lambda\big(\mathcal S_{[L,Y,X]}\big)-R_0(X,L\cdot Y). \end{eqnarray} Interchanging $X$ and $Y$ in \eqref{eq:casom2}, \eqref{eq:casoh2} we get:
\begin{eqnarray} \label{eq:casom3}\big[[L,X],Y]^{{\mathfrak{m}}}&=& \mathcal S_{[L,X,Y]}.\\ \label{eq:casoh3}\big[[L,X],Y\big]^{{\mathfrak{h}}}&=&\big[[L,\lambda(X)],\lambda(Y)\big] -\lambda\big(\mathcal S_{[L,X,Y]}\big)+R_0(Y,L\cdot X). \end{eqnarray} It follows from \eqref{eq:casom1}, \eqref{eq:casom2} and \eqref{eq:casom3} by using \eqref{eq:exprssaoS} that:
\[\mathfrak{S}\big[[X,Y],L\big]^{{\mathfrak{m}}}=0.\] On the other hand, it follows from \eqref{eq:casoh1}, \eqref{eq:casoh2} and \eqref{eq:casoh3} by using \eqref{eq:exprssaoS}, \eqref{eq:sumaparaR} and the Jacobi identity in $\mathfrak{gl}({\mathfrak{m}})$ that: \[\mathfrak{S}\big[[X,Y],L\big]^{{\mathfrak{h}}}=0.\]
Finally, we consider the case $X, Y, Z \in {\mathfrak{m}}$. It follows directly from definition \ref{thm:colchete} that:
\[\mathfrak{S}\big[[X,Y],Z\big]^{{\mathfrak{m}}}=0.\] For the ${\mathfrak{h}}$-component we have:
\begin{eqnarray} \big[[X,Y],Z\big]^{{\mathfrak{h}}}&=&\big[[\lambda(X),\lambda(Y)],\lambda(Z)\big]-R_0\big([X,Y]^{{\mathfrak{m}}},Z\big)\nonumber\\&-&[R_0(X,Y),\lambda(Z)]-\lambda\big(\mathcal T_{[X,Y,Z]}-R_0(X,Y)Z \big).\nonumber \end{eqnarray} It follows from \eqref{eq:expressaoT} and \eqref{eq:sumaparaR} by using the Jacobi identity in $\mathfrak{gl}({\mathfrak{m}})$ that:\[\mathfrak{S}\big[[X,Y],Z\big]^{{\mathfrak{h}}}=0.\] \end{proof}
We must now prove that the Lie bracket defined in Definition~\ref{thm:colchete} does not depend on the choice of $\lambda$. In fact, if $[\cdot,\cdot]_{\lambda}$ denotes the Lie bracket in ${\mathfrak{a}}$ obtained by using the arbitrary lifting $\lambda$ of ${{\mathfrak{I}}}_0$, then given another lifting $\tilde{\lambda}$ there exists a linear map $\delta:{\mathfrak{m}}\to{\mathfrak{h}}$ such that $\lambda = \tilde{\lambda}+\delta$. The map $\varphi:{\mathfrak{a}}\to\big({\mathfrak{a}},[\cdot,\cdot]_{\tilde{\lambda}}\big)$ defined by the matrix: \[\left(\begin{array}{cc} \mathrm{Id}_{{\mathfrak{h}}}&\delta\\ 0&\mathrm{Id}_{{\mathfrak{m}}}\\ \end{array}\right),\] is an isomorphism of vector spaces; moreover, a direct computation shows that $[\cdot,\cdot]_{\lambda}=\varphi^*[\cdot,\cdot]_{\tilde{\lambda}}$, so that $\varphi$ is an isomorphism of Lie algebras, which proves the assertion.
\subsection{Existence of an infinitesimally homogeneous manifold} The main goal of this subsection is to show the existence of an infinitesimally homogeneous manifold with prescribed structural group and prescribed characteristic tensors. To do this, let ${\mathfrak{m}}$ be a real finite-dimensional vector space, let $H\subset {{\rm{GL}}}({\mathfrak{m}})$ be a Lie subgroup with Lie algebra ${\mathfrak{h}}\subset \mathfrak{gl}({\mathfrak{m}})$. Let $R_0 \in \mathrm{Lin}_2\big({\mathfrak{m}},\mathfrak{gl}({\mathfrak{m}})\big)$, ${{\mathfrak{I}}}_0:{\mathfrak{m}}\to \mathfrak{gl}({\mathfrak{m}})/{\mathfrak{h}}$, be maps satisfying the following conditions: \begin{enumerate} \item $R_0, {{\mathfrak{I}}}_0$ are $H$-- invariants; \item $R_0$ is skew--symmetric; \item given an arbitrary lifting $\lambda:{\mathfrak{m}}\to \mathfrak{gl}({\mathfrak{m}})$ of ${{\mathfrak{I}}}_0$, $R_0$ satisfies the Bianchi identities and the relation \eqref{eq:inverso3} holds. \end{enumerate} Now we are going to obtain an infinitesimally homogeneous manifold with structural group $H$ whose characteristic tensor are $R_0$, ${{\mathfrak{I}}}_0$. It follows from Lemma \ref{thm:algebradeLie} that the vector space ${\mathfrak{a}}={\mathfrak{h}}\oplus {\mathfrak{m}}$ endowed with the bracket defined on \ref{thm:colchete} is a Lie algebra.
\noindent Let $\overline{\lambda}:{\mathfrak{a}}={\mathfrak{h}}\oplus {\mathfrak{m}}\to \mathfrak{gl}({\mathfrak{m}})$ be the map defined by: \begin{equation}\label{eq:lambdabarra} \overline{\lambda}(X)=\begin{cases} \lambda(X),&\text{if $X\in{\mathfrak{m}}$},\\ \overline{\mathrm{ad}}_X,&\text{if $X\in{\mathfrak{h}}$}, \end{cases} \end{equation} for each $X\in {\mathfrak{a}}$, where $\overline{\mathrm{ad}}$ denotes the isotropy representation of ${\mathfrak{h}}$ on ${\mathfrak{m}}$, more precisely, $\overline{\mathrm{ad}}_X(Y) = \pro {\mathfrak{m}}\big([X,Y]\big)=X(Y)$ for all $X\in {\mathfrak{h}}$, $Y\in {\mathfrak{m}}$.
\begin{lema}\label{thm:propriedadelambda} If $L\in {\mathfrak{h}}$ and $\mathfrak X\in {\mathfrak{a}}$. Then \[\big[\overline{\lambda}(L),\overline{\lambda}(\mathfrak X)\big]=\overline{\lambda}\big([L,\mathfrak X]\big).\] \end{lema} \begin{proof} We set $\mathfrak X = T+X$, for $T \in \mathfrak h$, $X\in \mathfrak m$. \begin{eqnarray} \overline{\lambda}\big([L,\mathfrak X]\big) &=& \overline{\mathrm{ad}}_{[L,T]}+\overline{\mathrm{ad}}_{\pro {\mathfrak{h}}\big([L,X]\big)}+ \lambda(L\cdot X)\nonumber \\ &=& [\overline{\mathrm{ad}}_L,\overline{\mathrm{ad}}_T]+[\overline{\mathrm{ad}}_L,\lambda(X)]\nonumber \\ &=& [\overline{\lambda}(L),\overline{\lambda}(\mathfrak X)].\nonumber \end{eqnarray} \end{proof}
Let $A$ be a Lie group such that $T_1A = {\mathfrak{a}}$. Let $M'\subset A$ be a submanifold of $A$ through $1$ such that $T_1M'={\mathfrak{m}}$. Let $\pro {\mathfrak{m}}^L$ be the left invariant $1$-form on $A$ induced by the linear projection $\pro {\mathfrak{m}}:{\mathfrak{a}} = {\mathfrak{h}}\oplus {\mathfrak{m}} \to {\mathfrak{m}}$. Setting $\overline{\kappa}=\pro {\mathfrak{m}}^L|_{M'}$, we have: \[\overline{\kappa}_1(X)=\pro {\mathfrak{m}}^L(X)= \pro {\mathfrak{m}}(X) =X\] for all $X\in {\mathfrak{m}}$. Let $M$ be a neighborhood of $1$ in $M'$ such that for all $x\in M$ the map $\overline{\kappa}_x:T_xM \to {\mathfrak{m}}$ is a linear isomorphism. Then the map $s:M\to{{\rm{FR}}}_{{\mathfrak{m}}}(TM)$ defined by $s(x) = \overline{\kappa}_x^{-1}:{\mathfrak{m}}\to T_xM$, for all $x\in M$, gives us a global section of the ${{\rm{GL}}}({\mathfrak{m}})$-principal bundle ${{\rm{FR}}}_{{\mathfrak{m}}}(TM)$ over $M$. Given $x\in M$, the set \[P_x =s(x)\cdot H=\{s(x)\circ h:h \in H\}\] is an $H$-structure on $T_xM$ and $\displaystyle P=\bigcup_{x\in M}P_x$ defines an $H$-structure on $M$.
\noindent To construct $\nabla$, let $\overline{\lambda}^L$ be the left invariant $1$-form on $A$ induced by the linear map $\overline{\lambda}$ defined in \eqref{eq:lambdabarra}. Setting $\overline{\omega}=\overline{\lambda}^L|_M$, it is clear that $\overline{\omega}$ is a $\mathfrak{gl}({\mathfrak{m}})$-valued smooth $1$-form on $M$. Let $\omega$ be the unique $\mathfrak{gl}({\mathfrak{m}})$-valued $1$-form on ${{\rm{FR}}}_{{\mathfrak{m}}}(TM)$ such that $s^*\omega=\overline{\omega}$. Then $\omega$ is a connection form on ${{\rm{FR}}}_{{\mathfrak{m}}}(TM)$ (see \cite{T}).
So far, we have obtained an affine manifold with $H$-structure ${(M,\nabla,P)}$, where $\nabla$ denotes the linear connection associated with the connection form $\omega$. We claim that ${(M,\nabla,P)}$ is an infinitesimally homogeneous manifold whose characteristic tensors are $R_0$, ${{\mathfrak{I}}}_0$. In fact, given $x\in M$ and $X\in T_xM$, we have: \[\overline{\omega}_x(X)=\overline{\lambda}^L_x(X)=\overline{\lambda}(x^{-1}\cdot X)=\underbrace{\overline{\mathrm{ad}}_{\pro {\mathfrak{h}}(x^{-1}\cdot X)}}_{\in {\mathfrak{h}}}+\lambda\big(\pro {\mathfrak{m}}(x^{-1}\cdot X)\big),\] therefore, in the quotient $\mathfrak{gl}({\mathfrak{m}})/{\mathfrak{h}}$ the following equality holds: \[\overline{\omega}_x(X)=\lambda\big(\pro {\mathfrak{m}}(x^{-1}\cdot X)\big);\] clearly $\pro {\mathfrak{m}}(x^{-1}\cdot X) = \overline{\kappa}_x(X)=s(x)^{-1}\cdot X$. Thus we have: \[{{\mathfrak{I}}}_x^P (X) =\overline{\mathrm{Ad}}_{s(x)}\big(\mathfrak q\circ \lambda \circ s(x)^{-1}\cdot X\big)=\overline{\mathrm{Ad}}_{s(x)}\big({{\mathfrak{I}}}_0\circ s(x)^{-1}\cdot X\big).\] On the other hand, we set $\overline{\Omega}=s^*\Omega$, where $\Omega$ denotes the curvature form of $\omega$. For each $x\in M$, $X, Y\in T_xM$. Setting $x^{-1}\cdot X =L+\overline{\kappa}_x\cdot X$, $x^{-1}\cdot Y =T+\overline{\kappa}_x\cdot Y$, for $L,T \in {\mathfrak{h}}$. It follows from Lemma~\ref{thm:propriedadelambda} that: \begin{eqnarray} -\overline{\omega}_x\big([X,Y]\big)&=&-\overline{\lambda}\big([L,T+\overline{\kappa}_x\cdot Y]+[\overline{\kappa}_x\cdot X,T]+[\overline{\kappa}_x\cdot X,\overline{\kappa}_x\cdot Y]\big)\nonumber \\ &=& -\big[\overline{\lambda}(L),\overline{\lambda}(T+\overline{\kappa}_x\cdot Y)\big]-\big[\overline{\lambda}(\overline{\kappa}_x\cdot X),\overline{\lambda}(T)\big]\nonumber\\&-&\overline{\lambda}\big[\overline{\kappa}_x\cdot X,\overline{\kappa}_x\cdot Y\big]. 
\nonumber\end{eqnarray} Moreover: \begin{eqnarray} \big[\overline{\omega}_x(X),\overline{\omega}_x(Y)\big] &=&\big[\overline{\lambda}(L),\overline{\lambda}(T+\overline{\kappa}_x\cdot Y)\big]+\big[\overline{\lambda}(\overline{\kappa}_x\cdot X),\overline{\lambda}(T)\big]\nonumber\\&+&\big[\overline{\lambda}(\overline{\kappa}_x\cdot X),\overline{\lambda}(\overline{\kappa}_x\cdot Y)\big].\nonumber \end{eqnarray} Since \[\overline{\Omega}_x(X,Y)={\rm d} \overline{\omega}_x(X,Y) +\big[\overline{\omega}_x(X),\overline{\omega}_x(Y)\big] =-\overline{\omega}_x\big([X,Y]\big)+\big[\overline{\omega}_x(X),\overline{\omega}_x(Y)\big],\] it follows from the previous equalities that: \begin{eqnarray} \overline{\Omega}_x(X,Y)&=&-\overline{\lambda}\big[\overline{\kappa}_x\cdot X,\overline{\kappa}_x\cdot Y\big]+\big[\overline{\lambda}(\overline{\kappa}_x\cdot X),\overline{\lambda}(\overline{\kappa}_x\cdot Y)\big]\nonumber \\ &=& R_0(\overline{\kappa}_x\cdot X,\overline{\kappa}_x\cdot Y).\nonumber \end{eqnarray} Which shows the claim. The following Theorem summarizes all subsection:
\begin{teo} Let ${\mathfrak{m}}$ be a real finite-dimensional vector space, let $H\subset {{\rm{GL}}}({\mathfrak{m}})$ be a Lie subgroup with Lie algebra ${\mathfrak{h}}\subset \mathfrak{gl}({\mathfrak{m}})$. Let $R_0 \in \mathrm{Lin}_2\big({\mathfrak{m}},\mathfrak{gl}({\mathfrak{m}})\big)$, ${{\mathfrak{I}}}_0:{\mathfrak{m}}\to \mathfrak{gl}({\mathfrak{m}})/{\mathfrak{h}}$, be maps satisfying the following conditions: \begin{enumerate} \item $R_0, {{\mathfrak{I}}}_0$ are $H$-- invariants; \item $R_0$ is skew--symmetric; \item given an arbitrary lifting $\lambda:{\mathfrak{m}}\to \mathfrak{gl}({\mathfrak{m}})$ of ${{\mathfrak{I}}}_0$, the map $R_0$ satisfies the Bianchi identities and the relation \[R_0(X,Y)-[\lambda(X),\lambda(Y)]+\lambda\big(\lambda(X)Y-\lambda(Y)X\big) \in {\mathfrak{h}}\] holds. \end{enumerate} Then there exists an infinitesimally homogeneous manifold ${(M,\nabla,P)}$ with structural group $H$, whose cha\-racteristic tensors are $R_0$, ${{\mathfrak{I}}}_0$. \end{teo} \end{section}
\end{document} | arXiv | {
"id": "0908.2239.tex",
"language_detection_score": 0.5932250618934631,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\setstcolor{red} \title{The $\Sigma_1$-Provability Logic of $\hbox{\sf HA}{} $} \author{\begin{tabular}{c c}
Mohammad Ardeshir\thanks{mardeshir@sharif.ir}
\quad & Mojtaba Mojtahedi\thanks{mojtaba.mojtahedi@ut.ac.ir}\\
Department of Mathematical Sciences
\quad &Department of Mathematics, \\
Sharif University of Technology
\quad & Statistics and Computer Science, \\
\quad &College of Sciences, University of Tehran \end{tabular}}
\maketitle
\begin{abstract} In this paper we introduce a modal theory $\sf{iH}_{\sigma}$ which is sound and complete for arithmetical $\Sigma_1$-interpretations in $\hbox{\sf HA}{} $, in other words, we will show that $\sf{iH}_{\sigma}$ is the $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $. Moreover we will show that $\sf{iH}_{\sigma}$ is decidable. As a by-product of these results, we show that $\hbox{\sf HA}{} +\Box \bot$ has de Jongh property. \end{abstract}
\tableofcontents
\section{Introduction}\label{sec-introduction} As far as we know, there are at least two updated reliable sources \cite{ArtBekProv,VisBek} on the current situation, historical background and motivations of {\em provability logic}.
To be self-contained, in this introduction we extract a brief background on provability logic from the mentioned sources, for readers not much familiar with the subject.
Provability logic is a modal logic in which the modal operator $\Box$ has the intended meaning of provability in some formal system. Unlike the other realms of modal logic, e.g.\ temporal logic, epistemic logic and deontic logic, here in provability logic we have a rational meaning for $\Box A$:
\begin{center}
``$A$ is provable in the system $T$"
\end{center}
The notion of provability logic goes back essentially to K.~G\"{o}del \cite{Godel33} in 1933, where he intended to provide a semantics for
Heyting's formalization of {\em intuitionistic logic} $\hbox{\sf IPC} $. He defined a {\em translation}, or {\em interpretation} $\tau$ from the propositional language to the modal language such that \begin{center} ${\sf IPC}\vdash A\quad\quad\Longleftrightarrow \quad\quad{\sf S4}\vdash \tau(A)$. \end{center} The translation $\tau(A)$ adds a $\Box$ before each sub-formula of $A$. The idea behind this translation is hidden in the intuitionistic meaning of truth (the BHK interpretation): ``The truth of a proposition coincides with its provability". Hence if one assumes $\Box A$ as ``provability of $A$", then it is reasonable to add a $\Box$ behind each sub-formula and expect to have a correspondence between the intuitionistic propositional calculus $\hbox{\sf IPC} $ and some classical modal logic.
On the other hand, by works of G\"{o}del in \cite{Godel}, for each arithmetical formula $A$ and recursively axiomatizable theory ${T}$ (like ${\sf PA}$), we can formalize the statement ``there exists a proof in ${ T}$ for $A$" by a sentence of the language of arithmetic, i.e. ${\sf Prov}_\tinysub{T}(\gnumber{A}):=\exists{x}\,{\sf Proof}_\tinysub{T}(x,\gnumber{A})$, where $\gnumber{A}$ is the code of $A$. Now the question is whether we can find some modal propositional theory such that the $\square$ operator captures the notion of {\em provability} in classical mathematics. Let us restrict our attention to the part of mathematics known as Peano Arithmetic ${\sf PA}$. Hence the question is to find some propositional modal theory $T_\Box$ such that: $$T_\Box\vdash A\quad\quad \Longleftrightarrow \quad\quad \forall{*} \ \hbox{\sf PA}{} \vdash A^*$$ By $(\ )^*$, we mean a mapping from the modal language to the first-order language of arithmetic, such that \begin{itemize} \item $p^*$ is an arithmetical first-order sentence, for any atomic variable $p$, and $\bot^*=\bot$, \item $(A\circ B)^*=A^*\circ B^*$, for $\circ\in\{\vee, \wedge, \rightarrow\}$, \item $(\square A)^*:=\exists{x}\,{\sf Proof}_\tinysub{\sf PA}(x,\gnumber{A^*})$. \end{itemize}
It turned out that ${\sf S4}$ is {\em not} the right candidate for interpreting the notion of {\em provability}, since $\neg\square\bot$ is a theorem of ${\sf S4}$, contradicting G\"{o}del's second incompleteness theorem (Peano Arithmetic ${\sf PA}$ does not prove its own consistency).
In 1976, R. Solovay \cite{Solovay} proved that the right modal logic, in which the $\square$ operator interprets the notion of {\em provability in {\sf PA}}, is $\hbox{\sf GL}{} $. This modal logic is well-known as the G\"{o}del-L\"{o}b logic, and has the following axioms and rules: \begin{itemize} \item all tautologies of classical propositional logic, \item $\Box (A\rightarrow B)\rightarrow(\Box A\rightarrow\Box B)$, \item $\Box A\rightarrow\Box\Box A$, \item L\"{o}b's axiom \textup{(}{\sf L}\textup{)}: $\Box(\Box A\rightarrow A)\rightarrow\Box A$, \item Necessitation Rule: $A/\Box A$, \item Modus ponens: $(A,A\rightarrow B)/B$. \end{itemize} \noindent {\bf Theorem.} {(Solovay)} {\em For any sentence $A$ in the language of modal logic, ${\sf GL}\vdash A$ if and only if for all interpretations $(\ )^*$, ${\sf PA}\vdash A^*$. }
There are many open problems which could be assumed as a generalization of the above theorem. A list of such problems could be found in \cite{VisBek}. Also a live list of open problems could be found in the homepage of Lev Beklemishev\footnote{\url{http://www.mi.ras.ru/~bekl}}.
The question of generalizing Solovay's result from classical theories to intuitionistic ones, such as the intuitionistic counterpart of ${\sf PA}$, well-known as Heyting Arithmetic ${\sf HA}$, proved to be remarkably difficult \cite{ArtBekProv}. This problem was taken up by A. Visser, D. de Jongh and their students. The problem of axiomatizing the provability logic of ${\sf HA}$ remains a major open problem since the end of 70s \cite{ArtBekProv}. Precisely speaking, the problem of the provability logic of $\hbox{\sf HA}{} $ is as follows:
$$\text{Find a modal theory ${\sf iH}$ such that:}\quad
{\sf iH}\vdash A\quad\quad \Longleftrightarrow \quad\quad \forall{*} \ \hbox{\sf HA}{} \vdash A^*$$
Note that in the above statement of the provability logic of $\hbox{\sf HA}{} $, we have $(\Box A)^*:={\sf Prov}_{_{\sf HA}}(\gnumber{A^*})$. The following list contains important results about the provability logic of $\hbox{\sf HA}{} $ with arithmetical nature: \begin{itemize} \item Myhill 1973 and Friedman 1975. $ {\sf iH}\nvdash \Box (A\vee B)\to(\Box A\vee \Box B)$, \cite{Myhill,Friedman75} \item Leivant 1975. ${\sf iH}\vdash\Box(A\vee B)\to\Box(\Boxdot A\vee\Boxdot B)$, in which $\Boxdot A$ is a shorthand for $A\wedge\Box A$, \cite{Leivant-Thesis} \item Visser 1981. ${\sf iH}\vdash\Box\neg\neg\Box A\to\Box\Box A$ and ${\sf iH}\vdash \Box(\neg\neg\Box A\to\Box A)\to\Box(\Box A\vee \neg\Box A)$, \cite{VisserThes,Visser82} \item Iemhoff 2001. Introduced a uniform axiomatization of all known axiom schemas of ${\sf iH}$ in an extended language with a bimodal operator $\rhd$. In her Ph.D. dissertation \cite{IemhoffT}, Iemhoff raised a conjecture that implies directly that her axiom system, ${\sf iPH}$, restricted to the normal modal language, is equal to ${\sf iH}$, \cite{IemhoffT} \item Visser 2002. Introduced a decision algorithm for ${\sf iH}\vdash A$, for all $A$ not containing any atomic variable. \cite{Visser02} \end{itemize}
In this paper, we introduce an axiomatization of a modal logic $\sf{iH}_{\sigma}$ and prove the following result which partially answers the question.
We first show that any $\hbox{\sf TNNIL}{} $-proposition\footnote{ We say that $A$ is $\hbox{\sf TNNIL}{} $, if any two nested occurrences of $\to$ in the left are separated by a $\Box$. For example $(p\to q)\to r$ and $\neg (p\to q)$ are not $\hbox{\sf TNNIL}{} $, while $p\to q$ and $\Box(p\to q)\to r$ are $\hbox{\sf TNNIL}{} $. Precise definition of $\hbox{\sf TNNIL}{} $-propositions, a modal variant of $\hbox{\sf NNIL}{} $-propositions \cite{Visser-Benthem-NNIL}, is in \Cref{subsubsec-TNNIL-algorithm}. }
$A$ is in the $\Sigma_1$-provability logic of ${\sf HA}$, iff ${\sf iGL}+{\sf CP}\vdash A$, where ${\sf iGL}$ is the intuitionistic G\"odel-L\"ob logic and ${\sf CP}$ is the {\em completeness principle} $B\to\Box B$ (we call this theory $\hbox{\sf LC}{}$). This fact in combination with the conservativity result of
\Cref{Theorem-TNNIL Conservativity of LC over LLe+} and also some variant of
Visser's $\hbox{\sf NNIL}{} $-algorithm in \cite{Visser02}, implies that the $\Sigma_1$-provability logic of ${\sf HA}$ is a decidable modal theory, which is called $\sf{iH}_{\sigma}$ here. More precisely, we find a system $\sf{iH}_{\sigma}$ such that $$\sf{iH}_{\sigma}\vdash A\quad\quad \Longleftrightarrow \quad\quad \forall{*} \ \hbox{\sf HA}{} \vdash A^*,$$ in which $*$ ranges over all interpretations such that $p^*$ is a $\Sigma_1$-sentence for every atomic variable $p$. The complete axiomatization of $\sf{iH}_{\sigma}$ is in \Cref{Sec-Aximatizing-TNNIL}. It is worth mentioning that non-modal variants of all of the axioms of $\sf{iH}_{\sigma}$ were already discovered by Visser in \cite{VisserThes,Visser82,Visser02}. He also showed in \cite{Visser02} that those variants of the axioms of $\sf{iH}_{\sigma}$ are sound for $\Sigma_1$ arithmetical interpretations in $\hbox{\sf HA}{} $.
\subsection{Inspiring examples}
In the following four examples, we roughly explain the main roads in the paper. Before we continue with examples, let us review what we are going to do in this paper. Our main results are soundness and completeness theorems of $\sf{iH}_{\sigma}$ for arithmetical $\Sigma_1$-interpretations in $\hbox{\sf HA}{} $. As usual, the difficult part is the completeness theorem. The soundness part is not problematic: some major part of soundness is already done by Visser \cite{Visser02} and the rest (extended Leivant's principle) is done in \autoref{Theorem-Soundness of HA for lle+}. We are not going to talk about soundness in these examples. We explain how to refute some modal proposition $A$ from the $\Sigma_1$-provability logic (and a fortiori from the provability logic) of $\hbox{\sf HA}{} $, i.e. we will find some $\Sigma_1$-interpretation $\sigma_{_{\sf HA}}$ such that $\hbox{\sf HA}{} \nvdash \sigma_{_{\sf HA}}(A)$.
The propositions which we treat here are $(p\to q) \vee (q\to p)$, $\Box(p\vee q)\to(\Box p\vee\Box q)$, ${\neg\neg\Box(\neg\neg p\to p)}\to{\Box(\neg\neg p\to p)}$ and finally $A:=\Box(p\vee q)\to [ (\Box p\to (p\vee q\vee \Box q))\vee (\Box q\to (p\vee q\vee \Box p))]$.
\begin{example}\label{example00}\em In this example we will show how to refute the Dummett formula $A:={(p\to q)} \vee {(q\to p)}$
from the $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $. Since $A$ is non-modal, we are actually faced with a special case of proving
de Jongh property for $\hbox{\sf HA}{} $ with $\Sigma_1$-substitutions. C. Smory\'nski first discovered this result \cite{Smorynski-Thesis}. For a survey on the de Jongh property see \cite{vi1}. Here we explain how to use Solovay's method \cite{Solovay} in combination with Smory\'nski's general method for defining first-order Kripke models of $\hbox{\sf HA}{} $ \cite[page~372]{Smorynski-Troelstra} to deduce the de Jongh property (with $\Sigma_1$-substitutions) for $\hbox{\sf HA}{} $. We are not going to provide all details here; instead we explain the idea which motivated our main result \autoref{Theorem-Main tool}.
First we find a Kripke model $\mathcal{K}_0\nVdash A$:
\begin{tabular}{r l} \quad \quad \quad \quad \parbox{3cm}{\includegraphics[scale=.6]{drawing-dummet.eps}} \quad \quad \quad \quad & \parbox{7cm}{$\beta\Vdash p$\quad,\quad $\beta\nVdash q$\quad,\quad $\gamma\Vdash q$\quad, \quad $\gamma\nVdash p$ \quad
$ \alpha\leq \beta,\gamma$ \quad , \quad $\alpha \nVdash p,q$ } \end{tabular}
\noindent In the left and right hand side of each node we wrote the name of that node and the set of atomic variables which are forced at that node, respectively. The precise definition of Kripke models for intuitionistic propositional logic $\hbox{\sf IPC} $,
came in \Cref{Sec-PropModKripke}.
Next we will find some arithmetical $\Sigma_1$-sentences $B$ and $C$ and also a first-order Kripke model $\mathcal{K}_1\Vdash \hbox{\sf HA}{} $ such that $\mathcal{K}_1$ simulates $\mathcal{K}_0$, with $B$ and $C$ playing the role of $p$ and $q$, respectively: \begin{center} \includegraphics[scale=.6]{drawing-dummet2.eps} \end{center} In the above picture, $\mathfrak{M}_\alpha$, $\mathfrak{M}_\beta$ and $\mathfrak{M}_\gamma$ are classical structures assigned to the corresponding nodes. For definition of intuitionistic first-order Kripke models, see \Cref{sec-KripkeModelFirstOrder}.
To explain what are these classical structures and also what are the
sentences $B$ and $C$, we first define a recursive function $F$ with the domain of natural numbers and with the range in the nodes of the Kripke model. Let us define: $$B:=\exists{x}(F(x)=\beta) \quad \text{and} \quad C:=\exists{x}(F(x)=\gamma)$$ Since $F$ is a recursive function, $B$ and $C$ are $\Sigma_1$ sentences. Moreover, for any $\delta\in\{\alpha,\beta,\gamma\}$, we assume the classical structures $\mathfrak{M}_\delta$ such that $$\mathfrak{M}_\delta \models T_\delta\quad , \quad T_\delta:=\hbox{\sf PA}{} +(\lim_{x\to\infty}\!\!F(x)=\delta)$$ In which $\lim_{x\to\infty}\!\!F(x)=\delta$ is defined as $\exists{x}\forall{y\geq x} F(y)=\delta$. The function $F$ is defined as follows: $F(0):=\alpha$ and $F(n+1)$ is defined to be some node $\delta>F(n)$, if there exists some proof (in $\hbox{\sf PA}{} $) with the G\"odel number less than $n+1$ for the statement $$ \lim_{x\to\infty}F(x)\neq\delta $$ In other words, $F$ climbs at stage $n+1$ to the node $\delta$ if there is a witness for the inconsistency of $T_\delta$. This function is the same as Solovay's function in \cite{Solovay}. The recursive definition of $F$ is such that although it is true that $F$ is a constant function, $\hbox{\sf PA}{} $ cannot prove it. Moreover, $F$ is such that for any pair of nodes $\delta\lneqq\delta'$, we have $T_\delta\vdash {\sf Con}(T_{\delta'})$, i.e. $$\hbox{\sf PA}{} +\lim_{x\to \infty}F(x)=\delta\vdash \neg {\sf Prov}_{_{\sf PA}}(\gnumber{\lim_{x\to \infty}F(x)\neq\delta'})$$ This guarantees the existence of the classical structures $\mathfrak{M}_\delta\models T_\delta$, such that $\mathcal{K}_1$ is a first-order Kripke model of $\hbox{\sf HA}{} $ (see \cite{Smorynski-Thesis,Smorynski-Troelstra}). From $\mathfrak{M}_\beta\models T_\beta$, we can deduce that $\mathcal{K}_1,\beta\Vdash B$ and $\mathcal{K}_1,\beta\nVdash C$. From $\mathfrak{M}_\gamma\models T_\gamma$, we can deduce $\mathcal{K}_1,\gamma\Vdash C$ and $\mathcal{K}_1,\gamma \nVdash B$. 
These would imply that $\mathcal{K}_1,\alpha\nVdash (B\to C)\vee(C\to B)$, as desired. \end{example}
\begin{example}\label{example01}\em Let $A=\Box (p\vee q)\to(\Box p\vee\Box q)$.
J. Myhill \cite{Myhill} and H. Friedman \cite{Friedman75} have already shown that there exist some first-order arithmetical formulas $B$ and $C$ such that $\hbox{\sf HA}{} \nvdash \Box(B\vee C)\to(\Box B\vee\Box C)$, in other words, there exists some arithmetical substitution $\sigma$ such that $\hbox{\sf HA}{} \nvdash\sigma_{_{\sf HA}}(A)$, i.e. $A$ does not belong to the provability logic of $\hbox{\sf HA}{} $. However, their proof does not provide an explicit $B$ and $C$. It only guarantees the existence of such arithmetical propositions. With the methods of this paper, we will find some explicit sentences $B$ and $C$ such that $\hbox{\sf HA}{} \nvdash \Box(B\vee C)\to(\Box B\vee\Box C)$.
\\
As usual, we first find some Kripke model which refutes the proposition $A$. The Kripke models of intuitionistic modal logic have two relations: one for intuitionistic logic ($\leq$) which we illustrate in the pictures with one arrow in the middle line, and another relation for the modal connective (${\mathcal{R}}$) which is illustrated with two arrows in the middle line. All Kripke models in this paper have the following property: $\alpha\leq \beta\,\mathcal{R}\,\gamma$ implies $\alpha\,\mathcal{R}\,\gamma$.
Also, in this example and other examples (\Cref{example00,example01,example02,example03}), the relations $\mathcal{R}$ and $\leq$ are transitive, $\mathcal{R}\;\subseteq\;\leq$ and moreover, $\mathcal{R}$ is irreflexive and $\leq$ is reflexive. In the pictures, we do not draw all the relations and we always assume the closure of the relations under the mentioned properties. For a precise definition of Kripke semantics for intuitionistic modal logics, see \Cref{Sec-PropModKripke}. The Kripke counter-model for $A$ is $\mathcal{K}_0$:
\begin{tabular}{r l} \quad \quad \quad \quad \parbox{3cm}{\includegraphics[scale=.6]{drawing.eps}} \quad \quad & \parbox{9cm}{ \quad\quad$\beta\Vdash p$\quad,\quad $\beta\nVdash q$\quad,\quad $\gamma\Vdash q$\quad, \quad $\gamma\nVdash p$ \quad
\quad\quad$\alpha\,\mathcal{R}\,\beta,\!\gamma$ \quad , \quad $\alpha\leq\beta,\gamma$
\quad\quad$\alpha_0\,\mathcal{R}\,\alpha,\beta,\!\gamma$\quad , \quad $\alpha_0\leq \alpha,\beta,\gamma$ \quad , \quad $\alpha,\alpha_0\nVdash p,q$ } \end{tabular}
As it may be observed, the node $\alpha_0$ is not necessary. We add this extra (root) node, whenever we are not able to simulate the behaviour of the existing root of the tree. \Cref{Theorem-Propositional Completeness LC} ensures that such Kripke models always exist for invalid propositions. Next we will find some arithmetical sentences $B$ and $C$ and also a first-order Kripke model $\mathcal{K}_1\Vdash \hbox{\sf HA}{} $ such that $\mathcal{K}_1$ simulates $\mathcal{K}_0$ with $B$ and $C$ playing the role of $p$ and $q$, respectively:
\begin{center} \includegraphics[scale=.6]{drawing1-1.eps} \end{center} In the above picture, $\mathfrak{M}_\alpha$, $\mathfrak{M}_\beta$ and $\mathfrak{M}_\gamma$ are classical structures assigned to the corresponding nodes and $\mathbb{N}$ indicates the standard model of arithmetic. To explain
these classical structures and also the sentences $B$ and $C$, we first define a recursive function $F$ with the domain of natural numbers and with the range in the nodes of the Kripke model. Let us define: $$B:=\exists{x}(F(x)=\beta) \quad \text{and} \quad C:=\exists{x}(F(x)=\gamma)$$ Moreover, for any $\delta\in\{\alpha,\beta,\gamma\}$, we assume the classical structures $\mathfrak{M}_\delta$ such that \begin{equation}\label{EQ765} \mathfrak{M}_\delta \models T_\delta , \quad T_\delta:=\hbox{\sf PA}{} +(\lim_{x\to\infty}\!\!F(x)=\delta)+{\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}}) , \quad
\varphi_{_\alpha}:=B\vee C,\quad \varphi_{_\beta}:=\varphi_{_\gamma}:=B\wedge C
\end{equation}
Note that $\mathfrak{M}_\delta\models {\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}})$, and this implies $\mathcal{K}_1,\delta\Vdash {\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}})$. This means that the node $\delta$ in the first-order Kripke model forces the interpretation of those boxed propositions which are forced at $\delta$ in the propositional Kripke model $\mathcal{K}_0$. As we will see in \Cref{corollar-4st&2st}:
$$\hbox{\sf PA}{} +(\lim_{x\to\infty}\!\!F(x)=\delta)\vdash {\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}})$$
Hence we may define $T_\delta$ simply as $\hbox{\sf PA}{} +(\lim_{x\to\infty}\!\!F(x)=\delta)$, instead of our previous definition in
\cref{EQ765}.
The function $F$ is defined as follows: $F(0):=\alpha_0$ and $F(n+1)$ is defined to be some node $\delta$ such that $F(n)\,\mathcal{R}\,\delta$, if there exists some proof (in $\hbox{\sf PA}{} $) with the G\"odel number less than $n+1$ for the statement $$\neg[\lim_{x\to\infty}F(x)=\delta\wedge {\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}})]$$
Otherwise define $F(n+1):=F(n)$. In other words, $F$ climbs at stage $n+1$ to the node $\delta$ if there is a witness for the inconsistency of $T_\delta$ with ${\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}})$. What is $\varphi_{_\delta}$? The proposition $ \varphi_{_\delta}$ is the conjunction of all propositions $E$ such that $\Box E$ is forced at $\delta$. Since the number of such propositions are infinite, we only take care of those $E$ which are important to us, i.e. those which are a sub-formula of $A$. Without $\varphi_{_\delta}$, the function $F$ becomes exactly what Solovay used to prove his completeness theorems for $\hbox{\sf GL}{} $ \cite{Solovay}, as we used in \autoref{example00}. This is not enough for our aim. We need to have $T_\alpha\vdash {\sf Prov}_{_{\sf HA}}(\gnumber{B\vee C})$ and generally, $T_\delta\vdash {\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}})$, which is not the case without the clause of $\varphi_{_\delta}$ in the recursive definition of $F$.
By arguments in \Cref{sec-transforming}, there exists some classical structures $\mathfrak{M}_\delta\models T_\delta$ such that $\mathcal{K}_1$ is a first-order Kripke model of $\hbox{\sf HA}{} $. This implies that $\mathfrak{M}_\beta\models B$, $\mathfrak{M}_\beta\not\models C$, $\mathfrak{M}_\gamma\models C$ and $\mathfrak{M}_\gamma\not \models B$. Since $T_\alpha\vdash {\sf Prov}_{_{\sf HA}}(\gnumber{B\vee C})$, we can deduce that $\mathfrak{M}_\alpha\models {\sf Prov}_{_{\sf HA}}(\gnumber{B\vee C})$. Also it is easy to show that for any first-order Kripke model $\mathcal{K}\Vdash \hbox{\sf HA}{} $, any node $\delta$ and arbitrary $\Sigma_1$-sentence $E$, we have $$\mathcal{K},\delta\Vdash E\quad \quad \Longleftrightarrow \quad \quad \mathfrak{M}_\delta\models E$$ Since ${\sf Prov}(x)$ is a $\Sigma_1$-predicate, we can deduce $\mathcal{K}_1,\alpha\Vdash {\sf Prov}_{_{\sf HA}}(\gnumber{B\vee C})$ and $\mathcal{K}_1,\alpha\nVdash {\sf Prov}_{_{\sf HA}}(\gnumber{B})\vee {\sf Prov}_{_{\sf HA}}(\gnumber{ C})$. Hence $\mathcal{K}_1 \nVdash {\sf Prov}_{_{\sf HA}}(\gnumber{B\vee C})\to ({\sf Prov}_{_{\sf HA}}(\gnumber{B})\vee {\sf Prov}_{_{\sf HA}}(\gnumber{ C}))$, as desired. We will consider this proposition ($A$) again in \Cref{example1} and refute it from $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $
with the direct use of our main theorem in \Cref{sec-transforming}.
\end{example}
\begin{example}\label{example02}\em In this example, we show how to refute $A=\neg\neg\Box(\neg\neg p\to p)\to\Box(\neg\neg p\to p)$ from the provability logic of $\hbox{\sf HA}{} $ and also from the $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $. In this example, the $\hbox{\sf TNNIL}{} $ algorithm is involved.
The first thing is that we cannot directly refute $A$ from the provability logic of $\hbox{\sf HA}{} $ as we did in \Cref{example00,example01}. The difficulty comes from the nested implications in the left hand side which are not separated by a $\Box$. Note that $\neg p$ is a shorthand for $p\to \bot$. To overcome this difficulty, we iteratively use Visser's $\hbox{\sf NNIL}{} $ ({\sf N}o {\sf N}ested {\sf I}mplication to the {\sf L}eft) approximation \cite{Visser02}
in the modal language, i.e. inside any $\Box$ we compute the best $\hbox{\sf NNIL}{} $ approximation from below and replace the proposition with it. The approximated proposition for any modal proposition $E$ is denoted in this paper by $E^+$. Some of Visser's $\hbox{\sf NNIL}{} $ approximations are \cite{Visser02}: $$(\neg\neg p)^+=p\quad , \quad (\neg\neg p\to p)^+=p\vee\neg p \quad , \quad ((p\to q)\to r)^+=r\vee(p\wedge(q\to r))$$ The process of computing the approximation $(.)^+$ is complicated and we do not precisely define it in this example. It is explained in detail in \Cref{sec-nnil}. We may briefly describe it in the following way.
Let $A$ be a non-modal proposition. Its $\hbox{\sf NNIL}{} $ approximation $A^+$, is some proposition with no nested implications to the left such that $\hbox{\sf IPC} \vdash A^+\to A$, and for any other $\hbox{\sf NNIL}{} $ proposition $B$ such that $\hbox{\sf IPC} \vdash B\to A$, we have $\hbox{\sf IPC} \vdash B\to A^+$. It is clear that, up to $\hbox{\sf IPC} $-deductive equivalency, such an approximation is unique.
\noindent We have the following approximation for $A$:
$$A^+=\Box(p\vee\neg p)\vee\neg\Box(p\vee \neg p)$$ Now we can handle this simplified proposition $A^+$ as we did in \Cref{example00,example01}. The following Kripke model $\mathcal{K}_0$ is a counter-model for $A^+$:
\begin{tabular}{r l} \quad \quad \quad \quad \quad \quad \parbox{3cm}{\includegraphics[scale=.6]{drawing2.eps}} & \parbox{9cm}{$\alpha_0,\alpha,\beta\nVdash p$\quad \quad $\gamma\Vdash p$\quad \quad and
$\alpha\,\mathcal{R}\,\beta,\gamma$ \quad \quad $\alpha\leq\beta,\gamma$ \quad \quad $\beta\leq\gamma$ \quad \quad $\beta\,\mathcal{R}\,\gamma$
$\alpha_0\,\mathcal{R}\,\alpha,\beta,\gamma$ \quad \quad $\alpha_0\leq\alpha,\beta,\gamma$ } \end{tabular}
One can define the recursive function $F$ exactly the same as \Cref{example01} with new definitions for $\varphi_{_\delta}$ and $B$: $$ \varphi_{_\alpha}:= \top \quad , \quad \varphi_{_\beta}=\varphi_{_\gamma}:=B\vee \neg B
\quad , \quad B:=(\exists{x}F(x)=\gamma)$$ Then we can define the first-order Kripke model $\mathcal{K}_1\Vdash \hbox{\sf HA}{} $ which simulates $\mathcal{K}_0$ in the same way as \Cref{example01}. Then we can deduce that $\mathcal{K}_1\nVdash {\sf Prov}_{_{\sf HA}}(\gnumber{B\vee \neg B})\vee \neg {\sf Prov}_{_{\sf HA}}(\gnumber{B\vee \neg B})$. Although we have refuted $\Box(p\vee\neg p)\vee \neg\Box(p\vee\neg p)$ from the $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $, the proposition $A$ is not refuted yet. But the key point here is that by Visser's Rule, we have $\hbox{\sf HA}{} \vdash {\neg\neg {\sf Prov}_{_{\sf HA}}(\gnumber{\neg\neg B\to B})}\to {{\sf Prov}_{_{\sf HA}}(\gnumber{\neg\neg B\to B})}$ if and only if $\hbox{\sf HA}{} \vdash {\sf Prov}_{_{\sf HA}}(\gnumber{\neg\neg B\to B})\vee \neg {\sf Prov}_{_{\sf HA}}(\gnumber{\neg\neg B\to B})$. And also by formalized Visser's Rule, we have: $$\hbox{\sf HA}{} \vdash {\sf Prov}_{_{\sf HA}}(\gnumber{B\vee \neg B}) \leftrightarrow {\sf Prov}_{_{\sf HA}}(\gnumber{\neg\neg B\to B})$$ This will finish the refutation process, i.e. $\hbox{\sf HA}{} \nvdash \neg\neg {\sf Prov}_{_{\sf HA}}(\gnumber{\neg\neg B\to B})\to {\sf Prov}_{_{\sf HA}}(\gnumber{\neg\neg B\to B})$. Visser's Rule says that for any $\Sigma_1$-sentence $B$, we have $\hbox{\sf HA}{} \vdash \neg\neg B\to B$ iff $\hbox{\sf HA}{} \vdash B\vee\neg B$. The proof of this rule first appeared in \cite{VisserThes} (see \Cref{corollar-NNIL properties} \cref{1corollar-NNIL properties}).
We will consider this proposition ($A$) again in \Cref{example2} and refute it from $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $ with the direct use of our main theorem in \Cref{sec-transforming}.
\end{example}
In all of the \Cref{example00,example01,example02}, the relation for the modal operator did not play an independent role, i.e. $\mathcal{R}$ and $\leq$ either were equal (\Cref{example01,,example02}) or could be defined as equal relations (\Cref{example00}). This made the definition of the recursive function $F$ considerably simpler. In the following example, there exist some $\alpha$ and $\beta$ such that $\alpha\leq \beta$ but it is not the case that $\alpha\,\mathcal{R}\, \beta$.
\begin{example}\label{example03}\em In this example we refute the modal proposition $$A:=\Box(p\vee q)\to [ (\Box p\to (p\vee q\vee \Box q))\vee (\Box q\to (p\vee q\vee \Box p))]$$ Like \Cref{example00,example01}, we first find a Kripke counter-model $\mathcal{K}_0\nVdash A$:
\begin{tabular}{r l} \quad \quad \parbox{3cm}{\includegraphics[scale=.6]{drawing4.eps}} \quad \quad \quad \quad & \parbox{9cm}{ $\gamma_1\Vdash p$ \quad \quad $\gamma_2\Vdash q$ \quad \quad
$\alpha_0,\alpha,\beta_1,\beta_2,\gamma_2\nVdash p$ \quad \quad $\alpha_0,\alpha,\beta_1,\beta_2,\gamma_1\nVdash q$
$\alpha_0\leq \alpha,\beta_1,\beta_2,\gamma_1,\gamma_2$ \quad \quad $\alpha_0\,\mathcal{R}\, \alpha,\beta_1,\beta_2,\gamma_1,\gamma_2$
$\alpha\leq \beta_1,\beta_2,\gamma_1,\gamma_2$ \quad \quad $\alpha\,\mathcal{R}\, \gamma_1,\gamma_2$ \quad \quad $\beta_i\leq\gamma_i$ \quad \quad $\beta_i\,\mathcal{R}\, \gamma_i$
$\alpha \NR \beta_1$ \quad \quad $\alpha\NR \beta_2$ } \end{tabular}
We simulate this Kripke model with a first-order Kripke model $\mathcal{K}_1\Vdash\hbox{\sf HA}{} $:
\begin{center} \includegraphics[scale=.6]{drawing4-2.eps} \end{center}
We define the $\Sigma_1$-sentences $B$ and $C$ and also the sentences $\varphi_{_\delta}$ for any $\delta\neq \alpha_0$ like before: $$B:=(\exists{x}F(x)=\gamma_1)\quad \quad C:=(\exists{x}F(x)=\gamma_2)\quad \quad \varphi_{_\alpha}:=B\vee C \quad \quad \varphi_{_{\beta_1}}:=B \quad \quad \varphi_{_{\beta_2}}:= C $$ \begin{equation}\label{eq-prop-of-firs-order-kripke-model}
\varphi_{_{\gamma_1}}:=\varphi_{_{\gamma_2}}:=B\wedge C \quad \quad \mathfrak{M}_\delta\models T_\delta \quad \quad T_\delta:=\hbox{\sf PA}{} +\lim_{x\to \infty}F(x)=\delta+{\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_{\delta}}}) \end{equation}
The recursive definition of $F$ is more complicated than in the previous examples. This is because we really have two different relations:
$\leq$ and $\mathcal{R}$. The clause in recursive definition of $F$ for the treatment of $\mathcal{R}$ is as before. For
$\leq$ we use a variant of Berarducci's primitive recursive function in \cite{Berarducci} which he used
for characterizing the interpretability logic of $\hbox{\sf PA}{} $. \\
We define $F(0):=\alpha_0$. Assume that we have defined $F(n):=\delta$, and we will define $F(n+1):=\delta'$
if one of the following cases occurs, otherwise we define $F(n+1):=F(n)=\delta$.
\begin{itemize}
\item $\delta\,\mathcal{R}\,\delta'$ and there exists some witness (which is less than or equal to $n+1$) for the inconsistency of $T_{\delta'}$,
or in other words,
there exists some proof (in $\hbox{\sf PA}{} $) with the G\"odel number $\leq n+1$ for the statement $$\neg[\lim_{x\to\infty}F(x)=\delta'\wedge {\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_{\delta'}}})]$$
\item All of the following conditions hold:
\begin{itemize}
\item $\delta\NR\delta'$ and $\delta\leq\delta'$,
\item There exists some witness (which is less than or equal to $n+1$) for the inconsistency of $T_{\delta'}$,
\item The inconsistency rank of $T_{\delta'}$ (we call it $r(\delta',n+1)$)
is less than the inconsistency rank of $T_\delta$ (we call it $r(\delta,n+1)$),
\item $F(r(\delta',n+1))\,\mathcal{R}\, \delta$. \end{itemize} The inconsistency rank of $T_\delta$ is defined to be the minimum $k$ such that there exists a witness (less than or equal to $n+1$)
for the inconsistency of $$\hbox{\sf PA}{} _k+\lim_{x\to\infty}F(x)=\delta+ {\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}})$$ In above definition, $\hbox{\sf PA}{} _k$ is the theory $I\Sigma_1$ plus induction axiom for those formulas with G\"odel number less than $k$.
\end{itemize}
The crucial fact about the function $F$ is that $F$ does not climb over the tree
(see \Cref{Lemma-limit is root}). This fact is crucial for proving that the first-order Kripke model $\mathcal{K}_1\Vdash \hbox{\sf HA}{} $ exists
such that it fulfils the conditions in \cref{eq-prop-of-firs-order-kripke-model}. By \Cref{corollar-4st&2st}, we have
$$T_\delta\vdash {\sf Prov}_{_{\sf HA}}(\gnumber{\varphi_{_\delta}})\quad \text{for any }\delta\neq\alpha_0$$
To simplify notations, we use $\Box A$ instead of ${\sf Prov}_{_{\sf HA}}(\gnumber{A})$ for arithmetical formula $A$. Hence we have
$$\mathfrak{M}_\alpha\models \Box (B\vee C) \quad \mathfrak{M}_{\beta_1}\models \Box B \quad
\mathfrak{M}_{\beta_2}\models \Box C \quad \mathfrak{M}_{\gamma_1} \models B\quad \mathfrak{M}_{\gamma_2}\models C$$
Moreover, we have
$$\mathfrak{M}_{\beta_1}\not \models B,C \quad
\quad \quad \mathfrak{M}_{\beta_2}\not \models B,C $$ We need two more conditions to deduce that
$$\mathcal{K}_1\nVdash \Box(B\vee C)\to [ (\Box B\to (B\vee C\vee \Box C))\vee (\Box C\to (B\vee C\vee \Box B))]$$
These two conditions are $\mathfrak{M}_{\beta_1}\not\models \Box C$ and $\mathfrak{M}_{\beta_2}\not\models \Box B$.
We will show in \Cref{Lemma-2st Properties of Solovay's Function} that these conditions hold as well.
The proof of this fact
take up all \Cref{subsection-proofofmaintheorem} and there, we use \Cref{Lemma-sigma_l translation} which is
the essential result in \Cref{Sec-Arithmetic}.
Why is $\leq$ not treated like $\mathcal{R}$ in the recursive definition of $F$?
Because if we do so, we are not able to prove that the function $F$ is constant (\Cref{Lemma-limit is root}) and even
the consistency of $L=\alpha_0$ and consequently the consistency of all the theories $T_\delta$ will be lost. \end{example}
\subsection{What happens in the classical case} The analogue of the main result of this paper in the classical case, i.e. the $\Sigma_1$-provability logic of $\hbox{\sf PA}{} $, was already characterized by A.~Visser \cite{VisserThes} and is remarkably simpler than in the intuitionistic case. A.~Visser showed: $${\sf GLV}\vdash A\quad\quad \Longleftrightarrow \quad\quad \forall{*} \ \hbox{\sf PA}{} \vdash A^*,$$ in which $*$ ranges over all of the interpretations such that $p^*$ is a $\Sigma_1$-sentence for atomic variables $p$, and ${\sf GLV}$ is $\hbox{\sf GL}{} $ plus the completeness axiom for atomic variables: $p\to \Box p$. For a proof of this fact see \cite{Boolos}, page~135. It is shown in \cite{reduction} that the provability logic of $\hbox{\sf PA}{} $ can be reduced to its $\Sigma_1$-provability logic.
\subsection{Map of sections} Let us explain the content of sections and their interrelationship.
All of the contents of this paper are minimally chosen for one major goal:
{\em soundness and completeness of $\sf{iH}_{\sigma}$ for arithmetical $\Sigma_1$-interpretations}, i.e.
\Cref{Theorem HA-Completeness,Theorem-Soundness}. In \Cref{sec-definitions}, we give
definitions of some elementary notions and also make some conventions.
In \Cref{Sec-Arithmetic}, we gather all the required statements with arithmetical nature.
Most of the lemmas and definitions are for proving a
refinement of Leivant's principle in \Cref{Lemma-sigma_l translation} (or its simplified form in
\Cref{Theorem-Leivant}). This will be used in \Cref{sec-transforming}.
In \Cref{sec-propositional}, we collect all required notions with propositional nature.
The most crucial fact we will show in this section is that in $\sf{iH}_{\sigma}$ (precise axiomatization of
$\sf{iH}_{\sigma}$ will come in \Cref{Sec-Aximatizing-TNNIL}),
one could transform any modal proposition $A$
to another proposition $A^+$ with simpler form, which is called $\hbox{\sf TNNIL}{} $ in this paper. Roughly speaking, in a $\hbox{\sf TNNIL}{} $-formula, every two nested implications in the left hand side are separated by a $\Box$. This is done in \Cref{Theorem HA-NNIL approximation is propositionally equivalent} and \Cref{corollar HA-NNIL approximation is propositionally equivalent}.
Then we show that the theory $\hbox{\sf LC}{}$ (intuitionistic version of $\hbox{\sf GL}{} $ plus the axiom schema $A\to\Box A$)
is $\hbox{\sf TNNIL}{} $-conservative over $\sf{iH}_{\sigma}$ (\Cref{Theorem-TNNIL Conservativity of LC over LLe+}). It turns out that
$\hbox{\sf LC}{}$ and $\sf{iH}_{\sigma}$ actually prove same $\hbox{\sf TNNIL}{} $ modal propositions (\Cref{Corollary-TNNIL-equaipotency}).
Moreover, it is shown in \Cref{Theorem-Propositional Completeness LC} that $\hbox{\sf LC}{}$ is sound and complete for a special class of
finite Kripke models (perfect Kripke models). In \Cref{sec-transforming}, we show that one could transform
a finite Kripke model of $\hbox{\sf LC}{}$ (with tree-frame) to a first-order Kripke model of $\hbox{\sf HA}{} $ (\Cref{Theorem-Main tool}).
This transformation is such that there is a natural correspondence between these two Kripke models.
Finally in \Cref{Section-Sigma Provability}, we use the results of \Cref{Sec-Arithmetic,,sec-propositional,,sec-transforming}, to prove the soundness and completeness of $\sf{iH}_{\sigma}$ for arithmetical
$\Sigma_1$-interpretations.
\section{Definitions and conventions}\label{sec-definitions} The propositional non-modal language $\mathcal{L}_0$ contains atomic variables, $\vee, \wedge, \ra, \bot$ and the propositional modal language, $\mathcal{L}_\Box$ has an additional operator $\Box$. In this paper, the atomic propositions (in modal or non-modal language) include atomic variables and $\bot$. For an arbitrary proposition $A$, ${\sf Sub}(A)$ is defined to be the set of all sub-formulae of $A$, including $A$ itself. We take ${\sf Sub}(X):=\bigcup_{A\in X}{\sf Sub}(A)$ for a set of propositions $X$. We use $\Boxdot A$ as a shorthand for $A\wedge\Box A$. The logic \hbox{\sf IPC} is intuitionistic propositional non-modal logic over the usual propositional non-modal language. The theory $\hbox{\sf IPC} _\Box$ is the same theory as \hbox{\sf IPC} but in the extended propositional modal language, i.e. its language is the propositional modal language and its axioms and rules are the same as those of \hbox{\sf IPC} . Because we have no axioms for $\Box$ in $\hbox{\sf IPC} _\Box$, it is obvious that $\Box A$ for each $A$, behaves exactly like an atomic variable inside $\hbox{\sf IPC} _\Box$.
First-order intuitionistic logic is denoted
$\hbox{\sf IQC}{} $ and the logic $\hbox{\sf CQC}{} $ is its classical closure, i.e. $\hbox{\sf IQC}{} $ plus the principle of excluded middle.
For a set of sentences and rules $\Gamma\cup\{A\}$ in the propositional non-modal, propositional modal or first-order language, $\Gamma\vdash A$ means that $A$ is derivable from $\Gamma$ in the system $\hbox{\sf IPC} , \hbox{\sf IPC} _\Box,\hbox{\sf IQC}{} $, respectively. For an arithmetical formula, $\ulcorner A\urcorner$ represents the G\"{o}del number of $A$. For an arbitrary arithmetical theory $T $ with a set of $\Delta_0$- axioms, we have the $\Delta_0$-predicate ${\sf Proof}_\tinysub{T}(x,\ulcorner A\urcorner)$, that is a formalization of ``$x$ is the code of a proof for $A$ in $T$". We also have the provability predicate ${\sf Prov}_\tinysub{T}(\ulcorner A \urcorner):=\exists{x}\ {\sf Proof}_\tinysub{T}(x,\ulcorner A \urcorner)$. The set of natural numbers is denoted by $\omega:=\{0,1,2,\ldots\}$.
\begin{definition}\label{Definition-Arithmetical substitutions} Suppose $T$ is a recursively enumerable (r.e.) arithmetical theory and $\sigma$ is a substitution, i.e.\ a function from atomic variables to arithmetical sentences. We define the interpretation $\sigma_{_T}$ which extends the substitution $\sigma$ to all modal propositions $A$, inductively: \begin{itemize} \item $\sigma_{_T}(A):=\sigma(A)$ for atomic $A$, \item $\sigma_{_T}$ distributes over $\wedge, \vee, \ra$, \item $\sigma_{_T}(\Box A):={\sf Prov}_{_T}(\ulcorner\sigma_{_T}(A)\urcorner)$.
\end{itemize} We call $\sigma$ a $\Sigma_1$-substitution, if for every atomic $A$, $\sigma(A)$ is a $\Sigma_1$-sentence. We also say that $\sigma_{_T}$ is a $\Sigma_1$-interpretation if $\sigma$ is a $\Sigma_1$-substitution. \end{definition}
\begin{definition}\label{Definition-Provability Logic} The provability logic of a sufficiently strong theory $T$, is defined to be a modal propositional theory $\mathcal{PL}(T)$ such that $\mathcal{PL}(T)\vdash A$ iff for all arithmetical substitutions $\sigma$, \ \ $T\vdash\sigma_{_T}(A)$. If we restrict the substitutions to $\Sigma_1$-substitutions, then the new modal theory is $\mathcal{PL}_{\sigma}(T)$. \end{definition}
\begin{lemma}\label{Lemma-boxed as atomic} Let $A(p_1, \ldots, p_n)$ be a non-modal proposition with $p_i\neq p_j$ for all $0<i<j\leq n$. Then for all modal sentences $B_1, \ldots, B_n$
we have:
$$\hbox{\sf IPC} \vdash A \text{\ \ iff\ \ } \hbox{\sf IPC} _\Box\vdash A[p_1|\Box B_1,\ldots,p_n|\Box B_n]$$ \end{lemma} \begin{proof} By simple inductions on complexity of proofs in \hbox{\sf IPC} and $\hbox{\sf IPC} _\Box$. \end{proof}
\noindent We define {\sf NOI} (No Outside Implication) as the set of modal propositions $A$, such that any occurrence of $\ra$ is in the scope of some $\Box$. To be able to state an extension of Leivant's Principle (that is adequate to axiomatize $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $) we need a translation on the modal language which we call \emph{Leivant's translation}. We define it recursively as follows: \begin{itemize} \item $A^l:=A$ for atomic or boxed $A$, \item $(A\wedge B)^l:=A^l\wedge B^l$. \item $(A\vee B)^l:=\Boxdot A^l\vee\Boxdot B^l$. \item $(A\ra B)^l$ is defined by cases: If $A\in {\sf NOI}$, we define $(A\ra B)^l:=A\ra B^l$, otherwise we define $(A\ra B)^l:=A\ra B$. \end{itemize}
\begin{definition} \label{Def-Axiom schema and modal theories} Minimal provability logic {\sf iGL}, is the same as G\"{o}del-L\"{o}b provability logic \hbox{\sf GL}{} , with all tautologies of intuitionistic logic (in modal language) instead of tautologies of classical logic.
\ ${\sf iK4}$ is ${\sf iGL}$ without L\"{o}b's axiom. Note that we can get rid of the necessitation rule by adding $\Box A$ to the axioms, for each axiom $A$ in the above list. We will use this fact later in this paper. We list the following axiom schemas: \begin{itemize} \item The Completeness Principle: $\hbox{\sf CP}{} :=A\ra\Box A$. \item Restriction of Completeness Principle to atomic formulae: $\hbox{\sf CP}{} _{\sf a}:=p\ra\Box p$, for atomic $p$. \item Leivant's Principle: ${\sf Le}:=\Box(B\vee C)\ra\Box (\Box B\vee C)$. \cite{Leivant-Thesis} \item Extended Leivant's Principle: ${\sf Le}^+:=\Box A\ra\Box A^l$. \end{itemize} We define theories $\hbox{\sf LC}{}:={\sf iGL}+\hbox{\sf CP}{} $ and $\hbox{\sf{LLe}}{}^+:={\sf iGL}+{\sf Le}^++\hbox{\sf CP}{} _{\sf a}$. Note that in the presence of \hbox{\sf CP}{} and modus ponens, the necessitation rule is superfluous.
\end{definition}
\section{Arithmetic}\label{Sec-Arithmetic} In this section, we gather some preliminaries from intuitionistic arithmetic. Mostly we will prove some refinements of well-known theorems such as: $\Pi_2$-conservativity of $\hbox{\sf PA}{} $ over $\hbox{\sf HA}{} $,
G\"odel's diagnolization lemma and $\Sigma_1$-completeness of $\hbox{\sf HA}{} $. Most of these preliminaries will be used to prove a refinement of Leivant's principle $\Box (A\vee B)\to\Box(\Boxdot A\vee\Boxdot B)$ in the technical \Cref{Lemma-qtranslation2}.
\Cref{Theorem-Leivant} states a simplified version of \Cref{Lemma-qtranslation2}.
\subsection{Some arithmetical preliminaries} The first-order language of arithmetic contains three functions (successor, addition and multiplication), one predicate symbol and a constant: $(S,+,.,\leq,0)$. First-order intuitionistic arithmetic ($\hbox{\sf HA}{} $) is the theory over $\hbox{\sf IQC}{} $ with the axioms:
\begin{enumerate}
\item[Q1] $S(x)\neq 0$,
\item[Q2] $S(x)=S(y)\ra x=y$,
\item[Q3] $y=0\vee\exists{x}\;S(x)=y$,
\item[Q4] $x+0=x$,
\item[Q5] $x+S(y)=S(x+y)$,
\item[Q6] $x.0=0$,
\item[Q7] $x.S(y)=(x.y)+x$,
\item[Q8] $x\leq y\lr\exists{z}(z+x=y)$,
\item[Ind:] For each formula $A(x)$:
$${\sf Ind}(A,x):=\mathcal{UC}[(A(0)\wedge\forall{x}(A(x)\ra A(S(x))))\ra\forall{x}A(x)]$$
In which $\mathcal{UC}(B)$ is the universal closure of $B$.
\end{enumerate} Peano Arithmetic \hbox{\sf PA}{} \!\! has the same axioms as \hbox{\sf HA}{} over \hbox{\sf CQC}{} \!\!. We also define $x<y$ as $x\leq y\wedge x\neq y$. Let $T$ be an r.e.~theory with the set of axioms $A_1,A_2,\ldots$. As is customary in the literature (see e.g.~\cite[section~2.3]{Berarducci} or \cite[section~8.1]{Visser02}), $T_n$ denotes the theory with the first $n$ axioms of $T$, i.e.\ $A_1,\ldots,A_n$. In the following notation,
we order the axioms of $\hbox{\sf HA}{} $ and $\hbox{\sf PA}{} $ in a way which best fit the relevant lemmas and theorems in this paper.
\begin{notation} From now on, when we are working in first-order language of arithmetic, for a first-order sentence $A$, $\Box A$ and $\Box^+A$ are shorthand for ${\sf Prov}_\tinysub{\sf HA}(\ulcorner A\urcorner)$ and ${\sf Prov}_\tinysub{\sf PA}(\ulcorner A\urcorner)$ , respectively.
Let $i{\Sigma}_1$ be the theory $\hbox{\sf HA}{} $, where the induction principle is restricted to $\Sigma_1$-formulae. We also define the theory $\hbox{\sf HA}{} _x$ to be the theory with the axioms of $\hbox{\sf HA}{} $, in which the induction principle is restricted to formulas satisfying at least one of the following conditions:
\begin{itemize} \item formulas of the form $(A\ra B)\ra B$ in which $A$ and $B$ are $\Sigma_1$. \item formulas with G\"{o}del number less than $x$. \end{itemize}
We can define a similar concept for $\hbox{\sf PA}{} _x$. Note that classically, a formula of the form $(A\ra B)\ra B$ in which $A$ and $B$ are $\Sigma_1$, is equivalent to the $\Sigma_1$-formula $A\vee B$ and hence $\hbox{\sf PA}{} _0$ is the well-known theory $I\Sigma_1$. We also define $\Box_x A$ and $\Box^+_x A$ to be ${\sf Prov}_{_{{\sf HA}_x}}(\ulcorner A\urcorner) $ and ${\sf Prov}_{_{{\sf PA}_x}}(\ulcorner A\urcorner)$, respectively. \end{notation} \noindent
\noindent We recall that a function $f$ on $\omega:=\{0,1,2,\ldots\}$ is recursive iff there exists some $\Sigma_1$-formula $A_f(\bar{x},y)$ such that $\mathbb{N}\models A_f(\bar{x},y)$ iff $f(\bar{x})=y$. It is called provably total in $T$, iff $T\vdash\forall\bar{x}\exists{y}A_f(\bar{x},y)$.
It is well known that all primitive recursive functions are provably total in $I\Sigma_1$ with a $\Delta_0$-formula as defining formula. So we may use primitive recursive function symbols in the language of arithmetic with their defining axioms (as far as we work in $I\Sigma_1$).
\begin{lemma}\label{Lemma-Conservativity of HA} Let $A$, $B$ be $\Sigma_1$-formulae such that $\hbox{\sf PA}{} \vdash A\ra B$. Then $\hbox{\sf HA}{} \vdash A\ra B$. \end{lemma} \begin{proof} Let $\hbox{\sf PA}{} \vdash A\to B$. Then, as is well known in classical logic, we have $\hbox{\sf PA}{} \vdash \neg A\vee B$. Since $A$ and $B$ are $\Sigma_1$, there are some $\Delta_0$ formulas $A'(x)$ and $B'(y)$ such that $A=\exists x A'(x)$ and $B=\exists y B'(y)$. We may assume that $x$ is not free in $B'$ and $y$ is not free in $A'$. Hence we may deduce that $\hbox{\sf PA}{} \vdash \exists y (\neg A'(x)\vee B'(y))$. By $\Pi_2$-conservativity of $\hbox{\sf PA}{} $ over $\hbox{\sf HA}{} $ \cite[3.3.4]{TD}, we can deduce that $\hbox{\sf HA}{} \vdash \exists y (\neg A'(x)\vee B'(y))$. Then we may deduce that $\hbox{\sf HA}{} \vdash \exists y (A'(x)\to B'(y))$ and hence (since $y$ is not free in $A'$) $\hbox{\sf HA}{} \vdash A'(x)\to B$ and by the generalization rule $\hbox{\sf HA}{} \vdash \forall{x}(A'(x)\to B)$. This implies that $\hbox{\sf HA}{} \vdash A\to B$ (since $x$ is not free in $B$). \end{proof}
\begin{lemma}\label{Lemma-decidability of delta formulae} For any $\Delta_0$-formula $A(\bar{x})$, we have $\hbox{\sf HA}{} _0\vdash\forall\bar{x}(A(\bar{x})\vee\neg A(\bar{x}))$. \end{lemma} \begin{proof} This is well-known in the literature. \end{proof}
\noindent The G\"{o}del-Gentzen translation associates a formula $A^g$ for any formula $A$ in a first-order language, and is defined inductively by the following items: \begin{itemize} \item $A^g:=A$, for atomic $A$, \item $(A\wedge B)^g:=A^g\wedge B^g$, \item $(A\vee B)^g:=\neg(\neg A^g\wedge\neg B^g)$, \item $(A\ra B)^g:=A^g\ra B^g$, \item $(\forall{x}A)^g:=\forall{x}A^g$ , \item $(\exists{x}A)^g:=\neg\neg\,\exists{x}\, A^g$. \end{itemize}
The Friedman translation associates a formula $A^C$, for an arbitrary formula $C$ and $A$ in first-order language. Roughly speaking, $A^C$ is the result of adding $C$ as a disjunct to all atomic sub-formulas of $A$. To define $A^C$, we assume that free variables of $C$ do not appear as bound variables of $A$. It is obvious that we can always take care of this detail by renaming bound variables of $A$ to fresh variables.
\begin{itemize} \item $A^C:=A\vee C$, for atomic $A$, \item $(A\wedge B)^C:=A^C\wedge B^C$, \item $ (A\vee B)^C:=A^C\vee B^C$, \item $(A\ra B)^C:=A^C\ra B^C$, \item $(\forall{x} A)^C:=\forall{x}A^C$, \item $(\exists{x} A)^C:=\exists{x}A^C$. \end{itemize}
As shown in \cite{TD}, we have the following properties for G\"{o}del-Gentzen and Friedman translations: \begin{itemize} \item For each $\Sigma_1$-formula $A$ in the language of arithmetic, $\hbox{\sf HA}{} \vdash A^g\lr\neg\neg A$ and $\hbox{\sf HA}{} \vdash A^C\leftrightarrow(A\vee C)$. \item For any $A$ in the language of arithmetic, $ \hbox{\sf CQC}{} \vdash A$ implies $\hbox{\sf IQC}{} \vdash A^g$. \item $\hbox{\sf HA}{} _0$ is closed under Friedman's translation with respect to $\Sigma_1$-formulas. i.e. for any $\Sigma_1$-formula $B$ and any $A$, $\hbox{\sf HA}{} _0\vdash A$ implies $\hbox{\sf HA}{} _0\vdash A^B$. Actually in \cite{TD}, this property is proved for $\hbox{\sf HA}{} $ instead of $\hbox{\sf HA}{} _0$, but this case is very similar to that one. \end{itemize}
\noindent We have the following variant of \Cref{Lemma-Conservativity of HA}.
\begin{lemma}\label{Lemma-Godel-Gentzen Translation} For any $\Sigma_1$-formula $A$, $\hbox{\sf PA}{} _0\vdash A$ implies $\hbox{\sf HA}{} _0\vdash A$. Hence for any $\Pi_2$-sentence $A$, $\hbox{\sf PA}{} _0\vdash A$ implies $\hbox{\sf HA}{} _0\vdash A$. \end{lemma} \begin{proof} First observe that $\hbox{\sf PA}{} _0\vdash B$ implies $\hbox{\sf HA}{} _0\vdash B^g$, by induction on proof of $B$ in $\hbox{\sf PA}{} _0$. We refer the reader to \cite{TD} for a detailed proof of this fact for $\hbox{\sf PA}{} $ and $\hbox{\sf HA}{} $ instead of $\hbox{\sf PA}{} _0$ and $\hbox{\sf HA}{} _0$. It should only be noted that for any instance $B$ of induction over $\Sigma_1$ formulae in $\hbox{\sf PA}{} _0$, by definition of G\"{o}del-Gentzen translation, $B^g$ belongs to the axioms of $\hbox{\sf HA}{} _0$.
Hence, we have $\hbox{\sf HA}{} _0\vdash \neg\neg A$, and thus $\hbox{\sf HA}{} _0\vdash (\neg\neg A)^A$. This implies $\hbox{\sf HA}{} _0\vdash A$, as desired. \end{proof}
\noindent Consider the mapping: \[ F: n \mapsto A({ S}^n(0)):=A(\overbrace{{ S\ldots S}}^{n\text{ times}}(0))\] Let $G$ be the primitive recursive function that assigns to $n$ the G\"odel number of $F(n)$. Instead of $G(x)$, we use the notation $\ulcorner A(\dot{x})\urcorner$ which is common in the literature. We may omit the dot over variables when no confusion is likely.
\begin{lemma}\label{Lemma-diagonalization lemma} For every formula $A(x,x_1\ldots,x_n)$ with free variables exactly as shown, there exists a formula $B(x_1,\ldots,x_n)$ such that $$\hbox{\sf HA}{} _0\vdash B(x_1,\ldots,x_n)\lr A(\ulcorner B(\dot{x}_1,\ldots,\dot{x}_n)\urcorner,x_1,\ldots,x_n)$$ Moreover, if the formula $A$ is $\Delta_0$, then $B$ is also $\Delta_0$. \end{lemma}
\begin{proof} It is easy to see that the usual proof of the fixed point lemma holds in this setting. \end{proof} The following lemma states the $\Sigma_1$-completeness of $\hbox{\sf HA}{} _0$. \begin{lemma}\label{Lemma-bounded Sigma completeness} $\hbox{\sf HA}{} _0$ proves all true $\Sigma_1$ sentences. Moreover this argument is formalizable and provable in $\hbox{\sf HA}{} _0$, i.e. for every $\Sigma_1$-formula $A(x_1,\ldots,x_k)$ we have $\hbox{\sf HA}{} _0\vdash{ A(x_1,\ldots,x_k)\ra\Box_0A(\dot{x}_1,\ldots,\dot{x}_k)}$. \end{lemma} \begin{proof} It is a well-known fact that any true (in the standard model $\mathbb{N}$) $\Sigma_1$-sentence is provable in $i{\Sigma}_1$. Moreover this argument is constructive and formalizable in $i{\Sigma}_1$. \end{proof}
\begin{lemma}\label{Lemma-Reflection} For every formula $A$, we have $\hbox{\sf PA}{} \vdash \forall{x}\ \Box^+(\Box^+_xA\ra A)$ and $\hbox{\sf HA}{} \vdash\forall{x}\ \Box(\Box_xA\ra A)$. \end{lemma} \begin{proof} The case of $\hbox{\sf PA}{} $ is well known. For the case $\hbox{\sf HA}{} $, see \cite{Smorynski-Troelstra} or \cite[Theorem 8.1]{Visser02}. \end{proof}
\subsubsection{Coding of finite sequences} We use some fixed method for encoding of finite sequences and use $\langle x_1,\ldots,x_n\rangle$ as the code of the finite sequence $(x_1,\ldots,x_n)$. We assume here that the encoding is a one-one correspondence between natural numbers and the associated finite sequences.
For details on coding of finite sequences, we refer the reader to \cite{Smorynski-Book}, Chapter 0.
\noindent Let $x=\langle x_0, x_1,\ldots,x_n\rangle$ and $y=\langle y_0,y_1,\ldots,y_m\rangle$. The following notations are used in this paper: \begin{itemize} \item ${\sf lth}(x)$ is defined as the length of the sequence with the code $x$, i.e. here ${\sf lth}(x):=n+1$, \item $x*y:=\langle x_0,\ldots,x_n,y_0,\ldots,y_m\rangle$, \item $(x)_i$ is defined (if $i<{\sf lth}(x)$) as the $i$-th element in the sequence with the code $x$, i.e. here $(x)_i:=x_{i}$. If also $i\geq{\sf lth}(x)$, we define $(x)_i:=0$, \item $\hat{x}$ is defined as the final element of the sequence with the code $x$, i.e. here $\hat{x}:=(x)_{{\sf lth}(x)\dot{-}1}$, \item $x$ is an initial segment of $y$ ($x\subseteq_{\sf i}y$) if ${\sf lth}(x)\leq{\sf lth}(y)$ and for all $ j< {\sf lth}(x)$, we have $(x)_j=(y)_j$. \end{itemize}
\subsubsection{Kripke models of \hbox{\sf HA}{} }\label{sec-KripkeModelFirstOrder} A first-order Kripke model for $\hbox{\sf HA}{} $ is a triple $\mathcal{K}=(K,<,\mathfrak{M})$ such that: \begin{itemize} \item The frame of $\mathcal{K}$, i.e. $(K,<)$, is a non-empty partially ordered set, \item $\mathfrak{M}$ is a function from $K$ to the first-order classical structures for the language of the arithmetic, i.e. $\mathfrak{M}(\alpha)$ is a first-order classical structure, for each $\alpha\in K$, \item For any $\alpha\leq\beta\in K$, $\mathfrak{M}(\alpha)$ is a weak substructure of $\mathfrak{M}(\beta)$. \end{itemize}
For any $\alpha\in K$ and first-order formula $A\in\mathcal{L}_\alpha$ (the language of arithmetic augmented with constant symbols $\bar{a}$ for each $a\in|\mathfrak{M}(\alpha)|$),
we define $\mathcal{K},\alpha\Vdash A$
(or simply $\alpha\Vdash A$, if no confusion is likely) inductively as follows:
\begin{itemize}
\item For atomic $A$, $\alpha\Vdash A$ iff $\mathfrak{M}(\alpha)\models A$.
Note that in the structure $\mathfrak{M}(\alpha)$, $\bar{a}$ is
interpreted as $a$,
\item $\mathcal{K},\alpha\Vdash A\vee B$ iff $\mathcal{K},\alpha\Vdash A$ or $\mathcal{K},\alpha\Vdash B$,
\item $\mathcal{K},\alpha\Vdash A\wedge B$ iff $\mathcal{K},\alpha\Vdash A$ and $\mathcal{K},\alpha\Vdash B$,
\item $\mathcal{K},\alpha\Vdash A\ra B$ iff for all $\beta\geq\alpha$, $\mathcal{K},\beta\Vdash A$ implies $\mathcal{K},\beta\Vdash B$,
\item If $A=\forall{x}B$, $\alpha\Vdash A$ iff for all $\beta\geq\alpha$ and each
$b\in|\mathfrak{M}(\beta)|$, we have $\beta\Vdash B[x:\bar{b}]$.
\end{itemize}
It is well-known in the literature that $\hbox{\sf HA}{} $ is complete for first-order Kripke models.
\begin{lemma}\label{Lemma-Sigma-local-global}
Let $\mathcal{K}=(K,<,\mathfrak{M})$ be a Kripke model of $\hbox{\sf HA}{} $ and $A$ be an arbitrary $\Sigma_1$-formula. Then for each $\alpha\in K$, we have $\alpha\Vdash A$ iff $\mathfrak{M}(\alpha)\models A$.
\end{lemma}
\begin{proof}
Use induction on the complexity of $A$ to show that for each $\alpha\in K$, we have
$\alpha\Vdash A$ iff $\mathfrak{M}(\alpha)\models A$. In the inductive step for $\to$ and $\forall$,
use \Cref{Lemma-decidability of delta formulae}.
\end{proof}
\subsection{q-Realizability and Leivant's principle}
A variant of realizability introduced by Kleene, is $\mathrel{\sf q}$-realizability (see \cite{TD}) which is defined inductively for arithmetical formula $A$ as follows:
\begin{itemize}
\item $x\mathrel{\sf q} A:=A$ for atomic $A$.
\item $x\mathrel{\sf q} (A_1\wedge A_2):= {\sf j}_1(x)\mathrel{\sf q} A_1\wedge {\sf j}_2(x)\mathrel{\sf q} A_2$,
\item $x\mathrel{\sf q} (A_1\vee A_2):=({\sf j}_1(x)=0\rightarrow {\sf j}_2(x)\mathrel{\sf q} A_1)\wedge({\sf j}_1(x)\neq0\rightarrow {\sf j}_2(x)\mathrel{\sf q} A_2)$,
\item $x\mathrel{\sf q} (A_1\ra A_2):=\forall{y}\, (y\mathrel{\sf q} A_1\rightarrow \exists{u}\, ({\sf T}xyu\wedge {\sf U}(u)\mathrel{\sf q} A_2))\wedge (A_1\ra A_2) $,
\item $x\mathrel{\sf q} \exists{y}A(y):=j_1(x)\mathrel{\sf q} A(j_2(x))$,
\item $x\mathrel{\sf q}\forall{y}A(y):=\forall{y}\,\exists{u}\, ({\sf T}xyu\wedge {\sf U}(u)\mathrel{\sf q} A(y))$ \end{itemize}
In the above definition, ${\sf j}_1$ and ${\sf j}_2$ are the inverses of a one-to-one, onto pairing function ${\sf j}$, such that $x={\sf j}({\sf j}_1(x),{\sf j}_2(x))$. Also ${\sf T}xyu$ is Kleene's predicate formalizing ``$u$ is a computation for the Turing Machine with code $x$ with input $y$'', and {\sf U} is the result extractor function, i.e. if $u$ is a computation for a Turing Machine, then ${\sf U}(u)$ is its output.
\begin{lemma}\label{Lemma-qrealizability}
For any formula $A$ we have $\hbox{\sf HA}{} _0\vdash x\!\mathrel{\sf q}\! A\ra A$. \end{lemma} \begin{proof} See \cite{TD}. \end{proof}
\noindent In the following, $\{x\}$ is partial recursive function of the Turing Machine with the code $x$. The notation $\{x\}y\!\!\downarrow$ means that ``the function $\{x\}$ is defined on input $y$", or equivalently ``the Turing machine with the code $x$ halts on the input $y$". It is well known that $\{x\}y\!\!\downarrow$ is a $\Sigma_1$ sentence. We use terms which contain some Kleene's bracket notation. In that case, we use $t\!\!\downarrow$ to mean that all the brackets in $t$ are defined (terminate).
One immediate consequence of $\mathrel{\sf q}$-realizability, is Church's Rule for $\hbox{\sf HA}{} $: \begin{lemma}\label{Lemma-Chirch rule} For every formula $A(x,y)$, if $\hbox{\sf HA}{} \vdash\forall{x}\,\exists{y}\,A(x,y)$, then there exists some $n\in \omega$ such that $\hbox{\sf HA}{} \vdash\forall{x}\, ({\{n\}(x)\!\!\downarrow}\wedge A(x,\{n\}(x)))$. \end{lemma} \begin{proof} See \cite{TD}. \end{proof}
\noindent It is easy to observe that ``$\hbox{\sf HA}{} \vdash A$" implies ``there exists some $n$ such that $\hbox{\sf HA}{} \vdash n\mathrel{\sf q} A$"(\cite{TD}). The point of the following lemma is that we can refine the above statement in the following way. There exists some recursive function $f$ such that ``$\hbox{\sf HA}{} _m\vdash A(k_1,\ldots,k_l)$" implies ``there exists some recursive function $g$ such that $\hbox{\sf HA}{} _{f(m)}\vdash g(k_1,\ldots,k_l)\mathrel{\sf q} A(k_1,\ldots,k_l)$". Moreover, we can formalize this statement in $\hbox{\sf HA}{} $.
\begin{lemma}\label{Lemma-upper bound for realizability} Suppose that $A(x_1,\ldots,x_m)$ is an arithmetical formula with free variables as shown. Then, there exists a provably \textup{(}in $\hbox{\sf HA}{} $\textup{)} total recursive function $f$ such that: $$\hbox{\sf HA}{} \vdash\Box_x A(\dot{x}_1,\ldots,\dot{x}_m)\ra\exists{z}\; \Box_{f(x)}({\{\dot{z}\}\langle \dot{x}_1,\ldots,\dot{x}_m \rangle\!\!\downarrow}\wedge\{\dot{z}\}\langle \dot{x}_1,\ldots, \dot{x}_m\rangle\mathrel{\sf q} A(\dot{x}_1,\ldots,\dot{x}_m))$$ \end{lemma}
\begin{proof} The proof is very similar to the proof of the soundness part of \cite[Theorem~4.10]{TD}. First define $f(n)$ in this way:
$$f(n):=\text{max}(\{\gnumber{B^{\mathrel{\sf q},x}}\ |\ \gnumber{B}< n,\text{ $x$ is a free variable of $B$}\}\cup\{n\})$$ in which, $ B^{\mathrel{\sf q},x}:=\{ t(u)\}\langle x\rangle\!\!\downarrow \wedge\; \{ t(u)\}\langle x\rangle \mathrel{\sf q} B$,
$u\!\neq\! x$ and $t(u)$ is a primitive recursive function that will be defined later in the proof.
Let's fix some sequence of numbers $\boldsymbol{m}$.
With induction on the complexity of the proof
$\hbox{\sf HA}{} _n\vdash A(\boldsymbol{m})$, we show that (by $A(\boldsymbol{m})$, we mean $A[\boldsymbol{x}:\boldsymbol{m}]$) \begin{center}
$\hbox{\sf HA}{} \vdash$ ``$\hbox{\sf HA}{} _n\vdash A(\boldsymbol{m})$'' $\rightarrow$ $\exists z$
``$\hbox{\sf HA}{} _{f(n)}\vdash \{z\}\langle\boldsymbol{m}\rangle\!\!\downarrow \wedge \{z\}\langle\boldsymbol{m}\rangle \mathrel{\sf q} A(\boldsymbol{m})$''
\end{center}
We only treat the case where $A$ is an instance of induction schema. All the other cases are trivial.
Assume that $\gnumber{B}<n$ and
$$A(\boldsymbol{m})=(B[x:0]\wedge\forall{x}(B\to B[x:S(x)]))\to \forall{x}B$$
We should find some number $\{z\}\langle\boldsymbol{m}\rangle= k$ such that
$$\hbox{\sf HA}{} _{f(n)}\vdash k\mathrel{\sf q} [(B(0)\wedge\forall{x}(B(x)\to B(x+1)))\to \forall{x}B]$$
By definition of $\mathrel{\sf q}$-realizability, we have: \begin{equation*}
k\mathrel{\sf q} A(\boldsymbol{m})=\overbrace{\forall{u}[u\mathrel{\sf q} (B(0)\wedge\forall{x}(B(x)\to B(x+1)))\to (\{k\}(u)\!\!\downarrow\wedge \{k\}(u)\mathrel{\sf q} \forall{x}B)]}^{C}\wedge A(\boldsymbol{m})
\end{equation*}
Since $f(n)\geq n$, we have $\hbox{\sf HA}{} _{f(n)}\vdash A(\boldsymbol{m})$. Hence it remains only to show that
$\hbox{\sf HA}{} _{f(n)}\vdash C$. Define the primitive recursive function $t(u)$ in the following way. For any given $u$, $t(u)$ is the code of the Turing Machine that fulfills the following conditions: $$ \begin{cases} \{t(u)\}\langle 0\rangle=j_1(u)\\ \{t(u)\}(x+1)=\{\{j_2(u)\}\langle x\rangle\}\langle\{t(u)\}\langle x\rangle\rangle \end{cases} $$ Finally, let $k$ be the code of the Turing Machine that computes the primitive recursive function $t$. Now it is not difficult to observe that, by induction on $B^{\mathrel{\sf q},x}$, one could deduce $C$ in $\hbox{\sf HA}{} _0$, and hence $\hbox{\sf HA}{} _{f(n)}\vdash C$. This implies $\hbox{\sf HA}{} _{f(n)}\vdash A(\boldsymbol{m})$, as desired. \end{proof}
\begin{lemma}\label{Lemma-Reflection refinement} For every sentence $A$, there exists some provably (in $\hbox{\sf HA}{} $) total recursive function $h_A$ such that $\hbox{\sf HA}{} \vdash\forall{x}\, \Box_{h_{_{\!A}}\!(x)}(\Box_{\dot{x}}A\ra A)$. \end{lemma}
\begin{proof} By \Cref{Lemma-Reflection} we have $\hbox{\sf HA}{} \vdash\forall{x}\,\exists{y}\, \Box_y(\Box_{\dot{x}}A \ra A)$. Now we have the desired result by use of \Cref{Lemma-Chirch rule}. \end{proof}
\begin{lemma}\label{Lemma-Sigma sentences are autoq} Suppose that $A(x_1,\ldots,x_m)$ is a $\Sigma_1$-formula with variables as shown. Then there exists some $n_\tinysub{A}\in\mathbb{N}$, such that $$\hbox{\sf HA}{} \vdash A(x_1,\ldots,x_m)\rightarrow ({\{n_\tinysub{A}\}\langle x_1,\ldots,x_m \rangle\!\!\downarrow}\wedge\{n_\tinysub{A}\}\langle x_1,\ldots,x_m\rangle\mathrel{\sf q} A(x_1,\ldots,x_m))$$ \end{lemma} \begin{proof} This theorem for $\textbf{r}$-realizability instead of $\mathrel{\sf q}$-realizability is proved in \cite{TD}(Proposition 4.4.5). The proof for $\textbf{q}$-realizability is quite similar and we leave it to the reader. \end{proof}
It is well-known that the disjunction property holds for \hbox{\sf IPC} and $\hbox{\sf HA}{} $, however it is also shown that in case of \hbox{\sf HA}{} , the proof is
not formalizable in $\hbox{\sf HA}{} $, i.e. $\hbox{\sf HA}{} \nvdash\Box(A\vee B)\ra(\Box A\vee\Box B)$. But this is not the end of the story! Daniel Leivant in his PhD dissertation \cite{Leivant-Thesis} showed that $\hbox{\sf HA}{} \vdash\Box(A\vee B)\ra\Box( A\vee\Box B)$. Albert Visser in an unpublished paper showed that we can extend Leivant's principle to the following version. For every $\Sigma_1$-sentence $A$, $\hbox{\sf HA}{} \vdash\Box (A\rightarrow (B\vee C))\ra\Box(A\ra(\Box B\vee C))$. In the following theorem, we will show that we can find (constructively) from the code $x$ of the proof of $A\ra(B\vee C)$, some $f(x)$ such that $\Box(A\ra(\Box_{f(x)}B\vee C))$ holds. Although the statement of this theorem will not be used later in this paper, we present it here for a better understanding of its generalization in a more technical lemma, i.e. \Cref{Lemma-qtranslation2}.
\begin{theorem}\label{Theorem-Leivant} For arbitrary sentences $A,B,C$ such that $A\in\Sigma_1$, there exists a provably (in $\hbox{\sf HA}{} $) total recursive function $f$ such that $$\hbox{\sf HA}{} \vdash \Box_x (A\ra(B\vee C))\rightarrow \Box_{f(x)}(A\ra(\Box_{f(x)}B\vee C))$$ \end{theorem} \begin{proof} First observe that, by \Cref{Lemma-Sigma sentences are autoq}, there exists some finite number $n_\tinysub{A}\in \mathbb{N}$ such that $\hbox{\sf HA}{} \vdash A\rightarrow ({\{n_\tinysub{A}\}\langle\rangle\!\!\downarrow}\wedge\{n_\tinysub{A}\}\langle\rangle\mathrel{\sf q} A)$. We set $t_0:=\{n_\tinysub{A}\}\langle\rangle$. Hence there exists some $n_0\in\mathbb{N}$ such that \begin{equation}\label{Eq Lei1} \hbox{\sf HA}{} \vdash\Box_{n_0}( A\rightarrow ({t_0\!\!\downarrow}\wedge t_0\mathrel{\sf q} A)) \end{equation}
We work inside $\hbox{\sf HA}{} $. Assume $\Box_x (A\ra(B\vee C) )$. By
\Cref{Lemma-upper bound for realizability}, there exists some $z$ such that $\Box_{g_{_0}(x)}({\{\dot{z}\}\langle\rangle\!\!\downarrow}\wedge \{\dot{z}\}\langle\rangle\mathrel{\sf q} (A\rightarrow (B\vee C)))$, in which $g_{_0}$ is the recursive function provided by \Cref{Lemma-upper bound for realizability}. We define $t_1:=\{\dot{z}\}\langle\rangle$ and hence we have $\Box_{g_{_0}(x)}t_1\!\!\downarrow$. If we set $g_{_1}(y):=g_{_0}(y)+n_{_0}$, by use of \cref{Eq Lei1}, we can deduce $\Box_{g_{_1}(x)}(A\rightarrow ({t_0\!\!\downarrow} \wedge \{t_1\}(t_0)\mathrel{\sf q} (B\vee C)))$. We set $t_2:=\{t_1\}(t_0)$. Then, by definition of $\mathrel{\sf q}$-realizability, we have:
\[ \Box_{g_{_1}(x)}(A\rightarrow ({t_2\!\!\downarrow}\wedge (j_1(t_2)=0\ra j_2(t_2)\mathrel{\sf q} B)\wedge (j_1(t_2)\neq 0\ra j_2(t_2)\mathrel{\sf q} C))).\]
Let $B':=(j_1(t_2)=0)\ra j_2(t_2)\mathrel{\sf q} B$ and $C':=(j_1(t_2)\neq 0)\ra j_2(t_2)\mathrel{\sf q} C$. Then we have ${\Box_{g_{_1}(x)}(A\ra B')}$
and, hence, by $\Sigma_1$-completeness (\Cref{Lemma-bounded Sigma completeness}), we can deduce $\Box_{0}\Box_{g_{_1}(x)}(A\ra B')$, that again by use of \Cref{Lemma-bounded Sigma completeness}, implies $\Box_0(A\ra\Box_{g_{_1}(x)}B')$. Thus we have $$\Box_{g_{_1}(x)}(A\rightarrow ({t_2\!\!\downarrow}\wedge\Box_{g_{_1}(x)}B'\wedge C'))$$
Again by
\Cref{Lemma-bounded Sigma completeness} and \Cref{Lemma-qrealizability}, $\Box_{g_1(x)}(A\rightarrow ({t_2\!\!\downarrow}\wedge(j_1(t_2)=0\rightarrow \Box_{g_{_1}(x)}B)\wedge(j_1(t_2)\neq0\ra C)))$. Since atomic formulae are decidable in $\hbox{\sf HA}{} $, for any atomic formula $D$ there exists some finite $n_2$ such that in $\hbox{\sf HA}{} _{n_2}$ we have decidability of $D$. Let $\hbox{\sf HA}{} _{n_2}+t_2\!\!\downarrow$ decide $j_1(t_2)=0$. If we set $f(x):=g_{_1}(x)+n_2$, we can deduce $\Box_{f(x)}(A\ra(\Box_{f(x)}B\vee C))$, as desired. \end{proof}
\subsection{The extended Leivant's Principle}\label{Sec-ExLePr} In this section, we study properties of the extended Leivant's principle, ${\sf Le}^+$. We prove that for any $\Sigma_1$-substitution $\sigma$, $\hbox{\sf HA}{} \vdash\sigma_{_{\sf HA}}({\sf Le}^+)$.
Define a translation $q_\sigma(A,x)$ recursively for a modal proposition $A$ and a $\Sigma_1$-substitution $\sigma$, as follows: \begin{itemize} \item $q_\sigma(A,x):=\sigma_{_{\sf HA}}(A)$, if $A$ is atomic or boxed, \item $q_\sigma(A\wedge B,x):=q_\sigma(A,j_1(x))\wedge q_\sigma(B,j_2(x))$, \item $q_\sigma(A\vee B,x):=(j_1(x)=0\ra q_\sigma(A,j_2(x)))\wedge(j_1(x)\neq 0\ra q_\sigma(B,j_2(x)))$, \item if $A=B\ra C$ and $B\in {\sf NOI}$, we define $q_\sigma(B\ra C,x):=\sigma_{_{\sf HA}}(B)\ra( \{x\}(n_\tinysub{B})\!\!\downarrow \wedge q_\sigma(C,\{x\}(n_\tinysub{B})))$, in which $n_\tinysub{B}$ is as in \Cref{Lemma-Sigma sentences are autoq}. If $B\not\in {\sf NOI}$, then define $q_\sigma(A,x):=\sigma_{_{\sf HA}}(A)$. \end{itemize}
\begin{lemma}\label{Lemma-qtranslation1} Let $A$ be a modal proposition and
$t$ be a term in the first-order language of arithmetic which possibly contains Kleene's brackets. Then \begin{itemize} \item $\hbox{\sf HA}{} _0\vdash x\mathrel{\sf q}\sigma_{_{\sf HA}}(A)\ra q_\sigma(A,x)$, \item $\hbox{\sf HA}{} _0\vdash (t\!\!\downarrow\wedge q_\sigma(A,t))\rightarrow \sigma_{_{\sf HA}}(A)$, \end{itemize} \end{lemma} \begin{proof} Both parts are proved by induction on the complexity of $A$. \end{proof}
For the next lemma, we need some auxiliary notation $\sigma_{_l}(A,x)$. Informally speaking, $\sigma_{_l}(A,x)$ is going to be $\sigma_{_{\sf HA}}(A^l)$ with one difference. The new added boxes in $A^l$
should be interpreted as provability in $\hbox{\sf HA}{} _x$. More precisely, we define it inductively as follows. \begin{itemize} \item $A$ is atomic or boxed. Then $\sigma_{_l}(A,x):=\sigma_{_{\sf HA}}(A)$, \item $A= B\wedge C$. Then $\sigma_{_l}(A,x):=\sigma_{_l}(B,x)\wedge\sigma_{_l}(C,x)$, \item $A= B \vee C$. Then $\sigma_{_l}(A,x):=\Boxdot_x\sigma_{_l}(B,x)\vee\Boxdot_x\sigma_{_l}(C,x)$,
in which $\Boxdot_x D$ is defined as $D\wedge\Box_x D$,
\item $A=B\to C$. Like the definition of $A^l$, we define $\sigma_{_l}(A,x)$ by cases. If $B\in {\sf NOI}$, then we define
$\sigma_{_l}(A,x):=\sigma_{_{\sf HA}}(B)\to\sigma_{_l}(C,x)$, otherwise we define $\sigma_{_l}(A,x):=\sigma_{_{\sf HA}}(A)$. \end{itemize}
\begin{lemma}\label{Lemma-prop-sigma_l} Let $A$ be a modal proposition. Then \begin{enumerate} \item \label{1Lemma-prop-sigma_l}$\hbox{\sf HA}{} _0\vdash (x\leq y \wedge \sigma_{_l}(A,x))\to\sigma_{_l}(A,y)$, \item \label{2Lemma-prop-sigma_l} $\hbox{\sf HA}{} _0\vdash \sigma_{_l}(A,x) \to \sigma_{_{\sf HA}}(A^l)$, \item \label{3Lemma-prop-sigma_l}$\hbox{\sf HA}{} _0\vdash \sigma_{_l}(A,x) \to \sigma_{_{\sf HA}}(A)$. \end{enumerate} \end{lemma} \begin{proof} Use induction on $A$. \end{proof}
\begin{lemma}\label{Lemma-qtranslation2} Let $A$ be a modal proposition, $D$ be any $\Sigma_1$-sentence and
$t$ be a term in the first-order language of arithmetic which possibly contains Kleene's brackets. Then there exists
a provably total recursive function $f$ such that
$$\hbox{\sf HA}{} \vdash \Box_x(D\rightarrow (t\!\!\downarrow\wedge q_\sigma(A,t)))\rightarrow \Box_{f(x)}(D\ra\sigma_{_{l}}(A,f(x)))$$ \end{lemma} \begin{proof} We use induction on $A$. For simplicity of notation, we assume here that $t$ is a normal term. One can easily build the general case.
\noindent{\em Atomic, Boxed or conjunction.} Trivial.
\noindent{\em Disjunction.} Let $A=B\vee C$. Then by definition of $q_\sigma$, we have $$\hbox{\sf HA}{} \vdash\Box_x(D\ra q_\sigma(B\vee C,t))\rightarrow [\Box_x((D\wedge j_1(t)=0)\ra q_\sigma(B,j_2(t)))\wedge\Box_x((D\wedge j_1(t)\neq 0)\ra q_\sigma(C,j_2(t)))]$$ Hence by the induction hypothesis, there exist functions $g$ and $h$ such that \begin{align*} \hbox{\sf HA}{} \vdash & \Box_x(D\ra q_\sigma(B\vee C,t))\rightarrow \\ &\Box_{g(x)}((D\wedge j_1(t)=0)\rightarrow \sigma_{_l}(B,g(x)))\wedge\Box_{h(x)}((D\wedge j_1(t)\neq 0)\rightarrow \sigma_{_l}(C,h(x))) \end{align*} Let $f(x)$ be the maximum of $g(x)$ and $h(x)$. One can use the $\Sigma_1$-completeness of $\hbox{\sf HA}{} _0$ (\Cref{Lemma-bounded Sigma completeness}) and \Cref{Lemma-prop-sigma_l}
to derive $$\hbox{\sf HA}{} \vdash \Box_x(D\ra q_\sigma(B\vee C,t))\rightarrow \Box_{f(x)}(D\ra(\Boxdot_{f(x)} \sigma_{_l}(B,f(x))\vee\Boxdot_{f(x)} \sigma_{_l}(C,f(x))))$$
\noindent{\em Implication.} Assume that $A=B\ra C$. If $B\not\in {\sf NOI}$, by \Cref{Lemma-qtranslation1}, we are done. So assume that $B\in {\sf NOI}$. By definition of $q_\sigma$, there exists some term $t_1$ such that $$\hbox{\sf HA}{} \vdash \Box_x[D\ra q_\sigma(B\ra C,t)]\ra\Box_x[(D\wedge \sigma_{_{\sf HA}}(B))\ra(t_1\!\!\downarrow\wedge q_\sigma(C,t_1))]$$ Since $B\in{\sf NOI}$, $\sigma_{_{\sf HA}}(B)$ is a $\Sigma_1$-formula. Hence by the induction hypothesis, there exists some function $f$ such that $$\hbox{\sf HA}{} \vdash \Box_x(D\ra q_\sigma(A,t))\ra\Box_{f(x)}((D\wedge \sigma_{_{\sf HA}}(B))\rightarrow \sigma_{_l}(C,f(x)))$$ This by definition of $\sigma_{_l}(B\to C,f(x))$, implies the desired result. \end{proof}
\begin{lemma}\label{Lemma-sigma_l translation} For any $\Sigma_1$-substitution $\sigma$ and modal proposition $A$, there exists some provably total recursive function $g$ such that $ \hbox{\sf HA}{} \vdash \Box_x \sigma_{_{\sf HA}}(A)\to\Box_{g(x)}\sigma_{_l}(A,g(x))$. \end{lemma} \begin{proof} Work inside $\hbox{\sf HA}{} $. Assume $\Box_x\sigma_{_{\sf HA}}(A)$. By \Cref{Lemma-upper bound for realizability}, there exists some $y$ such that $${\Box_{f_0(x)} (t\!\!\downarrow\wedge\ t\mathrel{\sf q}\sigma_{_{\sf HA}}( A))}$$
in which $t:=\{y\}\langle\rangle$ and $f_0$ is a provably total recursive function
as stated in \Cref{Lemma-upper bound for realizability}. Hence by the first item of \Cref{Lemma-qtranslation1}, $\Box_{f_0(x)}(t\!\!\downarrow\wedge q_\sigma(A,t))$. Hence by \Cref{Lemma-qtranslation2}, we have the function $f$ such that
$\Box_{f(f_0(x))}\sigma_{_l}(A,f(f_0(x)))$. \end{proof}
\begin{theorem}\label{Theorem-Soundness of HA for lle+} For any $\Sigma_1$-substitution $\sigma$, we have $\hbox{\sf HA}{} \vdash \sigma_{_{\sf HA}}({\sf Le}^+)$. \end{theorem} \begin{proof} Let $A$ be a modal proposition. We must show $\hbox{\sf HA}{} \vdash\Box\sigma_{_{\sf HA}}(A)\ra\Box\sigma_{_{\sf HA}}(A^l)$. Now the desired result may be deduced by \Cref{Lemma-sigma_l translation} and the second item of \Cref{Lemma-prop-sigma_l}. \end{proof} Although there are other ways of proving the above theorem (see \cite{Visser02} or \cite{IemhoffT}), we need its major preliminary lemma (i.e. \Cref{Lemma-sigma_l translation}) in the proof of the completeness theorem. Specially, we use \Cref{Lemma-sigma_l translation} in the proof of \Cref{Lemma-1.7st Properties of Solovay Function}.
\subsection{Interpretability} Let $T$ and $S$ be two first-order theories. Informally speaking, we say that $T$ interprets $S$ ($T\rhd S$) if there exists a translation from the language of $S$ to the language of $T$ such that $T$ proves the translation of all of the theorems of $S$. For a formal definition see \cite{VisserInterpretability}. It is well-known that for recursive theories $T$ and $S$ containing $\hbox{\sf PA}{} $, the assertion $T\rhd S$ is formalizable in first-order language of arithmetic. For two arithmetical sentences $A$ and $B$, we use the notation $A\rhd B$ to mean that $\hbox{\sf PA}{} +A$ interprets $\hbox{\sf PA}{} +B$. The following theorem due to Orey, first appeared in \cite{Feferman}.
\begin{theorem}\label{Theorem-Orey} For recursive theories $T$ and $S$ containing $\hbox{\sf PA}{} $, we have: \[ \hbox{\sf PA}{} \vdash (T\rhd S) \leftrightarrow\forall{x}\, \Box_T {\sf Con}(S^x),\]
in which $S^x$ is the restriction of the theory $S$ to axioms with G\"{o}del number $\leq x$ and ${\sf Con}(U):=\neg\,\Box_U\bot$. \end{theorem}
\begin{proof} See \cite[p.~80]{Feferman} or \cite{Berarducci}. \end{proof}
\noindent\textbf{Convention.} From \Cref{Theorem-Orey}, one can easily observe that $\hbox{\sf PA}{} \vdash {(A\rhd B)}\lr{\forall{x}\,\Box^+(A\ra\neg\Box^+_x\neg B)}$. So from now on, $A\rhd B$ means its $\Pi_2$-equivalent $\forall{x}\,\Box^+(A\ra\neg\Box^+_x\neg B)$, even when we are working in weaker theories like $\hbox{\sf HA}{} $. We remind the reader that $\Box^+$ stands for provability in $\hbox{\sf PA}{} $.
\section{Propositional modal logics}\label{sec-propositional}
In this section, we collect all the required notions with propositional flavour. This section is mostly devoted to providing an axiomatic system for the $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $, i.e. $\sf{iH}_{\sigma}$,
and stating some of its essential properties that we need later in the proof of soundness
(\Cref{Theorem-Soundness})
or completeness (\Cref{Theorem HA-Completeness}) of $\sf{iH}_{\sigma}$ for arithmetical $\Sigma_1$-interpretations.
The following are some of the important results that will be used in the proof of the completeness theorem. \begin{itemize}[leftmargin=*] \item In \Cref{Sec-Aximatizing-TNNIL}, it is shown that the axiomatic system $\sf{iH}_{\sigma}$ is capable of simplifying any modal proposition to an equivalent $\hbox{\sf TNNIL}{} ^-$ proposition (\Cref{corollar HA-NNIL approximation is propositionally equivalent}). This fact is useful for the proof of the completeness theorem (\Cref{Theorem HA-Completeness}). \item In \Cref{sec.tnnil.conservativity}, the $\hbox{\sf TNNIL}{} $-conservativity of the theory $\hbox{\sf LC}{}$
over $\sf{iH}_{\sigma}$ (\Cref{Theorem-TNNIL Conservativity of LC over LLe+}) is proved. This conservativity plays an important role in the proof of the completeness theorem. As long as we work with $\hbox{\sf TNNIL}{} $-formulas, we can get rid of all those complicated axioms of $\sf{iH}_{\sigma}$ and just use the more manageable theory $\hbox{\sf LC}{}$. \item In \Cref{Sec-PropModKripke}, we will prove the finite model property for the theory $\hbox{\sf LC}{}$ (\Cref{Theorem-Propositional Completeness LC}). With the aid of our main theorem in the next section (\Cref{Theorem-Main tool}), such finite counter-models are transformed into first-order counter-models of $\hbox{\sf HA}{} $. \end{itemize}
\subsection{The NNIL formulae and related topics}\label{sec-nnil}
The class of {\em No Nested Implications to the Left}, \hbox{\sf NNIL}{} formulae in a propositional language was introduced in \cite{Visser-Benthem-NNIL}, and more explored in \cite{Visser02}. The crucial result of \cite{Visser02} is providing an algorithm that as input, receives a non-modal proposition $A$ and returns its best \hbox{\sf NNIL}{} approximation $A^*$ from below, i.e., $\hbox{\sf IPC} \vdash A^*\ra A$ and for all \hbox{\sf NNIL}{} formula $B$ such that $\hbox{\sf IPC} \vdash B\ra A$, we have $\hbox{\sf IPC} \vdash B\ra A^*$. Also for all $\Sigma_1$-substitutions $\sigma$, we have $\hbox{\sf HA}{} \vdash \sigma_{_{\sf HA}}(\Box A\leftrightarrow\Box A^*)$ \cite{Visser02}. \begin{itemize}[leftmargin=*] \item In \Cref{subsubsec-NNIL-algorithm}, we state Visser's $\hbox{\sf NNIL}{} $-algorithm for computing $A^*$, and some of its useful properties. \item In \Cref{subsubsec-TNNIL-algorithm}, we explain the extension of this algorithm to the modal language (the $\hbox{\sf TNNIL}{} $-algorithm), which computes $A^+$
and is essentially the same as the $\hbox{\sf NNIL}{} $-algorithm with this extra rule: treat inside $\Box$ as a fresh proposition, i.o.w. in the inductive definition of the algorithm $(\Box A)^+:=\Box A^+$. Then we prove some useful properties of the $\hbox{\sf TNNIL}{} $-algorithm: \Cref{Lemma-TNNIL algorithm preserve theorems of iGL} and \Cref{corollar-NNIL properties}. The best feature of $\hbox{\sf TNNIL}{} $-algorithm is that for all $\Sigma_1$-substitutions $\sigma$, we have $\hbox{\sf HA}{} \vdash \sigma_{_{\sf HA}}(\Box A\leftrightarrow\Box A^+)$ (first part of \Cref{corollar-NNIL properties}). \item In \Cref{subsubsec-TNNIL^-algorith}, we define another algorithm $\hbox{\sf TNNIL}{} ^-$ for computing $A^-$, which is essentially the same as the $\hbox{\sf TNNIL}{} $-algorithm, with this minor difference: Only treat those sub-formulae which are boxed and leave the others. With this minor change, we even have a better feature for $A^-$, i.e., for all $\Sigma_1$-substitutions $\sigma$, we have $\hbox{\sf HA}{} \vdash \sigma_{_{\sf HA}}( A\leftrightarrow A^-)$ (\Cref{Lemma HA-NNIL properties minus}). \end{itemize} Now we define the class $\hbox{\sf NNIL}{} $ of modal propositions precisely by $\hbox{\sf NNIL}{} := \{A\mid \rho A\leq 1\}$, in which the complexity measure $\rho$, is defined inductively as follows: \begin{itemize}[leftmargin=*] \item $\rho(\Box A)=\rho(p)= \rho(\bot)=\rho(\top) = 0$, for an arbitrary atomic variables $p$ and modal proposition $A$, \item $\rho(A\wedge B) = \rho(A\vee B) = \text{max} (\rho A, \rho B)$, \item $\rho(A\ra B) = \text{max} (\rho A +1, \rho B)$, \end{itemize}
In the following, we define another complexity measure $\mathfrak{o}(.)$ on modal propositions. We need this measure for termination of the $\hbox{\sf NNIL}{} $-algorithm. \begin{definition}\label{Definition-non-modal complexity} Let $D$ be a modal proposition. Let \begin{itemize} \item $I(D):=\{E\in {\sf Sub}(D) \mid E \text{ is an implication that is not in the scope of a } \Box\}$.
\item $\mathfrak{i}(D):=\text{\em max}\{|I(E)|\mid E\in I(D)\}$, where $|X|$ is the number of elements of $X$. \item $\mathfrak{c}D:=$ the number of occurrences of logical connectives which are not in the scope of a $\Box$. \item $\mathfrak{d}D:=$ the maximum number of nested boxes. To be more precise, \begin{itemize} \item $\mathfrak{d}D:=0$ for atomic $D$, \item $\mathfrak{d}D:=\text{\em max}\{\mathfrak{d}D_1,\mathfrak{d}D_2\}$, where $D = D_1\circ D_2$ and $\circ\in\{\wedge, \vee, \ra\}$, \item $\mathfrak{d}\Box D:=\mathfrak{d}D+1$, \end{itemize} \item $\mathfrak{o}D:=(\mathfrak{d}D,\mathfrak{i}D,\mathfrak{c}D)$. \end{itemize} We order the measures $\mathfrak{o}D$ lexicographically, i.e., $(d,i,c)<(d',i',c')$ iff $d<d'$ or $d=d' , i<i'$ or $d=d', i=i' , c<c'$. \end{definition} For definition of $\hbox{\sf NNIL}{} $-algorithm, we use the bracket notation $[A]B$ from \cite{Visser02}. We also use a variant of this notation, $[A]'B$: \begin{definition}\label{definition-braket} For any two modal propositions $A$ and $B$, we define $[A]B$ and $[A]'B$
by induction on the complexity of $B$: \begin{itemize} \item $[A]B = [A]'B =B$, for atomic or boxed $B$, \item $[A](B_1\circ B_2) = [A](B_1)\circ [A](B_2)$, $[A]'(B_1\circ B_2) =[A]'(B_1)\circ [A]'(B_2)$ for $\circ\in\{\vee,\wedge\}$, \item $[A](B_1\ra B_2) = A\rightarrow (B_1\ra B_2)$, $[A]'(B_1\ra B_2)=A'\to( B_1\ra B_2)$, in which $A' = {A[B_1\ra B_2\mid B_2]}$, i.e., replace each outer occurrence of $B_1 \ra B_2$ (by outer occurrence we mean that it is not in the scope of any $\Box$) in $A$ by $B_2$,
\end{itemize} For a set $X$ of modal propositions, we also define $[A]X:=\bigvee_{B\in X}[A]B$ and $[A]'X:=\bigvee_{B\in X}[A]'B$. \end{definition} \begin{remark}\label{Remark0} It is easy to observe that $[A]B$ and $[A]'B$ are equivalent in $\hbox{\sf IPC} _\Box$. \end{remark}
\subsubsection{The $\hbox{\sf NNIL}{} $-algorithm}\label{subsubsec-NNIL-algorithm} For each modal proposition $A$, the proposition $A^*$ is defined by induction on $\mathfrak{o}A$ as follows \cite{Visser02}: \begin{enumerate}[leftmargin=*] \item $A$ is atomic or boxed, take $A^*:=A$.
\item $ A=B \wedge C$, take $A^*:=B^*\wedge C^*$. \item $ A=B\vee C$, take $A^*:=B^*\vee C^*$. \item $ A=B \ra C $, we have several sub-cases. In the following, an occurrence of $E$ in $D$ is called an {\em outer occurrence}, if $E$ is neither in the scope of an implication nor in the scope of a boxed formula. \begin{enumerate}[leftmargin=*] \item $C$ contains an outer occurrence of a conjunction. In this case, there is some formula $J(q)$ such that \begin{itemize} \item $q$ is a propositional variable not occurring in $A$. \item $q$ is outer in $J$ and occurs exactly once.
\item $C=J[q|(D\wedge E)]$. \end{itemize}
Now set $C_1:=J[q|D], C_2:=J[q|E]$ and $A_1:=B\ra C_1, A_2:=B\ra C_2$ and finally, define $A^*:=A_1^*\wedge A_2^*$. \item $B$ contains an outer occurrence of a disjunction. In this case, there is some formula $J(q)$ such that \begin{itemize} \item $q$ is a propositional variable not occurring in $A$. \item $q$ is outer in $J$ and occurs exactly once.
\item $B=J[q|(D\vee E)]$. \end{itemize}
Now set $B_1:=J[q|D], B_2:=J[q|E]$ and ${A_1:=B_1\ra C}, {A_2:=B_2\ra C}$ and finally, define $A^*:=A_1^*\wedge A_2^*$. \item $B=\bigwedge X$ and $C=\bigvee Y$ and $X,Y$ are sets of implications or atoms. We have several sub-cases: \begin{enumerate}[leftmargin=*] \item $X$ contains atomic variables or boxed formula $E$. We set $D:=\bigwedge(X\setminus\{E\})$ and take
${A^*:=E^*\ra(D\ra C)^*}$. \item $X$ contains $\top$. Define $D:=\bigwedge(X\setminus\{\top\})$ and take $A^*:=(D\ra C)^*$. \item $X$ contains $\bot$. Take $A^*:=\top$. \item $X$ contains only implications. For any $D=E\ra F\in X$, define $$B\!\downarrow\! D:=\bigwedge((X\setminus\{D\})\cup\{F\}).$$ Let $Z:=\{E\mid E\ra F\in X\}\cup\{C\}$ and define:
\begin{align*}
A^*:=\bigwedge\{((B\!\downarrow\! D)\ra C)^*|D\in X\}\wedge \bigvee \{([B]'E)^*\mid E\in Z\}
\end{align*} We should show $\mathfrak{o}([B]'E)<\mathfrak{o}A$. For a proof of this fact see \cite{Visser02}.
\end{enumerate} \end{enumerate} \end{enumerate}
\begin{remark}\label{rem1} {\em In fact in \cite{Visser02}, the $\hbox{\sf NNIL}{} $-algorithm is only for non-modal propositions. One may also compute the best $\hbox{\sf NNIL}{} $-approximation for modal propositions, in the following way. Let $A$ be a given modal proposition.
Let $B_1,\ldots,B_n$ be all boxed sub-formulae of $A$ which are not in the scope of any other boxes. Let $A'(p_1,\ldots,p_n)$ be the unique non-modal proposition such that $\{p_i\}_{1\leq i\leq n}$ are fresh atomic variables not occurring in $A$ and $A=A'[p_1|B_1,\ldots,p_n|B_n]$. Let $\gamma(A):=(A')^*[p_1|B_1,\ldots,p_n|B_n]$. Then it is easy to observe that $\hbox{\sf IPC} _{\Box}\vdash\gamma(A)\lr A^*$.} \end{remark}
The above defined algorithm is not deterministic; however, from the following theorem we know that $A^*$ is unique up to $\hbox{\sf IPC} _{\Box}$ equivalence. The notation $A\vartriangleright_{_{{\sf IPC}_{\Box},{\sf NNIL}}}B$ ($A$ $\hbox{\sf NNIL}{} $-preserves $B$) from \cite{Visser02} means that for each $\hbox{\sf NNIL}{} $ modal proposition $C$, if $\hbox{\sf IPC} _{\Box}\vdash C\ra A$, then $\hbox{\sf IPC} _{\Box}\vdash C\ra B$, in which $A, B$ are modal propositions.
\begin{theorem}\label{Theorem-NNIL Crucial Properties} For each modal proposition $A$, \begin{enumerate} \item \label{1Theorem-NNIL Crucial Properties}The $\hbox{\sf NNIL}{} $ algorithm with input $A$ terminates and the output formula $A^*$, is an $\hbox{\sf NNIL}{} $ proposition such that $\hbox{\sf IPC} _{\Box}\vdash A^*\ra A$. \item \label{2Theorem-NNIL Crucial Properties} $\hbox{\sf IPC} _{\Box}\vdash A^*\ra B$ iff $A\vartriangleright_{_{{\sf IPC}_{\Box},{\sf NNIL}}}B$. \item \label{3Theorem-NNIL Crucial Properties} $A^*$ is the best $\hbox{\sf NNIL}{} $ approximation of $A$ from below i.e. $\hbox{\sf IPC} _{\Box}\vdash A^*\ra A$ and for each $\hbox{\sf NNIL}{} $ proposition $B$, with $\hbox{\sf IPC} _{\Box}\vdash B\ra A$, we have $\hbox{\sf IPC} _{\Box}\vdash B\ra A^*$. \item \label{4Theorem-NNIL Crucial Properties}$\hbox{\sf IPC} _{\Box}\vdash A_1\ra A_2$ implies $\hbox{\sf IPC} _{\Box}\vdash A_1^*\ra A_2^*$. \item \label{5Theorem-NNIL Crucial Properties}$\hbox{\sf IPC} _{\Box}\vdash A\lr B$ implies $\hbox{\sf IPC} _{\Box}\vdash A^*\lr B^*$. \item \label{6Theorem-NNIL Crucial Properties}For each $\Sigma_1$-substitution $\sigma$, $\hbox{\sf HA}{} \vdash\Box\sigma_{_{\sf HA}}(A)\lr\Box\sigma_{_{\sf HA}}(A^*)$. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate} \item Direct consequence of \cite[Theorem~7.1]{Visser02}. First assume $A'[p_1,\ldots,p_n]$ be as in \Cref{rem1}. By \cite[Theorem~7.1]{Visser02}, we have $\hbox{\sf IPC} \vdash (A')^*\ra A'$, and hence by \Cref{Lemma-boxed as atomic},
$\hbox{\sf IPC} _{\Box}\vdash (A')^*[p_1|B_1,\ldots,p_n|B_n]\ra A$. \item Direct consequence of \cite[Theorem~7.2]{Visser02}. First suppose that $\hbox{\sf IPC} _{\Box}\vdash A^*\ra B$. Let $A', B'$ be non-modal propositions as defined in \Cref{rem1}, i.e,
$A=A'[p_1|C_1,\ldots,p_n|C_n], B=B'[p_1|C_1,\ldots,p_n|C_n]$. Then by \Cref{Lemma-boxed as atomic}, $\hbox{\sf IPC} \vdash(A')^*\ra B'$. Now by \cite[Theorem~7.2]{Visser02}, we have $A'\vartriangleright_{\hbox{\sf IPC} ,\hbox{\sf NNIL}{} }B'$, and then by \Cref{Lemma-boxed as atomic}, $A\vartriangleright_{\hbox{\sf IPC} _{\Box},\hbox{\sf NNIL}{} }B$. For the proof of the other way around, note that all of the previous deductions are reversible. \item Suppose $\hbox{\sf IPC} _{\Box}\vdash B\ra A$ and $B$ is $\hbox{\sf NNIL}{} $. Since $\hbox{\sf IPC} _{\Box}\vdash A^*\ra A^*$, from \cref{2Theorem-NNIL Crucial Properties} above, we get $A\vartriangleright_{\hbox{\sf IPC} _{\Box},\hbox{\sf NNIL}{} }A^*$. By $\hbox{\sf IPC} _{\Box}\vdash B\ra A$ and $B\in\hbox{\sf NNIL}{} $, we have $\hbox{\sf IPC} _{\Box}\vdash B\ra A^*$. \item Suppose that $\hbox{\sf IPC} _{\Box}\vdash A_1\ra A_2$. By part 1, $\hbox{\sf IPC} _{\Box}\vdash A_1^*\ra A_2$ and hence by \cref{3Theorem-NNIL Crucial Properties}, ${\hbox{\sf IPC} _{\Box}\vdash A_1^*\ra A_2^*}$. \item Direct consequence of \cref{4Theorem-NNIL Crucial Properties}. \item First suppose that $A$ is a non-modal proposition. Combining Theorem 10.2 and Corollary 7.2 from \cite{Visser02}, implies that $\hbox{\sf IPC} \vdash A^*\ra B$ iff $A\mid\!\sim^{\sf HA}_{{\sf HA},\Sigma}B$, in which $A\mid\!\sim^{{\sf HA}}_{{\sf HA},\Sigma}B$ means that for each $\Sigma_1$-substitution $\sigma$, we have $\hbox{\sf HA}{} \vdash\Box\sigma_{_{\sf HA}}(A)\ra\Box\sigma_{_{\sf HA}}(B)$. This implies that ${\hbox{\sf HA}{} \vdash\Box\sigma_{_{\sf HA}}(A)\lr\Box\sigma_{_{\sf HA}}(A^*)}$. Now for a modal proposition $A$, suppose that $A'(p_1,\ldots,p_n)$ and
$B_1,\ldots,B_n$ be such that $A=A'[p_1|B_1,\ldots,p_n|B_n]$, in which $A'$ is a non-modal proposition and $p_1,\ldots,p_n $ are fresh atomic variables (not occurred in $A$). Let $\sigma'$ be the substitution defined by $\sigma'(p_i):=\sigma_{_{\sf HA}}(B_i)$, for each $1\leq i\leq n$, and for any other atomic variable $q$, $\sigma'(q) = \sigma(q)$. Clearly, $\sigma'$ is again a $\Sigma_1$-substitution and hence we have $\hbox{\sf HA}{} \vdash\Box\sigma'_{_{\sf HA}}(A')\lr\Box\sigma'_{_{\sf HA}}((A')^*)$. This implies ${\hbox{\sf HA}{} \vdash\Box\sigma_{_{\sf HA}}(A)\lr\Box\sigma_{_{\sf HA}}(A^*)}$. \end{enumerate} \end{proof} \subsubsection{The $\hbox{\sf TNNIL}{} $-algorithm}\label{subsubsec-TNNIL-algorithm}
\begin{definition}\label{Def-TNNIL-Propositions} $\hbox{\sf TNNIL}{} $ (Thoroughly $\hbox{\sf NNIL}{} $) is the smallest class of propositions such that \begin{itemize} \item $\hbox{\sf TNNIL}{} $ contains all atomic propositions, \item if $A, B\in\hbox{\sf TNNIL}{} $, then $A\vee B, A\wedge B,\Box A\in\hbox{\sf TNNIL}{} $, \item if all $\ra$ occurring in $A$ are contained in the scope of a $\Box$ (or equivalently $A\in {\sf NOI}$) and $A, B\in\hbox{\sf TNNIL}{} $, then $A\ra B\in\hbox{\sf TNNIL}{} $. \end{itemize} Let $\hbox{\sf TNNIL}{} ^-$ denote the set of all propositions of the form $ A(\Box B_1,\ldots,\Box B_n)$, such that $A(p_1,\ldots,p_n)$ is an arbitrary non-modal proposition and $B_1,\ldots,B_n\in\hbox{\sf TNNIL}{} $. \end{definition}
Here we define $A^+$ to be the $\hbox{\sf TNNIL}{} $-formula approximating $A$. The major difference between $A^+$ and $A^*$ is that $\hbox{\sf IPC} _\Box\vdash A^+\to A$ may not hold any more. Informally speaking, to find $A^+$, we first compute $A^*$ and then replace all outer boxed formula $\Box B$ in $A$ by $\Box B^+$. To be more accurate, we define $A^+$ by induction on $\mathfrak{d}A$. Suppose that for all $B$ with
$\mathfrak{d}B<\mathfrak{d}A$, we have defined $B^+$. Now suppose that $A'(p_1,\ldots,p_n)$ and $\Box B_1,\ldots,\Box B_n$ are such that $A=A'[p_1|\Box B_1,\ldots,p_n|\Box B_n]$, where $A'$ is a non-modal proposition and $p_1,\ldots,p_n $ are fresh atomic variables (not occurred in $A$). It is clear that $\mathfrak{d}B_i<\mathfrak{d}A$ and then we can define
$A^+:=(A')^*[p_1|\Box B_1^+,\ldots,p_n|\Box B_n^+]$.
\begin{lemma}\label{Lemma-TNNIL algorithm preserve theorems of iGL} For every modal proposition $A$, \begin{enumerate} \item \label{item1-Lemma-TNNIL algorithm preserve theorems of iGL} If ${\sf iGL}\vdash A$ then ${\sf iGL}\vdash A^+$. \item \label{item2-Lemma-TNNIL algorithm preserve theorems of iGL} If ${\sf iK4}\vdash A$ then ${\sf iK4}\vdash A^+$. \end{enumerate} \end{lemma} \begin{proof} We prove the first part by induction on the complexity of proof ${\sf iGL}\vdash A$. Proof of the second part is similar to the first one. \begin{itemize}[leftmargin=*] \item $A$ is an axiom. \begin{itemize}[leftmargin=*] \item $A$ is L\"{o}b's axiom, i.e., $A=\Box(\Box B\ra B)\ra\Box B$. Then $A^+=\Box(\Box B^+\ra B^+)\ra\Box B^+$, that is valid also in ${\sf iGL}$. \item $A=\Box B\ra\Box\Box B$. Then $A^+=\Box B^+\ra\Box\Box B^+$, that is valid in ${\sf iGL}$. \item $A=(\Box(B\ra C)\wedge\Box B)\ra\Box C$. Then $ A^+=(\Box (B\ra C)^+\wedge\Box B^+)\ra\Box C^+$. On the other hand, $\hbox{\sf IPC} _{\Box}\vdash (B\wedge(B\ra C))\ra C$ and hence $\hbox{\sf IPC} _{\Box}\vdash (B\wedge(B\ra C))^*\ra C^*$, by \Cref{Theorem-NNIL Crucial Properties} \cref{4Theorem-NNIL Crucial Properties}. Now we can infer $\hbox{\sf IPC} _{\Box}\vdash( B^+\wedge (B\ra C)^+)\ra C^+$, by definition of $\hbox{\sf TNNIL}{} $-algorithm and \Cref{Lemma-boxed as atomic}. Finally, by the necessitation rule in ${\sf iGL}$, we have ${{\sf iGL}\vdash(\Box B^+\wedge\Box (B\ra C)^+)\rightarrow \Box C^+}$. \end{itemize} \item $A$ is a theorem of $\hbox{\sf IPC} _{\Box}$. Then $\hbox{\sf IPC} _{\Box}\vdash A^+$, by \Cref{Theorem-NNIL Crucial Properties} \cref{5Theorem-NNIL Crucial Properties} and
\Cref{Lemma-boxed as atomic}. \item $A = \Box B$ and $A$ is derived by applying the necessitation rule. Let ${\sf iGL}\vdash B$. By induction hypothesis, ${\sf iGL}\vdash B^+$ and then ${\sf iGL}\vdash\Box B^+$. \item $A$ is derived by modus ponens. Let ${\sf iGL}\vdash B$ and ${\sf iGL}\vdash B\ra A$. From these, we have ${\sf iGL}\vdash B^+\wedge(B\ra A)^+$ and then ${\sf iGL}\vdash(B\wedge(B\ra A))^+$. Since $\hbox{\sf IPC} _{\Box}\vdash (B\wedge(B\ra A))\ra A$, then by
\Cref{Theorem-NNIL Crucial Properties} \cref{4Theorem-NNIL Crucial Properties} we have $\hbox{\sf IPC} _{\Box}\vdash (B\wedge(B\ra A))^*\ra A^*$. Then by \Cref{Lemma-boxed as atomic}, $\hbox{\sf IPC} _{\Box}\vdash (B\wedge(B\ra A))^+\ra A^+$ and hence ${\sf iGL}\vdash A^+$ as desired. \end{itemize} \end{proof}
\begin{corollary}\label{corollar-NNIL properties} For any modal proposition $A$, \begin{enumerate} \item \label{1corollar-NNIL properties} For all $\Sigma_1$-substitution $\sigma$ we have $\hbox{\sf HA}{} \vdash\Box\sigma_{_{\sf HA}}(A)\lr\Box\sigma_{_{\sf HA}}(A^+)$ and hence $\hbox{\sf HA}{} \vdash\sigma_{_{\sf HA}}(A)$
iff $\hbox{\sf HA}{} \vdash\sigma_{_{\sf HA}}(A^+) $, \item \label{2corollar-NNIL properties}${\sf iGL}\vdash A_1\ra A_2$ implies ${\sf iGL}\vdash A_1^+\ra A_2^+$, and ${\sf iK4}\vdash A_1\ra A_2$ implies ${\sf iK4}\vdash A_1^+\ra A_2^+$, \item \label{3corollar-NNIL properties}${\sf iGL}\vdash A_1\lr A_2$ implies ${\sf iGL}\vdash A_1^+\lr A_2^+$, and ${\sf iK4}\vdash A_1\lr A_2$ implies ${\sf iK4}\vdash A_1^+\lr A_2^+$. \end{enumerate} \end{corollary} \begin{proof} The first assertion can be deduced simply by induction on $\mathfrak{d}A$ and using \Cref{Theorem-NNIL Crucial Properties} \cref{6Theorem-NNIL Crucial Properties}.
To prove the second part, first note that by \Cref{Theorem-NNIL Crucial Properties} \cref{4Theorem-NNIL Crucial Properties}, if $\hbox{\sf IPC} _{\Box}\vdash A_1\ra A_2$, then $\hbox{\sf IPC} _{\Box}\vdash A_1^*\ra A_2^*$. By \Cref{Lemma-boxed as atomic}, we can replace each outer occurrence of boxed formulae by arbitrary propositions, in particular, by their $\hbox{\sf TNNIL}{} $ approximations. We must take care that these replacements substitute equal propositions by equal approximations and unequal propositions by unequal ones.
Then by definition of $A_i^+$, we have $\hbox{\sf IPC} _{\Box}\vdash A_1^+\ra A_2^+$.
Now suppose that ${\sf iGL}\vdash A_1\ra A_2$ (${\sf iK4}\vdash A_1\ra A_2$). Let $A=A_1\to A_2$. This implies $\hbox{\sf IPC} _{\Box}\vdash (A\wedge A_1 )\ra A_2$. Then $\hbox{\sf IPC} _{\Box}\vdash (A\wedge A_1)^+\rightarrow A_2^+$, and hence by $\hbox{\sf TNNIL}{} $-algorithm, $\hbox{\sf IPC} _{\Box}\vdash (A^+\wedge A_1^+)\ra A_2^+$. This implies $\hbox{\sf IPC} _{\Box}+A^+\vdash A_1^+\ra A_2^+ $ and by \Cref{Lemma-TNNIL algorithm preserve theorems of iGL}, ${\sf iGL}\vdash A_1^+\ra A_2^+$ (${\sf iK4}\vdash A_1^+\ra A_2^+$).
Proof of the third part is a direct consequence of the second part. \end{proof}
\subsubsection{The $\hbox{\sf TNNIL}{} ^-$-algorithm}\label{subsubsec-TNNIL^-algorith} \begin{corollary}\label{corollar HA-NNIL properties} There exists a $\hbox{\sf TNNIL}{} ^-$-algorithm such that for any modal proposition $A$, it halts and produces a proposition $A^-\in\hbox{\sf TNNIL}{} ^-$ such that $\hbox{\sf IPC} _\Box\vdash A^+\ra A^-$. \end{corollary} \begin{proof} Let $A:=B(\Box C_1,\ldots,\Box C_n)$, where $B(p_1,\ldots,p_n)$ is non-modal. Clearly such $B$ exists. Then define $A^-:=B(\Box C_1^+,\ldots,\Box C_n^+)$. Now the definition of $A^+$ implies $A^+=(A^-)^*$ and
hence \Cref{Theorem-NNIL Crucial Properties} \cref{1Theorem-NNIL Crucial Properties}
implies that $A^-$ has the desired property. \end{proof}
\begin{lemma}\label{Lemma HA-NNIL properties minus} For each modal proposition $A$ and $\Sigma_1$-substitution $\sigma$, $\hbox{\sf HA}{} \vdash \sigma_{_{\sf HA}}A\lr\sigma_{_{\sf HA}}A^-$. \end{lemma} \begin{proof} Use definition of $(.)^-$ and \Cref{corollar-NNIL properties} \cref{1corollar-NNIL properties}. \end{proof}
\begin{remark} {\em Note that $\hbox{\sf LC}{}\vdash A\lr B$ does not imply $\hbox{\sf LC}{}\vdash A^+\lr B^+$. A counterexample is $A:=\neg\neg p$ and $B:=\neg\Boxdot(\neg p)$. We have $A^+=A^*=p$ and $ B^+=(\Box\neg p\ra p)$. Now one can use Kripke models to show $\hbox{\sf LC}{}\nvdash (\Box\neg p\ra p)\to p$.} \end{remark}
\begin{remark}\label{Remark-New def for TNNIL algorithm}{\em In the algorithm produced for $\hbox{\sf NNIL}{} $, let's change the step (1) in this way (and use new symbol $(.)^\dag$ instead of $(.)^*$) \begin{enumerate} \item $A^\dag:= A$ for atomic $A$, and $(\Box B)^\dag:=\Box B^\dag$, \end{enumerate} Then the new algorithm also halts, and for any modal proposition $A$, we have ${\sf iK4}\vdash A^\dag\lr A^+$.} \end{remark}
\subsection{The Box Translation}\label{sec.box.translation}
The following definition of the box-translation is essentially from \cite[Definition~4.1]{Visser82}. The box-translation extends the well-known G\"odel-McKinsey-Tarski translation. In this subsection, we prove that ${\sf iGL}$ is closed under the box-translation (\Cref{Proposition-propositional properties of Box translation}).
\begin{definition}\label{Definition-Box translation} For every proposition $A$ in the modal propositional language, we associate a proposition $A^\Box$, called the box-translation of $A$, in the following way: \begin{itemize} \item $A^\Box:= A\wedge\Box A$, for atomic $A$, \item $(A\circ B)^\Box:=A^\Box\circ B^\Box$, for $\circ\in\{\vee,\wedge\}$, \item $(A\ra B)^\Box:=(A^\Box\ra B^\Box)\wedge\Box(A^\Box\ra B^\Box)$, \item $(\Box A)^\Box:=\Box(A^\Box)$. \end{itemize}
\end{definition}
\begin{lemma}\label{Lemma-Box-translation-prop-0} For any modal proposition $A$, we have ${\sf iK4}\vdash A^\Box\to\bo A^\bo$. \end{lemma} \begin{proof} Easy induction over the complexity of $A$. \end{proof}
\noindent In the following lemma we state some properties of $\Boxdot$.
\begin{lemma}\label{Lemma-Properties of boxdot} For any modal proposition $A$, the following propositions are provable in ${\sf iK4}$: \begin{enumerate}
\item \label{1Lemma-Properties of boxdot}$\Box\Boxdot A\lr\Box A\leftrightarrow\Boxdot \Box A$,
\item \label{2Lemma-Properties of boxdot}$\Boxdot A^\Box\lr A^\Box$.
\end{enumerate} \end{lemma} \begin{proof} The first part is easily deduced in ${\sf iK4}$. For the second part use \Cref{Lemma-Box-translation-prop-0}. \end{proof}
We say that a modal theory $T$ is {\em closed under box-translation} if for every proposition $A$, $T\vdash A$ implies $T\vdash A^\Box$.
\begin{proposition}\label{Proposition-propositional properties of Box translation} The theory ${\sf iGL}$ is closed under the box-translation. \end{proposition} \begin{proof} The proof can be carried out in three steps: \begin{enumerate}[leftmargin=*] \item For any proposition $A$ first we show that $\hbox{\sf IPC} _\Box\vdash A$ implies ${\sf iK4}\vdash A^\Box$. This can be done by a routine induction on the length of the proof in $\hbox{\sf IPC} $. Note that for any axiom $A$ of $\hbox{\sf IPC} $, we have ${\sf iK4}\vdash A^\Box$. As for the rule of modus ponens, suppose that $\hbox{\sf IPC} _\Box\vdash A$ and $\hbox{\sf IPC} _\Box\vdash A\ra B$. By induction hypothesis, then ${\sf iK4}\vdash A^\Box$ and ${\sf iK4}\vdash(A^\Box\ra B^\Box)\wedge\Box(A^\Box\ra B^\Box)$ and so ${\sf iK4}\vdash B^\Box$. \item Next observe that $$(\Box A\ra\Box\Box A)^\Box=\Box A^\Box\ra\Box\Box A^\Box$$ and also $${\sf iK4}\vdash[(\Box(A\ra B)\wedge \Box A)\ra\Box B]^\Box\leftrightarrow[(\Box(A^\Box\ra B^\Box)\wedge \Box A^\Box)\ra\Box B^\Box]$$ \item Observe that the box translation of an instance of L\"ob's axiom $\hbox{\sf L}{} $, is also an instance of $\hbox{\sf L}{} $. \end{enumerate} \end{proof}
\subsection{Axiomatizing the $\hbox{\sf TNNIL}{} $-algorithm}\label{Sec-Aximatizing-TNNIL}
In this subsection we present axioms which we need for the $\hbox{\sf TNNIL}{} ^-$-algorithm $(.)^-$. More precisely, we will find some axiom set $X$ such that ${X\vdash A^-\lr A}$.
To do that, we use some relation $\brt$ on modal propositions. A variant of this relation for the non-modal case first appeared in \cite{Visser02}.
The relation $\brt$ is defined to be the smallest relation on modal propositions satisfying the following conditions: \begin{itemize}
\item[A1.] If ${\sf iK4}\vdash A\ra B$, then $A\brt B$,
\item[A2.] If $A\brt B$ and $B\brt C$, then $A\brt C$,
\item[A3.] If $C\brt A$ and $C\brt B$, then $C\brt A\wedge B$,
\item[A4.] If $A\brt B$, then $\bo A\brt \bo B$,
\item[B1.] If $A\brt C$ and $B\brt C$, then $A\vee B\brt C$,
\item[B2.] Let $X$ be a set of implications, $B:=\bigwedge X$ and $A:=B\ra C$.
Also assume that $Z:={\{E | E\ra F\in X\}}\cup \{C\}$. Then $A\brt [B]Z $,
\item[B3.] If $A\brt B$, then for any atomic or boxed $C$ we have $C\ra A\brt C\ra B$.
\end{itemize} \begin{remark}\label{Remark1} Let $A$, $B$ and $Z$ be as in {\em B2}. Then the relation $\brt$ has the following additional property: $$A\brt [B]'Z$$ The reason goes as follows. First observe, by induction on $E$ and using A1-A3, that $[B]E\brt [B]'E$. That by use of A1-A3 and B1, implies that $[B]Z\brt[B]'Z$. Hence by A2 and B2, we have $A\brt [B]'Z$.
\end{remark} The notation $A\blrt B$ means $A\brt B$ and $B\brt A$. Let us define the theory
$$\sf{iH}_{\sigma}:=\hbox{\sf{LLe}}{}^++\hbox{\sf CP}{} _{a}+\{\Box A\ra\Box B| A\brt B\}$$
Note that by A1, the relation $\brt$ contains all the pairs $(A,B)$ such that ${\sf iK4}\vdash A\to B$. But
it is worth mentioning that the inclusion is strict. The axiom which makes $\brt$ a strict superset of
$\{(A,B):{\sf iK4}\vdash A\to B\}$ is B2, i.e. in the absence of B2, the relation $\brt$ is the same as provable implications
in ${\sf iK4}$.
However, with B2 the story is different, e.g. one can observe that $\neg\neg p\brt p$, for any atomic $p$, holds while
${\sf iK4}\nvdash \neg\neg p\to p$.
\noindent\textbf{Notation.} In the rest of the paper, we use $A\equiv B$ as a shorthand for ${\sf iK4}\vdash A\lr B$.
\noindent The following theorem shows that A1-A4 and B1-B3 axiomatize the $\hbox{\sf TNNIL}{} $ algorithm: \begin{theorem}\label{Theorem HA-NNIL approximation is propositionally equivalent} For any modal proposition $A$, we have $A\blrt A^+$. \end{theorem} \begin{proof} We prove the desired result by induction on $\mathfrak{o}(A)$. Suppose we have the desired result for each proposition $B$ with $\mathfrak{o}(B)<\mathfrak{o}(A)$. We treat $A$ by the following cases.
\begin{enumerate}[leftmargin=*] \item (A1) $A$ is atomic. Then $A^+=A$ by definition, and the result holds trivially. \item (A1-A4, B1) $ A=\Box B , A=B\wedge C, A=B\vee C$. All these cases hold by induction hypothesis. In boxed case, we use induction hypothesis and A4. In conjunction, we use A1-A3 and in disjunction we use A1, A2 and B1. \item $ A=B \ra C $. There are several sub-cases. Similar to the definition of the $\hbox{\sf NNIL}{} $-algorithm, an occurrence of a sub-formula $B$ of $A$ is said to be an {\em outer occurrence} in $A$, if it is neither in the scope of a $\Box$ nor in the scope of $\ra$. \begin{enumerate}[leftmargin=*] \item (A1-A3) $C$ contains an outer occurrence of a conjunction. We can treat this case using induction hypothesis and $\hbox{\sf TNNIL}{} $-algorithm. \item (A1-A3) $B$ contains an outer occurrence of a disjunction. We can treat this case by induction hypothesis and $\hbox{\sf TNNIL}{} $-algorithm. \item $B=\bigwedge X$ and $C=\bigvee Y$, where $X$ and $Y$ are sets of implications, atoms and boxed formulae. We have several sub-cases: \begin{enumerate}[leftmargin=*] \item (A1, A2, B3) $X$ contains atomic variables. Let $p$ be an atomic variable in $X$. Set $D:=\bigwedge(X\setminus\{p\})$. Then $A^+ \equiv p\to (D\to C)^+$. On the other hand, we have by induction hypothesis and A2 and B3, that $p\rightarrow (D\ra C)^+\blrt p\ra(D\ra C)$. Finally by A1 and A2, we have $A^+\blrt A$. \item (A1, A2, B3) $X$ contains boxed formula. Similar to the previous case. \item (A1, A2) $X$ contains $\top$ or $\bot$. Trivial. \item (A1-A3, B1-B3) $X$ contains only implications. This case needs the axiom B2 and it seems to be the interesting case. we have: $$A^+\equiv \bigwedge\left\lbrace\left(B\!\downarrow\! D\ra C\right)^+ \mid D\in X\right\rbrace\wedge\bigvee\{([B]'E)^+:E\in Z\} \quad$$
By the argument in \cite{Visser02}, we have $\mathfrak{o}\left(B\!\downarrow\! D\ra C\right)<\mathfrak{o}(A)$ and ${\mathfrak{o}([B]'E)<\mathfrak{o}(A)}$ and hence one can apply induction hypothesis on $B\!\downarrow\! D\ra C$ and $[B]'E$. Then by induction hypothesis, A1-A3, B1 and B3, we have: \begin{align*} A^+&\brt\!\blt \bigwedge\left\lbrace B\!\downarrow\! D\ra C \mid D\in X\right\rbrace\wedge [B]'Z \end{align*}
First we show that for each $E\in Z$, \begin{equation}\label{Eq-100} {\sf iK4}\vdash\left(\bigwedge\{(B\!\downarrow\! D)\ra C\mid D\in X\}\wedge [B]'E\right)\ra A \end{equation} Since $[B]E$ and $[B]'E$ are $\hbox{\sf IPC} _\Box$-equivalent (\Cref{Remark0}), it is enough to show that \begin{equation}\label{Eq-101} {\sf iK4}\vdash\left(\bigwedge\{(B\!\downarrow\! D)\ra C\mid D\in X\}\wedge [B]E\right)\ra A \end{equation} If $E=C$, we are done by $\hbox{\sf IPC} _\Box\vdash[B]C\ra(B\ra C)$. So let $E$ be the antecedent of some $E\ra F\in X$. We reason in ${\sf iK4}$. Assume $\bigwedge\{(B\!\downarrow\! D)\ra C\mid D\in X\}$, $[B]E$ and $B$ as hypotheses. We want to derive $C$. From $B$ and $[B]E$, we derive $E$. Also from $B$, we derive $E\ra F$, and so $F$. Hence we have $\bigwedge(X\setminus\{E\ra F\})\wedge F$, which implies $C$, as desired.
Now \cref{Eq-101} by use of A1 and A2 implies $A^+\brt A$.
To show the other way around, i.e. $A\brt A^+$, we first show \begin{equation}\label{Eq-102}
A\brt \bigwedge\left\lbrace B\!\downarrow\! D\ra C \mid D\in X\right\rbrace\wedge[B]'Z \end{equation}
and then by use of the induction hypothesis and A2, we can deduce $A\brt A^+$, as desired. So it remains to show that \cref{Eq-102} holds. We have $\hbox{\sf IPC} _\bo\vdash A\rightarrow \bigwedge\left\lbrace B\!\downarrow\! D\ra C \mid D\in X\right\rbrace$, and hence by A1, $A\brt \bigwedge\left\lbrace B\!\downarrow\! D\ra C \mid D\in X\right\rbrace $. On the other hand, by \Cref{Remark1}, we have $A\brt [B]'Z$. Now A3 implies \cref{Eq-102}, as desired. \end{enumerate} \end{enumerate} \end{enumerate} \end{proof}
\begin{corollary}\label{corollar HA-NNIL approximation is propositionally equivalent} $\sf{iH}_{\sigma}\vdash A^-\lr A$. \end{corollary} \begin{proof} Let $A=B(\Box C_1,\Box C_2,\ldots,\Box C_n)$ where $ B(p_1,\ldots,p_n)$ is a non-modal proposition.
By definition of $A^-$, we have $A^-=B(\Box C_1^+,\ldots,\Box C_n^+)$.
Then \Cref{Theorem HA-NNIL approximation is propositionally equivalent} implies that ${\sf{iH}_{\sigma}\vdash \Box (C_i)^+\leftrightarrow\Box C_i}$. Hence $\sf{iH}_{\sigma}\vdash A^-\lr A$, as desired. \end{proof}
\subsection{$\hbox{\sf TNNIL}{} $-Conservativity of $\hbox{\sf LC}{}$ over $\hbox{\sf{LLe}}{}^+$}\label{sec.tnnil.conservativity} It is clearly the case that $\hbox{\sf LC}{}\supseteq\hbox{\sf{LLe}}{}^+$. One can use Kripke models (from the next section) to show $\neg\neg\Box\bot\in\hbox{\sf LC}{}\setminus\hbox{\sf{LLe}}{}^+$. This implies that the inclusion is strict. As we will see later in this section, $\hbox{\sf LC}{}$ and $\hbox{\sf{LLe}}{}^+$ have the same $\hbox{\sf TNNIL}{} $-theorems. To prove this, we need some lemmas.
\begin{lemma} ${\sf iK4}+{\sf Le}^+\vdash {\sf Le}$. \end{lemma} \begin{proof} Assume some axiom instance of ${\sf Le}$, $\Box(B\vee C)\ra\Box(\Box B\vee C)$. Let $A:=B\vee C$. By axiom schema ${\sf Le}^+$, we have $\Box A\ra\Box A^l$, which is $\Box(B\vee C)\ra\Box(\Boxdot B\vee \Boxdot C)$. This implies (inside ${\sf iK4}$) $\Box(B\vee C)\ra\Box(\Box B\vee C)$ . \end{proof}
\begin{lemma}\label{Lemma-leivant's translation properties} For each modal proposition $A$, \begin{enumerate} \item \label{1Lemma-leivant's translation properties}If $A\in{\sf NOI}$, then ${\sf iK4}+{\sf CP}_{\sf a}\vdash A\ra\Box A$. \item \label{2Lemma-leivant's translation properties}${\sf iK4}\vdash A^l\ra A$. \item \label{3Lemma-leivant's translation properties}If $A\in {\sf NOI}$, then ${\sf iK4}+\hbox{\sf CP}{} _{\sf a}\vdash A^l\lr A$. \item \label{4Lemma-leivant's translation properties}${\sf LLe}^+\vdash \Box A^l \lr\Box A$. \end{enumerate} \end{lemma} \begin{proof} Proofs of \cref{1Lemma-leivant's translation properties,2Lemma-leivant's translation properties,3Lemma-leivant's translation properties} are routine by induction on $A$. \Cref{4Lemma-leivant's translation properties} is deduced from \cref{2Lemma-leivant's translation properties}, i.e. we have $\Box A^l\ra\Box A$, by \cref{2Lemma-leivant's translation properties} and $\Box A^l\leftarrow\Box A$ is exactly ${\sf Le}^+$. \end{proof}
\begin{lemma}\label{Lemma-leivant's translation vs box translation} For any $\hbox{\sf TNNIL}{} $ formula $A$, we have \begin{enumerate} \item \label{1Lemma-leivant's translation vs box translation}$\hbox{\sf{LLe}}{}^+\vdash\Boxdot A^l\lr\Boxdot A^\Box$, \item \label{2Lemma-leivant's translation vs box translation}$\hbox{\sf{LLe}}{}^+\vdash A^\Box\ra A$, \item \label{3Lemma-leivant's translation vs box translation}If $A\in {\sf NOI}$, then $\hbox{\sf{LLe}}{}^+\vdash A^\Box\lr A$, \item \label{4Lemma-leivant's translation vs box translation}$\hbox{\sf{LLe}}{}^+\vdash \Box A\lr\Box A^\Box$. \end{enumerate} \end{lemma} \begin{proof} We prove all items simultaneously, by induction on the complexity of $A$. In the course of the proof, whenever we use the induction hypothesis for item $i$, $1\leq i\leq 4$, we indicate that item number in parentheses, and whenever we deduce an item of the lemma, we likewise indicate its number in parentheses.
\noindent{\em Atomic:} For atomic $A$, we have $A^l=A$ and $A^\Box=\Boxdot A $, hence by properties of $\Boxdot$ (\Cref{Lemma-Properties of boxdot} \cref{1Lemma-Properties of boxdot}), ${\sf iK4}\vdash \Boxdot A^l\lr\Boxdot A^\Box$ (\cref{1Lemma-leivant's translation vs box translation}) and $\hbox{\sf{LLe}}{}^+\vdash A^\Box \lr A$ (\cref{2Lemma-leivant's translation vs box translation,3Lemma-leivant's translation vs box translation}),
which, by necessitation, implies $\hbox{\sf{LLe}}{}^+\vdash\Box A^\bo\lr\bo A$ (\cref{4Lemma-leivant's translation vs box translation}).
\noindent{\em Boxed:} Let $A=\Box B$. Then by definition, $A^l=A$ and $A^\Box=\Box B^\Box$. Hence by induction hypothesis (\cref{1Lemma-leivant's translation vs box translation}), $\hbox{\sf{LLe}}{}^+\vdash \Boxdot B^l\lr\Boxdot B^\Box$. Then $\hbox{\sf{LLe}}{}^+\vdash \Boxdot\Box B^l\lr\Boxdot\Box B^\Box$. On the other hand, by \Cref{Lemma-leivant's translation properties} \cref{4Lemma-leivant's translation properties}, $\hbox{\sf{LLe}}{}^+\vdash \Box B^l\lr\Box B$. Hence $\hbox{\sf{LLe}}{}^+\vdash \Boxdot\Box B\lr\Boxdot\Box B^\Box$ (\cref{1Lemma-leivant's translation vs box translation}). Also by induction hypothesis (\cref{4Lemma-leivant's translation vs box translation}), $\hbox{\sf{LLe}}{}^+\vdash \Box B\lr\Box B^\Box$. Hence $\hbox{\sf{LLe}}{}^+\vdash A\lr A^\Box$ (\cref{2Lemma-leivant's translation vs box translation,3Lemma-leivant's translation vs box translation}). That, by necessitation, implies $\hbox{\sf{LLe}}{}^+\vdash \bo A\leftrightarrow\bo A^\Box$ (\cref{4Lemma-leivant's translation vs box translation}).
\noindent{\em Conjunction: } This case is trivial.
\noindent{\em Disjunction: } Assume $A=B\vee C$. If $A\in {\sf NOI}$, then $B,C\in {\sf NOI}$ and hence induction hypothesis for $B$ and $C$ (\cref{3Lemma-leivant's translation vs box translation}) implies $\hbox{\sf{LLe}}{}^+\vdash A^\Box\lr A$ (\cref{3Lemma-leivant's translation vs box translation}).
For the other parts, we have, by definition, $A^l=\Boxdot B^l\vee\Boxdot C^l$. Hence by induction hypothesis (\cref{1Lemma-leivant's translation vs box translation}), $\hbox{\sf{LLe}}{}^+\vdash A^l\lr(\Boxdot B^\Box\vee\Boxdot C^\Box)$. Hence, by \Cref{Lemma-Properties of boxdot} \cref{2Lemma-Properties of boxdot}, we derive
$\hbox{\sf{LLe}}{}^+\vdash \Boxdot A^l\lr\Boxdot A^\Box$ (\cref{1Lemma-leivant's translation vs box translation}). To prove the \cref{2Lemma-leivant's translation vs box translation}, note that, by induction hypothesis (\cref{2Lemma-leivant's translation vs box translation}), $\hbox{\sf{LLe}}{}^+\vdash B^\Box\ra B$ and $\hbox{\sf{LLe}}{}^+\vdash C^\bo\ra C$. Hence $\hbox{\sf{LLe}}{}^+\vdash (B\vee C)^\bo\rightarrow (B\vee C)$ (\cref{2Lemma-leivant's translation vs box translation}). To prove \cref{4Lemma-leivant's translation vs box translation}, we note that, by \cref{1Lemma-leivant's translation vs box translation} for $A$, we have $\hbox{\sf{LLe}}{}^+\vdash \Boxdot A^l\lr\Boxdot A^\Box$. Hence $\hbox{\sf{LLe}}{}^+\vdash \bo\Boxdot A^l\lr\bo\Boxdot A^\Box$, which implies $\hbox{\sf{LLe}}{}^+\vdash \bo A^l\lr\bo A^\Box$ (by \Cref{Lemma-Properties of boxdot} \cref{1Lemma-Properties of boxdot}). Now \Cref{Lemma-leivant's translation properties} \cref{4Lemma-leivant's translation properties}
implies $\hbox{\sf{LLe}}{}^+\vdash \bo A\lr\bo A^\Box$ (\cref{4Lemma-leivant's translation vs box translation}).
\noindent{\em Implication: } Assume $A=B\ra C$. Clearly $A\not\in {\sf NOI}$ and $B\in {\sf NOI}$. We only show induction claim for \cref{1Lemma-leivant's translation vs box translation}. The other items can be shown easily. By induction hypothesis (\cref{3Lemma-leivant's translation vs box translation}), $\hbox{\sf{LLe}}{}^+\vdash B^\Box\lr B$, and also by
\Cref{Lemma-leivant's translation properties} \cref{1Lemma-leivant's translation properties}, $\hbox{\sf{LLe}}{}^+\vdash \Boxdot B\lr B$. Note that $\Boxdot A^l = \Boxdot(B\ra C^l)$ and hence $\hbox{\sf{LLe}}{}^+\vdash \Boxdot A^l\lr\Boxdot(\Boxdot B\ra C^l)$. By
\Cref{Lemma-leivant's translation properties} \cref{3Lemma-leivant's translation properties}, $\hbox{\sf{LLe}}{}^+\vdash B^l\lr B$, hence $\hbox{\sf{LLe}}{}^+\vdash \Boxdot A^l\lr\Boxdot(\Boxdot B^l\ra C^l)$. Now properties of $\Boxdot$ implies that $\hbox{\sf{LLe}}{}^+\vdash\Boxdot A^l\lr\Boxdot(\Boxdot B^l\ra\Boxdot C^l) $, and induction hypothesis (\cref{1Lemma-leivant's translation vs box translation}), implies $\hbox{\sf{LLe}}{}^+\vdash \Boxdot A^l\lr\Boxdot(\Boxdot B^\Box\ra\Boxdot C^\Box)$. This implies, again by properties of $\Boxdot $, the desired result, $\hbox{\sf{LLe}}{}^+\vdash \Boxdot A^l\lr\Boxdot A^\Box$ (\cref{1Lemma-leivant's translation vs box translation}). \end{proof}
\begin{lemma}\label{Lemma-LLe+-versus-iGL} If $\hbox{\sf LC}{}\vdash A$, then ${\sf iGL}\vdash A^\Box$. \end{lemma} \begin{proof} From $\hbox{\sf LC}{}\vdash A$ we have ${\sf iGL}\vdash\bigwedge_i\Boxdot(B_i\ra\Box B_i)\ra A$. Hence by \Cref{Proposition-propositional properties of Box translation}, we have ${\sf iGL}\vdash[\bigwedge_i\Boxdot(B_i\ra\Box B_i)\ra A]^\Box$. This implies ${\sf iGL}\vdash\bigwedge_i\Boxdot(B_i^\Box\rightarrow \Box B_i^\Box)\ra A^\Box$.
Now \Cref{Lemma-Properties of boxdot} \cref{2Lemma-Properties of boxdot} implies ${\sf iGL}\vdash A^\Box$, as desired. \end{proof}
\begin{theorem}\label{Theorem-TNNIL Conservativity of LC over LLe+} $\hbox{\sf LC}{}$ is $\hbox{\sf TNNIL}{} $-conservative over $\hbox{\sf{LLe}}{}^+$. \end{theorem} \begin{proof} Let $\hbox{\sf LC}{}\vdash A$.
From \Cref{Lemma-LLe+-versus-iGL} we have ${\sf iGL}\vdash A^\Box $ and hence $\hbox{\sf{LLe}}{}^+\vdash A^\Box$. Now \Cref{Lemma-leivant's translation vs box translation} \cref{2Lemma-leivant's translation vs box translation} implies that $\hbox{\sf{LLe}}{}^+\vdash A$. \end{proof}
\subsection{Kripke semantics for \hbox{\sf LC}{}}\label{Sec-PropModKripke}
Let us first review results and notations from \cite{IemhoffT} which will be used here. Assume two binary relations $R$ and $S$ on a set. Define $\alpha(R\circ S)\gamma$ iff there exists some $\beta$ such that $\alpha R \beta $ and $\beta S \gamma$.
A Kripke model $\mathcal{K}$, for intuitionistic modal logic, is a quadruple $(K,<,\mathcal{R},V)$, such that $K$ is a set (we call its elements as nodes), $(K,<)$ is a partial ordering, $\mathcal{R}$ is a binary relation on $K$ such that $(\leq\circ\, \mathcal{R})\subseteq\;\mathcal{R}$, and $V$ is a binary relation between nodes and atomic variables such that $\alpha V p$ and $\alpha \leq \beta$ implies $\beta V p$. Then we can extend $V $ to the modal language with $\mathcal{R}$ corresponding to $\Box$ and $\leq $ for intuitionistic $\ra$. More precisely, we define $\Vdash$ inductively as an extension of $V$ as follows: \begin{itemize}
\item $\mathcal{K},\alpha\Vdash p$ iff $\alpha Vp$, for atomic variable $p$,
\item $\mathcal{K},\alpha\Vdash A\vee B$ iff $\mathcal{K},\alpha\Vdash A$ or $\mathcal{K},\alpha\Vdash B$,
\item $\mathcal{K},\alpha\Vdash A\wedge B$ iff $\mathcal{K},\alpha\Vdash A$ and $\mathcal{K},\alpha\Vdash B$,
\item $\mathcal{K},\alpha\nVdash\bot$ and $\mathcal{K},\alpha\Vdash\top$,
\item $\mathcal{K},\alpha\Vdash A\ra B$ iff for all $\beta\geq\alpha$, $\mathcal{K},\beta\Vdash A$ implies $\mathcal{K},\beta\Vdash B$,
\item $\mathcal{K},\alpha\Vdash \bo A$ iff for all $\beta$ with $\alpha\,\mathcal{R}\, \beta$, we have $\mathcal{K},\beta\Vdash A$. \end{itemize} If also we assume that $\mathcal{R}$ is empty and restrict our attention to non-modal language, we have the usual Kripke models for intuitionistic (non-modal) logic. In the rest of paper, we may simply write $\alpha\Vdash A$ instead of $\mathcal{K},\alpha\Vdash A$,
if no confusion is likely. By an induction on the complexity of $A$, one can observe that $\alpha\Vdash A$ implies $\beta\Vdash A$ for all $A$ and $\alpha\leq \beta$. We define the following notions. \begin{itemize} \item If $\alpha\leq\beta$, $\beta$ is said to be above $\alpha$ and $\alpha$ is beneath $\beta$. If $\alpha\;\mathcal{R}\;\beta$, $\beta $ is said to be a successor of $\alpha$. We define $\mathcal{R}(\alpha)$ to be the set of all successors of $\alpha$. \item A Kripke model is finite if its set of nodes is finite. A Kripke model has a tree frame if its set of nodes with the ordering $\leq$ is a tree. \item A Kripke model $\mathcal{K}=(K,<,\mathcal{R},V)$ is reverse well-founded iff $K$ is well-founded with the ordering $\mathcal{R}^{-1}$. \item $\mathcal{K}$ is called {\em neat} iff $\alpha\,\mathcal{R}\,\gamma$ and $\alpha\leq\beta\leq\gamma$ implies $\alpha\,\mathcal{R}\,\beta$ or $\beta\,\mathcal{R}\,\gamma$. \item $\mathcal{K}$ is called {\em brilliant} iff $(\mathcal{R}\;\circ \leq)\subseteq\mathcal{R}$. (\cite{IemhoffT}) \item $\mathcal{K}$ is called {\em perfect} iff it is brilliant, reverse well-founded and $\mathcal{R}\subseteq\,<$. \item Suppose $X$ is a set of propositions that is closed under sub-formulae (we call such an $X$ adequate). An $X$-saturated set of propositions $\Gamma$ with respect to some theory $T$ is a subset of $X$ such that \begin{itemize}
\item For each $A\vee B\in X $, $T+\Gamma\vdash A\vee B$ implies $A\in\Gamma$ or $B\in \Gamma$.
\item For each $A\in X$, $T+\Gamma\vdash A$ implies $A\in\Gamma$. \end{itemize} \end{itemize} \begin{lemma}\label{Lemma-saturation} Let $T\nvdash A$ and let $X$ be an adequate set. Then there is an $X$-saturated set $\Gamma$ such that $T\cap X\subseteq\Gamma\nvdash A$. \end{lemma} \begin{proof} See \cite{IemhoffT}. \end{proof}
\begin{theorem}\label{Theorem-Propositional Completeness LC} $\hbox{\sf LC}{}$ is sound and complete for finite neat perfect Kripke models with tree frames. \end{theorem} \begin{proof} Soundness part can easily be proved by induction on the complexity of formulae. For the completeness, we first find some finite perfect Kripke counter-model for each $A$ with $\hbox{\sf LC}{}\nvdash A$, and then convert it to a perfect Kripke model with finite tree frame.
Assume $\hbox{\sf LC}{}\nvdash A$. Let ${\sf Sub}(A)$ be the set of sub-formulae of
$A$. Then define $$X:=\{B,\Box B \ | \ B\in {\sf Sub}(A)\}$$ It is obvious that $X$ is a finite adequate set. We define $\mathcal{K}=(K,<,\mathcal{R},V)$ as follows. Take $K$ as the set of all $X$-saturated sets with respect to $\hbox{\sf LC}{}$, and $\leq$ is the subset relation over $K$. Define $\alpha\,\mathcal{R}\, \beta$ iff for all $\Box B\in X$, $ \Box B\in \alpha$ implies $B\in \beta$, and also there exists some $\Box C\in \beta\setminus \alpha$. Finally define $\alpha V p$ iff $p\in \alpha$, for atomic $p$. \\ It only remains to show that $\mathcal{K}$ is a finite perfect Kripke model that refutes $A$. To do this, we first show by induction on $B\in X$ that $B\in \alpha$ iff $\alpha\Vdash B$, for each $\alpha\in K$. The only non-trivial case is $B=\Box C$. Let $\Box C\not\in \alpha$. We must show $\alpha\nVdash \Box C$. The other direction is easier to prove and we leave it to reader. Let
$\beta_0:=\{D\in X\ |\ \alpha\vdash\Box D\}$. If $\beta_0,\Box C\vdash C$, then, by definition of $\beta_0$, we have $\alpha\vdash\Box \beta_0$ and hence by L\"{o}b's axiom, $\alpha\vdash \Box C$, contradicting $\Box C\not\in \alpha$. Hence $\beta_0,\Box C\nvdash C$ and so there exists some $X$-saturated set $\beta$ such that $\beta\nvdash C$, $\beta\supseteq \beta_0\cup\{\Box C\}$. Hence $\beta\in K$ and $\alpha\,\mathcal{R}\, \beta$. Then by induction hypothesis, $\beta\nVdash C$ and hence $\alpha\nVdash \Box C$.
Since $\hbox{\sf LC}{}\nvdash A$, by \Cref{Lemma-saturation}, there exists some $X$-saturated set $\alpha\in K$ such that $\alpha\nvdash A$, and hence by the above argument we have $\alpha\nVdash A$. \\ $\mathcal{K}$ trivially satisfies all the properties of a perfect Kripke model. As a sample, we show why $\mathcal{R}\subseteq\, <$ holds. Assume $\alpha\,\mathcal{R}\, \beta$ and let $B\in \alpha$. If $B$ is a boxed formula, say $B=\Box C$, then by definition, $C\in \beta$ and hence $\beta\vdash B$ and we are done. So assume $B$ is not a boxed formula. Then by definition of $X$, we have $\Box B\in X$ and by the completeness axiom in $\hbox{\sf LC}{}$, we have $\alpha\vdash\Box B$ and hence by definition of $\mathcal{R}$, it is the case that $B\in \beta$. This shows $\alpha\subseteq \beta$ and hence $\alpha\leq \beta$. But $\alpha$ is not equal to $\beta$, because $\alpha\,\mathcal{R}\, \beta$ implies the existence of some $\Box C\in \beta\setminus \alpha$. Hence $\alpha< \beta$, as desired.
Now we explain how to convert $\mathcal{K}$ to a
Kripke model $\mathcal{T}:=(T,<_t,\mathcal{R}_{t},V_t)\nVdash A$
with a neat tree frame. Let $T$ be the set of all finite (excluding empty sequence) sequences $\langle \alpha_1, \ldots,\alpha_n\rangle$ such that ${\alpha_1 <\ldots<\alpha_n}$. Let $\leq_t$ be the initial segment relation. Then define $\langle \alpha_1, \ldots,\alpha_n\rangle\;\mathcal{R}_{t}\;\langle\alpha_1, \ldots,\alpha_{n+k}\rangle$
iff $\alpha_{n+i} \,\mathcal{R}\, \alpha_{n+i+1}$ for some $0\leq i<k$. Finally, define $\langle \alpha_1, \ldots,\alpha_n\rangle\; V_t \; p$, for atomic $p$, iff $\alpha_n\;V\;p$. Now one can prove by induction on $B$, that for any $\alpha=\langle \alpha_1, \ldots,\alpha_n\rangle\in T$, $\mathcal{T},\alpha\Vdash B$ iff $\mathcal{K},\alpha_n\Vdash B$. Hence $\mathcal{T}\nVdash A$. \end{proof} \noindent Since $\hbox{\sf LC}{}$ has finite model property, as it is expected, we can easily deduce the decidability of $\hbox{\sf LC}{}$: \begin{corollary}\label{Corollary-decidability of LC} $\hbox{\sf LC}{}$ is decidable. \end{corollary} \begin{proof} Let $A$ be given. Assume that $n$ is the number of elements of $X$ defined in the above proof. It shows us that we should only check if for all Kripke models $\mathcal{K}$ with $2^n$ nodes (only over atomic variables that appear in $A$), we have $\mathcal{K}\Vdash A$. If that was the case, we say ``yes" to $\hbox{\sf LC}{}\vdash A$?, otherwise the answer is ``no" to $\hbox{\sf LC}{}\vdash A$?. \end{proof}
\subsubsection*{Relation to intuitionistic non-modal Kripke models}
The usual intuitionistic non-modal Kripke models are the same Kripke models as is defined above, without the additional relation $\mathcal{R}$. Extending it to all non-modal propositions is the same as the one for modal language. It is well-known that $\hbox{\sf IPC} $ is sound and complete for non-modal Kripke models. We have the following conservativity result.
\begin{theorem}\label{Theorem-nonmodal conservativity of LC over IPC} $\hbox{\sf LC}{}+\bo\bot$ is conservative over $\hbox{\sf IPC} $ in non-modal language, i.e. for any non-modal proposition $A$, if $\hbox{\sf LC}{}+\bo\bot\vdash A$, then $\hbox{\sf IPC} \vdash A$. \end{theorem} \begin{proof} We reason contrapositively. Assume that $\hbox{\sf IPC} \nvdash A$. Then by completeness, there exists some non-modal Kripke model $\mathcal{K}=(K,\leq,V)\nVdash A$. Let $\mathcal{R}:=\emptyset$ and $\mathcal{K}':=(K,\leq,\mathcal{R},V)$. It is easy to observe that $\mathcal{K}'$ is a Kripke model and for all non-modal proposition $B$ and $u\in K$, we have $\mathcal{K},u\Vdash B$ iff $\mathcal{K}',u\Vdash B$. Hence we have $\mathcal{K}'\nVdash A$. Now soundness theorem (\Cref{Theorem-Propositional Completeness LC}) implies $\hbox{\sf LC}{}+\bo\bot\nvdash A$. \end{proof}
\section{Transforming Kripke models}\label{sec-transforming} Smor\'ynski (\cite{Smorynski-Thesis}) showed that one could simulate the behaviour of a propositional non-modal Kripke model by a first-order Kripke model of $\hbox{\sf HA}{} $. Also Solovay (\cite{Solovay}) showed that one could simulate the behaviour of a Kripke model of classical modal logic inside $\hbox{\sf PA}{} $. However, combining these two ideas may be regarded as the major obstacle on the way to the characterization of the provability logic of $\hbox{\sf HA}{} $. In this section, we will show that one can simulate the behaviour of perfect Kripke models by first-order Kripke models of $\hbox{\sf HA}{} $. This will lead us to the characterization of the $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $. More precisely, we will prove the following theorem.
\begin{theorem}\label{Theorem-Main tool} Let $\mathcal{K}_0=(K_0,\mathcal{R}_{0},<_0,V_0)$ be a finite neat perfect Kripke model with tree frame
and $\Gamma\subseteq\hbox{\sf TNNIL}{} ^-$ be a finite set. Then there exists some arithmetical $\Sigma_1$-substitution $\sigma$ and a Kripke model $\mathcal{K}_1=(K_0,<_0,\mathfrak{M})$ such that for all $A\in \Gamma$ and $\alpha\in K_0$ we have $\mathcal{K}_0,\alpha\Vdash A$ iff $\mathcal{K}_1,\alpha\Vdash \sigma_{_{\sf HA}}(A)$. \end{theorem}
Before we continue with the rather long proof of \Cref{Theorem-Main tool}, that will take up all of this section, let us explain the outline of the proof.
First we define a recursive function $F$ (the Solovay function) with the natural numbers as its domain. $F(0)$ is defined to be some fresh node $\alpha_0$. The function $F$ always climbs over the frame ${(K_0,\mathcal{R}_{0},<_0)}$, but it is reluctant to do so. It only goes to some node $\beta$ at some stage $n+1$ (i.e. $F(n+1)=\beta$) if $n+1$ is a witness (in a sense that will be made precise in this section) for the statement \begin{center} ``$F$ is not going to stay in $\beta$ forever or $\neg\sigma_{_{\sf HA}}(\Box\varphi_{_\beta})$". \end{center} In this definition, $\varphi_{_\beta}$ is the conjunction of all sentences $B$ such that $ \Box B\in{\sf Sub}(\Gamma)$ and $\beta\Vdash\Box B$. Here, ${\sf Sub}(\Gamma)$ is the set of sub-formulae
of some formula in $\Gamma$. The most interesting (and difficult part to prove as well) property of the function $F$
is that this function actually (in the standard model of arithmetic $\mathbb{N}$) does not climb the tree at all, i.e.
the function $F$ is constant, $\mathbb{N}\models\forall{x}F(x)=\alpha_0$. In contrast with the classical case, proving this fact for the intuitionistic case is rather complicated.
Let $L\succcurlyeq\alpha$ denote ``the function $F$ would go above $\alpha$ or remain equal to $\alpha$".
Then we define the substitution $\sigma(p):=\bigvee_{\alpha\Vdash p}L\succcurlyeq\alpha$.
Then we define the $I$-frame $\mathcal{I}=(K_0,<_0,T)$, where $T_\alpha$ is defined to be $\hbox{\sf PA}{} $ plus the following statement:
``The limit of the function $F$ is $\alpha$". Finally, with the aid of
\Cref{Theorem-Smorynski's general method of Kripke model construction}
we find the desired Kripke model $\mathcal{K}_1$, by assigning an appropriate classical model of $T_\alpha$ to the node $\alpha$.
We will show that $T_\alpha\vdash \sigma_{_{\sf HA}}( \Box\varphi_{_\alpha})$
(\Cref{corollar-4st&2st}) and also $T_\alpha\vdash \sigma_{_{\sf HA}}(\neg\Box B)$ for any
$\Box B\in {\sf Sub}(\Gamma)$ and $\alpha\nVdash\Box B$
(\Cref{Lemma-2st Properties of Solovay's Function}). In this way, we can simulate the role of modal operator $\Box$
in the first-order Kripke model $\mathcal{K}_1$.
\noindent {\bf Notation.} In the rest of this section, we fix the Kripke model $\mathcal{K}_0=(K_0,\mathcal{R}_{0},<_0,V_0)$
and the set $S:=\{B\in{\sf Sub}(\Gamma) \mid B\in\hbox{\sf TNNIL}{} \}$. We also assume that $\alpha_0\not\in K_0$
and define
$$
\mathcal{R}:=\mathcal{R}_{0} \cup \{ (\alpha_0,\alpha)\mid\alpha\in K_0\}
\quad \quad
<\; :=\; <_0\cup\; \{ (\alpha_0,\alpha)\mid\alpha\in K_0\}
\quad \quad
K:=K_0\cup \{\alpha_0\} $$ In other words, we add $\alpha_0$ in beneath of all nodes of $\mathcal{K}_0$. Finally we define $\mathcal{K}:=(K,\mathcal{R},<,V_0)$.
\subsection{Definition of the Solovay function} \label{subsection-solovay} Solovay used some special recursive function (here we call it the {\em Solovay function}) to prove the completeness of $\hbox{\sf GL}{} $ (The G\"odel-L\"ob logic) for arithmetical interpretations in $\hbox{\sf PA}{} $ (See \cite{Solovay}). The Solovay function in \cite{Solovay}, is a function $G:\mathbb{N}\longrightarrow X$, in which $X$ is a finite partially ordered set ordered by $\preccurlyeq$. The recursive definition of $G$ is such that $G$ climbs over $X$, i.e. $G(x)\preccurlyeq G(x+1)$ and moreover, it goes to some new node iff there exists a witness that $G$ would not remain there. More precisely, $G(x+1)\neq G(x)$ iff $x+1$ is the code of a proof (in $\hbox{\sf PA}{} $) for the fact that the limit of the function $G$ is not $G(x+1)$. Although it is true (in the standard model) that $G$ will not climb over $X$ (i.e. $G$ is a constant function), $\hbox{\sf PA}{} $ can't prove this fact. In this subsection, we define a similar recursive function (we call it $F$) for the proof of our main theorem
(\Cref{Theorem-Main tool}), and state and prove some of its properties.
For technical reasons, we first define the set of all codes of sequences $z=\langle F(0),\ldots,F(x)\rangle$ by an arithmetical formula $\theta(z)$, and then define $\phi_\theta(x,y):=\exists{z}({\sf lth}(z)=x+1\wedge \theta(z)\wedge \hat{z}=y)$ as the graph of a function $F_\theta$ and finally, let $F:=F_\theta$. It is clear that we can also define $\theta_F(z)$ from the function $F$ in the following way.
$$\theta_F(z):=\exists{x}(z=\langle F(0),\ldots,F(x)\rangle) \text{ or equivalently } \theta_F(z):=\forall{\,x \!<\! {\sf lth}(z)}(F(x)=(z)_{x})$$
To be able to speak about $\mathcal{K}$ inside $\hbox{\sf HA}{} $, we need some conventions. Suppose that $K=\{\alpha_0,\alpha_1,\ldots,\alpha_k\}$. Hence for each $\alpha\in K$, there exists a unique index $0\leq i\leq k$ such that $\alpha=\alpha_i$. We define $\overline{\alpha}$ to be $\bar{i}$ ($\bar{n}$ is the $n$-th numeral in the language of arithmetic, i.e. $\bar{i}:=S^i(0)$). We may simply use $\alpha$ instead of $\overline{\alpha}$, if no confusion is likely. The following notations for arbitrary terms $t$ and $s$ in the language of arithmetic will be used later. \begin{itemize} \item $\overline{K}(t):=\bigvee_{\alpha\in K}(t=\overline{\alpha})$, \item $t\prec s:=\bigvee_{\alpha\lneqq\beta}(t=\overline{\alpha}\wedge s=\overline{\beta})$, $t\preccurlyeq s:=\bigvee_{\alpha\leq\beta}(t=\overline{\alpha}\wedge s=\overline{\beta})$, \item $t\;\overline{\mathcal{R}}\; s:=\bigvee_{\alpha\mathcal{R}\beta}(t=\overline{\alpha}\wedge s=\overline{\beta})$, \item $\varphi_{_\alpha}:=\bigwedge_{B\in {\sf Sub}(\Gamma), B\in{\sf TNNIL},\alpha\Vdash \Box B}B$. \end{itemize} In the following definition, we introduce $L_\theta=y$ as the arithmetical formula expressing ``The limit of the function $F_\theta$ is equal to $y$". Similarly, we define $\alpha\prec L_\theta$ and so on. \begin{definition} Let $\theta(z)$ be a $\Sigma_1$-formula in the language of arithmetic. 
Then \begin{itemize} \item $L_\theta=y$ is a shorthand for $\exists{u}\forall{z}(\theta(u*z)\to \hat{z}=y)$ in which $\hat{z}$ is the final element of the sequence with the code $z$, \item For each $\alpha\in K$, $\alpha\preccurlyeq L_\theta$, $\alpha\prec L_\theta$ and $\alpha\,\mathcal{R}\, L_\theta$ are shorthands for $\bigvee_{\alpha\leq\beta}\exists{x}(\theta(x)\wedge \hat{x}=\beta)$, $\bigvee_{\alpha\lneqq\beta}\exists{x}(\theta(x)\wedge \hat{x}=\beta)$ and $\bigvee_{\alpha\mathcal{R}\beta}\exists{x}(\theta(x)\wedge \hat{x}=\beta)$, respectively, \item The arithmetical substitution $\sigma$ is defined on propositional variable $p$ by $$\sigma(p):=\bigvee_{\beta\Vdash{p}}\beta\preccurlyeq L_\theta.$$ and finally, we extend $\sigma$ to all propositions by interpreting $\Box$ as provability in \hbox{\sf HA}{} , i.e., $\sigma_\theta:=\sigma_{_{\sf HA}}$, in which $\sigma_{_{\sf HA}}$ is defined from $\sigma$ as in \Cref{Definition-Arithmetical substitutions}, \item Let $g$ be a recursive function with $\theta_g(z)$ as the formula $\exists{x}(z=\langle g(0),\ldots,g(x)\rangle)$. We define $L_g=y$, $L_g\succ \alpha$, $L_g\succeq \alpha$, $\alpha\,\mathcal{R}\, L_g$ and $\sigma_\tinysub{g}$ to be $L_{\theta_g}=y$, $L_{\theta_g}\succ \alpha$, $L_{\theta_g}\succeq \alpha$, $\alpha\,\mathcal{R}\, L_{\theta_g}$ and $\sigma_{_{\theta_g}}$, respectively. \end{itemize} \end{definition}
Following Berarducci (\cite{Berarducci}), we define a primitive recursive function as follows: $$r_{_{\theta}}(\bar{\alpha},x)={\sf min}\left(\{ k \mid \exists{u\leq x}{\sf Proof}_\tinysub{{\sf PA}_k} (u,\ulcorner\neg(L=\alpha\wedge\Box\sigma_{_\theta}(\varphi_{_\alpha}))\urcorner)\}\cup\{x+1\}\right)$$ Note that $r_{_\theta}(x,y)$ depends also on a $\Sigma_1$-formula which appears in the subscript of $\sigma$. We should also note that $L=\alpha$ is defined in reference to $\theta$ as well.
We may omit subscripts of the interpretation $\sigma_{_\theta}$ and the function $r_{_\theta}$ when no confusion is likely.
A variant of this function first appeared in \cite{Berarducci}, where it was used to define Solovay functions for characterizing the interpretability logic of $\hbox{\sf PA}{} $. It is easy to observe that $r(\alpha,x)$ is always less than or equal to $x+1$, and $r(\alpha,x)\leq x$ iff $$\exists{y\leq x}{\sf Proof}_\tinysub{\sf PA}(y,\ulcorner\neg(L=\alpha\wedge\Box\sigma_{_{F}}(\varphi_{_\alpha}))\urcorner)$$
Now we are in a position to define the Solovay-like function for $\mathcal{K}$. Informally speaking, ${F:\mathbb{N}\ra K}$ is defined in such a way that fulfils the following conditions. $F(0):=\alpha_0$, and \begin{equation}\label{SolovayFunction} F(x+1):= \begin{cases} \beta& \text{ if}\ (x+1)_0=\langle1,\beta\rangle, F(x)R\beta \ \text{and} \ r_{_F}(\beta,x+1)\leq x+1, \\ \gamma & \text{ if } (x+1)_0=\langle2,\gamma\rangle, \neg F(x)R\gamma \text{ and } F(x)\leq\gamma \text{ and }\\ &\ \ \ \ r_{_F}(\gamma,x+1)<r_{_F}(F(x),x+1)\ \text{and}
\quad F(r_{_F}(\gamma,x+1))\,\mathcal{R}\,\gamma,\\ F(x)& \text{ otherwise.} \end{cases} \end{equation} As it is clear from the definition, $F$ is used in its own definition, i.e. we are in a loop. This will be overcome by the Diagonalization lemma. To be able to define $F$, we first define
$\theta(z)$ and then define $F(x)=y$ (the graph of the function $F$) as
$$\exists{z}({\sf lth}(z)=x+1\wedge\theta(z)\wedge (z)_{x}=y)$$
By Diagonalization lemma (\Cref{Lemma-diagonalization lemma}), we find a $\Delta_0$ formula $\theta(y)$ such that
\begin{equation}\label{Eq1} \hbox{\sf HA}{} _0\vdash\theta(y)\leftrightarrow({\sf lth}(y)\geq 1\wedge (y)_0=\overline{\alpha_0}\wedge\forall{x< {\sf lth}(y)}(x\neq0\rightarrow \chi(x,y))) \end{equation}
in which $\chi(x,y)$ is defined as the disjunction of the following three formulae: \begin{align} \nonumber &\chi_1:= \bigvee_{\beta\in K} [(x)_0=\langle1,\bar{\beta}\rangle \wedge (y)_x=\bar{\beta}\wedge (y)_{x\dot{-}1}\, \bar{\mathcal{R}}\,\bar{\beta} \wedge {r}(\bar{\beta},x)\leq x]\\ \nonumber & \chi_2:= \bigvee_{\beta\in K} [(x)_0=\langle2,\bar{\beta}\rangle\wedge (y)_x=\bar{\beta}\wedge \neg (y)_{x\dot{-}1}\,\bar{\mathcal{R}}\,\bar{\beta}\wedge (y)_{x\dot{-}1}\preccurlyeq \bar{\beta} \wedge {r}((y)_x,x)< {r}((y)_{x\dot{-}1},x)\\ \nonumber & \ \ \ \ \ \wedge (y)_{{r}((y)_x,x)}\,\bar{\mathcal{R}}\, (y)_x]\\ \nonumber &\chi_3:= [(y)_x=(y)_{x\dot{-}1}] \wedge \bigwedge_{\beta\in K}\neg[ (x)_0=\langle1,\bar{\beta}\rangle\wedge (y)_{x\dot{-}1}\,\bar{\mathcal{R}}\, \bar{\beta} \wedge {r}(\bar{\beta},x)\leq x] \wedge\\ \nonumber & \bigwedge_{\beta\in K}\neg[(y)_{x\dot{-}1}\preccurlyeq \bar{\beta} \wedge \neg (y)_{x\dot{-}1}\,\bar{\mathcal{R}}\,\bar{\beta} \wedge (x)_0=\langle2,\bar{\beta}\rangle \wedge {r}(\bar{\beta},x)<{r}((y)_{x\dot{-}1},x) \wedge (y)_{{r}(\bar{\beta},x)}\,\bar{\mathcal{R}}\,\bar{\beta}] \end{align} In the above formulae, $r(x,y)$ is $r_{_\theta}(x,y)$. Now we show that a provably total recursive function $F$ can be defined from $\theta(y)$.
\begin{lemma}\label{Lemma-1st Solovay} The formula $\theta$ is $\Delta_0$ and \begin{enumerate} \item \label{1Lemma-1st Solovay}$\hbox{\sf HA}{} _0\vdash ({\sf lth}(y_1)\neq 0\wedge \theta(y_1*y_2))\to\theta(y_1)$, \item \label{2Lemma-1st Solovay}$\hbox{\sf HA}{} _0\vdash (\theta(y_1)\wedge\theta(y_2)\wedge{\sf lth}(y_1)={\sf lth}(y_2))\ra y_1=y_2 $, \item \label{3Lemma-1st Solovay}$\hbox{\sf HA}{} _0\vdash\forall{x}\exists{y}({\sf lth}(y)=x+1 \wedge \theta(y))$. \end{enumerate} \end{lemma} \begin{proof} It is not difficult to observe that the first item holds by definition of $\theta$ in \cref{Eq1}. To prove the other items, it is enough to show $\hbox{\sf HA}{} _0\vdash\forall{x}\exists!{y}({\sf lth}(y)=x+1\wedge\theta(y))$, in which $!\exists$, as usual, is the {\em uniqueness} existential quantifier. This can be simply done by induction on $x$. \end{proof}
Now, let us define $\phi(x,y):=\exists{z}(\theta(z)\wedge{\sf lth}(z)=x+1\wedge \hat{z}=y)$. Note that $\phi(x,y)$ is actually a $\Delta_0$ formula. The reason is the following. we can bound existential quantifier by the primitive recursive function $h(z)$ with the following primitive recursive definition: \begin{itemize}
\item $h(0):=\langle k\rangle$, in which $k$ is the number of nodes of the Kripke model,
\item $h(z+1):=h(z)*\langle k\rangle$. \end{itemize} Hence $\hbox{\sf HA}{} _0\vdash\phi(x,y)\leftrightarrow\exists{z\leq h(x)}[{\sf lth}(z)=x+1\wedge (z)_{x+1}=y\wedge\theta(z)]$.
\begin{notation} {\em The above lemma (\Cref{Lemma-1st Solovay}) says that $\phi(x,y)$ is the graph of a $\Delta_0$-function $F$. In the rest of the paper, we use $F$ as a function symbol with the graph $\phi(x,y)$. We use $\sigma$ and $L$ instead of $\sigma_\tinysub{\theta}$ and $L_\theta$, respectively. For simplicity of notation, when we work in the first-order language of arithmetic, instead of $\sigma_{_{\sf HA}}(B)$, we may use the notation $B$. For instance, assume that $p$ is an atomic variable in the propositional language. When we write down the formula $\hbox{\sf HA}{} \vdash \Box (\Box p\to p)\to\Box p$, we actually mean $\hbox{\sf HA}{} \vdash \sigma_{_{\sf HA}}(\Box (\Box p\to p)\to\Box p)$. This abuse of notation wipes out many unimportant symbols from the rest of \Cref{sec-transforming}.} \end{notation}
\noindent One can observe that the function $F$ fulfils the recursive conditions of \cref{SolovayFunction}.
\subsection{Elementary properties of the Solovay function} In this part, we will see some elementary properties of the function $F$.
\begin{lemma}\label{Lemma-Properties of Solovay's Function} The function $F$ has the following properties: \begin{enumerate} \item \label{1Lemma-Properties of Solovay's Function}$\hbox{\sf HA}{} _0\vdash\forall{x,y}({F}(x)\preccurlyeq {F}(x+y))$, \item \label{2Lemma-Properties of Solovay's Function}For any $\alpha\in K$, $\hbox{\sf PA}{} \vdash\exists{x}{F}(x)=\alpha\ra\bigvee_{\alpha\leq\beta}L=\beta$, \item \label{3Lemma-Properties of Solovay's Function}For any $\alpha\in K$, $\hbox{\sf PA}{} \vdash \alpha\prec L\leftrightarrow\bigvee_{\alpha< \beta}L=\beta$ and $\hbox{\sf PA}{} \vdash \alpha\,\bar{\mathcal{R}}\,L\leftrightarrow\bigvee_{\alpha\mathcal{R}\beta}L=\beta$. \end{enumerate} \end{lemma} \begin{proof} \begin{enumerate}[leftmargin=*] \item By recursive definition of $F$, $\hbox{\sf HA}{} _0\vdash{F}(x)\preccurlyeq {F}(x+1)$. Let ${A(y):=\bar{F}(x)\preccurlyeq \bar{F}(x+y)}$ and use induction on $y$ in $A(y)$. \item We prove this fact by induction (in meta-language) on the tree $(K,\lneqq)$ with reverse order. Suppose that for all $\beta\gneqq\alpha$, we have $\hbox{\sf PA}{} \vdash\exists{x}{F}(x)=\beta\ra\bigvee_{\beta\leq \gamma}L=\gamma$. Then $$\hbox{\sf PA}{} \vdash{F}(x)=\alpha\ra(\forall{y\geq x}{F}(y)=\alpha\vee\exists{y\geq x}{F}(y)\neq\alpha)$$
By part 1 and definition of $L=\alpha$, we get $\hbox{\sf PA}{} \vdash {F}(x)=\alpha\ra(L=\alpha\vee\exists{y\geq x}(\alpha\prec {F}(y)))$. Now induction hypothesis implies $\hbox{\sf PA}{} \vdash{F}(x)=\alpha\ra\bigvee_{\beta\geq\alpha}L=\beta$. \item Proof of this part is an immediate consequence of part 2 and perfectness of $\mathcal{K}$. \end{enumerate} \end{proof}
\begin{lemma}\label{Lemma-Properties of Solovay's Function.4}
For any $\alpha, \beta\in K$ with $\alpha\,\mathcal{R}\,\beta$, $\hbox{\sf HA}{} _0\vdash L=\alpha\ra\neg\Box^+ \neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$. \end{lemma} \begin{proof} We argue inside $\hbox{\sf HA}{} _0$. Assume $L=\alpha$ and ${\sf Proof}_\tinysub{\sf PA} (x,\ulcorner\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\urcorner)$. Let $y>x$ be such that $(y+1)_0=\langle2,\beta\rangle$. Then because $L=\alpha$, we have $F(y)=\alpha$. On the other hand, by the recursive definition of $F$, $F(y+1)=\beta$, a contradiction. \end{proof}
\begin{lemma}\label{Lemma-1.5st Properties of Solovay's Function} For any $\delta, \alpha, \beta\in K$ with $\delta\,\mathcal{R}\,\alpha\leq \beta $, $\hbox{\sf HA}{} _0+L=\delta\vdash(L=\alpha\wedge\Box\varphi\emptycommand_\alpha)\rhd (L=\beta\wedge\Box\varphi\emptycommand_\beta)$. \end{lemma} \begin{proof} If $\alpha\,\mathcal{R}\,\beta$, by \Cref{Lemma-Properties of Solovay's Function.4}, $\hbox{\sf HA}{} _0\vdash L=\alpha\ra\neg\Box^+\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$. So $${\mathbb{N}\models \Box^+(L=\alpha\ra\neg\Box^+\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand))}$$ and hence by \Cref{Lemma-bounded Sigma completeness} ($\Sigma_1$-completeness of $\hbox{\sf HA}{} _0$), we can deduce $\hbox{\sf HA}{} _0\vdash L=\alpha\rhd(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$. So assume $\alpha\;\NR\;\beta$ and $\alpha\neq\beta$. By definition of $A\rhd B$, we must show $$\hbox{\sf HA}{} _0+L=\delta\vdash \forall{x}\Box^+[(L=\alpha\wedge\Box\varphi\emptycommand_\alpha)\rightarrow \neg\Box^+_x\neg(L=\beta\wedge\Box\varphi\emptycommand_\beta)].$$ We work inside $\hbox{\sf HA}{} _0$. Assume $L=\delta$ and fix some large enough $x$ such that $F(x)=\delta$. Then for each $u\leq x$, we have $F(u)\,\mathcal{R}\,\beta$. Now work in the scope of $\Box^+$. By $\Sigma$-completeness of $\hbox{\sf PA}{} $, we have $\forall{u}\leq x F(u)\,\mathcal{R}\,\beta$. Assume $L=\alpha$, $\Box\varphi_{_\alpha}\emptycommand$ and $\Box^+_x\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$. We should deduce $\bot$. By $\Box^+_x\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$, for sufficiently large $y$ (larger than $\langle2,\beta\rangle*z$, in which $z$ is a proof code in $\hbox{\sf PA}{} _x$ for $\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$), we have $r(\beta,y)\leq x$. 
If $r(\alpha,y)\leq r(\beta,y)$, then $\Box^+_x\neg(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)$, and hence by \Cref{Lemma-Reflection}, we have $\neg(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)$, a contradiction. If $r(\alpha,y)> r(\beta,y)$, since $r(\beta,y)\leq x$, then $F(r(\beta,y))\,\mathcal{R}\,\beta$. So by recursive definition of $F$, there exists some $z\geq y$ such that $F(z)=\beta$, contradicting $L=\alpha$. \end{proof}
\subsection{Deciding the boxed formulas}\label{subsection-proofofmaintheorem}
In this subsection, we will show that $\hbox{\sf HA}{} +L=\alpha+\Box\varphi_{_\alpha}\emptycommand$ can decide boxed propositions in ${\sf Sub}(\Gamma)$. More precisely, for all $\Box B\in {\sf Sub}(\Gamma)$ and $\alpha\in K$, $$ \begin{cases} \hbox{\sf HA}{} +L=\alpha+\Box\varphi_{_\alpha}\emptycommand\vdash \Box B\emptycommand \ \ \ & \text{ if } \alpha\Vdash \Box B \\ \hbox{\sf HA}{} +L=\alpha+\Box\varphi_{_\alpha}\emptycommand\vdash \neg\Box B\emptycommand \ \ \ & \text{ if } \alpha\nVdash \Box B \end{cases} $$ Note that by definition of $\varphi_{_\alpha}$, if $\alpha\Vdash \Box B$, then $B$ is a conjunct of $\varphi_{_\alpha}$. Hence in case $\alpha\Vdash \Box B$, we obviously have $\hbox{\sf HA}{} +\Box\varphi_{_\alpha}\emptycommand\vdash\Box B\emptycommand$. Moreover we will show in \Cref{sec-54} (\Cref{corollar-4st&2st}) that
$\hbox{\sf HA}{} \vdash L=\alpha \to \Box \varphi_{_\alpha}\emptycommand$ for $\alpha\in K_0$, and then the following improvement of the above equation holds: $$ \begin{cases} \hbox{\sf HA}{} +L=\alpha \vdash \Box B\emptycommand \ \ \ & \text{ if } \alpha\Vdash \Box B \\ \hbox{\sf HA}{} +L=\alpha \vdash \neg\Box B\emptycommand \ \ \ & \text{ if } \alpha\nVdash \Box B \end{cases} $$ \begin{lemma}\label{Lemma-1.6st Properties of Solovay Function} Let $B\in {\sf Sub}(\Gamma)$ be such that all occurrences of $\ra$ in $B$ are in the scope of some $\Box$ {\em ($B\in{\sf NOI}$)}, $\alpha\in K$ and $\alpha\Vdash B$. Then $\hbox{\sf HA}{} _0\vdash(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)\ra B\emptycommand$. Moreover, this argument is formalizable and provable in $\hbox{\sf HA}{} _0$, i.e. $\hbox{\sf HA}{} _0\vdash\Box_0((L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)\ra B\emptycommand)$. \end{lemma} \begin{proof} One can prove $\hbox{\sf HA}{} _0\vdash (L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)\ra B\emptycommand$, by induction on the complexity of $B$. Then by \Cref{Lemma-bounded Sigma completeness}, we derive its formalized form in $\hbox{\sf HA}{} _0$. \end{proof}
\begin{notation} We say that $\alpha\nVdash_\tinysub{\sf max} A$ if $\alpha\nVdash A$ and for all $\beta\gneqq\alpha$ we have $\beta\Vdash A$. \end{notation} \noindent We have the following observations: \begin{itemize} \item $\alpha\nVdash_\tinysub{\sf max} B\to C$ iff ``$\alpha\Vdash B$ and $\alpha\nVdash_\tinysub{\sf max} C$'', \item $\alpha\nVdash_\tinysub{\sf max} B\vee C$ implies ``$\alpha\nVdash B$ and $\alpha\nVdash C$'', \item $\alpha\nVdash_\tinysub{\sf max} B\wedge C$ iff ``$\alpha\nVdash_\tinysub{\sf max} B$ or $\alpha\nVdash_\tinysub{\sf max} C$''. \end{itemize}
\noindent Let $A$ be a $\hbox{\sf TNNIL}{} $-formula such that $\alpha\nVdash_\tinysub{\sf max} A$.
In \Cref{Lemma-1.7st Properties of Solovay Function} and \Cref{Lemma-1.84st Properties of Solovay Function}, we need to put $\Box_x$ before all occurrences of subformulas $B$ in the right of $\ra$, when it is not the case that $\alpha\nVdash_\tinysub{\sf max} B$. This is the content of the following definition. \begin{definition} \em Let $A$ be a modal proposition, $\alpha\in K$ and $x$ be a variable.
We define the first-order sentence $d(A,\alpha,x)$ by induction on $A$. If it is not the case that $\alpha\nVdash_\tinysub{\sf max} A$, then we define $d(A,\alpha,x):=\Box_x \sigma_{_{\sf HA}}(A)$, and if $\alpha\nVdash_\tinysub{\sf max} A$, we define the formula $d(A,\alpha,x)$ by cases:
\begin{itemize}
\item $A$ is atomic or boxed. $d(A,\alpha,x):=\sigma_{_{\sf HA}}(A)$,
\item $A=B\to C$. Define $d(A,\alpha,x)$ by cases. If $B\not\in{\sf NOI}$, then let $d(A,\alpha,x):=\sigma_{_{\sf HA}}(A)$,
otherwise let $d(A,\alpha,x):=\sigma_{_{\sf HA}}(B)\to d(C,\alpha,x)$,
\item $A=B\wedge C$. If $\alpha\nVdash_\tinysub{\sf max} B$ then $d(A,\alpha,x):=d(B,\alpha,x)$, else $d(A,\alpha,x):=d(C,\alpha,x)$,
\item $A=B\vee C$. $d(A,\alpha,x):=d(B,\alpha,x)\vee d(C,\alpha,x)$. \end{itemize} \end{definition} \noindent In the following lemma, we use definition of $\sigma_{_l}(A,x)$ from \Cref{Sec-ExLePr}: \begin{lemma}\label{Lemma-1.69st Properties of Solovay Function}
Let $A$ be a modal proposition, $\alpha\in K$ such that $\alpha\nVdash_\tinysub{\sf max} A$. Then
$$\hbox{\sf HA}{} _0\vdash \sigma_{_l}(A,x)\to d(A,\alpha,x)$$ \end{lemma} \begin{proof} Use induction on $A$.
\end{proof} \begin{lemma}\label{Lemma-1.7st Properties of Solovay Function} Let $A$ be a modal proposition. Then there exists some provably {\em (}in $\hbox{\sf HA}{} ${\em)} total recursive function $g_{_A}$ such that for any $\alpha\in K$ with $\alpha\nVdash_\tinysub{\sf max} A$ we have $$\hbox{\sf HA}{} \vdash\Box_x A\emptycommand \ra\Box_{g_{_A}(x)}d(A,\alpha,g_{_A}(x))$$ \end{lemma} \begin{proof} Use \Cref{Lemma-sigma_l translation} and \Cref{Lemma-1.69st Properties of Solovay Function}. \end{proof}
\noindent Let $\hbox{\sf HA}{} \vdash A$, for arbitrary $A$ in the language of arithmetic. Then by the compactness theorem, one could deduce that $\hbox{\sf HA}{} _n\vdash A$ for some $n\in \omega$. In the following definition of the $n_1$ and $n_2$, we make use of this fact. \noindent Define $m\in\omega$ as the maximum of the following $n_i$'s: \begin{itemize} \item $n_1$. By \Cref{Lemma-1.7st Properties of Solovay Function} and the compactness theorem, we can find some $n_1$ such that for each $B\in {\sf Sub}(\Gamma)$, $g_{_B}$ is provably total in $\hbox{\sf HA}{} _{n_1}$. \item $n_2$. For each $\alpha\in K$ and $B\in {\sf Sub}(\Gamma)$ such that $\alpha\nVdash_\tinysub{\sf max} B$, by \Cref{Lemma-1.7st Properties of Solovay Function} and the compactness theorem, there exists some $n$ such that $\hbox{\sf HA}{} _n$ proves the desired sentence of the Lemma. Let $n_2$ be the maximum of such $n$. \item $n_3$. By \Cref{Lemma-Reflection refinement}, for each $\alpha\in K$, there exists some provably (in $\hbox{\sf HA}{} $) total function $h_\alpha$, such that $h_\alpha(x)\geq x$ and $\hbox{\sf HA}{} \vdash \Box_{h_\alpha(x)}(\Box_x\neg(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)\rightarrow \neg(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand))$. Hence by the compactness theorem, there exists some $n_\alpha\in\omega$ such that $h_\alpha$ is provably total in $\hbox{\sf HA}{} _{n_\alpha}$ and $${\hbox{\sf HA}{} _{n_\alpha}\vdash \Box_{h_\alpha(x)}(\Box_x\neg(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)\rightarrow \neg(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand))}$$ Let
$n_3:=\max\{n_\alpha \mid \alpha\in K\}$. \end{itemize} Then define $\hat{g}_{_B}(x)$ as the maximum of $g_{_B}(x)$, $m$, and $x$. Fix some $B\in {\sf Sub}(\Gamma)$. We define the provably (in $\hbox{\sf HA}{} _m$) total recursive function $f_{_B}$ by induction on the complexity of $B$: $$f_{_B}(x):= \begin{cases}
\max(X)& \text{\ \ if }X=\{h_\alpha(f_{_C}(\hat{g}_{_B}(x)))\ \mid\ C\in {\sf Sub}(B) , C\neq B, \alpha\in K \}\neq\emptyset\\ \hat{g}_{_B}(x) & \text{\ \ else} \end{cases} $$ where $h_\alpha$ is as we stated in the definition of $n_3$. From the above definition, one can observe that for each atomic $C\in {\sf Sub}(\Gamma)$, the set $X$ is empty. Hence we have $f_{_C}(x)=x$. Since each non-atomic formula $B$ has some atomic sub-formula $C$, one can deduce that $f_{_B}(x)\geq \hat{g}_{_B}(x)\geq \max(x,m)$. Moreover, all of the above functions are provably total in $\hbox{\sf HA}{} _m$.
\begin{lemma}\label{Lemma-1.84st Properties of Solovay Function} Let $B,E\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $ and $\beta\in K$, such that $\beta\nVdash_\tinysub{\sf max} B$, $\beta\nVdash_\tinysub{\sf max} E$ and ${B\in{\sf Sub}(E)}$. Then
\begin{equation}\label{Eq-7} \hbox{\sf HA}{} _m\vdash [ F(f_{_E}(x))\,\mathcal{R}\,\beta\wedge\Box_x E\emptycommand ] \to \Box_{f_{_E}(x)} \left( (L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\to\neg d(B,\beta,\hat{g}_{_E}(x)) \right) \end{equation} \end{lemma} \begin{proof} We prove \cref{Eq-7} by induction on the complexity of $B$. As induction hypothesis, assume that for any sub-formula $C$ of $B$ ($C\neq B$) and any $E'\in {\sf Sub}(\Gamma)\cap \hbox{\sf TNNIL}{} $ and $\gamma\in K$, such that $C\in{\sf Sub}(E')$ and $\gamma\nVdash_\tinysub{\sf max} C,E'$,
we have \begin{equation*} \hbox{\sf HA}{} _m\vdash \left( F(f_{_{E'}}(x))\,\mathcal{R}\,\gamma\wedge\Box_x E'\emptycommand \right) \to \Box_{f_{_{E'}}(x)} \left( (L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)\to\neg d(C,\gamma,\hat{g}_{_{E'}}(x)) \right) \end{equation*} We consider different cases. \begin{itemize}[leftmargin=*] \item $B$ is atomic. Then $d(B,\beta,\hat{g}_{_B}(x))=\sigma(B)$
and the desired result holds by definition of the substitution $\sigma$ and $\beta\nVdash B$
and also by
\Cref{Lemma-Properties of Solovay's Function} \cref{1Lemma-Properties of Solovay's Function},
\item $B=\Box C$. Then $d(B,\beta,\hat{g}_{_B}(x))=\sigma_{_{\sf HA}}(B)=\Box \sigma_{_{\sf HA}}(C)$. Since $\beta\nVdash_\tinysub{\sf max}\Box C$, there exists some $\gamma$ such that $\beta\,\mathcal{R}\,\gamma\nVdash_\tinysub{\sf max} C$. Then, by induction hypothesis, $$\hbox{\sf HA}{} _m\vdash\left(F\left(f_{_C}\left(x\right)\right)\,\mathcal{R}\,\gamma \wedge\Box_x C\emptycommand\right)\rightarrow \Box_{f_{_C}(x)} \left( (L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand) \to \neg d(C,\gamma,\hat{g}_{_C}(x))
\right)$$
By \Cref{Lemma-1.7st Properties of Solovay Function}, we have
$\hbox{\sf HA}{} _m\vdash \Box_x C\emptycommand\to\Box_{f_{_C}(x)}d(C,\gamma,\hat{g}_{_C}(x))$. Hence $$\hbox{\sf HA}{} _m\vdash{\left(L=\beta \wedge\Box C\emptycommand\right)}\ra
{\Box \neg\left(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand\right)}$$
By \Cref{Lemma-Properties of Solovay's Function.4}, we have
$\hbox{\sf HA}{} _m\vdash \neg \left(L=\beta \wedge\Box C\emptycommand\right)$. Hence by \Cref{Lemma-bounded Sigma completeness}, $\hbox{\sf HA}{} _0 \vdash \Box_m \neg \left(L=\beta \wedge\Box C\emptycommand\right)$. Since $f_{_B}(x)\geq m$, we have $\hbox{\sf HA}{} _m\vdash \Box_{f_{_B}(x)} \left((L=\beta \wedge\Box \varphi_{_\beta}\emptycommand)\to \neg d(B,\beta,\hat{g}_{_B}(x))\right)$, which implies \cref{Eq-7}. \item $B=C\to D$. In this case $\beta\Vdash C\in{\sf NOI}$, $\beta\nVdash_\tinysub{\sf max} D$
and $d(B,\beta,\hat{g}_{_B}(x))=\sigma_{_{\sf HA}}(C)\to d(D,\beta,\hat{g}_{_B}(x))$. Hence, by induction hypothesis, \begin{equation*} \hbox{\sf HA}{} _m\vdash\left(F\left(f_{_E}\left(x\right)\right)\,\mathcal{R}\,\beta \wedge\Box_x E\emptycommand\right)\rightarrow
\Box_{f_{_E}(x)} \left( (L=\beta\wedge\Box\varphi_{_\beta}\emptycommand) \to \neg d(D,\beta,\hat{g}_{_E}(x))
\right) \end{equation*} Then by \Cref{Lemma-1.6st Properties of Solovay Function},
$$ \hbox{\sf HA}{} _m\vdash\left(F\left(f_{_E}\left(x\right)\right)\,\mathcal{R}\,\beta \wedge\Box_x E\emptycommand\right)
\rightarrow
\Box_{f_{_E}(x)}\left(
(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\to\neg(C\emptycommand\to d(D,\beta,\hat{g}_{_E}(x))) \right)$$
\item $B=C\wedge D$. Since $\beta\nVdash_\tinysub{\sf max} B$, either $\beta\nVdash_\tinysub{\sf max} C$ or $\beta\nVdash_\tinysub{\sf max} D$ holds.
We only treat the case that ${\beta\nVdash_\tinysub{\sf max} C}$. The other case is similar. Assume that $\beta\nVdash_\tinysub{\sf max} C$. Then by definition, $d(B,\beta,y)=d(C,\beta,y)$.
Now the induction hypothesis for $C$, directly implies the desired result, i.e. \cref{Eq-7}.
\item $B=C\vee D$. This case is the interesting one. We have 4 sub-cases: (1) $\beta\nVdash_\tinysub{\sf max} C$ and $\beta\nVdash_\tinysub{\sf max} D$,
(2)\nolinebreak \ not $\beta\nVdash_\tinysub{\sf max} C$ and $\beta\nVdash_\tinysub{\sf max} D$, (3) $\beta\nVdash_\tinysub{\sf max} C$ and not $\beta\nVdash_\tinysub{\sf max} D$,
(4) not $\beta\nVdash_\tinysub{\sf max} C$ and not $\beta\nVdash_\tinysub{\sf max} D$. We only treat the case (3) here. Other cases can be treated
similarly. Assume that the case (3) occurs. By definition,
${d(B,\beta,\hat{g}_{_E}(x))}={d(C,\beta,\hat{g}_{_E}(x))\vee\Box_{\hat{g}_{_E}(x)}D\emptycommand}$. From the induction hypothesis for $C$, \begin{equation}\label{Eq-17} \hbox{\sf HA}{} _m\vdash \left( F(f_{_E}(x))\,\mathcal{R}\,\beta\wedge\Box_x E\emptycommand\right) \to
\Box_{f_{_E}(x)} \left((L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\to\neg d(C,\beta,\hat{g}_{_E}(x)) \right) \end{equation} So it is enough to show that \begin{equation}\label{Eq-18} \hbox{\sf HA}{} _m\vdash \left( F(f_{_E}(x))\,\mathcal{R}\,\beta\wedge\Box_x E\emptycommand\right) \to
\Box_{f_{_E}(x)} \left((L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\to\neg \Box_{\hat{g}_{_E}(x)} D\emptycommand \right) \end{equation} Since $\beta\nVdash D$ and not $\beta\nVdash_\tinysub{\sf max} D$, there exists some $\gamma\gneqq\beta$ such that $\gamma\nVdash_\tinysub{\sf max} D$. If $\beta\,\mathcal{R}\,\gamma$, then we can repeat the reasoning as in the case $B=\Box C$. So assume that $\beta\NR\gamma$. By the induction hypothesis for $D$ and $\gamma$, we have \begin{equation*} \hbox{\sf HA}{} _m\vdash \left( F(f_{_D}(x))\,\mathcal{R}\,\gamma\wedge\Box_x D\emptycommand\right) \to
\Box_{f_{_D}(x)} \left((L=\gamma\wedge\Box \varphi_{_\gamma}\emptycommand)\to\neg d(D,\gamma,\hat{g}_{_D}(x)) \right) \end{equation*} On the other hand, by \Cref{Lemma-1.7st Properties of Solovay Function}, we have \begin{equation*} \hbox{\sf HA}{} _m\vdash \Box_x D\emptycommand\to \Box_{f_{_D}(x)}d(D,\gamma,\hat{g}_{_D}(x)) \end{equation*} Hence \begin{equation}\label{Eq-19} \hbox{\sf HA}{} _m\vdash \left( F(f_{_D}(x))\,\mathcal{R}\,\gamma\wedge\Box_x D\emptycommand\right) \to
\Box_{f_{_D}(x)} \neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand) \end{equation} We argue inside $\hbox{\sf HA}{} _m$. Assume $F(f_{_E}(x))\,\mathcal{R}\,\beta$ and $\Box_x E\emptycommand$. Since $f_{_E}(x)\geq f_{_D}(\hat{g}_{_E}(x))$, by the assumption of $F(f_{_E}(x))\,\mathcal{R}\,\beta$, we have $F(f_{_D}(\hat{g}_{_E}(x)))\,\mathcal{R}\,\gamma$, and by \Cref{Lemma-bounded Sigma completeness}, we get $\Box_m(F(f_{_D}(\hat{g}_{_E}(x)))\,\mathcal{R}\,\gamma)$. Hence if we substitute $\hat{g}_{_E}(x)$ for $x$ in \cref{Eq-19}, we may deduce \begin{equation}\label{Eq-21} \Box_m\left(\Box_{\hat{g}_{_E}(x)}D\emptycommand\rightarrow \Box_{f_{_D}(\hat{g}_{_E}(x))}\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)\right) \end{equation} Now we work inside $\Box_{f_{_E}(x)}$. We have $F(f_{_E}(x))\,\mathcal{R}\,\beta$. Assume $\Box_{\hat{g}_{_E}(x)}D\emptycommand$ and $L=\beta$ and $\Box\varphi_{_\beta}\emptycommand$. We should deduce $\bot$. From $\Box_{\hat{g}_{_E}(x)}D\emptycommand$ and \cref{Eq-21}, we have $\Box_{t(x)}\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)$, in which $t(x):=f_{_D}(\hat{g}_{_E}(x))$. So there exists some $y_1$ such that ${\sf Proof}_{{\sf HA}_{t(x)}}(y_1,\ulcorner \neg(L=\gamma\wedge\varphi_{_\gamma}\emptycommand) \urcorner)$. Also by $L=\beta$, there exists some $y_2\geq y_1$ such that $\forall{z\geq y_2}F(z)=\beta$. Take some $y$ greater than $\langle2,\gamma\rangle*y_2$ and $t(x)$. If $r(\beta,y+1)\leq t(x)$, then $\Box_{t(x)}\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$. Now, since $ f_{_E}(x)\geq h_\beta(t(x))$ and we are working in $\Box_{f_{_E}(x)}$, by \Cref{Lemma-Reflection refinement}, we have $\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$ and hence $\bot$. If $t(x)<r(\beta,y+1)$, since $r(\gamma,y+1)\leq t(x)$, then by the recursive definition of $F$, $F(y+1)=\gamma$, which contradicts $L=\beta$. \end{itemize} \end{proof}
\begin{corollary}\label{Lemma-1.9st Properties of Solovay Function} For each $B\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $ and $\beta\in K$ such that $\beta\nVdash_\tinysub{\sf max} B$, $$\hbox{\sf HA}{} _m\vdash\left(F\left(f_{_B}\left(x\right)\right)\,\mathcal{R}\,\beta \wedge\Box_x B\emptycommand\right)\rightarrow \Box_{f_{_B}(x)} \neg\left(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand\right)$$ \end{corollary} \begin{proof} Use \Cref{Lemma-1.84st Properties of Solovay Function} for $E=B$. Then by \cref{Eq-7} and \Cref{Lemma-1.7st Properties of Solovay Function}, one can deduce the desired result. \end{proof}
\begin{theorem}\label{Lemma-2st Properties of Solovay's Function} For each $B\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $ and $\alpha\in K$ such that $\alpha\nVdash \Box B$, $$\hbox{\sf HA}{} \vdash L\!=\!\alpha\ra\neg\Box B\emptycommand $$ \end{theorem} \begin{proof} From $\alpha\nVdash\Box B$, we conclude that there exists some $\beta\in K$ such that $\alpha\,\mathcal{R}\,\beta$ and $\beta\nVdash_\tinysub{\sf max} B$. Now \Cref{Lemma-1.9st Properties of Solovay Function} implies $\hbox{\sf HA}{} \vdash (L=\alpha\wedge\Box B\emptycommand)\rightarrow \Box\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$. On the other hand, by \Cref{Lemma-Properties of Solovay's Function.4}, $\hbox{\sf HA}{} \vdash L=\alpha\to \neg \Box\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$. Hence $\hbox{\sf HA}{} \vdash (L=\alpha\wedge\Box B\emptycommand)\ra\bot$, as desired. \end{proof}
\subsection{The Solovay function is a constant function}\label{sec-54} In this subsection, we will show that $L=\alpha_0$ is a true statement in the standard model (\Cref{Lemma-limit is root}). This fact is necessary for showing that for any $\alpha\in K$, the theory $L=\alpha+\hbox{\sf PA}{} $ is consistent. \begin{lemma}\label{Lemma-3th Properties of Solovay's Function} For each $\alpha\lneqq\beta\in K$ with $\alpha\NR\beta$, $$\hbox{\sf HA}{} \vdash\exists{x}F(x)=\alpha\ra\Box^+\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$$ \end{lemma} \begin{proof} By $\Pi_2$ conservativity of $\hbox{\sf PA}{} $ over $\hbox{\sf HA}{} $, it is enough to prove the above assertion in $\hbox{\sf PA}{} $ instead of $\hbox{\sf HA}{} $. We work inside $\hbox{\sf PA}{} $. Fix some $x$ such that $F(x)=\alpha$. Then for each $y\leq x$, we have $F(y)\preccurlyeq\alpha$. Now, work inside $\Box^+$. Assume $L=\beta$ and $\Box\varphi_{_\beta}\emptycommand$. Then there exists some minimum $z$ such that $F(z+1)=\beta$. So there exists some $\delta$ such that $F(z)=\delta$. Since $F(x)=\alpha$, we have $\beta\gneqq\delta\geq\alpha$. Hence $\delta\NR\beta$. So by recursive definition of $F$, $r(\beta,z+1)<r(\delta,z+1)$ and $F(r(\beta,z+1))\,\mathcal{R}\,\beta$. Since $\alpha\NR\beta$, we have
$F(r(\beta,z+1))\precneqq F(x)=\alpha$,
which implies $r(\beta,z+1)< x$. Since $x\leq z$, we have $r(\beta,z+1)<z$ and hence $\Box_x\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$. Thus by \Cref{Lemma-Reflection}, $\neg(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)$, that is a contradiction. \end{proof}
\begin{lemma}\label{Lemma-1st} For any $\beta\in K$ and $B\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $, \begin{itemize} \item if $\beta\Vdash B$, then $\hbox{\sf HA}{} \vdash (L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\ra B\emptycommand$, \item if $\beta\nVdash B$ and any occurrence of $\ra$ in $B$ is in the scope of some $\Box$ {\em($B\in{\sf NOI}$)}, then ${\hbox{\sf HA}{} \vdash(L=\beta \wedge\Box\varphi_{_\beta}\emptycommand)\ra\neg B\emptycommand}$. \end{itemize} \end{lemma} \begin{proof} We prove both items by induction on the complexity of $B$. \begin{itemize} \item $B$ is atomic. Then, by definition of the substitution $\sigma$, $\hbox{\sf HA}{} \vdash B\emptycommand\lr\bigvee_{\gamma\Vdash B}\exists{x}{F}(x)=\gamma$. If $\beta\Vdash B$, then $\hbox{\sf HA}{} \vdash L=\beta\ra B\emptycommand$. If $\beta\nVdash B$, then for each $\gamma\Vdash B$, we have $\gamma\nleq\beta$, and hence by
\Cref{Lemma-Properties of Solovay's Function} \cref{1Lemma-Properties of Solovay's Function}, $\hbox{\sf HA}{} \vdash L=\beta\ra\neg\exists{x}{F}(x)=\gamma$ . Hence $\hbox{\sf HA}{} \vdash L=\beta\ra\neg B\emptycommand$. \item $B$ is a conjunction or disjunction. We have the desired conclusions by the induction hypotheses. \item $B=\Box C$. First assume $\beta\Vdash\Box C$. Then, by definition of $\varphi_{_\beta}\emptycommand$, \ $C\emptycommand$ is a conjunct of $\varphi_{_\beta}\emptycommand$, and then $\hbox{\sf HA}{} \vdash(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\ra B\emptycommand$. For the other side, assume $\beta\nVdash\Box C$. Then \Cref{Lemma-2st Properties of Solovay's Function} implies ${\hbox{\sf HA}{} \vdash L=\beta\ra\neg\Box C\emptycommand}$. \item $B=C\ra D$. Since $B$ is $\hbox{\sf TNNIL}{} $, we have $C\in{\sf NOI}$. First assume that $\beta\Vdash C\ra D$. If $\beta\Vdash C$, then $\beta\Vdash D$, and hence by the induction hypothesis, $$\hbox{\sf HA}{} \vdash(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\ra(C\emptycommand\ra D\emptycommand).$$ If $\beta\nVdash C$, then again by the induction hypothesis, $\hbox{\sf HA}{} \vdash(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\ra\neg C\emptycommand$, and hence $\hbox{\sf HA}{} \vdash(L=\beta\wedge\Box\varphi_{_\beta}\emptycommand)\ra(C\emptycommand\ra D\emptycommand)$. \end{itemize} \end{proof}
\begin{lemma}\label{Lemma-1.5st} Let $\alpha \in K$ and for each $\beta\geq\alpha$, we have $\hbox{\sf HA}{} \vdash\beta\,\mathcal{R}\, L\rightarrow \varphi_{_\beta}\emptycommand$. Then for each $\beta\geq\alpha$ and $\gamma\gneqq\beta$ such that $\beta\NR\gamma$, we have $$\hbox{\sf HA}{} \vdash\exists{x}F(x)=\beta\ra\Box^+L\not=\gamma$$ \end{lemma} \begin{proof} Fix some $\beta\geq\alpha$. We use induction on $\gamma$. Suppose that for each $ \gamma_0\gneqq\gamma\gneqq\beta$ with $\beta\NR\gamma_0$, we have $\hbox{\sf HA}{} \vdash{\exists{x}F(x)=\beta}\ra{\Box^+L\not=\gamma_0}$. Then \begin{align*} \hbox{\sf HA}{} &\vdash \exists{x}F(x)=\beta \rightarrow \Box^+\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)
&\text{\Cref{Lemma-3th Properties of Solovay's Function}}&\\ \hbox{\sf HA}{} &\vdash {\exists{x}F(x)=\beta} \rightarrow {\Box^+((\exists{x}F(x)=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)\ra L\neq\gamma)}& &\\ \hbox{\sf HA}{} &\vdash \exists{x}F(x)=\beta\rightarrow \Box^+((\exists{x}F(x)=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)\rightarrow \gamma\,\mathcal{R}\, L) & \text{induction hypothesis and neatness}&\\ \hbox{\sf HA}{} &\vdash \exists{x}F(x)=\beta\rightarrow \Box(\exists{x}F(x)=\gamma\ra(\Box\varphi_{_\gamma}\emptycommand\rightarrow \varphi_{_\gamma}\emptycommand)) & \text{hypothesis of lemma and \Cref{Lemma-Conservativity of HA}}&\\ \hbox{\sf HA}{} &\vdash \exists{x}F(x)=\beta\rightarrow \Box(\exists{x}F(x)=\gamma\rightarrow \Box\varphi_{_\gamma}\emptycommand) & \text{L\"{o}b's axiom, $\Sigma_1$-completeness of $\hbox{\sf HA}{} $}& \end{align*} This in combination with $\hbox{\sf HA}{} \vdash \exists{x}F(x)=\beta\rightarrow \Box^+\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)$ implies $$\hbox{\sf HA}{} \vdash {\exists{x}F(x)=\beta\ra\Box^+L\neq\gamma}$$ \end{proof}
\begin{lemma}\label{Lemma-1.7st} For any $\gamma\in K_0$, $\hbox{\sf PA}{} \vdash \exists{x}F(x)=\gamma\ra\Box^+\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)$. \end{lemma} \begin{proof} We work inside $\hbox{\sf PA}{} $. Assume $\exists{x}{F}(x)=\gamma$. There exists a minimum $x_0$ such that $F(x_0)=\gamma$. Then by recursive definition of $F$, we have $F(x)\prec F(x_0)$ for all $x< x_0$, and $F(x_0\dot{-}1)=\beta$, and one of the following cases holds: \begin{enumerate} \item $\beta\,\mathcal{R}\,\gamma$ and $r(\gamma,x_0)\leq x_0$, by definition of $r$, we can deduce $$\exists{x\leq x_0}\,{\sf Proof}_{_{\sf PA}}(x,\ulcorner \neg(L=\gamma\wedge\varphi_{_\gamma}\emptycommand)\urcorner)$$ and then $\Box^+\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)$.
\item $\beta\NR\gamma, \beta\prec\gamma$ and $r(\gamma,x_0)<r(\beta,x_0)$. Because $r(\beta,x_0)\leq x_0+1$, we can deduce $r(\gamma,x_0)\leq x_0$. By repeating the above argument, we get $\Box^+\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)$. \end{enumerate} \end{proof}
\begin{lemma}\label{Lemma-2st} Let $\beta \in K_0$ and suppose that for each $\gamma\geq \beta$ we have $\hbox{\sf HA}{} \vdash\gamma\,\mathcal{R}\, L\rightarrow \varphi_{_\gamma}\emptycommand$. Then for each $\gamma\geq \beta$, we have $\hbox{\sf HA}{} \vdash\exists{x}{F}(x)=\gamma\ra\Box\varphi_{_\gamma}\emptycommand$. \end{lemma} \begin{proof} By \Cref{Lemma-1.7st}, $\hbox{\sf PA}{} \vdash\exists{x}{F}(x)=\gamma\rightarrow \Box^+\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)$. Then \Cref{Lemma-Conservativity of HA} implies $$ \hbox{\sf HA}{} \vdash\exists{x}{F}(x)=\gamma\rightarrow \Box^+\neg(L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)
$$ Hence \begin{align*} \hbox{\sf HA}{} \vdash\exists{x}{F}(x)=\gamma & \ra\Box^+(\Box\varphi_{_\gamma}\emptycommand\ra L\not=\gamma)\\ & \ra\Box^+(\Box\varphi_{_\gamma}\emptycommand\ra L\succ \gamma) \tag{by $\Sigma_1$-completeness and
\Cref{Lemma-Properties of Solovay's Function}} \\ & \ra\Box^+(\Box\varphi_{_\gamma}\emptycommand\rightarrow \gamma\,\mathcal{R}\, L) \tag{by \Cref{Lemma-1.5st} and \Cref{Lemma-Properties of Solovay's Function}}\\ & \ra\Box(\Box\varphi_{_\gamma}\emptycommand\rightarrow \gamma\,\mathcal{R}\, L) \tag{by $\Pi_2$-conservativity of $\hbox{\sf PA}{} $ over $\hbox{\sf HA}{} $}\\ & \ra\Box(\Box\varphi_{_\gamma}\emptycommand\ra\varphi_{_\gamma}\emptycommand) \tag{by hypothesis} \\ & \ra\Box\varphi_{_\gamma}\emptycommand \tag{L\"{o}b's axiom} \end{align*} \end{proof}
\begin{lemma}\label{Lemma-3st} Let $\beta \in K_0$ and suppose that for each $\gamma\geq \beta$ we have $\hbox{\sf HA}{} \vdash\gamma\,\mathcal{R}\, L \ra\varphi_{_\gamma}\emptycommand$. Then for each $\gamma\geq\beta$
and $B\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $, $\gamma\Vdash B$ implies $\hbox{\sf HA}{} \vdash\exists{x}{F}(x)\!=\!\gamma\ra B\emptycommand$. \end{lemma}
\begin{proof} We prove this by induction on the frame $(K,\lneqq)$ with reverse order. Let some $\gamma\geq\beta$ and as the (first) induction hypothesis, assume that for each $\gamma_0\gneqq \gamma$ and $B\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $, if $\gamma_0\Vdash B$, then ${\hbox{\sf HA}{} \vdash\exists{x}F(x)=\gamma_0\ra B\emptycommand}$. We will show that for each $B\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $, if $\gamma\Vdash B$, then ${\hbox{\sf HA}{} \vdash\exists{x}F(x)=\gamma\ra B\emptycommand}$. We prove this by a (second) induction on the complexity of $B\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $. Let some $B\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $
and $\gamma\Vdash B$ and as the (second) induction hypothesis, assume that for each $C\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $ with lower complexity than \nolinebreak$B$ (i.e. $C$ is a strict sub-formula of $B$) such that $\gamma\Vdash C$, we have $\hbox{\sf HA}{} \vdash\exists{x}F(x)=\gamma\ra C\emptycommand$. We will show $\hbox{\sf HA}{} \vdash\exists{x}F(x)=\gamma\ra B\emptycommand$. We have following cases. \begin{itemize}[leftmargin=*] \item $B$ is atomic. It is trivial by definition of $B\emptycommand$. \item $B$ is conjunction or disjunction. The result follows easily by (second) induction hypothesis. \item $B=\Box C$. Suppose that $\gamma\Vdash\Box C$. Then by definition of $\varphi_{_\gamma}$, we have $\hbox{\sf HA}{} \vdash\Box\varphi_{_\gamma}\emptycommand\ra\Box C\emptycommand$. Now the result is a consequence of \Cref{Lemma-2st}. \item $B=C\ra D\in {\sf Sub}(\Gamma)\cap\hbox{\sf TNNIL}{} $ and $C$ does not have an occurrence of implication which is not in the scope of any box. Suppose that $\gamma\Vdash C\ra D$. There are two sub-cases. \begin{enumerate}[leftmargin=*] \item $\gamma\Vdash C$. Then, by (second) induction hypothesis, we can derive $\hbox{\sf HA}{} \vdash\exists{x}{F}(x)=\gamma\ra D\emptycommand$, and hence $\hbox{\sf HA}{} \vdash\exists{x}{F}(x)=\gamma\rightarrow (C\emptycommand\ra D\emptycommand)$. \item $\gamma\nVdash C$. Then \Cref{Lemma-1st} implies that $\hbox{\sf HA}{} \vdash (L=\gamma\wedge\Box\varphi_{_\gamma}\emptycommand)\ra\neg C\emptycommand$, and by \Cref{Lemma-2st}, ${\hbox{\sf HA}{} \vdash L=\gamma\ra\neg C\emptycommand}$. Now we have $\hbox{\sf HA}{} \vdash C\emptycommand\ra L\not=\gamma$ and hence $${\hbox{\sf HA}{} \vdash(\exists{x}{F}(x)=\gamma\wedge C\emptycommand)\ra L\not=\gamma}$$ So ${\hbox{\sf PA}{} \vdash(\exists{x}{F}(x)=\gamma\wedge C\emptycommand)\ra L\not=\gamma}$. 
Then by \Cref{Lemma-Properties of Solovay's Function} \cref{3Lemma-Properties of Solovay's Function} $${\hbox{\sf PA}{} \vdash(\exists{x}{F}(x)=\gamma\wedge C\emptycommand)\ra L\succ \gamma}$$
On the other hand, $C$ is implication-free and $C\emptycommand$ is $\Sigma_1$, so by \Cref{Lemma-Conservativity of HA}, we have $\hbox{\sf HA}{} \vdash(\exists{x}{F}(x)=\gamma\wedge C\emptycommand)\ra L\succ \gamma$. For arbitrary $\gamma_0\gneqq \gamma$, we have $\gamma_0\Vdash C\ra D$. So by the first induction hypothesis, we can derive $\hbox{\sf HA}{} \vdash\exists{x}F(x)=\gamma_0\rightarrow (C\ra D)\emptycommand$. By definition of $\gamma\prec L$, we have $\hbox{\sf HA}{} \vdash\gamma\prec L\rightarrow (C\ra D)\emptycommand$. Hence $\hbox{\sf HA}{} \vdash(\exists{x}{F}(x)=\gamma\wedge C\emptycommand)\rightarrow (C\emptycommand\ra D\emptycommand)$, which implies $\hbox{\sf HA}{} \vdash\exists{x}{F}(x)=\gamma\rightarrow (C\emptycommand\ra D\emptycommand)$. \end{enumerate} \end{itemize} \end{proof}
\begin{lemma}\label{Lemma-4st} For any $\alpha\in K$, \ $\hbox{\sf HA}{} \vdash \alpha\,\mathcal{R}\, L \ra\varphi_{_\alpha}\emptycommand$. \end{lemma} \begin{proof} Our proof is by reverse induction on the frame $(K,\lneqq)$. As the induction hypothesis, assume that for each $\beta \gneqq \alpha$, we have $\hbox{\sf HA}{} \vdash\beta\,\mathcal{R}\, L\ra\varphi_{_\beta}\emptycommand$. For each $\beta$ with $\alpha\,\mathcal{R}\,\beta$, by definition of $\varphi_{_\alpha}$, we have $\beta\Vdash\varphi_{_\alpha}$. Hence by the induction hypothesis and \Cref{Lemma-3st}, $\hbox{\sf HA}{} \vdash\exists{x}{F}(x)=\beta\ra\varphi_{_\alpha}\emptycommand$. Then $\hbox{\sf HA}{} \vdash\bigvee_{\alpha\,\mathcal{R}\,\beta}\exists{x}{F}(x)=\beta\ra\varphi_{_\alpha}\emptycommand$, which implies $\hbox{\sf HA}{} \vdash \alpha\,\mathcal{R}\, L\ra\varphi_{_\alpha}\emptycommand$. \end{proof}
As a direct consequence of \Cref{Lemma-2st} and \Cref{Lemma-4st} we have the following result.
\begin{corollary}\label{corollar-4st&2st} For any $\alpha\in K_0$, $\hbox{\sf HA}{} \vdash \exists{x}F(x)=\alpha\to\Box\varphi_{_\alpha}\emptycommand$. \end{corollary}
\begin{lemma}\label{Lemma-4.5st} Let $\alpha\in K$ and $ \mathbb{N}\models L=\alpha$. Then $\hbox{\sf HA}{} \vdash\alpha\prec L\rightarrow \alpha\,\mathcal{R}\, L$. \end{lemma} \begin{proof} Assume $\alpha\lneqq \beta$ and $\alpha\NR\beta$. \Cref{Lemma-4st} implies that for each $\gamma\geq \alpha$, $\hbox{\sf HA}{} \vdash\gamma\,\mathcal{R}\, L\ra\varphi_{_\gamma}\emptycommand$. So \Cref{Lemma-1.5st} implies $\hbox{\sf HA}{} \vdash L=\alpha\rightarrow \Box^+ L\neq\beta$. Then $\mathbb{N}\models L=\alpha\rightarrow \Box^+L\neq\beta$, which implies $\hbox{\sf PA}{} \vdash L\neq\beta$. Hence $\hbox{\sf PA}{} \vdash \alpha\prec L\ra\alpha\,\mathcal{R}\, L$. Now by $\Pi_2$-conservativity of $\hbox{\sf PA}{} $ over $\hbox{\sf HA}{} $, $\hbox{\sf HA}{} \vdash \alpha\prec L\ra\alpha\,\mathcal{R}\, L$. \end{proof}
\begin{lemma}\label{Lemma-5st} Let $\alpha\in K$ and $\mathbb{N}\models L\!=\!\alpha$. Then $\hbox{\sf PA}{} \cup\{L\!=\!\alpha,\Box\varphi_{_\alpha}\emptycommand\}$ is consistent. \end{lemma} \begin{proof} Suppose not, i.e., $\hbox{\sf PA}{} \vdash(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)\ra\bot$. Then $\hbox{\sf PA}{} \vdash\Box\varphi_{_\alpha}\emptycommand\ra L\not=\alpha$. By $\Sigma_1$-completeness of $\hbox{\sf PA}{} $, we have $\hbox{\sf PA}{} \vdash\exists{x}{F}(x)=\alpha$ and then by \Cref{Lemma-Properties of Solovay's Function}, $\hbox{\sf PA}{} \vdash\Box\varphi_{_\alpha}\emptycommand\ra L\succ \alpha$. By $\Pi_2$-conservativity of $\hbox{\sf PA}{} $ over $\hbox{\sf HA}{} $, $\hbox{\sf HA}{} \vdash\Box\varphi_{_\alpha}\emptycommand\ra L\succ \alpha$. \Cref{Lemma-4.5st} implies $\hbox{\sf HA}{} \vdash\Box\varphi_{_\alpha}\emptycommand\rightarrow \alpha\,\mathcal{R}\, L$. Then by
\Cref{Lemma-4st}, $\hbox{\sf HA}{} \vdash\Box\varphi_{_\alpha}\emptycommand\ra\varphi_{_\alpha}\emptycommand$. Then by L\"{o}b's theorem, $\hbox{\sf HA}{} \vdash\varphi_{_\alpha}\emptycommand$. Hence $\hbox{\sf HA}{} \vdash\Box\varphi_{_\alpha}\emptycommand$. That implies $\hbox{\sf PA}{} \vdash L\not=\alpha$, a contradiction with $\mathbb{N}\models L=\alpha$. \end{proof}
\begin{theorem}\label{Lemma-limit is root} $\mathbb{N}\models L=\alpha_0$. \end{theorem} \begin{proof} Suppose not, i.e., $\mathbb{N}\models L\not=\alpha_0$. Then by
\Cref{Lemma-Properties of Solovay's Function}
\cref{2Lemma-Properties of Solovay's Function,3Lemma-Properties of Solovay's Function}, $\mathbb{N}\models L=\alpha$, for some $\alpha>\alpha_0$. By
\Cref{Lemma-1.7st}, $\mathbb{N}\models\exists{x}F(x)=\alpha\ra\Box^+ \neg(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)$. This implies $\hbox{\sf PA}{} \vdash\neg(L=\alpha\wedge\Box\varphi_{_\alpha}\emptycommand)$, a contradiction with \Cref{Lemma-5st}. \end{proof}
\begin{corollary}\label{corollar-limit is root} For any $\alpha\leq\beta\in K$ we have $(L=\alpha\wedge \varphi_{_\alpha}\emptycommand) \rhd (L=\beta\wedge \varphi_{_\beta}\emptycommand)$. \end{corollary} \begin{proof} We should show that $\mathbb{N}\models (L=\alpha\wedge \varphi_{_\alpha}\emptycommand) \rhd (L=\beta\wedge \varphi_{_\beta}\emptycommand)$. This is a direct consequence of \Cref{Lemma-limit is root}, \Cref{Lemma-1.5st Properties of Solovay's Function} and \Cref{Lemma-Properties of Solovay's Function.4}. \end{proof}
\subsection{Proof of the main theorem} In this subsection, we will prove \Cref{Theorem-Main tool}.
With the general method of constructing Kripke models for $\hbox{\sf HA}{} $, invented by Smory\'nski \cite{Smorynski-Troelstra},
interpretability of theories containing $\hbox{\sf PA}{} $ plays an important role in constructing Kripke models of \nolinebreak$\hbox{\sf HA}{} $.
\begin{definition}\label{Definition-Iframe} A triple $\mathcal{I}:=(K,<,T)$ is called an I-frame iff it has the following properties: \begin{itemize} \item $(K,<)$ is a finite tree, \item $T$ is a function from $K$ to arithmetical r.e. consistent theories containing $\hbox{\sf PA}{} $, \item if $\beta<\gamma$, then $T_\beta$ interprets $T_\gamma$ $(\, T_\beta\rhd T_\gamma\,)$. \end{itemize} \end{definition}
\begin{theorem}\label{Theorem-Smorynski's general method of Kripke model construction} For every I-frame $\mathcal{I}:=(K,<,T)$ there exists a first-order Kripke model $\mathcal{K}={(K,<,\mathfrak{M})}$ such that $\mathcal{K}\Vdash \hbox{\sf HA}{} $ and moreover $\mathfrak{M}(\alpha)\models T_\alpha$, for any $\alpha\in K$.
Note that the I-frame and the Kripke model share the same frame $(K,<)$. \end{theorem} \begin{proof} See \cite[pages~372--377]{Smorynski-Troelstra}. For a more detailed proof of a generalization of this theorem, see \cite[Theorem~4.8]{ArMo14}. \end{proof}
\begin{lemma}\label{Lemma-Kripke simulation} For any $\beta\in K_0$, let $T_\beta:=\hbox{\sf PA}{} \cup \{L\!=\!\beta\}$ and define $\mathcal{I}:=(K_0,<_0,T)$. Then \begin{enumerate} \item $\mathcal{I}$ is an I-frame. \item There exists a first-order Kripke model $\mathcal{K}_1:=(K_0,<_0,\mathfrak{M})$ of $\hbox{\sf HA}{} $ such that for all $\beta\in K_0$ and $B\in {\sf Sub}(\Gamma)$, $\mathcal{K}_0,\beta\Vdash B$ iff $\mathcal{K}_1,\beta\Vdash \sigma_{_{\sf HA}}(B)$. \end{enumerate} \end{lemma} \begin{proof} \begin{enumerate}[leftmargin=*] \item \Cref{corollar-limit is root} implies that for each $\alpha\leq\beta$, $T_\alpha\rhd T_\beta$. Moreover, \Cref{Lemma-5st} implies that $T_{\alpha_0}$ is consistent. These finish the requirements of $\mathcal{I}$ to be an $I$-frame. \item By \Cref{Theorem-Smorynski's general method of Kripke model construction}, we can find a first-order Kripke model $\mathcal{K}_1:=(K_0,<_0,\mathfrak{M})$, such that for all $\beta\in K_0$, $\mathfrak{M}(\beta)\models T_\beta$ and $\mathcal{K}_1\Vdash\hbox{\sf HA}{} $. Now we prove, by induction on the complexity of $B\in {\sf Sub}(\Gamma)$, that for all $\beta\in K_0$, $\mathcal{K}_0,\beta\Vdash B$ iff $\mathcal{K}_1,\beta\Vdash \sigma_{_{\sf HA}}(B)$. \begin{itemize}[leftmargin=*] \item $B$ is atomic. Then by definition, $\sigma_{_{\sf HA}}(B):=\bigvee_{\gamma\Vdash B}L\succeq\gamma$. For left to right direction, let $\beta\Vdash B$. By second part of \Cref{Lemma-Properties of Solovay's Function}, $T_\beta\vdash\bigvee_{\gamma\Vdash B}L\succeq\gamma$ and hence $\mathfrak{M}(\beta)\models\bigvee_{\gamma\Vdash B}L\succeq\gamma$. Since $\bigvee_{\gamma\Vdash B}L\succeq\gamma$ is $\Sigma_1$-formula, by \Cref{Lemma-Sigma-local-global}, we have
$\beta\Vdash\bigvee_{\gamma\Vdash B}L\succeq\gamma$.
For the other way around, let $\beta\nVdash B$. Then \Cref{Lemma-Properties of Solovay's Function} \cref{2Lemma-Properties of Solovay's Function} implies $T_\beta\vdash\neg L\succeq\gamma$, for all $\gamma\Vdash B$. This implies that $\mathfrak{M}(\beta)\not\models L\succeq\gamma$, which by use of \Cref{Lemma-Sigma-local-global} implies $\beta\nVdash L\succeq\gamma$. So $\beta\nVdash\bigvee_{\gamma\Vdash B}L\succeq\gamma$. \item $B$ is conjunction, disjunction or implication. Result follows easily by induction hypothesis and inductive definition of $\Vdash$. \item $B=\Box C$. Let $\beta\Vdash B$. Then by definition of $\varphi_{_\beta}$ and \Cref{corollar-4st&2st}, $T_\beta\vdash \sigma_{_{\sf HA}}(\Box C)$, and so $\mathfrak{M}(\beta)\models \sigma_{_{\sf HA}}(\Box C)$. By \Cref{Lemma-Sigma-local-global}, $\beta\Vdash \sigma_{_{\sf HA}}(\Box C)$. For the other way around, suppose $\beta\nVdash\Box C$. Then \Cref{Lemma-2st Properties of Solovay's Function} implies $T_\beta\vdash\neg \sigma_{_{\sf HA}}(\Box C)$ and hence $\mathfrak{M}(\beta)\not\models \sigma_{_{\sf HA}}(\Box C)$. Then \Cref{Lemma-Sigma-local-global} implies $\beta\nVdash \sigma_{_{\sf HA}}(\Box C)$. \end{itemize} \end{enumerate} \end{proof}
\begin{proof}{(of \Cref{Theorem-Main tool})} Let $\sigma$ be the substitution that we have defined at the beginning of this section and $\mathcal{K}_1$ be as we have by \Cref{Lemma-Kripke simulation}. Then the assertion of \Cref{Lemma-Kripke simulation} implies that for any $A\in {\sf Sub}(\Gamma)$ we have: \begin{equation*} \mathcal{K}_0,\alpha\Vdash A\quad \text{ iff }\quad \mathcal{K}_1,\alpha\Vdash \sigma_{_{\sf HA}}(A) \end{equation*} \end{proof}
\section{The $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $}\label{Section-Sigma Provability} In this section, we will show that $\mathcal{PL}_\sigma(\hbox{\sf HA}{} )=\sf{iH}_{\sigma}$ (see \Cref{Definition-Provability Logic}).
Moreover, we will show that $\sf{iH}_{\sigma}$ is decidable. As a by-product of \Cref{Theorem-Main tool}, we show that $\hbox{\sf HA}{} +\Box\bot$ has de Jongh property. Before we continue with the statement and proof of soundness and completeness theorems, let us apply our techniques presented in this paper to \Cref{example01,example02} in \cref{sec-introduction}.
\begin{example}\label{example1}\em Let $A=\Box (p\vee q)\to(\Box p\vee\Box q)$. We will refute the modal proposition $A$ from the provability logic (and $\Sigma_1$-provability logic) of $\hbox{\sf HA}{} $.
First of all note that the Kripke model $\mathcal{K}_0$ from \Cref{example01} is a counter-model for $A$. Then by \Cref{Theorem-Main tool}, there exists some first-order Kripke model $\mathcal{K}_1\Vdash\hbox{\sf HA}{} $ and some $\Sigma_1$-substitution \nolinebreak$\sigma$ such that $\mathcal{K}_1\nVdash \sigma_{_{\sf HA}}(A)$. Hence we have $\hbox{\sf HA}{} \nvdash \sigma_{_{\sf HA}}(A)$, in other words, if we define $B:=\sigma(p)$ and $C:=\sigma(q)$, then we have $\hbox{\sf HA}{} \nvdash\Box(B\vee C)\to(\Box B\vee\Box C)$. \\
\end{example}
\begin{example}\label{example2}\em In this example, we show how to refute $A=\neg\neg\Box(\neg\neg p\to p)\to\Box(\neg\neg p\to p)$ from the provability logic of $\hbox{\sf HA}{} $ and also from the $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $. First we compute the $\hbox{\sf TNNIL}{} $-approximation of $A$, i.e.\ $A^+$. By the $\hbox{\sf TNNIL}{} $-algorithm (\Cref{subsubsec-TNNIL^-algorith}), $A^+\!=\!{\Box(p\vee\neg p)\vee\neg\Box(p\vee \neg p)}$. Then $\mathcal{K}_0$ from \Cref{example02} is a countermodel for $A^+$. Then by \Cref{Theorem-Main tool}, there exists some first-order Kripke model $\mathcal{K}_1\Vdash\hbox{\sf HA}{} $ and some $\Sigma_1$-substitution $\sigma$ such that $\mathcal{K}_1\nVdash \sigma_{_{\sf HA}}(A^+)$. Hence $\hbox{\sf HA}{} \nvdash \sigma_{_{\sf HA}}(A^+)$. Since $\hbox{\sf HA}{} \vdash \sigma_{_{\sf HA}}(\Box A\lr\Box A^+)$ (\Cref{corollar-NNIL properties}), we can deduce $\hbox{\sf HA}{} \nvdash \sigma_{_{\sf HA}}(A)$. \end{example}
\subsection*{Soundness} \begin{theorem}\label{Theorem-Soundness} $\sf{iH}_{\sigma}$ is sound for $\Sigma_1$-arithmetical interpretations in $\hbox{\sf HA}{} $, i.e. $\sf{iH}_{\sigma}\subseteq\mathcal{PL}_{\sigma}(\hbox{\sf HA}{} )$. \end{theorem} \begin{proof} We must show that $\Sigma_1$-interpretations of all axioms of $\sf{iH}_{\sigma}$ hold in $\hbox{\sf HA}{} $. For a proof that $\Sigma_1$-interpretations of axioms $\Box A\ra\Box B$ with $A\brt B$, hold in $\hbox{\sf HA}{} $, see \cite{Visser02}, Theorem 10.2. The other axioms are well-known or obvious, except for the Extended Leivant's principle, which holds by \Cref{Theorem-Soundness of HA for lle+}. \end{proof}
The following corollary, shows that $\hbox{\sf LC}{}$ captures the $\hbox{\sf TNNIL}{} $ part of the theory $\sf{iH}_{\sigma}$. In other words, as far as we are interested in $\hbox{\sf TNNIL}{} $ propositions, we can work with the rather simple theory $\hbox{\sf LC}{}$ instead of $\sf{iH}_{\sigma}$. \begin{corollary}\label{Corollary-TNNIL-equaipotency} For any $\hbox{\sf TNNIL}{} $ modal proposition $A$, $\hbox{\sf LC}{}\vdash A$ if and only if $\sf{iH}_{\sigma}\vdash A$. \end{corollary} \begin{proof} The deduction from left to right is by \Cref{Theorem-TNNIL Conservativity of LC over LLe+} and the fact that $\sf{iH}_{\sigma}\vdash \hbox{\sf{LLe}}{}^+$, which holds by definition of $\sf{iH}_{\sigma}$. For the right to left direction, assume that $\hbox{\sf LC}{}\nvdash A$. Then by \Cref{Theorem-Propositional Completeness LC}, there exists some Kripke model $\mathcal{K}_0$ such that $\mathcal{K}_0\nVdash A$. Then by \Cref{Theorem-Main tool}, we can find
some $\Sigma_1$-substitution $\sigma$ and some first-order Kripke model $\mathcal{K}_1$, such that $\mathcal{K}_1\Vdash \hbox{\sf HA}{} $
and $\mathcal{K}_1\nVdash\sigma_{_{\sf HA}}(A)$. Hence $\hbox{\sf HA}{} \nvdash \sigma_{_{\sf HA}}(A)$. Now \Cref{Theorem-Soundness} implies that $\sf{iH}_{\sigma}\nvdash A$, as desired. \end{proof}
\subsection*{Completeness} \begin{theorem}\label{Theorem HA-Completeness} $\sf{iH}_{\sigma}$ is complete for $\Sigma_1$-arithmetical interpretations in $\hbox{\sf HA}{} $, i.e. $$\mathcal{PL}_\sigma(\hbox{\sf HA}{} )\subseteq\sf{iH}_{\sigma}$$ \end{theorem} \begin{proof} Let $\sf{iH}_{\sigma}\nvdash A$. Then by \Cref{corollar HA-NNIL approximation is propositionally equivalent}, $\sf{iH}_{\sigma}\nvdash A^-$. Then by \Cref{Theorem-NNIL Crucial Properties} \cref{1Theorem-NNIL Crucial Properties}, $\sf{iH}_{\sigma}\nvdash (A^-)^*$, which implies $\sf{iH}_{\sigma}\nvdash A^+$. Hence $\hbox{\sf{LLe}}{}^+\nvdash A^+$ and by \Cref{Theorem-TNNIL Conservativity of LC over LLe+}, $\hbox{\sf LC}{}\nvdash A^+$. Then by \Cref{Theorem-Propositional Completeness LC}, there exists some Kripke model $\mathcal{K}_0$ such that $\mathcal{K}_0\nVdash A^+$. Then by \Cref{Theorem-Main tool}, we can find
some $\Sigma_1$-substitution $\sigma$ and some first-order Kripke model $\mathcal{K}_1$, such that $\mathcal{K}\Vdash \hbox{\sf HA}{} $
and $\mathcal{K}\nVdash\sigma_{_{\sf HA}}(A^+)$. Hence $\hbox{\sf HA}{} \nvdash \sigma_{_{\sf HA}}(A^+)$. Now \Cref{corollar-NNIL properties} \cref{1corollar-NNIL properties} implies $\hbox{\sf HA}{} \nvdash \sigma_{_{\sf HA}}(A)$, as desired. \end{proof}
Although the axioms of the theory $\sf{iH}_{\sigma}$ sound very complicated, we have the following surprising result.
\begin{theorem} The $\Sigma_1$-provability logic of $\hbox{\sf HA}{} $ {\em ($\sf{iH}_{\sigma}$)} is decidable. \end{theorem} \begin{proof} Let $A$ be a given modal proposition. We explain how to decide $\sf{iH}_{\sigma}\vdash A$ or $\sf{iH}_{\sigma}\nvdash A$. First, by the $\hbox{\sf TNNIL}{} $ algorithm, compute $A^+$. Then by \Cref{Corollary-decidability of LC}, we can decide $\hbox{\sf LC}{}\vdash A^+$. If $\hbox{\sf LC}{}\vdash A^+$, we say ``yes'' to $\sf{iH}_{\sigma}\vdash A$, and otherwise we say ``no'' to $\sf{iH}_{\sigma}\vdash A$. \Cref{Corollary-TNNIL-equaipotency} guarantees the validity of the algorithm.
\end{proof} \begin{theorem}\label{Theorem-de jongh property for HA+boxbot} $\hbox{\sf HA}{} +\bo\bot$ has the de Jongh property, i.e.\ for every non-modal proposition $A$, $\hbox{\sf IPC} \vdash A$ iff for every arithmetical substitution $\sigma$, $\hbox{\sf HA}{} +\bo\bot\vdash\sigma(A)$. \end{theorem} \begin{proof} If $\hbox{\sf IPC} \vdash A$, we clearly have $\hbox{\sf HA}{} \vdash \sigma(A)$, for all $\sigma$, and hence $\hbox{\sf HA}{} +\bo\bot\vdash \sigma(A)$.
For the other way around, let $\hbox{\sf IPC} \nvdash A$. Hence by \Cref{Theorem-NNIL Crucial Properties} \cref{1Theorem-NNIL Crucial Properties}, $\hbox{\sf IPC} \nvdash A^*$. Then by \Cref{Theorem-nonmodal conservativity of LC over IPC}, $\hbox{\sf LC}{}\nvdash\bo\bot\ra A^*$. This implies that $\hbox{\sf{LLe}}{}^+\nvdash\bo\bot\ra A^*$. Hence by \Cref{Theorem-Main tool}, there exists some substitution $\sigma$ such that $\hbox{\sf HA}{} +\bo\bot\nvdash \sigma(A)$, as desired. \end{proof}
\end{document} | arXiv | {
"id": "1409.5699.tex",
"language_detection_score": 0.6519680619239807,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{abstract} Let $G$ be a simply connected semisimple group over $\mathbb{C}$. We show that a certain involution of an open subset of the affine Grassmannian of $G$, defined previously by Achar and the author, corresponds to the action of the nontrivial Weyl group element of $\mathrm{SL}(2)$ on the framed moduli space of $\mathbb{G}_m$-equivariant principal $G$-bundles on $\mathbb{P}^2$. As a result, the fixed-point set of the involution can be partitioned into strata indexed by conjugacy classes of homomorphisms $N\to G$ where $N$ is the normalizer of $\mathbb{G}_m$ in $\mathrm{SL}(2)$. When $G=\mathrm{SL}(r)$, the strata are Nakajima quiver varieties $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})$ of type D. \end{abstract}
\maketitle
\section{Introduction} \label{sect:intro}
\subsection{Outline} \label{ss:outline}
Let $G$ be a simply connected semisimple algebraic group over $\mathbb{C}$. The ind-group $G[z]:=G(\mathbb{C}[z])$ consists of variety morphisms $g(z):\mathbb{G}_a\to G$, where $z$ represents the coordinate on $\mathbb{G}_a\cong\mathbb{A}^1$. We obtain three closed sub-ind-varieties of $G[z]$ by imposing conditions related to the group structures on $\mathbb{G}_a$ and $G$: \begin{itemize} \item Requiring that $g(0)=1$, we obtain the first congruence subgroup $G[z]_1$. \item Requiring that $g(0)=1$ and $g(-z)=g(z)^{-1}$, we obtain, say, $Y\subset G[z]_1$. \item Requiring that $g(z)$ is a group homomorphism, we obtain a copy of the nilpotent cone $\mathcal{N}$ of $G$, via the embedding $e:\mathcal{N}\hookrightarrow Y:x\mapsto \exp(xz)$. \end{itemize}
It is known that $G[z]_1$ and $\mathcal{N}$ parametrize suitable kinds of principal $G$-bundles on a rational surface. The main result of this article provides an analogous interpretation for $Y$, forming the middle vertical arrow of the commutative diagram: \begin{equation}\label{eqn:intro-diag} \vcenter{\xymatrix@C=20pt{ \ \mathcal{N}\ \ar@{^{(}->}[r]^-{e} \ar@{<-}[d] & \ Y\ \ar@{^{(}->}[r] \ar@{<-}[d] & G[z]_1 \ar@{<-}[d]_(.5){\Psi}\\ \mathrm{Bun}_G(\mathbb{A}^2/\mathrm{SL}(2))\, \ar@{^{(}->}[r] & \mathrm{Bun}_G(\mathbb{A}^2/N)\, \ar@{^{(}->}[r] & \mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m) }} \end{equation} Here, following the notation of~\cite{bf}, for any subgroup $\Gamma$ of $\mathrm{SL}(2)$, $\mathrm{Bun}_{G}(\mathbb{A}^2/\Gamma)$ denotes the moduli space of $\Gamma$-equivariant principal $G$-bundles on $\mathbb{P}^2$ equipped with a trivialization on the line at infinity $\mathbb{P}^2\smallsetminus\mathbb{A}^2$. The left-hand vertical arrow in~\eqref{eqn:intro-diag} refers to Kronheimer's result~\cite[Theorem 1]{kron} concerning the case where $\Gamma=\mathrm{SL}(2)$ (equivalently, by~\cite{donaldson}, the case of `$\mathrm{SU}(2)$-equivariant instantons on $\mathbb{R}^4$'). The right-hand vertical arrow refers to the result of Braverman and Finkelberg~\cite[Theorem 5.2]{bf} concerning the case where $\Gamma$ is the diagonal subgroup $\mathbb{G}_m$ of $\mathrm{SL}(2)$ (`$S^1$-equivariant instantons'). For the middle vertical arrow we take $\Gamma$ to be \[ N:=N_{\mathrm{SL}(2)}(\mathbb{G}_m)=\langle\mathbb{G}_m,[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]\rangle. \]
The vertical arrows in~\eqref{eqn:intro-diag} are $G$-equivariant bijective (ind-)variety morphisms which induce variety isomorphisms between corresponding strata. The relevant $G$-stable stratifications on the (ind-)varieties in~\eqref{eqn:intro-diag} will be recalled or introduced below. A unifying principle is that in the column corresponding to $\Gamma\subseteq\mathrm{SL}(2)$, the strata are indexed by certain $G$-conjugacy classes of homomorphisms $\Gamma\to G$.
\subsection{The main theorem}
Henceforth we will set $z=t^{-1}$ and identify $G[t^{-1}]_1$ with the `big opposite Bruhat cell' in the affine Grassmannian $\mathsf{Gr}$ of $G$, namely \[ \mathsf{Gr}_{0}:=G[t^{-1}]_1G[t]/G[t]\subset\mathsf{Gr}:=G[t,t^{-1}]/G[t]. \] Here $G[t,t^{-1}]$ is short for $G(\mathbb{C}[t,t^{-1}])$, and $G[t]$ and other such notation used hereafter should be interpreted similarly.
In~\cite{ah}, Achar and the author defined an involution $\iota$ of $\mathsf{Gr}_{0}=G[t^{-1}]_1$ by \begin{equation} \iota(g(t^{-1}))=g(-t^{-1})^{-1}. \end{equation} The fixed-point set $(\mathsf{Gr}_{0})^\iota$ of this involution is the ind-variety named $Y$ above.
Recall that $\mathsf{Gr}$ is the disjoint union of certain irreducible quasiprojective varieties $\mathsf{Gr}^\lambda$ (the `Schubert cells'), where $\lambda$ runs over the set $\Lambda^+$ of dominant coweights of $G$. Consequently, $\mathsf{Gr}_{0}$ is the disjoint union of the irreducible quasi-affine varieties \[ \mathsf{Gr}_0^\lambda:=\mathsf{Gr}^\lambda\cap\mathsf{Gr}_{0}. \] This is the stratification of $\mathsf{Gr}_0$ mentioned in \S\ref{ss:outline}.
It was shown in~\cite[Lemma 2.2]{ah} that $\iota(\mathsf{Gr}_0^\lambda)=\mathsf{Gr}_0^{-w_0\lambda}$, where $w_0$ denotes the longest element of the Weyl group. Hence $Y=(\mathsf{Gr}_{0})^\iota$ is the disjoint union of the varieties $(\mathsf{Gr}_0^\lambda)^\iota$, where $\lambda$ runs over the set \[
\Lambda_1^+:=\{\lambda\in\Lambda^+\,|\,w_0\lambda=-\lambda\}. \] For general $\lambda\in\Lambda_1^+$, $(\mathsf{Gr}_0^\lambda)^\iota$ may be disconnected, and it is a natural problem to describe the connected components.
As mentioned above, Braverman and Finkelberg~\cite[Theorem 5.2]{bf} defined a $G$-equivariant bijection \begin{equation} \Psi:\mathrm{Bun}_{G}(\mathbb{A}^2/\mathbb{G}_m)\to\mathsf{Gr}_{0} \end{equation} which restricts, for each $\lambda\in\Lambda^+$, to an isomorphism of varieties \begin{equation} \label{eqn:bf-isom} \mathrm{Bun}_{G}^\lambda(\mathbb{A}^2/\mathbb{G}_m)\simto\mathsf{Gr}_0^\lambda, \end{equation} where $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$ denotes the connected component of $\mathrm{Bun}_{G}(\mathbb{A}^2/\mathbb{G}_m)$ defined by requiring that the action of $\mathbb{G}_m$ on the fibre of the $G$-bundle at the origin of $\mathbb{A}^2$ gives rise to a homomorphism $\mathbb{G}_m\to G$ in the $G$-conjugacy class of $\lambda$.
The moduli space $\mathrm{Bun}_{G}(\mathbb{A}^2/\mathbb{G}_m)$ carries a natural action of $N_{\mathrm{GL}(2)}(\mathbb{G}_m)$. Our main result identifies the corresponding action on $\mathsf{Gr}_{0}=G[t^{-1}]_1$, and gives a new explanation of the significance of the involution $\iota$: namely, it corresponds to the action of the nontrivial Weyl group element of $\mathrm{SL}(2)$. \begin{thm} \label{thm:normalizer} Under the bijection $\Psi:\mathrm{Bun}_{G}(\mathbb{A}^2/\mathbb{G}_m)\to\mathsf{Gr}_{0}$, \begin{enumerate} \item for any $\alpha,\beta\in\mathbb{C}^\times$, $[\begin{smallmatrix}\alpha&0\\0&\beta\end{smallmatrix}]\in\mathrm{GL}(2)$ acts on $\mathsf{Gr}_{0}$ by $g(t^{-1})\mapsto g(\alpha\beta t^{-1})$; \item the element $[\begin{smallmatrix}0&1\\1&0\end{smallmatrix}]\in\mathrm{GL}(2)$ acts on $\mathsf{Gr}_{0}$ by $g(t^{-1})\mapsto g(t^{-1})^{-1}$. \end{enumerate} Consequently, the element $[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]\in\mathrm{SL}(2)$ acts on $\mathsf{Gr}_{0}$ via the involution $\iota$. \end{thm} \noindent Part (1) of Theorem~\ref{thm:normalizer} is an easy consequence of the definition of $\Psi$ in~\cite{bf}, but part (2) is not so obvious, because that definition breaks the symmetry between the horizontal and vertical directions in $\mathbb{A}^2$. To prove Theorem~\ref{thm:normalizer}, we will reformulate the definition of $\Psi$ using explicit descriptions of $G$-bundles via transition functions.
\begin{rmk} \label{rmk:slice} The statement of Braverman and Finkelberg~\cite[Theorem 5.2]{bf} is more general than we have recalled above: they actually showed an isomorphism between a more general subvariety $\mathsf{Gr}_\mu^\lambda$ in the affine Grassmannian, where $\mu$ is not necessarily the zero coweight, and a moduli space $\mathrm{Bun}_{G,\mu}^\lambda(\mathbb{A}^2/\mathbb{G}_m)$ defined using the embedding $\mathbb{G}_m\hookrightarrow G\times\mathrm{SL}(2)$ where the projection onto the $G$ factor is $\mu$. In the case where $w_0\mu=-\mu$, one could ask for an analogue of Theorem~\ref{thm:normalizer}(2) describing the action of the non-identity component of $N_{G\times\mathrm{GL}(2)}(\mathbb{G}_m)$. We have not investigated this. \end{rmk}
From Theorem~\ref{thm:normalizer} we deduce moduli-space interpretations of $(\mathsf{Gr}_{0})^\iota$ and $(\mathsf{Gr}_0^\lambda)^\iota$ in terms of the group $N=N_{\mathrm{SL}(2)}(\mathbb{G}_m)=\langle\mathbb{G}_m,[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]\rangle$, and a decomposition of $(\mathsf{Gr}_0^\lambda)^\iota$ as a disconnected union. \begin{thm} \label{thm:normalizer2} The bijection $\Psi$ restricts to a bijection \[ \mathrm{Bun}_G(\mathbb{A}^2/N)\to(\mathsf{Gr}_{0})^\iota. \] For any $\lambda\in\Lambda^+_1$, $\Psi$ restricts to an isomorphism of varieties \[ \mathrm{Bun}_{G}^\lambda(\mathbb{A}^2/N)\simto(\mathsf{Gr}_0^\lambda)^\iota, \] where $\mathrm{Bun}_{G}^\lambda(\mathbb{A}^2/N):=\mathrm{Bun}_G(\mathbb{A}^2/N)\cap\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$. This isomorphism matches up the corresponding pieces of disconnected union decompositions on both sides: \[ \mathrm{Bun}_G^\lambda(\mathbb{A}^2/N)=\coprod_{\xi\in\Xi(\lambda)} \mathrm{Bun}_{G}^\xi(\mathbb{A}^2/N)\quad \text{ and }\quad (\mathsf{Gr}_0^\lambda)^\iota=\coprod_{\xi\in\Xi(\lambda)} (\mathsf{Gr}_0)^{\iota,\xi}, \] where $\Xi(\lambda)$ is the set of $G$-conjugacy classes of homomorphisms $N\to G$ whose restriction to $\mathbb{G}_m$ is $G$-conjugate to $\lambda$. \end{thm} \noindent Here, and throughout the paper, we use the symbol $\coprod$ for a disconnected union of varieties, in contrast to the symbol $\bigsqcup$ which merely denotes a disjoint union.
The definition of $\mathrm{Bun}_G^\xi(\mathbb{A}^2/N)$ is analogous to that of $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$: namely, it is the subvariety of $\mathrm{Bun}_G(\mathbb{A}^2/N)$ defined by requiring that the action of $N$ on the fibre of the $G$-bundle at the origin in $\mathbb{A}^2$ gives rise to a homomorphism $N\to G$ in the $G$-conjugacy class $\xi$. See Proposition~\ref{prop:sigma} for a definition of $(\mathsf{Gr}_0)^{\iota,\xi}=\Psi(\mathrm{Bun}_G^\xi(\mathbb{A}^2/N))$ that is independent of moduli spaces.
Note that $(\mathsf{Gr}_0)^\iota$ is the disjoint union of the varieties $(\mathsf{Gr}_0)^{\iota,\xi}$ as $\xi$ runs over the set $\Xi=\bigsqcup_{\lambda\in\Lambda_1^+}\Xi(\lambda)$ of $G$-conjugacy classes of homomorphisms $N\to G$. This is the stratification of $(\mathsf{Gr}_0)^\iota$ mentioned in \S\ref{ss:outline}. For general $G$, we do not know for which $\xi\in\Xi$ the varieties $(\mathsf{Gr}_0)^{\iota,\xi}$ are nonempty, or whether they can be disconnected.
\subsection{ADHM descriptions}
When $G$ is a classical group, we have the Atiyah--Drinfel'd--Hitchin--Manin (ADHM) description~\cite{ahdm,barth,donaldson} of $\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)$, developed further by Nakajima (see~\cite{nak1,nak2,nak3} for $\mathrm{SL}(r)$ and~\cite[Appendix A]{nak4} for other classical groups). Thus Theorem~\ref{thm:normalizer2} gives rise to such a description of $(\mathsf{Gr}_0)^{\iota,\xi}$.
In particular, consider the case where $G=\mathrm{SL}(r)$, so that principal $G$-bundles are equivalent to rank-$r$ vector bundles with trivial determinant bundle. In this case, for any $\lambda\in\Lambda^+$, the ADHM description of $\mathrm{Bun}_{G}^\lambda(\mathbb{A}^2/\mathbb{G}_m)$ identifies it with a Nakajima quiver variety $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})$ of type $\mathrm{A}$; see \S\ref{ss:Gm-case}. As explained by Nakajima in~\cite[Section 2(v)]{nak4}, one should think of the underlying graph as being the McKay graph of the subgroup $\mathbb{G}_m\subset\mathrm{SL}(2)$, hence of type $\mathrm{A}_{\infty}$ (infinite in both directions), although the dimension vectors $\mathbf{v}$ and $\mathbf{w}$ are zero outside a finite set of vertices. Combining this with the result of Braverman and Finkelberg, one recovers the result of Mirkovi\'c and Vybornov~\cite{mvy-cr} that the varieties $\mathsf{Gr}_0^\lambda$ for $G=\mathrm{SL}(r)$ are isomorphic to quiver varieties of type $\mathrm{A}$; see Proposition~\ref{prop:mv}. (This works for more general $\mathsf{Gr}_\mu^\lambda$ as in Remark~\ref{rmk:slice}.)
Applying similar reasoning to our Theorem~\ref{thm:normalizer2}, we obtain:
\begin{thm} \label{thm:intro-glr} Suppose $G=\mathrm{SL}(r)$. Then the nonempty varieties $(\mathsf{Gr}_0)^{\iota,\xi}$ are connected, and are isomorphic to certain Nakajima quiver varieties $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})$ of type $\mathrm{D}$, described explicitly in Proposition~\ref{prop:glr}. \end{thm}
\noindent Here the graph underlying the quiver varieties is really the McKay graph of the subgroup $N\subset\mathrm{SL}(2)$, which is of type $\mathrm{D}_{\infty}$; see \S\ref{ss:subgroups}.
In the case where $G=\mathrm{SL}(2)$, $\Lambda_1^+=\Lambda^+=\{m\alpha\,|\,m\in\mathbb{N}\}$ where $\alpha$ denotes the positive coroot of $G$. It is known that $\mathsf{Gr}_0^{m\alpha}$ is isomorphic to the intersection ${\mathcal{O}}_{(2m)}\cap\mathcal{S}_{(m,m)}$ where ${\mathcal{O}}_{(2m)}$ denotes the regular nilpotent orbit in $\mathfrak{sl}(2m)$ and $\mathcal{S}_{(m,m)}$ denotes the Slodowy slice to an orbit of Jordan type $(m,m)$ in $\mathfrak{sl}(2m)$; see \S\ref{ss:proof-gl2}.
\begin{thm} \label{thm:intro-gl2} Suppose $G=\mathrm{SL}(2)$. If $m>0$ is even, $(\mathsf{Gr}_0^{m\alpha})^\iota$ is empty; and if $m$ is odd, \[ (\mathsf{Gr}_0^{m\alpha})^\iota \cong {\mathcal{O}}_{(2m)}^{\mathrm{C}_m}\cap\mathcal{S}_{(m,m)}^{\mathrm{C}_m}, \] where ${\mathcal{O}}_{(2m)}^{\mathrm{C}_m}$ denotes the regular nilpotent orbit in the symplectic Lie algebra $\mathfrak{sp}(2m)$, and $\mathcal{S}_{(m,m)}^{\mathrm{C}_m}$ denotes the Slodowy slice to an orbit of Jordan type $(m,m)$ in $\mathfrak{sp}(2m)$. \end{thm}
\subsection{Structure of the paper}
Section~\ref{sect:recollections} contains no new results. We repeat some standard definitions concerning the affine Grassmannian in \S\ref{ss:gr}, recall some of the relations between the affine Grassmannian and the nilpotent cone in \S\ref{ss:achar}, and introduce in \S\ref{ss:sl2} the example of $G=\mathrm{SL}(2)$ which will recur throughout.
In Section~\ref{sect:moduli} we revisit the result of Braverman and Finkelberg~\cite[Theorem 5.2]{bf} concerning $\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)$, reformulating their proof in somewhat more elementary terms in \S\!\S\ref{ss:p1}--\ref{ss:gm-equiv}. This enables a simple proof of Theorem~\ref{thm:normalizer} in \S\ref{ss:proof}. We then deduce Theorem~\ref{thm:normalizer2} in \S\ref{ss:n-equiv}, before setting up some further notation in \S\ref{ss:p2}.
Section~\ref{sec:glr} considers the case $G=\mathrm{SL}(r)$, where the moduli spaces $\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)$ have alternative descriptions in terms of quiver varieties. We first recall the fundamental $\Gamma=\{1\}$ case in \S\ref{ss:1-case}, then discuss the McKay correspondence for reductive subgroups $\Gamma$ of $\mathrm{SL}(2)$ in \S\ref{ss:subgroups} and (a special case of) the definition of quiver varieties in \S\ref{ss:quiver-varieties}, before returning to the study of $\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)$ in \S\ref{ss:gamma-equiv}. Then we consider the $\Gamma=\mathbb{G}_m$ case in detail in \S\ref{ss:Gm-case}, a necessary preliminary for the $\Gamma=N$ case in \S\ref{ss:N-case}, which includes the proof of Theorem~\ref{thm:intro-glr} in Proposition~\ref{prop:glr}. Finally, \S\ref{ss:proof-gl2} gives the proof of Theorem~\ref{thm:intro-gl2}.
\section{Recollections} \label{sect:recollections}
\subsection{Subvarieties of the affine Grassmannian} \label{ss:gr}
As in the introduction, let $G$ be a simply connected semisimple algebraic group over $\mathbb{C}$, and let \[ \mathsf{Gr}:=G[t,t^{-1}]/G[t] \] be its affine Grassmannian, an ind-variety on which $G[t,t^{-1}]$ acts transitively. See~\cite[Proposition 4.6]{laszlosorger} for the reducedness and irreducibility of $\mathsf{Gr}$.
Fix a maximal torus $T$ and a Borel subgroup $B$ of $G$ with $T\subset B$. Let $\Lambda=\Hom(\mathbb{G}_m,T)$ be the coweight lattice of $G$ (written additively, as usual), and let $\Lambda^+\subset\Lambda$ be the set of dominant coweights relative to $B$. The Weyl group $W$ of $(G,T)$ acts on $\Lambda$, and each $W$-orbit has a unique representative in $\Lambda^+$. Since every homomorphism $\mathbb{G}_m\to G$ is $G$-conjugate to one whose image lies in $T$, and two coweights $\mathbb{G}_m\to T$ are $G$-conjugate if and only if they lie in the same $W$-orbit, we can think of $\Lambda^+$ as parametrizing the $G$-conjugacy classes of homomorphisms $\mathbb{G}_m\to G$. Recall that if $\lambda\in\Lambda^+$, then the unique representative in $\Lambda^+$ of the $W$-orbit of $-\lambda$ is $-w_0\lambda$, where $w_0$ denotes the longest element of $W$. In particular, the condition $w_0\lambda=-\lambda$ is equivalent to saying that $\lambda$ and $-\lambda$ are $G$-conjugate.
Let $R\subset\Lambda$ denote the set of coroots of $G$, and $R^+\subset R$ the subset of positive coroots relative to $B$. Since $G$ is assumed to be simply connected, $R$ spans $\Lambda$. We have the usual partial order $\leq$ on $\Lambda$ defined by \[ \mu\leq\lambda \Longleftrightarrow \lambda-\mu\in\mathbb{N} R^+, \] and it is well known that $\lambda\geq 0$ for all $\lambda\in\Lambda^+$.
For any $\lambda\in\Lambda$, let $t^\lambda$ be the element of $T[t,t^{-1}]$ corresponding to $\lambda:\mathbb{G}_m\to T$, and denote the corresponding $G[t]$-orbit in the affine Grassmannian by \[ \mathsf{Gr}^\lambda:=G[t]\,t^\lambda\, G[t]/G[t]\subset\mathsf{Gr}. \] It is well known that each $\mathsf{Gr}^\lambda$ is a smooth irreducible quasiprojective variety, and that $\mathsf{Gr}^{\lambda}=\mathsf{Gr}^\mu$ if and only if $\lambda$ and $\mu$ belong to the same $W$-orbit. Hence \begin{equation} \mathsf{Gr}=\bigsqcup_{\lambda\in\Lambda^+}\mathsf{Gr}^\lambda. \end{equation} For $\lambda\in\Lambda^+$, let $\overline{\mathsf{Gr}}^\lambda$ denote the closure of $\mathsf{Gr}^\lambda$ in $\mathsf{Gr}$, an irreducible projective variety (these are the `Schubert varieties' in $\mathsf{Gr}$). We have \begin{equation} \overline{\mathsf{Gr}}^\lambda=\bigsqcup_{\substack{\mu\in\Lambda^+\\\mu\leq\lambda}}\mathsf{Gr}^\mu, \end{equation} where the union on the right-hand side is finite.
Recall that $G[t^{-1}]_1$ denotes the first congruence subgroup of $G[t^{-1}]$, i.e.\ the kernel of the homomorphism $G[t^{-1}]\to G$ setting $t^{-1}$ to $0$. For $\mu\in\Lambda^+$, define \[ \mathsf{Gr}_\mu:=G[t^{-1}]_1\, t^{w_0\mu}\, G[t]/G[t]\subset\mathsf{Gr}. \] The following intersections are nonempty exactly when $\mu\leq\lambda$: \[ \mathsf{Gr}_\mu^\lambda:= \mathsf{Gr}_\mu\cap\mathsf{Gr}^\lambda,\qquad \overline{\mathsf{Gr}}_\mu^\lambda:=\mathsf{Gr}_\mu\cap\overline{\mathsf{Gr}}^\lambda. \] In fact, $\overline{\mathsf{Gr}}_\mu^\lambda$ is a transverse slice to $\mathsf{Gr}^\mu$ inside $\overline{\mathsf{Gr}}^\lambda$; see~\cite[Lemma 2.9]{bf}. Note that we are following the convention and notation of~\cite{kwwy} for these transverse slices: in~\cite{bf}, a variety isomorphic to what we have called $\mathsf{Gr}_\mu^\lambda$ is written $\mathcal{W}_{G,\mu}^\lambda$.
In this paper we focus on the case $\mu=0$. We have \begin{equation} \mathsf{Gr}_0 = G[t^{-1}]_1 G[t]/G[t] = \bigsqcup_{\lambda\in\Lambda^+}\mathsf{Gr}_0^\lambda, \end{equation} where $\mathsf{Gr}_0^\lambda$ is an open subvariety of $\mathsf{Gr}^\lambda$. Note that the $G$-action on $\mathsf{Gr}$ preserves $\mathsf{Gr}_0$ and each $\mathsf{Gr}_0^\lambda$. For general $\lambda\in\Lambda^+$, $\mathsf{Gr}_0^\lambda$ consists of infinitely many $G$-orbits.
For any $\lambda\in\Lambda^+$ we have \begin{equation} \overline{\mathsf{Gr}}_0^\lambda=\bigsqcup_{\substack{\mu\in\Lambda^+\\\mu\leq\lambda}}\mathsf{Gr}_0^\mu. \end{equation} Note that $\overline{\mathsf{Gr}}_0^\lambda$ is the closure of $\mathsf{Gr}_0^\lambda$ in $\mathsf{Gr}_0$.
As in the introduction, we identify $\mathsf{Gr}_0$ with $G[t^{-1}]_1$ via the isomorphism \begin{equation} G[t^{-1}]_1\simto\mathsf{Gr}_{0}:g(t^{-1})\mapsto g(t^{-1})\,G[t]/G[t]. \end{equation} This isomorphism is $G$-equivariant, where $G$ acts on $G[t^{-1}]_1$ by conjugation. Since $G[t^{-1}]_1$ is an affine ind-variety, so is $\mathsf{Gr}_0$, and each $\overline{\mathsf{Gr}}_0^\lambda$ is an affine variety.
\subsection{Relations to nilpotent orbits} \label{ss:achar}
Let $\mathfrak{g}$ be the Lie algebra of $G$, and $\mathcal{N}\subset\mathfrak{g}$ the nilpotent cone. Recall that the adjoint action of $G$ on $\mathfrak{g}$ preserves $\mathcal{N}$, and $\mathcal{N}$ is the union of finitely many $G$-orbits.
In~\cite{ah}, Achar and the author studied the relationship between these nilpotent orbits and (some of) the varieties $\mathsf{Gr}_0^\lambda$. This involves two main morphisms: the involution $\iota:\mathsf{Gr}_{0}\to\mathsf{Gr}_{0}$ defined in the introduction, and the morphism \[ \pi:\mathsf{Gr}_{0}=G[t^{-1}]_1\to\mathfrak{g} \] obtained by viewing $\mathfrak{g}$ as $\ker(G(\mathbb{C}[t^{-1}]/(t^{-2}))\to G)$. More concretely, if we regard $G$ as a closed subgroup of some $\mathrm{GL}(n)$ so that elements of $G[t^{-1}]_1$ can be written $1+x_1t^{-1}+x_2t^{-2}+\cdots+x_mt^{-m}$ where $x_i\in\mathrm{Mat}(n)$, then $\pi$ is defined by \begin{equation} \pi(1+x_1t^{-1}+x_2t^{-2}+\cdots+x_mt^{-m})=x_1\ \in\mathfrak{g}\subseteq\mathrm{Mat}(n). \end{equation} Note that by definition of $\iota$, we have $\pi\circ\iota=\pi$. Both $\iota$ and $\pi$ are $G$-equivariant.
One of the main results of~\cite{ah} states that for $\lambda\in\Lambda^+$, $\mathsf{Gr}_0^\lambda$ consists of finitely many $G$-orbits if and only if $\pi(\mathsf{Gr}_0^\lambda)\subset\mathcal{N}$, and that moreover these equivalent conditions hold precisely when $\lambda$ is a \emph{small} coweight, meaning that there is no $\alpha\in\Lambda^+\cap R$ such that $2\alpha\leq\lambda$.
As mentioned in the introduction, \cite[Lemma 2.2]{ah} showed that \begin{equation} \iota(\mathsf{Gr}_0^\lambda)=\mathsf{Gr}_0^{-\lambda}=\mathsf{Gr}_0^{-w_0\lambda}\ \text{ for all }\lambda\in\Lambda^+. \end{equation} Write $\Lambda^+_1$ for the set of $\lambda\in\Lambda^+$ satisfying $w_0\lambda=-\lambda$. Thus, when $\lambda\in\Lambda^+_1$, one has an induced involution $\iota$ of $\mathsf{Gr}_0^\lambda$ and can consider the fixed-point subvariety $(\mathsf{Gr}_0^\lambda)^\iota$, which is possibly disconnected. Note that $(\mathsf{Gr}_0^\lambda)^\iota$ is $G$-stable.
As was also mentioned in the introduction, we have a canonical $G$-equivariant embedding \[ e:\mathcal{N}\hookrightarrow(\mathsf{Gr}_{0})^\iota:x\mapsto\exp(xt^{-1}), \] where $\exp:\mathfrak{g}\to G$ is the exponential map (whose restriction to $\mathcal{N}$ is a variety morphism). Note that $e$ is a section of $\pi$ in the sense that $\pi(e(x))=x$.
\begin{rmk} The embedding $e$ is not to be confused with the \emph{Lusztig embeddings} \begin{equation} \label{eqn:embeddings} \begin{split} &\mathcal{N}\hookrightarrow\mathsf{Gr}_{0}:x\mapsto 1+xt^{-1}\text{ and}\\ &\mathcal{N}\hookrightarrow\mathsf{Gr}_{0}:x\mapsto (1-xt^{-1})^{-1}, \end{split} \end{equation} which make sense only when $G=\mathrm{SL}(n)$. The embeddings in~\eqref{eqn:embeddings} are sections of $\pi$ which are interchanged by $\iota$; they were introduced in~\cite{lusztig} (see also~\cite[Section 4.1]{ah}). \end{rmk}
For the nilpotent cone $\mathcal{N}$, the stratification mentioned in \S\ref{ss:outline} is just the stratification into $G$-orbits. These nilpotent orbits ${\mathcal{O}}\subset\mathcal{N}$ are well known to be in bijection with the $G$-conjugacy classes of homomorphisms $\mathrm{SL}(2)\to G$: the bijection maps the $G$-conjugacy class of a homomorphism $\varphi:\mathrm{SL}(2)\to G$ to the $G$-orbit of $(d\varphi)([\begin{smallmatrix}0&1\\0&0\end{smallmatrix}])$. Moreover, a homomorphism $\varphi:\mathrm{SL}(2)\to G$ is determined up to $G$-conjugacy by its restriction to the diagonal subgroup $\mathbb{G}_m\subset\mathrm{SL}(2)$, and this restriction to $\mathbb{G}_m$ is $G$-conjugate to a unique dominant coweight $\lambda:\mathbb{G}_m\to T$, which must be an element of $\Lambda_1^+$. Hence every nilpotent orbit ${\mathcal{O}}$ gives rise to an element $\lambda_{\mathcal{O}}\in\Lambda^+_1$, often displayed visually as the \emph{weighted Dynkin diagram} of ${\mathcal{O}}$.
In $\mathrm{SL}(2)[t,t^{-1}]$ we have the equation \begin{equation} \label{eqn:sl2} \exp([\begin{smallmatrix}0&t^{-1}\\0&0\end{smallmatrix}])=[\begin{smallmatrix}0&1\\-1&t\end{smallmatrix}] [\begin{smallmatrix}t&0\\0&t^{-1}\end{smallmatrix}][\begin{smallmatrix}1&0\\t&1\end{smallmatrix}]. \end{equation} Since $\exp\circ(d\varphi)=\varphi\circ\exp$, applying $\varphi:\mathrm{SL}(2)\to G$ to both sides of~\eqref{eqn:sl2} shows that $e({\mathcal{O}})\subseteq \mathsf{Gr}_0^{\lambda_{\mathcal{O}}}$, and hence \begin{equation} \label{eqn:exp-inclusion} e({\mathcal{O}})\subseteq (\mathsf{Gr}_0^{\lambda_{\mathcal{O}}})^\iota,\ \text{ for all nilpotent orbits ${\mathcal{O}}$ of $G$.} \end{equation} Usually the inclusion in~\eqref{eqn:exp-inclusion} is strict, but as a consequence of~\cite[Proposition 6.6]{ah}: \begin{equation} \label{eqn:exp-equality} \text{If $\lambda_{\mathcal{O}}$ is small, then }e({\mathcal{O}})=(\mathsf{Gr}_0^{\lambda_{\mathcal{O}}})^\iota. \end{equation} Indeed, in this case, $\pi$ induces an isomorphism $(\mathsf{Gr}_0^{\lambda_{\mathcal{O}}})^\iota\simto{\mathcal{O}}$ whose inverse is the restriction of $e$ to ${\mathcal{O}}$.
\begin{rmk} Another phenomenon discovered in~\cite{ah} is that, when $G$ is almost simple and ${\mathcal{O}}\subset\mathcal{N}$ is a nilpotent orbit such that $\lambda_{\mathcal{O}}$ is small and $(\mathsf{Gr}_0^{\lambda_{\mathcal{O}}})^\iota$ is a proper subvariety of $\mathsf{Gr}_0^{\lambda_{{\mathcal{O}}}}$, the quotient of the complement $\mathsf{Gr}_0^{\lambda_{{\mathcal{O}}}}\smallsetminus(\mathsf{Gr}_0^{\lambda_{\mathcal{O}}})^\iota$ by the involution $\iota$ is isomorphic via $\pi$ to a different nilpotent orbit ${\mathcal{O}}'\subset\mathcal{N}$, containing ${\mathcal{O}}$ in its closure. The proof in~\cite{ah} was case-by-case; we hope that the moduli-space interpretation of $\iota$ given by Theorem~\ref{thm:normalizer} will assist in finding a uniform explanation. \end{rmk}
\subsection{An example} \label{ss:sl2}
Suppose that $G=\mathrm{SL}(2)$. In this case we have $\Lambda_1^+=\Lambda^+=\{m\alpha\,|\,m\in\mathbb{N}\}$ where $\alpha$ is the positive coroot of $G$. Note that $m\alpha$ is small only when $m\in\{0,1\}$. The zero and nonzero nilpotent orbits have associated coweights $0$ and $\alpha$ respectively.
When $m=0$, $(\mathsf{Gr}_0^0)^\iota=\mathsf{Gr}_0^0=\{1\}$. When $m>0$, we have \[ \begin{split}
\mathsf{Gr}_0^{m\alpha}&=\{1+x_1t^{-1}+x_2t^{-2}+\cdots+x_mt^{-m}\,|\,x_i\in\mathrm{Mat}(2),\, x_m\neq 0,\\ &\qquad\qquad\qquad\qquad\qquad\det(1+x_1t^{-1}+x_2t^{-2}+\cdots+x_mt^{-m})=1\},\\
(\mathsf{Gr}_0^{m\alpha})^\iota&=\{1+x_1t^{-1}+x_2t^{-2}+\cdots+x_mt^{-m}\,|\,x_i\in\mathrm{Mat}(2),\, x_m\neq 0,\\ &\qquad\qquad\qquad\qquad(1+x_1t^{-1}+x_2t^{-2}+\cdots+x_mt^{-m})\\ &\qquad\qquad\qquad\qquad\qquad\times(1-x_1t^{-1}+x_2t^{-2}-\cdots+(-1)^mx_mt^{-m})=1\}. \end{split} \] The varieties $\overline{\mathsf{Gr}}_0^{m\alpha}=\bigsqcup_{m'\leq m} \mathsf{Gr}_0^{m'\alpha}$ and $(\overline{\mathsf{Gr}}_0^{m\alpha})^\iota=\bigsqcup_{m'\leq m}(\mathsf{Gr}_0^{m'\alpha})^\iota$ are described in the same way, but omitting the condition $x_m\neq 0$. In particular, \begin{equation}
e(\mathcal{N})=\{1+xt^{-1}\,|\,x\in\mathrm{Mat}(2),\,x^2=0\}=(\overline{\mathsf{Gr}}_0^{\alpha})^\iota=\overline{\mathsf{Gr}}_0^{\alpha}, \end{equation} exemplifying~\eqref{eqn:exp-equality}.
\begin{lem} \label{lem:even-empty} $(\mathsf{Gr}_0^{m\alpha})^\iota$ is empty when $m$ is even and positive. \end{lem}
\begin{proof} In the above description of $(\mathsf{Gr}_0^{m\alpha})^\iota$ for $m>0$, the equation forces the off-diagonal entries of $1+x_1t^{-1}+\cdots+x_mt^{-m}$ to be odd functions of $t^{-1}$; it also forces $x_m^2=0$. So if $m$ is even we have the contradictory requirements that $x_m\neq 0$, $x_m^2=0$, and $x_m$ is diagonal. \end{proof}
We will describe $(\mathsf{Gr}_0^{m\alpha})^\iota$ for $m$ odd in \S\ref{ss:proof-gl2}, when we prove Theorem~\ref{thm:intro-gl2}.
\section{Moduli-space interpretations} \label{sect:moduli}
To prove Theorem~\ref{thm:normalizer} (more specifically, part (2) of that statement), we need to revisit the moduli-space interpretation of $\mathsf{Gr}_0^\lambda$ given by Braverman and Finkelberg~\cite{bf}, which builds on their previous work with Gaitsgory~\cite{bfg}.
\subsection{Principal bundles on $\mathbb{P}^1$} \label{ss:p1}
First recall the standard moduli-space interpretation of the affine Grassmannian $\mathsf{Gr}$ from~\cite[Section 5]{mv} (see also~\cite[Section 2D]{kwwy}). Namely, it is the moduli space $\mathrm{Bun}_G(\mathbb{P}^1;\mathbb{P}^1\smallsetminus 0)$ of principal $G$-bundles on $\mathbb{P}^1$ equipped with a chosen trivialization on $\mathbb{P}^1\smallsetminus 0$.
Recall that any principal $G$-bundle $\mathcal{F}$ on $\mathbb{P}^1$ must be trivial when restricted to $\mathbb{A}^1=\mathbb{P}^1\smallsetminus\infty$ and when restricted to $\mathbb{P}^1\smallsetminus 0$. So $\mathcal{F}$ can be constructed by gluing together trivial principal $G$-bundles on $\mathbb{A}^1$ and on $\mathbb{P}^1\smallsetminus 0$ using some transition function on $\mathbb{A}^1\smallsetminus 0$, i.e.\ an element of $G[t,t^{-1}]$. We obtain an isomorphic bundle if we left multiply by an element of $G[t^{-1}]$ (changing the trivialization on $\mathbb{P}^1\smallsetminus 0$) or right multiply by an element of $G[t]$ (changing the trivialization on $\mathbb{A}^1$). So the moduli space of principal $G$-bundles on $\mathbb{P}^1$ can be expressed as a quotient stack: \begin{equation} \mathrm{Bun}_G(\mathbb{P}^1)\cong G[t^{-1}]\backslash G[t,t^{-1}]/G[t]. \end{equation} Note that the action of $G[t^{-1}]\times G[t]$ on $G[t,t^{-1}]$ is not free, which corresponds to the fact that principal $G$-bundles on $\mathbb{P}^1$ have nontrivial automorphism groups. Indeed, even a trivial principal $G$-bundle on $\mathbb{P}^1$ has automorphism group $G$; this trivial bundle is parametrized by the open substack $G[t^{-1}]\backslash G[t^{-1}]_1G[t]/G[t]\cong G\backslash\textrm{pt}$, corresponding to the `big opposite Bruhat cell' $G[t^{-1}]_1G[t]=G[t^{-1}]G[t]$ of $G[t,t^{-1}]$.
If we include the trivialization on $\mathbb{P}^1\smallsetminus 0$ as part of the data, then only the (free) action of $G[t]$ remains, so we get \begin{equation} \mathrm{Bun}_G(\mathbb{P}^1;\mathbb{P}^1\smallsetminus 0)\cong G[t,t^{-1}]/G[t]=\mathsf{Gr}, \end{equation} now not merely a stack but an ind-variety. In this moduli space, the open subspace parametrizing trivial bundles is $G[t^{-1}]_1G[t]/G[t]=\mathsf{Gr}_{0}$.
Note that if instead we included only the trivialization at $\infty$ as part of the data, we would get the moduli stack \begin{equation} \label{eqn:stack} \mathrm{Bun}_G(\mathbb{P}^1;\infty)\cong G[t^{-1}]_1\backslash G[t,t^{-1}]/G[t], \end{equation} in which the open substack parametrizing trivial bundles is a point.
\subsection{Principal bundles on $\mathbb{P}^1\times\mathbb{P}^1$}
The Braverman--Finkelberg result~\cite[Theorem 5.2]{bf} takes place one dimension up from the above picture, on a rational surface (it is a by-product of their pursuit of the `double affine Grassmannian').
The setting is the moduli space $\mathrm{Bun}_G(\mathbb{A}^2)$, introduced in~\cite{atiyah,bfg,donaldson}. In one definition, \begin{equation} \label{eqn:old-def} \mathrm{Bun}_G(\mathbb{A}^2):=\mathrm{Bun}_G(\mathbb{P}^2;\mathbb{P}^2\smallsetminus\mathbb{A}^2) \end{equation} is the moduli space of principal $G$-bundles on $\mathbb{P}^2$ equipped with a chosen trivialization on the complement of $\mathbb{A}^2$. As such, $\mathrm{Bun}_G(\mathbb{A}^2)$ clearly carries an action of $G\times \mathrm{GL}(2)$, where $G$ acts by changing the trivialization on $\mathbb{P}^2\smallsetminus\mathbb{A}^2$, and $\mathrm{GL}(2)$ acts on the base space $\mathbb{P}^2$, preserving $\mathbb{A}^2$. However, to define the map $\Psi$ referred to in Theorem~\ref{thm:normalizer}, Braverman and Finkelberg use the alternative definition \begin{equation} \label{eqn:new-def} \mathrm{Bun}_G(\mathbb{A}^2):=\mathrm{Bun}_G(\mathbb{P}^1\times\mathbb{P}^1;(\mathbb{P}^1\times\mathbb{P}^1)\smallsetminus\mathbb{A}^2). \end{equation} In this definition the action of $\mathrm{GL}(2)$ is obscured, but the action of the subgroup $N_{\mathrm{GL}(2)}(\mathbb{G}_m)$ appearing in Theorem~\ref{thm:normalizer} remains. So we will use~\eqref{eqn:new-def} until we have proved that theorem, and then explain (in \S\ref{ss:p2}) how to switch to~\eqref{eqn:old-def}.
\begin{rmk} \label{rmk:chern} Braverman and Finkelberg rarely refer to the whole moduli space $\mathrm{Bun}_G(\mathbb{A}^2)$, because $\mathrm{Bun}_G(\mathbb{A}^2)$ can easily be decomposed into its connected components by considering the second Chern class of a bundle. In view of~\cite[Theorem 5.2 part (1)]{bf}, this decomposition will be superseded once we impose the $\mathbb{G}_m$-equivariance, so we do not need to recall it here. \end{rmk}
Set $D_\infty=(\mathbb{P}^1\times\mathbb{P}^1)\smallsetminus\mathbb{A}^2$. Then a (closed) point of $\mathrm{Bun}_G(\mathbb{A}^2)$ is an isomorphism class of pairs $(\mathcal{F},\Phi)$ where $\mathcal{F}$ is a principal $G$-bundle on $\mathbb{P}^1\times\mathbb{P}^1$ and $\Phi:\mathcal{F}|_{D_\infty}\simto G\times D_\infty$ is an isomorphism of principal $G$-bundles on $D_\infty$, i.e.\ a trivialization of $\mathcal{F}$ on $D_\infty$. In a convenient abuse of notation, we will write $(\mathcal{F},\Phi)\in\mathrm{Bun}_G(\mathbb{A}^2)$ to mean that $(\mathcal{F},\Phi)$ is such a pair.
We now present $\mathrm{Bun}_G(\mathbb{A}^2)$ as a quotient, in the same spirit as~\eqref{eqn:stack}. We use the obvious open covering of $\mathbb{P}^1\times\mathbb{P}^1$ by the following four copies of $\mathbb{A}^2$: \[ \begin{split} U_{00}&:=\mathbb{A}^2=\mathbb{A}^1\times\mathbb{A}^1,\\ U_{01}&:= \mathbb{A}^1\times(\mathbb{P}^1\smallsetminus 0),\\ U_{10}&:= (\mathbb{P}^1\smallsetminus 0)\times\mathbb{A}^1,\\ U_{11}&:=(\mathbb{P}^1\smallsetminus 0)\times(\mathbb{P}^1\smallsetminus 0). \end{split} \] Let $t$ and $u$ be coordinates on the first and second $\mathbb{P}^1$ factors, respectively. Then the morphisms $U_{00}\to G$ form the ind-group $G[t,u]$, and similarly the other open sets give rise to $G[t,u^{-1}]$, $G[t^{-1},u]$ and $G[t^{-1},u^{-1}]$ respectively. We will use the notation $(h_{00},h_{01},h_{10},h_{11})$ for an element of the product \begin{equation} \label{eqn:group} G[t,u]\times G[t,u^{-1}]\times G[t^{-1},u]\times G[t^{-1},u^{-1}]. \end{equation}
A principal $G$-bundle $\mathcal{F}$ on $\mathbb{P}^1\times\mathbb{P}^1$ must be trivial on each of the above four open sets, so it can be constructed by gluing together trivial principal $G$-bundles on these open sets using transition functions on the overlaps. The transition functions constitute a quadruple $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})$, where \begin{equation} \label{eqn:containments} \begin{split} g_{01}^{00}&\in G[t,u,u^{-1}],\\ g_{10}^{00}&\in G[t,t^{-1},u],\\ g_{11}^{01}&\in G[t,t^{-1},u^{-1}],\\ g_{11}^{10}&\in G[t^{-1},u,u^{-1}], \end{split} \end{equation} and these must satisfy the equation \begin{equation} \label{eqn:cocycle} g_{11}^{01}g_{01}^{00}=g_{11}^{10}g_{10}^{00}\qquad \text{in $G[t,t^{-1},u,u^{-1}]$.} \end{equation} Here, $g_{01}^{00}$ gives the transition function on $U_{00}\cap U_{01}$, and so forth; the two equal sides of~\eqref{eqn:cocycle} give the transition function on $U_{00}\cap U_{11}$, and the transition function on $U_{01}\cap U_{10}$ is given by the two equal sides of the equivalent equation \begin{equation} \label{eqn:cocycle2} (g_{11}^{01})^{-1}g_{11}^{10}=g_{01}^{00}(g_{10}^{00})^{-1}\qquad \text{in $G[t,t^{-1},u,u^{-1}]$.} \end{equation}
We conclude that the moduli space $\mathrm{Bun}_G(\mathbb{P}^1\times\mathbb{P}^1)$, as a stack, is the quotient of the space of quadruples $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})$ satisfying~\eqref{eqn:containments} and~\eqref{eqn:cocycle} by the group~\eqref{eqn:group}, where the action is given by: \begin{equation} \label{eqn:action} \begin{split} (h_{00},h_{01},h_{10},h_{11})&\cdot(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\\ &=(h_{01}g_{01}^{00}h_{00}^{-1},h_{10}g_{10}^{00}h_{00}^{-1},h_{11}g_{11}^{01}h_{01}^{-1},h_{11}g_{11}^{10}h_{10}^{-1}). \end{split} \end{equation}
To obtain $\mathrm{Bun}_G(\mathbb{A}^2)$, we must modify this description to incorporate the trivialization $\Phi$ of $\mathcal{F}$ on $D_\infty=(U_{01}\cup U_{10}\cup U_{11})\smallsetminus U_{00}$. First, we must impose two additional conditions on the quadruple $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})$, namely that \begin{equation} \label{eqn:rigidity} g_{11}^{01}\in G[t,t^{-1},u^{-1}]_1^u\text{ and }g_{11}^{10}\in G[t^{-1},u,u^{-1}]_1^t, \end{equation} where $G[t,t^{-1},u^{-1}]_1^u$ means $\ker(G[t,t^{-1},u^{-1}]\to G[t,t^{-1}])$ and $G[t^{-1},u,u^{-1}]_1^t$ is defined analogously. The first condition in~\eqref{eqn:rigidity} ensures that the given trivializations on $U_{01}\smallsetminus U_{00}$ and $U_{11}\smallsetminus U_{00}$ match up along their overlap, and the second condition has a similar interpretation. Let $Z$ denote the space of quadruples $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})$ satisfying~\eqref{eqn:containments}, \eqref{eqn:cocycle} and~\eqref{eqn:rigidity}. Any element of $Z$ gives rise to a pair $(\mathcal{F},\Phi)$ of the sort we are trying to parametrize.
Second, to avoid changing the isomorphism class of $(\mathcal{F},\Phi)$, we must take a quotient not by the full group~\eqref{eqn:group} but by its subgroup \begin{equation} \label{eqn:subgroup} H:=G[t,u]\times G[t,u^{-1}]_1^u\times G[t^{-1},u]_1^t\times G[t^{-1},u^{-1}]_1^{t,u}, \end{equation} where $G[t,u^{-1}]_1^u$ means $\ker(G[t,u^{-1}]\to G[t])$, $G[t^{-1},u]_1^t$ is defined analogously, and $G[t^{-1},u^{-1}]_1^{t,u}$ is the intersection of $G[t^{-1},u^{-1}]_1^{t}$ and $G[t^{-1},u^{-1}]_1^{u}$. To sum up, \begin{equation} \label{eqn:p1p1presentation} \mathrm{Bun}_G(\mathbb{A}^2)=\mathrm{Bun}_G(\mathbb{P}^1\times\mathbb{P}^1;D_\infty)\cong Z/H \end{equation} as a quotient stack, where the action of $H$ on $Z$ is given by~\eqref{eqn:action} as before.
In this description, the action of $G$ on $\mathrm{Bun}_G(\mathbb{A}^2)$ results from an action of $G$ on $Z$, obtained by regarding $G$ as the subgroup of the group~\eqref{eqn:group} responsible for a uniform change of the trivialization $\Phi$: namely, for $g\in G$ and $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$, we have \begin{equation} \label{eqn:G-action} g\cdot(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})=(gg_{01}^{00},gg_{10}^{00},gg_{11}^{01}g^{-1},gg_{11}^{10}g^{-1}). \end{equation} The action of the diagonal subgroup of $\mathrm{GL}(2)$ on $\mathrm{Bun}_G(\mathbb{A}^2)$ results from the following action on $Z$: \begin{equation} \label{eqn:diag-action} [\begin{smallmatrix}\alpha&0\\0&\beta\end{smallmatrix}]\cdot(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})
=(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\left|_{\substack{t\mapsto \alpha^{-1}t\\u\mapsto \beta^{-1}u}}\right.\, , \end{equation} and the action of $[\begin{smallmatrix}0&1\\1&0\end{smallmatrix}]$ on $\mathrm{Bun}_G(\mathbb{A}^2)$ results from the following action on $Z$: \begin{equation} \label{eqn:switch-action} [\begin{smallmatrix}0&1\\1&0\end{smallmatrix}]\cdot(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})
=(g_{10}^{00},g_{01}^{00},g_{11}^{10},g_{11}^{01})\left|_{\substack{t\mapsto u\\u\mapsto t}}\right.\,. \end{equation}
The following fact is implicit in the proof of~\cite[Proposition 3.4]{bfg}.
\begin{prop} \label{prop:factorization} If $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$, then, regarding $g_{11}^{01}$ and $g_{11}^{10}$ as elements of $G[t,t^{-1}][[u^{-1}]]_1^u$ and $G[[t^{-1}]][u,u^{-1}]_1^t$ respectively, we can uniquely write them in the form \[ g_{11}^{01}=(g_{11}^{01})'(g_{11}^{01})''\;\text{ and }\;g_{11}^{10}=(g_{11}^{10})'(g_{11}^{10})'' \] where \[ \begin{split} &(g_{11}^{01})'\in G[t^{-1}][[u^{-1}]]_1^{t,u},\ (g_{11}^{01})''\in G[t][[u^{-1}]]_1^u,\\ &(g_{11}^{10})'\in G[[t^{-1}]][u^{-1}]_1^{t,u},\ (g_{11}^{10})''\in G[[t^{-1}]][u]_1^t. \end{split} \] \end{prop}
\begin{proof} By symmetry we need only prove the claim concerning $g_{11}^{01}$. The uniqueness in the claimed expression $g_{11}^{01}=(g_{11}^{01})'(g_{11}^{01})''$ is clear, because \[ G[t^{-1}][[u^{-1}]]_1^{t,u}\cap G[t][[u^{-1}]]_1^u=\{1\}. \] Recall from the previous subsection that when a principal $G$-bundle on $\mathbb{P}^1$ is represented by a transition function $g\in G[t,t^{-1}]$, the bundle is trivial exactly when $g\in G[t^{-1}]_1 G[t]$. So if $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$ gives rise to $(\mathcal{F},\Phi)\in\mathrm{Bun}_G(\mathbb{A}^2)$, the claim that $g_{11}^{01}\in G[t^{-1}][[u^{-1}]]_1^{t,u}G[t][[u^{-1}]]_1^u$ is equivalent to saying that $\mathcal{F}$ is trivial when restricted to $\mathbb{P}^1\times \mathcal{D}$, where $\mathcal{D}$ is a formal neighbourhood of $\infty$ in $\mathbb{P}^1$; this is the form in which the claim appears in the proof of~\cite[Proposition 3.4]{bfg}. The reason is that $\mathcal{F}$ is assumed to be trivial when restricted to $\mathbb{P}^1\times\{\infty\}$, and the set of points $u_0\in\mathbb{P}^1$ such that $\mathcal{F}$ is nontrivial on $\mathbb{P}^1\times\{u_0\}$ must be Zariski-closed, hence finite; these finitely many lines $\mathbb{P}^1\times\{u_0\}$ are known as the `jumping lines' of $\mathcal{F}$. \end{proof}
As a consequence we have the following result, mentioned in~\cite[Section 3.5]{bfg} and~\cite[Section 4.4]{bf}, which is the reason that $\mathrm{Bun}_G(\mathbb{A}^2)\cong Z/H$ is not merely a stack but an ind-variety.
\begin{prop} \label{prop:free} The action of $H$ on $Z$ is free. In other words, a pair $(\mathcal{F},\Phi)\in\mathrm{Bun}_G(\mathbb{A}^2)$ has no nontrivial automorphisms. \end{prop}
\begin{proof} Suppose that we have \begin{equation} \label{eqn:fix-eqn} (h_{00},h_{01},h_{10},h_{11})\cdot(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10}) =(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10}) \end{equation} for some $(h_{00},h_{01},h_{10},h_{11})\in H$ and $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$. Taking the third component of both sides of~\eqref{eqn:fix-eqn} gives \begin{equation} \label{eqn:fix-eqn3} h_{11}g_{11}^{01}h_{01}^{-1}=g_{11}^{01}. \end{equation} Writing $g_{11}^{01}=(g_{11}^{01})'(g_{11}^{01})''$ as in Proposition~\ref{prop:factorization} and using the uniqueness in that expression, we obtain $h_{11}(g_{11}^{01})'=(g_{11}^{01})'$ and $(g_{11}^{01})''h_{01}^{-1}=(g_{11}^{01})''$, implying $h_{11}=1$ and $h_{01}=1$. Then the other components of~\eqref{eqn:fix-eqn} immediately imply $h_{00}=1$ and $h_{10}=1$ also. \end{proof}
As observed in~\cite[Section 4.4]{bf}, it follows from Proposition~\ref{prop:free} that for any group $\Gamma$ acting on $\mathbb{P}^1\times\mathbb{P}^1$ in such a way as to preserve $\mathbb{A}^2$, the moduli space $\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)$ of $\Gamma$-equivariant pairs $(\mathcal{F},\Phi)$ as above can be identified with the fixed-point set $\mathrm{Bun}_G(\mathbb{A}^2)^\Gamma$ of the $\Gamma$-action on $\mathrm{Bun}_G(\mathbb{A}^2)$. That is, for a pair $(\mathcal{F},\Phi)\in\mathrm{Bun}_G(\mathbb{A}^2)$, $\Gamma$-equivariance can be treated as a property rather than as extra structure.
\subsection{$\mathbb{G}_m$-equivariant bundles} \label{ss:gm-equiv}
Following~\cite[Section 5]{bf}, we now describe the moduli space $\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)=\mathrm{Bun}_G(\mathbb{A}^2)^{\mathbb{G}_m}$ where $\mathbb{G}_m$ is the diagonal subgroup of $\mathrm{SL}(2)$. Note that since $\mathbb{A}^2/\mathbb{G}_m$ is one-dimensional, we should not be surprised to find subvarieties of the ordinary one-variable affine Grassmannian reappearing in the description.
Parts of the next two results are implicit in the proof of~\cite[Theorem 5.2]{bf}.
\begin{prop} \label{prop:psi-prelim} Suppose that $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$ is such that its $H$-orbit is fixed by the $\mathbb{G}_m$-action. Then: \begin{enumerate} \item We can uniquely write $g_{11}^{01}$ and $g_{11}^{10}$ in the form \[ g_{11}^{01}=(g_{11}^{01})'(g_{11}^{01})''\;\text{ and }\;g_{11}^{10}=(g_{11}^{10})'(g_{11}^{10})'' \] where \[ \begin{split} &(g_{11}^{01})'\in G[t^{-1},u^{-1}]_1^{t,u},\ (g_{11}^{01})''\in G[t,u^{-1}]_1^u,\\ &(g_{11}^{10})'\in G[t^{-1},u^{-1}]_1^{t,u},\ (g_{11}^{10})''\in G[t^{-1},u]_1^t. \end{split} \] \item With the above notation, we have \[
((g_{11}^{01})')^{-1}(g_{11}^{10})'=\gamma|_{t\mapsto tu} \] for some, clearly unique, $\gamma\in G[t^{-1}]_1=\mathsf{Gr}_{0}$. \end{enumerate} \end{prop}
\begin{proof} The proof of (1) is similar to that of Proposition~\ref{prop:factorization}. By symmetry, we need only prove the claim concerning $g_{11}^{01}$, and the uniqueness of the claimed expression is obvious. If $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})$ gives rise to $(\mathcal{F},\Phi)\in\mathrm{Bun}_G(\mathbb{A}^2)$, the claim that $g_{11}^{01}\in G[t^{-1},u^{-1}]_1^{t,u}G[t,u^{-1}]_1^u$ is equivalent to saying that $\mathcal{F}$ is trivial when restricted to $\mathbb{P}^1\times(\mathbb{P}^1\smallsetminus 0)$. The reason, as mentioned in the proof of~\cite[Theorem 5.2]{bf}, is that since $\mathcal{F}$ is $\mathbb{G}_m$-equivariant, its set of jumping lines $\mathbb{P}^1\times\{u_0\}$ must be $\mathbb{G}_m$-stable, which means that the only possible jumping line is $\mathbb{P}^1\times\{0\}$.
Now we prove (2). The assumption that the $H$-orbit of $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$ is fixed by the $\mathbb{G}_m$-action is equivalent to saying that for any $z\in\mathbb{G}_m$, there is some $(h_{00}^z,h_{01}^z,h_{10}^z,h_{11}^z)\in H$ such that \begin{equation} \label{eqn:z-fixed} \begin{split}
&(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\left|_{\substack{t\mapsto z^{-1}t\\u\mapsto zu\phantom{{}^{-1}}}}\right.\\ &=(h_{01}^z g_{01}^{00}(h_{00}^z)^{-1},h_{10}^z g_{10}^{00}(h_{00}^z)^{-1},h_{11}^z g_{11}^{01}(h_{01}^z)^{-1},h_{11}^z g_{11}^{10}(h_{10}^z)^{-1}). \end{split} \end{equation} Note that by Proposition~\ref{prop:free}, $(h_{00}^z,h_{01}^z,h_{10}^z,h_{11}^z)$ is uniquely determined by $z$.
Taking the third component of both sides of~\eqref{eqn:z-fixed} gives \begin{equation}
g_{11}^{01}\left|_{\substack{t\mapsto z^{-1}t\\u\mapsto zu\phantom{{}^{-1}}}}\right.=h_{11}^z g_{11}^{01}(h_{01}^z)^{-1}. \end{equation} Writing $g_{11}^{01}=(g_{11}^{01})'(g_{11}^{01})''$ as in (1) and using the uniqueness in that expression, we obtain \begin{equation} \label{eqn:split1}
(g_{11}^{01})'\left|_{\substack{t\mapsto z^{-1}t\\u\mapsto zu\phantom{{}^{-1}}}}\right.=h_{11}^z (g_{11}^{01})'. \end{equation} By symmetry, we also have \begin{equation} \label{eqn:split2}
(g_{11}^{10})'\left|_{\substack{t\mapsto z^{-1}t\\u\mapsto zu\phantom{{}^{-1}}}}\right.=h_{11}^z (g_{11}^{10})'. \end{equation} Combining~\eqref{eqn:split1} and~\eqref{eqn:split2}, we see that $((g_{11}^{01})')^{-1}(g_{11}^{10})'\in G[t^{-1},u^{-1}]_1^{t,u}$ is preserved by the substitution $t\mapsto z^{-1}t$, $u\mapsto zu$ for all $z\in\mathbb{G}_m$, proving part (2). \end{proof}
\begin{prop} \label{prop:psi} There is a $G$-equivariant bijection \[ \Psi:\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)\to\mathsf{Gr}_{0} \] which sends the $H$-orbit of $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$ to $\gamma\in\mathsf{Gr}_{0}$, defined as in Proposition~\ref{prop:psi-prelim}\textup{(}2\textup{)}. The inverse is described as follows. If $\gamma\in\mathsf{Gr}_0^\lambda$ for $\lambda\in\Lambda^+$, i.e.\ \[ \gamma=q_1 t^\lambda q_2\;\text{ for some }\,q_1,q_2\in G[t], \] then $\Psi^{-1}(\gamma)$ is the $H$-orbit of the quadruple $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})\in Z$ defined by \[ \begin{split}
\tilde{g}_{01}^{00}&=(q_1|_{t\mapsto tu})\,u^\lambda,\\
\tilde{g}_{10}^{00}&=(q_2^{-1}|_{t\mapsto tu})\,t^{-\lambda},\\ \tilde{g}_{11}^{01}&=1,\\
\tilde{g}_{11}^{10}&=\gamma|_{t\mapsto tu}. \end{split} \] Here $u^\lambda\in T[u,u^{-1}]$ means the same as $t^{\lambda}$, but written with the variable $u$. \end{prop}
\begin{ex} \label{ex:sl2-special} Take $G=\mathrm{SL}(2)$ and let $\gamma=[\begin{smallmatrix}1&t^{-1}\\0&1\end{smallmatrix}]\in\mathsf{Gr}_0^\alpha$. By~\eqref{eqn:sl2}, we can choose $q_1=[\begin{smallmatrix}0&1\\-1&t\end{smallmatrix}]$ and $q_2=[\begin{smallmatrix}1&0\\t&1\end{smallmatrix}]$. Then the special representative of the $H$-orbit $\Psi^{-1}(\gamma)$ specified in Proposition~\ref{prop:psi} is $([\begin{smallmatrix}0&u^{-1}\\-u&t\end{smallmatrix}],[\begin{smallmatrix}t^{-1}&0\\-u&t\end{smallmatrix}],[\begin{smallmatrix}1&0\\0&1\end{smallmatrix}],[\begin{smallmatrix}1&t^{-1}u^{-1}\\0&1\end{smallmatrix}])\in Z$. \end{ex}
\begin{proof} We first show that $\Psi$ is well defined. Suppose that $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$ is such that its $H$-orbit is fixed by the $\mathbb{G}_m$-action, and define $(g_{11}^{01})'$ and $(g_{11}^{10})'$ as in Proposition~\ref{prop:psi-prelim}. If we act on $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})$ by $(h_{00},h_{01},h_{10},h_{11})\in H$, then $(g_{11}^{01})'$ is replaced by $h_{11}(g_{11}^{01})'$ and $(g_{11}^{10})'$ by $h_{11}(g_{11}^{10})'$, so $((g_{11}^{01})')^{-1}(g_{11}^{10})'$ is left unchanged. Thus the element $\gamma\in G[t^{-1}]_1$ defined in Proposition~\ref{prop:psi-prelim}(2) is indeed an invariant of the $H$-orbit of $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})$. The fact that $\Psi$ is $G$-equivariant is equally easy, using the formula for the $G$-action on $Z$ given in~\eqref{eqn:G-action}.
To show the surjectivity of $\Psi$, it suffices to show that $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})$ as defined in the statement does indeed have the property that its $H$-orbit is fixed by the $\mathbb{G}_m$-action. This follows from the obvious equation \begin{equation} \label{eqn:z-fixed-special}
(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})\left|_{\substack{t\mapsto z^{-1}t\\u\mapsto zu\phantom{{}^{-1}}}}\right. =(\tilde{g}_{01}^{00}\lambda(z),\tilde{g}_{10}^{00}\lambda(z),\tilde{g}_{11}^{01},\tilde{g}_{11}^{10}) \end{equation} for all $z\in\mathbb{G}_m$.
To prove the injectivity of $\Psi$, we must show that if $\Psi$ sends the $H$-orbit of $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$ to $\gamma\in\mathsf{Gr}_0^\lambda$, then that $H$-orbit contains the above quadruple $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})$. Now~\eqref{eqn:cocycle2} implies that \[
(g_{11}^{01})''g_{01}^{00}(g_{10}^{00})^{-1}((g_{11}^{10})'')^{-1}=((g_{11}^{01})')^{-1}(g_{11}^{10})'=(q_1|_{t\mapsto tu})t^\lambda u^\lambda (q_2|_{t\mapsto tu}), \] which rearranges to \begin{equation} \label{eqn:key}
u^{-\lambda}(q_1^{-1}|_{t\mapsto tu})(g_{11}^{01})''g_{01}^{00}=t^\lambda (q_2|_{t\mapsto tu})(g_{11}^{10})''g_{10}^{00}. \end{equation} The left-hand side of~\eqref{eqn:key} clearly belongs to $G[t,u,u^{-1}]$ and the right-hand side clearly belongs to $G[t,t^{-1},u]$, so their common value is some element $h_{00}$ of $G[t,u]$. We then have \begin{equation} (h_{00},(g_{11}^{01})'',(g_{11}^{10})'',((g_{11}^{01})')^{-1})\cdot(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})=(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10}), \end{equation} as required. \end{proof}
\subsection{Proof of Theorem~\ref{thm:normalizer}} \label{ss:proof}
We have used the same letter $\Psi$ for the bijection defined in Proposition~\ref{prop:psi} as we used in the introduction for the bijection defined by Braverman and Finkelberg. Of course, this is to be justified by the next result, which says that the two bijections are the same.
Recall from the introduction that for any $\lambda\in\Lambda^+$, $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$ denotes the closed subvariety of $\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)$ parametrizing $\mathbb{G}_m$-equivariant pairs $(\mathcal{F},\Phi)$ for which the induced action of $\mathbb{G}_m$ on the fibre of $\mathcal{F}$ at $(0,0)\in\mathbb{A}^2$ gives rise to a homomorphism $\mathbb{G}_m\to G$ in the $G$-conjugacy class of $\lambda$. As observed in~\cite{bf}, $\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)$ decomposes as a disconnected union $\coprod_{\lambda\in\Lambda^+}\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$.
\begin{prop} \label{prop:identification} The bijection $\Psi:\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)\to\mathsf{Gr}_{0}$ defined in Proposition~\ref{prop:psi} restricts to an isomorphism of varieties \[ \mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)\simto\mathsf{Gr}_0^\lambda\ \text{ for each }\lambda\in\Lambda^+, \] which is the same as the isomorphism defined by Braverman and Finkelberg in the $\mu=0$ special case of~\cite[Theorem 5.2(2)]{bf}. \end{prop}
\begin{proof} Let $\gamma\in\mathsf{Gr}_{0}$, let $\lambda\in\Lambda^+$ be such that $\gamma\in\mathsf{Gr}_0^\lambda$, and let $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})\in Z$ be the special representative of $\Psi^{-1}(\gamma)$ defined in Proposition~\ref{prop:psi}. This quadruple $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})$ defines a pair $(\mathcal{F},\Phi)\in\mathrm{Bun}_G(\mathbb{A}^2)$ as usual. The fact that $\tilde{g}_{11}^{01}=1$ means that the restriction of the trivialization $\Phi$ to $\mathbb{P}^1\times\{\infty\}$ extends to $\mathbb{P}^1\times(\mathbb{P}^1\smallsetminus 0)$.
The first component of~\eqref{eqn:z-fixed-special} exactly says that the action of $\mathbb{G}_m$ on the fibre of $\mathcal{F}$ at $(0,0)\in\mathbb{A}^2$ gives rise to the homomorphism $\lambda:\mathbb{G}_m\to G$. We conclude that $\Psi^{-1}(\mathsf{Gr}_0^\lambda)$ is indeed $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$.
It is not hard to show directly that $\Psi$ restricts to an isomorphism of varieties $\Psi^{-1}(\mathsf{Gr}_0^\lambda)\simto\mathsf{Gr}_0^\lambda$, but we will omit this since it follows anyway from the identification with the known isomorphism.
Braverman and Finkelberg describe their isomorphism $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)\simto\mathsf{Gr}_0^\lambda$ in the proof of~\cite[Theorem 5.2]{bf}, which incorporates the proof of~\cite[Lemma 5.3]{bf}. They use a principle established in~\cite[Proposition 3.4]{bfg}, which states, in part, that if the restriction of $\Phi$ to $\mathbb{P}^1\times\{\infty\}$ extends to $\mathbb{P}^1\times(\mathbb{P}^1\smallsetminus 0)$, then the isomorphism class of $(\mathcal{F},\Phi)$ is determined by the morphism $f_{(\mathcal{F},\Phi)}:\mathbb{P}^1\to\mathrm{Bun}_G(\mathbb{P}^1;\mathbb{P}^1\smallsetminus 0)=\mathsf{Gr}$ which maps each $t_0\in\mathbb{P}^1$ to the isomorphism class of the restriction of $(\mathcal{F},\Phi)$ to $\{t_0\}\times\mathbb{P}^1$. Then the isomorphism $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)\simto\mathsf{Gr}_0^\lambda$ sends $(\mathcal{F},\Phi)$ to $f_{(\mathcal{F},\Phi)}(1)$. In the case of the pair $(\mathcal{F},\Phi)$ determined by $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})$ as above, the fact that $\tilde{g}_{11}^{10}=\gamma|_{t\mapsto tu}$ means that $f_{(\mathcal{F},\Phi)}(t_0)=\gamma|_{t\mapsto t_0 t}$ for all $t_0\in\mathbb{A}^1\smallsetminus 0$, so the Braverman--Finkelberg isomorphism sends $(\mathcal{F},\Phi)$ to $\gamma$ as claimed. \end{proof}
With our explicit definition of $\Psi$, the proof of Theorem~\ref{thm:normalizer} is trivial: \begin{proof} Suppose that $(g_{01}^{00},g_{10}^{00},g_{11}^{01},g_{11}^{10})\in Z$ is such that its $H$-orbit is fixed by the $\mathbb{G}_m$-action, and define $(g_{11}^{01})'$, $(g_{11}^{10})'$, $\gamma$ as in Proposition~\ref{prop:psi-prelim}. In view of~\eqref{eqn:diag-action}, acting by $[\begin{smallmatrix}\alpha&0\\0&\beta\end{smallmatrix}]$ changes $(g_{11}^{01})'$ and $(g_{11}^{10})'$ by the substitutions $t\mapsto \alpha^{-1}t$, $u\mapsto\beta^{-1}u$, so it changes $\gamma$ by the substitution $t\mapsto \alpha^{-1}\beta^{-1} t$, as claimed. In view of~\eqref{eqn:switch-action}, acting by $[\begin{smallmatrix}0&1\\1&0\end{smallmatrix}]$ swaps $(g_{11}^{01})'$ and $(g_{11}^{10})'$ and interchanges $t$ and $u$, so it replaces $\gamma$ by $\gamma^{-1}$, as claimed.\end{proof}
\begin{rmk} \label{rmk:uhlenbeck} Another, equally important, part of~\cite[Theorem 5.2]{bf} is the statement that the isomorphism $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)\simto\mathsf{Gr}_0^\lambda$ extends to an isomorphism between natural `closures' of these varieties, namely the Uhlenbeck moduli space $\mathcal{U}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$ and the variety $\overline{\mathsf{Gr}}_0^\lambda$. (Really, as mentioned in Remark~\ref{rmk:slice}, Braverman and Finkelberg consider the more general situation of the transverse slice $\overline{\mathsf{Gr}}_\mu^\lambda$ where $\mu$ is not necessarily $0$.) Since an automorphism of a variety is determined by its restriction to a dense subset, it follows immediately from Theorem~\ref{thm:normalizer} that the action of $N_{\mathrm{GL}(2)}(\mathbb{G}_m)$ on $\mathcal{U}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)\cup\mathcal{U}_G^{-w_0\lambda}(\mathbb{A}^2/\mathbb{G}_m)$ corresponds to the action on $\overline{\mathsf{Gr}}_0^\lambda\cup\overline{\mathsf{Gr}}_0^{-w_0\lambda}$ by the same formulas as in Theorem~\ref{thm:normalizer}. \end{rmk}
\subsection{$N$-equivariant bundles} \label{ss:n-equiv}
We now turn to the moduli space $\mathrm{Bun}_G(\mathbb{A}^2/N)$. As explained after Proposition~\ref{prop:free}, $\mathrm{Bun}_G(\mathbb{A}^2/N)$ can be identified with the fixed-point set in $\mathrm{Bun}_G(\mathbb{A}^2)$ of the action of $N$, and since $N=\langle\mathbb{G}_m,[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]\rangle$, this is the same as the fixed-point set in $\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)$ of the action of $[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]$. So most of Theorem~\ref{thm:normalizer2} is an immediate consequence of Theorem~\ref{thm:normalizer}; we need only explain the parts relating to disconnected union decompositions.
Let $\Xi$ denote the set of $G$-conjugacy classes of homomorphisms $N\to G$. We have a natural disjoint union $\Xi=\bigsqcup_{\lambda\in\Lambda^+}\Xi(\lambda)$, where $\Xi(\lambda)$ is the set of $G$-conjugacy classes containing some $\tau:N\to G$ whose restriction to $\mathbb{G}_m$ is $\lambda$.
For a given $\lambda\in\Lambda^+$, extending it to $\tau:N\to G$ amounts to choosing a (necessarily semisimple) element $\sigma=\tau([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])\in G$ satisfying \begin{equation} \label{eqn:sigma-eqn} \sigma^2=\lambda(-1)\;\text{ and }\;\sigma\lambda(z)\sigma^{-1}=\lambda(z)^{-1}\,\text{ for all }\,z\in\mathbb{G}_m. \end{equation} Let $\Sigma(\lambda)$ denote the set of $\sigma\in G$ satisfying~\eqref{eqn:sigma-eqn}, which is a union of $Z_G(\lambda)$-conjugacy classes where $Z_G(\lambda)$ means the centralizer of the image of $\lambda$. Then $\Xi(\lambda)$ is in bijection with the set of $Z_G(\lambda)$-conjugacy classes in $\Sigma(\lambda)$. Note that $\Sigma(\lambda)$ and hence $\Xi(\lambda)$ are empty if $w_0\lambda\neq-\lambda$, i.e.\ $\lambda\notin\Lambda_1^+$.
Now suppose that $\lambda\in\Lambda_1^+$. Since the $Z_G(\lambda)$-conjugacy classes in $\Sigma(\lambda)$ are all closed, we have a disconnected union \begin{equation} \label{eqn:disconnected} \mathrm{Bun}_{G}^\lambda(\mathbb{A}^2/N)=\coprod_{\xi\in\Xi(\lambda)} \mathrm{Bun}_{G}^\xi(\mathbb{A}^2/N), \end{equation} where $\mathrm{Bun}_{G}^\xi(\mathbb{A}^2/N)$ is the subvariety of $\mathrm{Bun}_G(\mathbb{A}^2/N)$ parametrizing $N$-equivariant pairs $(\mathcal{F},\Phi)$ for which the induced action of $N$ on the fibre of $\mathcal{F}$ at $(0,0)\in\mathbb{A}^2$ gives rise to a homomorphism $N\to G$ in the $G$-conjugacy class $\xi$. By Theorem~\ref{thm:normalizer}, there is a corresponding disconnected union \begin{equation} \label{eqn:disconnected2} (\mathsf{Gr}_0^\lambda)^\iota=\coprod_{\xi\in\Xi(\lambda)} (\mathsf{Gr}_0)^{\iota,\xi}, \end{equation} where $(\mathsf{Gr}_0)^{\iota,\xi}:=\Psi(\mathrm{Bun}_{G}^\xi(\mathbb{A}^2/N))$. We will give an alternative description of $(\mathsf{Gr}_0)^{\iota,\xi}$ in Proposition~\ref{prop:sigma} below. This completes the proof of Theorem~\ref{thm:normalizer2}.
If $\Xi(\lambda)$ is empty, then so are $\mathrm{Bun}_{G}^\lambda(\mathbb{A}^2/N)$ and $(\mathsf{Gr}_0^\lambda)^\iota$. (In the case $G=\mathrm{SL}(2)$, this provides another proof of Lemma~\ref{lem:even-empty}.) If $\Xi(\lambda)$ is nonempty, it is still possible that the varieties $\mathrm{Bun}_{G}^\xi(\mathbb{A}^2/N)$ and $(\mathsf{Gr}_0)^{\iota,\xi}$ are empty for some $\xi\in\Xi(\lambda)$. For example, this happens when $\lambda=0$ and $\xi$ corresponds to the $G$-conjugacy class of a nontrivial involution $\sigma\in G$. We do not know a general criterion for nonemptiness of $\mathrm{Bun}_{G}^\xi(\mathbb{A}^2/N)\cong(\mathsf{Gr}_0)^{\iota,\xi}$, but we will give one in the case $G=\mathrm{SL}(r)$ in Proposition~\ref{prop:glr}.
By analogy with the varieties $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)\cong\mathsf{Gr}_0^\lambda$, it is natural to suspect that the nonempty varieties $\mathrm{Bun}_{G}^\xi(\mathbb{A}^2/N)\cong(\mathsf{Gr}_0)^{\iota,\xi}$ are all connected. We will prove this in the case $G=\mathrm{SL}(r)$ in Proposition~\ref{prop:glr}. It does not seem to be easily deducible from the following alternative description of $(\mathsf{Gr}_0)^{\iota,\xi}$.
\begin{prop} \label{prop:sigma} Let $\lambda\in\Lambda_1^+$ and $\gamma\in(\mathsf{Gr}_0^\lambda)^\iota$. \begin{enumerate} \item If we write $\gamma=q_1t^\lambda q_2$ for $q_1,q_2\in G[t]$, the element \[
\sigma:=\left(t^\lambda(q_2|_{t\mapsto 0})(q_1|_{t\mapsto 0})t^\lambda\right)|_{t\mapsto 0}\in G \] is well defined and belongs to $\Sigma(\lambda)$. Moreover, its $Z_G(\lambda)$-conjugacy class depends only on $\gamma$, not on the choice of $q_1,q_2$. \item Let $\xi\in\Xi(\lambda)$. Then $\gamma$ belongs to $(\mathsf{Gr}_0)^{\iota,\xi}$ if and only if $\sigma$ belongs to the $Z_G(\lambda)$-conjugacy class determined by $\xi$. \end{enumerate} \end{prop}
\begin{proof} The assumption that $\iota(\gamma)=\gamma$ implies that \begin{equation}
(q_1|_{t\mapsto -t})t^\lambda\lambda(-1)(q_2|_{t\mapsto -t})q_1t^\lambda q_2=1, \end{equation} and hence (after replacing $t$ by $tu$ and rearranging) \begin{equation} \label{eqn:tu}
t^\lambda (q_2|_{t\mapsto tu})(q_1|_{t\mapsto -tu}) t^\lambda=
u^{-\lambda}(q_1^{-1}|_{t\mapsto tu})(q_2^{-1}|_{t\mapsto -tu})u^{-\lambda}\lambda(-1). \end{equation}
Now the left-hand side of~\eqref{eqn:tu} clearly belongs to $G[t,t^{-1},u]$ while the right-hand side clearly belongs to $G[t,u,u^{-1}]$. We conclude that their common value, $p$ say, in fact belongs to $G[t,u]$, so it makes sense to define $\sigma=p|_{t,u\mapsto 0}\in G$. If we make the substitution $u\mapsto 0$ before the substitution $t\mapsto 0$, we obtain the expression for $\sigma$ given in the statement, showing that it is well defined.
We could prove the remaining assertions in (1) concerning $\sigma$ by simple calculations, but in order to prove (2) also it is more efficient to argue as follows.
Recall from Proposition~\ref{prop:psi} the special quadruple $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})\in Z$ in the $H$-orbit corresponding to $\gamma$ under $\Psi$. Since $\gamma$ is fixed by $\iota$, the $H$-orbit of $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})$ is preserved by the action of $[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]\in\mathrm{SL}(2)$. In fact, an easy calculation using~\eqref{eqn:diag-action} and~\eqref{eqn:switch-action} shows that \begin{equation} \label{eqn:switch-fixed-special} [\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]\cdot(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})=(p^{-1},1,1,\gamma|_{t\mapsto -tu})\cdot(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10}). \end{equation}
The quadruple $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})$ defines as usual a pair $(\mathcal{F},\Phi)\in\mathrm{Bun}_{G}^\lambda(\mathbb{A}^2/N)$. Let $\tau:N\to G$ be the homomorphism obtained from the action of $N$ on the fibre of $\mathcal{F}$ at $(0,0)\in\mathbb{A}^2=U_{00}$, using the original identification of $\mathcal{F}|_{U_{00}}$ with the trivial $G$-bundle on $U_{00}$. As already seen in the proof of Proposition~\ref{prop:identification}, the first component of~\eqref{eqn:z-fixed-special} says that the restriction of $\tau$ to $\mathbb{G}_m$ is $\lambda$. Likewise, the first component of~\eqref{eqn:switch-fixed-special} says that $\tau([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])=p|_{t,u\mapsto 0}=\sigma$. From this it is automatic that $\sigma$ belongs to $\Sigma(\lambda)$, and that its $Z_G(\lambda)$-conjugacy class depends only on $\gamma$ (because the $G$-conjugacy class of $\tau$ depends only on the isomorphism class of $(\mathcal{F},\Phi)$). This completes part (1), and part (2) also follows by definition of $(\mathsf{Gr}_0)^{\iota,\xi}$. \end{proof}
\begin{ex} Continue Example~\ref{ex:sl2-special} with $G=\mathrm{SL}(2)$, $\lambda=\alpha$ (the positive coroot), and $\gamma=[\begin{smallmatrix}1&t^{-1}\\0&1\end{smallmatrix}]$. Using the same choice of $q_1$ and $q_2$ as in Example~\ref{ex:sl2-special}, the element $\sigma$ defined in Proposition~\ref{prop:sigma} is $[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]$. In this case, $\Sigma(\alpha)$ consists of a single $Z_G(\alpha)$-conjugacy class, so $\Xi(\alpha)$ has only one element. \end{ex}
\begin{ex} More generally, for any $G$ suppose that $\gamma=e(x)$ for some $x\in\mathcal{N}$. As seen in~\eqref{eqn:exp-inclusion}, $\gamma\in(\mathsf{Gr}_0^{\lambda_{{\mathcal{O}}}})^\iota$ where ${\mathcal{O}}$ is the $G$-orbit of $x$. If we let $\varphi:\mathrm{SL}(2)\to G$ be a homomorphism such that $(d\varphi)([\begin{smallmatrix}0&1\\0&0\end{smallmatrix}])=x$, then applying $\varphi$ to the previous example shows that the element $\sigma$ defined in Proposition~\ref{prop:sigma} is $\varphi([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])$. So Proposition~\ref{prop:sigma} says that $e(x)\in(\mathsf{Gr}_0)^{\iota,\xi}$ where $\xi$ is the $G$-conjugacy class of the restriction of $\varphi$ to $N$. This was already clear from the definition $(\mathsf{Gr}_0)^{\iota,\xi}=\Psi(\mathrm{Bun}_{G}^\xi(\mathbb{A}^2/N))$: it follows from the commutativity of the diagram~\eqref{eqn:intro-diag}. \end{ex}
\begin{rmk} As explained in~\cite[Section 5.1]{bf}, the varieties $\mathrm{Bun}_{G}^\lambda(\mathbb{A}^2/\mathbb{G}_m)$ are special cases of certain moduli spaces of $\Gamma_{\mathrm{A}_k}$-equivariant principal $G$-bundles on $\mathbb{P}^2$, where $\Gamma_{\mathrm{A}_k}\cong\mathbb{Z}/(k+1)\mathbb{Z}$ is a finite cyclic subgroup of $\mathrm{SL}(2)$ (alternatively, moduli spaces of `instantons on a type-$\mathrm{A}_k$ singularity'). This is because $\mathbb{G}_m$ contains a copy of $\Gamma_{\mathrm{A}_k}$ for all $k$, and is Zariski-generated by the union of these finite subgroups; one should think of $\mathbb{G}_m$ as the type-$\mathrm{A}_\infty$ subgroup of $\mathrm{SL}(2)$. Analogously, $N$ is the type-$\mathrm{D}_\infty$ subgroup of $\mathrm{SL}(2)$; see \S\ref{ss:subgroups}. Hence the varieties $\mathrm{Bun}_G^\xi(\mathbb{A}^2/N)$ are special cases of certain moduli spaces of $\Gamma_{\mathrm{D}_k}$-equivariant principal $G$-bundles on $\mathbb{P}^2$, where $\Gamma_{\mathrm{D}_k}$ is a binary dihedral subgroup of $\mathrm{SL}(2)$ (`instantons on a type-$\mathrm{D}_k$ singularity'). It would be interesting to extend the results of~\cite{bf} to the latter moduli spaces. \end{rmk}
\subsection{Principal bundles on $\mathbb{P}^2$} \label{ss:p2}
For the remainder of the paper it will be more convenient to use the definition~\eqref{eqn:old-def} of $\mathrm{Bun}_G(\mathbb{A}^2)$ rather than the definition~\eqref{eqn:new-def}, so we want to translate our explicit formula for the bijection $\Psi$ (given in Proposition~\ref{prop:psi}) to the setting of~\eqref{eqn:old-def}.
As explained in~\cite[Section 3]{atiyah} and~\cite[Section 4.1]{bfg}, the two definitions are linked using the diagram \begin{equation} \vcenter{ \xymatrix@R=10pt@C=10pt{ &X\ar[dl]_-{\pi_1}\ar[dr]^-{\pi_2}&\\ \mathbb{P}^2&&\mathbb{P}^1\times\mathbb{P}^1 }} \end{equation} where \[
X:=\{([z_0:z_1:z_2],\,t,\,u)\in\mathbb{P}^2\times\mathbb{P}^1\times\mathbb{P}^1\,|\,z_1=z_0t,\,z_2=z_0u\}, \] and $\pi_1$, $\pi_2$ are the obvious projections. In the definition of $X$, we have used homogeneous coordinates $[z_0:z_1:z_2]$ on the $\mathbb{P}^2$ factor and inhomogeneous coordinates $t$ and $u$ (as before) on the $\mathbb{P}^1$ factors. One must interpret the equations accordingly: for example, if $t=\infty$ then the equation $z_1=z_0t$ should be read as $z_0=0$. Thus, the map $\pi_1$ is a blow-up of $\mathbb{P}^2$ at the two points $[0:0:1]$ and $[0:1:0]$ on the line at infinity, and the map $\pi_2$ is a blow-up of $\mathbb{P}^1\times\mathbb{P}^1$ at the point $(\infty,\infty)$. Neither blow-up affects the open subset $\mathbb{A}^2$, so the pull-backs give isomorphisms \begin{equation} \mathrm{Bun}_G(\mathbb{P}^2;\ell_\infty)\simto\mathrm{Bun}_G(X;X\smallsetminus\mathbb{A}^2)\overset{\sim}{\leftarrow} \mathrm{Bun}_G(\mathbb{P}^1\times\mathbb{P}^1;D_\infty), \end{equation} where $\ell_\infty=\mathbb{P}^2\smallsetminus\mathbb{A}^2$ is the line at infinity.
We can present $\mathrm{Bun}_G(\mathbb{P}^2;\ell_\infty)$ as a quotient in much the same way as~\eqref{eqn:p1p1presentation}. We use the usual open covering of $\mathbb{P}^2$: \[ \begin{split}
U_0&:=\{[z_0:z_1:z_2]\,|\,z_0\neq 0\}=\mathbb{A}^2,\\
U_1&:=\{[z_0:z_1:z_2]\,|\,z_1\neq 0\},\\
U_2&:=\{[z_0:z_1:z_2]\,|\,z_2\neq 0\}. \end{split} \] A pair $(\mathcal{F},\Phi)\in\mathrm{Bun}_G(\mathbb{P}^2;\ell_\infty)$ can be constructed by gluing together trivial bundles on these open sets using transition functions $(g_1^0,g_2^0,g_2^1)$, where \begin{equation} \label{eqn:p2conditions} \begin{split} g_1^0&\in G[z_1/z_0,z_0/z_1,z_2/z_0],\\ g_2^0&\in G[z_1/z_0,z_2/z_0,z_0/z_2],\\ g_2^1&\in \ker(G[z_0/z_1,z_2/z_1,z_1/z_2]\to G[z_2/z_1,z_1/z_2]),\\ g_2^0&=g_2^1g_1^0\quad\text{ in }G(z_0,z_1,z_2). \end{split} \end{equation} Let $Z'$ denote the space of triples $(g_1^0,g_2^0,g_2^1)$ satisfying~\eqref{eqn:p2conditions}. On this space we have an action of the group \begin{equation} \begin{split} H':=&G[z_1/z_0,z_2/z_0]\\ &\times \ker(G[z_0/z_1,z_2/z_1]\to G[z_2/z_1]) \\ &\times \ker(G[z_0/z_2,z_1/z_2]\to G[z_1/z_2]), \end{split} \end{equation} defined by \begin{equation}\label{eqn:p2action} (h_0,h_1,h_2)\cdot (g_1^0,g_2^0,g_2^1) = (h_1g_1^0h_0^{-1},h_2g_2^0h_0^{-1},h_2g_2^1h_1^{-1}), \end{equation} for $(h_0,h_1,h_2)\in H'$ and $(g_1^0,g_2^0,g_2^1)\in Z'$. Then \begin{equation} \label{eqn:p2presentation} \mathrm{Bun}_G(\mathbb{A}^2)=\mathrm{Bun}_G(\mathbb{P}^2;\ell_\infty)\cong Z'/H', \end{equation} and Proposition~\ref{prop:free} implies that the $H'$-action on $Z'$ is free.
In this description, the action of $G$ on $\mathrm{Bun}_G(\mathbb{A}^2)$ is induced by the following action on $Z'$: \begin{equation} \label{eqn:p2-G-action} g\cdot(g_1^0,g_2^0,g_2^1)=(gg_1^0,gg_2^0,gg_2^1g^{-1}). \end{equation} The action of the diagonal subgroup of $\mathrm{GL}(2)$ on $\mathrm{Bun}_G(\mathbb{A}^2)$ is induced by the following action on $Z'$: \begin{equation} \label{eqn:p2-diag-action} [\begin{smallmatrix}\alpha&0\\0&\beta\end{smallmatrix}]\cdot(g_1^0,g_2^0,g_2^1)
=(g_1^0,g_2^0,g_2^1)\left|_{\substack{z_1/z_0\mapsto \alpha^{-1}z_1/z_0\\z_2/z_0\mapsto \beta^{-1}z_2/z_0}}\right.\, . \end{equation} As before, we identify $\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)$ with the fixed-point set of $\mathbb{G}_m$ on $\mathrm{Bun}_G(\mathbb{A}^2)$, i.e.\ the set of $H'$-orbits in $Z'$ that are fixed by the action of $\mathbb{G}_m$.
The bijection $\Psi:\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)\to\mathsf{Gr}_{0}$, or more conveniently its inverse, is described as follows. \begin{prop} \label{prop:p2-psi} If $\gamma\in\mathsf{Gr}_0^\lambda$ for $\lambda\in\Lambda^+$, i.e.\ \[ \gamma=q_1 t^\lambda q_2\;\text{ for some }\,q_1,q_2\in G[t], \] then $\Psi^{-1}(\gamma)$ is the $H'$-orbit of the triple $(\tilde{g}_{1}^{0},\tilde{g}_{2}^{0},\tilde{g}_{2}^{1})\in Z'$ defined by \[ \begin{split}
\tilde{g}_{1}^{0}&=(q_2^{-1}|_{t\mapsto z_1z_2/z_0^2})\,(z_1/z_0)^{-\lambda},\\
\tilde{g}_{2}^{0}&=(q_1|_{t\mapsto z_1z_2/z_0^2})\,(z_2/z_0)^\lambda,\\
\tilde{g}_{2}^{1}&=\gamma|_{t\mapsto z_1z_2/z_0^2}. \end{split} \] \end{prop}
\begin{ex} \label{ex:sl2-special-p2} Continue Example~\ref{ex:sl2-special} with $G=\mathrm{SL}(2)$ and $\gamma=[\begin{smallmatrix}1&t^{-1}\\0&1\end{smallmatrix}]\in\mathsf{Gr}_0^\alpha$. Choosing $q_1$ and $q_2$ as in Example~\ref{ex:sl2-special}, the special representative of $\Psi^{-1}(\gamma)$ specified in Proposition~\ref{prop:p2-psi} is $([\begin{smallmatrix}z_0/z_1&0\\-z_2/z_0&z_1/z_0\end{smallmatrix}],[\begin{smallmatrix}0&z_0/z_2\\-z_2/z_0&z_1/z_0\end{smallmatrix}],[\begin{smallmatrix}1&z_0^2/z_1z_2\\0&1\end{smallmatrix}])\in Z'$. \end{ex}
\begin{proof} Let $(\mathcal{F}_1,\Phi_1)\in\mathrm{Bun}_G(\mathbb{P}^2;\ell_\infty)$ be determined by the triple $(\tilde{g}_{1}^{0},\tilde{g}_{2}^{0},\tilde{g}_{2}^{1})\in Z'$ defined as in Proposition~\ref{prop:p2-psi}, and let $(\mathcal{F}_2,\Phi_2)\in\mathrm{Bun}_G(\mathbb{P}^1\times\mathbb{P}^1;D_\infty)$ be determined by the quadruple $(\tilde{g}_{01}^{00},\tilde{g}_{10}^{00},\tilde{g}_{11}^{01},\tilde{g}_{11}^{10})\in Z$ defined as in Proposition~\ref{prop:psi} (using the same expression $\gamma=q_1 t^\lambda q_2$). It suffices to show that the pull-backs $\pi_1^*(\mathcal{F}_1,\Phi_1)$ and $\pi_2^*(\mathcal{F}_2,\Phi_2)$ give the same point of $\mathrm{Bun}_G(X;X\smallsetminus\mathbb{A}^2)$. We will only consider the bundles $\pi_1^*\mathcal{F}_1$ and $\pi_2^*\mathcal{F}_2$; it is easy to identify the two trivializations on $X\smallsetminus\mathbb{A}^2$.
Now $\pi_1^*\mathcal{F}_1$ can be obtained by gluing trivial $G$-bundles on $\pi_1^{-1}(U_0)$, $\pi_1^{-1}(U_1)$, and $\pi_1^{-1}(U_2)$ using the transition functions $\pi_1^*\tilde{g}_{1}^{0},\pi_1^*\tilde{g}_{2}^{0},\pi_1^*\tilde{g}_{2}^{1}$ on the respective overlaps $\pi_1^{-1}(U_0\cap U_1)$, $\pi_1^{-1}(U_0\cap U_2)$, $\pi_1^{-1}(U_1\cap U_2)$. Note that $\pi_1$ induces an isomorphism $\pi_1^{-1}(U_0\cap U_1)\simto U_0\cap U_1$, and $\pi_1^*\tilde{g}_{1}^{0}$ is the morphism $\pi_1^{-1}(U_0\cap U_1)\to G$ obtained by composing this isomorphism with $\tilde{g}_{1}^{0}$; and similarly for the other overlaps.
We have an analogous description of $\pi_2^*\mathcal{F}_2$. Since $\tilde{g}_{11}^{01}=1$, $\pi_2^*\mathcal{F}_2$ is obtained by gluing trivial $G$-bundles on $\pi_2^{-1}(U_{00})$, $\pi_2^{-1}(U_{10})$, and $\pi_2^{-1}(U_{01}\cup U_{11})$ using the transition functions $\pi_2^*\tilde{g}_{10}^{00},\pi_2^*\tilde{g}_{01}^{00},\pi_2^*\tilde{g}_{11}^{10}$ on the respective overlaps \[ \begin{split} &\pi_2^{-1}(U_{00}\cap U_{10}),\\ &\pi_2^{-1}(U_{00}\cap(U_{01}\cup U_{11}))=\pi_2^{-1}(U_{00}\cap U_{01}),\\ &\pi_2^{-1}(U_{10}\cap (U_{01}\cup U_{11}))=\pi_2^{-1}(U_{10}\cap U_{11}). \end{split} \]
It now suffices to check that the two collections of transition functions agree on dense subsets of their domains; and since $X$ is irreducible, it suffices to check the agreement on generic points. For a generic point $([z_0:z_1:z_2],t,u)$ of $X$, we have \[ \begin{split}
\pi_1^*\tilde{g}_{1}^{0}([z_0:z_1:z_2],t,u)&=(q_2^{-1}|_{t\mapsto z_1z_2/z_0^2})\,(z_1/z_0)^{-\lambda}\\
&=(q_2^{-1}|_{t\mapsto tu})\,t^{-\lambda}=\pi_2^*\tilde{g}_{10}^{00}([z_0:z_1:z_2],t,u), \end{split} \] and the arguments for the other transition functions are analogous. \end{proof}
\section{The case $G=\mathrm{SL}(r)$} \label{sec:glr}
In this section we assume that $G=\mathrm{SL}(r)$ for some integer $r\geq 2$. In the definition~\eqref{eqn:old-def} of $\mathrm{Bun}_G(\mathbb{A}^2)$, we can replace the principal $G$-bundle $\mathcal{F}$ on $\mathbb{P}^2$ by a rank-$r$ vector bundle $\mathcal{E}$ on $\mathbb{P}^2$. (Note that we do not need to impose the condition that $\mathcal{E}$ has trivial determinant bundle, since the existence of the trivialization $\Phi$ on $\ell_\infty$ implies this automatically.) We then have ADHM-style or `monad' descriptions of all our moduli spaces as suitable kinds of Nakajima quiver varieties. We will recall these descriptions and see what they tell us about the varieties $\mathsf{Gr}_0^\lambda$ and $(\mathsf{Gr}_0)^{\iota,\xi}$ in this case. For $\mathsf{Gr}_0^\lambda$, this recovers a known result of Mirkovi\'c and Vybornov~\cite{mvy-cr}, as already observed in~\cite{bf}. The application to the varieties $(\mathsf{Gr}_0)^{\iota,\xi}$ is all that is actually new here, but it is easiest to explain in a more general context.
\subsection{Vector bundles on $\mathbb{P}^2$} \label{ss:1-case}
When $G=\mathrm{SL}(r)$ for $r\geq 2$, the moduli space $\mathrm{Bun}_{G}(\mathbb{A}^2)=\mathrm{Bun}_{G}(\mathbb{P}^2;\ell_\infty)$ has connected components $\mathrm{Bun}_{G}^n(\mathbb{A}^2)$ where $n$ is a nonnegative integer representing the second Chern class of the bundle. As explained in~\cite[Theorem 2.1]{nak3},~\cite[Section 2]{vv} and~\cite[Section 5.1]{bfg}, $\mathrm{Bun}_{G}^n(\mathbb{A}^2)$ is isomorphic to the quiver variety $\mathfrak{M}_0^{\mathrm{reg}}(n,r)$ of type $\widetilde{\mathrm{A}}_0$ (that is, corresponding to the quiver with one vertex and one loop). We now recall the definitions of the latter variety and of the isomorphism, mainly following the conventions of~\cite{nak1,nak2,nak3}.
Set $V=\mathbb{C}^n$ and $W=\mathbb{C}^r$, so that $G=\mathrm{SL}(W)$. Let $\Lambda(V,W)$ be the variety of quadruples $(B_1,B_2,\mathbf{i},\mathbf{j})$, where \[ B_1,B_2:V\to V,\quad \mathbf{i}:W\to V,\quad \mathbf{j}:V\to W \] are linear maps satisfying the ADHM equation \begin{equation} \label{eqn:adhm} [B_1,B_2]+\mathbf{i}\mathbf{j}=0. \end{equation} We have a natural action of $\mathrm{GL}(V)\times G$ on $\Lambda(V,W)$.
A quadruple $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)$ is said to be \emph{stable} if $V$ has no nonzero subspace which is preserved by $B_1,B_2$ and is contained in the kernel of $\mathbf{j}$, and \emph{costable} if $V$ has no proper subspace which is preserved by $B_1,B_2$ and contains the image of $\mathbf{i}$. Let $\Lambda(V,W)^{s}$ (respectively, $\Lambda(V,W)^{sc}$) be the open subset of $\Lambda(V,W)$ consisting of quadruples that are stable (respectively, stable and costable); these subsets are clearly preserved by the action of $\mathrm{GL}(V)\times G$.
It follows by the same arguments as in~\cite[Section 3]{nak2} that: \begin{equation} \label{eqn:stability-freeness} \text{The action of $\mathrm{GL}(V)$ on $\Lambda(V,W)^{s}$ is free.} \end{equation} So there is a geometric quotient variety $\mathfrak{M}(V,W)=\Lambda(V,W)^s/\mathrm{GL}(V)$, whose points are identified with the $\mathrm{GL}(V)$-orbits in $\Lambda(V,W)^s$. We let $\mathfrak{M}_0(V,W)$ denote the affine variety $\Lambda(V,W)/\!/\mathrm{GL}(V)$, whose points are identified with the closed $\mathrm{GL}(V)$-orbits in $\Lambda(V,W)$. We have an action of $G$ on both $\mathfrak{M}(V,W)$ and $\mathfrak{M}_0(V,W)$, and there is a natural $G$-equivariant projective morphism \[ \pi:\mathfrak{M}(V,W)\to\mathfrak{M}_0(V,W), \] mapping a $\mathrm{GL}(V)$-orbit ${\mathcal{O}}$ in $\Lambda(V,W)^s$ to the unique closed $\mathrm{GL}(V)$-orbit in $\overline{{\mathcal{O}}}$, where $\overline{{\mathcal{O}}}$ is the closure of ${\mathcal{O}}$ in $\Lambda(V,W)$.
We define $\mathfrak{M}^{\mathrm{reg}}(V,W)$ to be the $G$-stable open subvariety $\Lambda(V,W)^{sc}/\mathrm{GL}(V)$ of $\mathfrak{M}(V,W)$. The morphism $\pi$ maps $\mathfrak{M}^{\mathrm{reg}}(V,W)$ isomorphically onto an open subvariety $\mathfrak{M}_0^{\mathrm{reg}}(V,W)$ of $\mathfrak{M}_0(V,W)$. In fact, the $\Gamma=\{1\}$ case of~\cite[Lemma 1(ii)]{vv} says that $\Lambda(V,W)^{sc}$ consists exactly of the points in $\Lambda(V,W)^s$ whose $\mathrm{GL}(V)$-orbit is closed in $\Lambda(V,W)$, so the points of either $\mathfrak{M}^{\mathrm{reg}}(V,W)$ or $\mathfrak{M}_0^{\mathrm{reg}}(V,W)$ are identified with the closed $\mathrm{GL}(V)$-orbits in $\Lambda(V,W)$ that lie in $\Lambda(V,W)^s$. The notation $\mathfrak{M}_0^{\mathrm{reg}}(V,W)$ is the one used by Nakajima, but we will mostly refer to $\mathfrak{M}^{\mathrm{reg}}(V,W)$.
The following result, essentially due to Barth~\cite{barth,donaldson}, is part of a similar moduli-space interpretation of the whole of $\mathfrak{M}(V,W)$; see~\cite[Theorem 2.1]{nak3}. \begin{prop} \label{prop:barth} We have a $G$-equivariant isomorphism \[ \Theta:\mathfrak{M}^{\mathrm{reg}}(V,W)\simto\mathrm{Bun}_G^n(\mathbb{A}^2) \] which sends the $\mathrm{GL}(V)$-orbit of a quadruple $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)^{sc}$ to the point parametrizing the rank-$r$ vector bundle $\ker(b)/\mathrm{im}(a)$ on $\mathbb{P}^2$ where the maps \[ V\otimes {\mathcal{O}}_{\mathbb{P}^2}(-1) \overset{a}{\hookrightarrow} (V\oplus V\oplus W)\otimes {\mathcal{O}}_{\mathbb{P}^2} \overset{b}{\twoheadrightarrow} V\otimes {\mathcal{O}}_{\mathbb{P}^2}(1) \] are defined by \[ a=\begin{bmatrix}z_0B_1-z_1\mathrm{id}_V\\z_0B_2-z_2\mathrm{id}_V\\z_0 \mathbf{j} \end{bmatrix},\qquad b=\begin{bmatrix}-(z_0B_2-z_2\mathrm{id}_V) & z_0B_1-z_1\mathrm{id}_V & z_0 \mathbf{i} \end{bmatrix}. \] Here $z_0,z_1,z_2\in\Gamma(\mathbb{P}^2,{\mathcal{O}}_{\mathbb{P}^2}(1))$ are homogeneous coordinates as before. \end{prop} \noindent Some explanation is required. The equation~\eqref{eqn:adhm} ensures that $b\circ a=0$, so $\ker(b)/\mathrm{im}(a)$ makes sense. The condition that $(B_1,B_2,\mathbf{i},\mathbf{j})$ is stable and costable is equivalent to saying that $a$ is injective and $b$ is surjective, not just in the category of vector bundles on $\mathbb{P}^2$ but on every fibre; for this, combine~\cite[Lemma 2.7]{nak3} with its dual. 
When we restrict to the line at infinity $\ell_\infty=\mathbb{P}^2\smallsetminus\mathbb{A}^2$ where $z_0=0$, the maps $a$ and $b$ become \[ \begin{bmatrix}-z_1\mathrm{id}_V\\-z_2\mathrm{id}_V\\0 \end{bmatrix}\ \text{ and }\ \begin{bmatrix}z_2\mathrm{id}_V & -z_1\mathrm{id}_V & 0\end{bmatrix}, \] so the restriction of $\ker(b)/\mathrm{im}(a)$ to $\ell_\infty$ is isomorphic in an obvious way to the trivial rank-$r$ vector bundle $W\otimes{\mathcal{O}}_{\ell_\infty}$; this is the trivialization used to regard $\ker(b)/\mathrm{im}(a)$ as a point of $\mathrm{Bun}_G(\mathbb{A}^2)$. From this the $G$-equivariance of $\Theta$ is obvious.
On $\mathrm{Bun}_G^n(\mathbb{A}^2)$, as we saw in the previous section, we have an action not just of $G$ but of $G\times\mathrm{GL}(2)$, where $[\begin{smallmatrix}\alpha&\beta\\\gamma&\delta\end{smallmatrix}]\in\mathrm{GL}(2)$ acts as the pull-back under the action of $[\begin{smallmatrix}\alpha&\beta\\\gamma&\delta\end{smallmatrix}]^{-1}$ on $\mathbb{P}^2$. The corresponding $\mathrm{GL}(2)$-action on $\mathfrak{M}^{\mathrm{reg}}(V,W)$ is described as follows. We have a $\mathrm{GL}(2)$-action on $\Lambda(V,W)$ by the rule \begin{equation} \label{eqn:gl2-action} [\begin{smallmatrix}\alpha&\beta\\\gamma&\delta\end{smallmatrix}]\cdot(B_1,B_2,\mathbf{i},\mathbf{j}) =(\alpha B_1+\gamma B_2,\beta B_1+\delta B_2,(\alpha\delta-\beta\gamma)\mathbf{i},\mathbf{j}). \end{equation} More intrinsically, the $\mathrm{GL}(2)$-action is defined by regarding $B_1,B_2$ as the components of a single linear map $B:V\to V\otimes\mathbb{C}^2$, and $\mathbf{i}$ as a map $W\to V\otimes\Lambda^2(\mathbb{C}^2)$, and then letting $\mathrm{GL}(2)$ act via its action on $\mathbb{C}^2$; this formulation is from~\cite[Section 2]{vv}. Since this $\mathrm{GL}(2)$-action on $\Lambda(V,W)$ commutes with the action of $\mathrm{GL}(V)\times G$ and preserves the subset $\Lambda(V,W)^{s}$ (respectively, $\Lambda(V,W)^{sc}$), it induces a $\mathrm{GL}(2)$-action on $\mathfrak{M}(V,W)$ (respectively, $\mathfrak{M}^{\mathrm{reg}}(V,W)$) which commutes with the action of $G$.
\begin{prop} \label{prop:barth-equiv} The isomorphism $\Theta:\mathfrak{M}^{\mathrm{reg}}(V,W)\simto\mathrm{Bun}_G^n(\mathbb{A}^2)$ is $\mathrm{GL}(2)$-equivariant, for the $\mathrm{GL}(2)$-actions described above. \end{prop}
\begin{proof} This is implicit in~\cite{nak3}, and various special cases which are no easier than the full claim were explicitly proved in~\cite[Section 3]{furutahashimoto},~\cite[Theorem 1]{vv} and~\cite[Lemma 4.3]{kumar}. For reassurance we spell out the proof. Suppose that $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)^{sc}$ gives rise to the vector bundle $\ker(b)/\mathrm{im}(a)$ as in Proposition~\ref{prop:barth}. Then by definition, the pull-back under the action of $[\begin{smallmatrix}\alpha&\beta\\\gamma&\delta\end{smallmatrix}]$ on $\mathbb{P}^2$ of the vector bundle corresponding to $[\begin{smallmatrix}\alpha&\beta\\\gamma&\delta\end{smallmatrix}]\cdot(B_1,B_2,\mathbf{i},\mathbf{j})$ is $\ker(b')/\mathrm{im}(a')$, where \[ a'=\begin{bmatrix}\alpha&\beta&0\\\gamma&\delta&0\\0&0&1\end{bmatrix}a\qquad \text{ and }\qquad b'=(\alpha\delta-\beta\gamma)\,b\begin{bmatrix}\alpha&\beta&0\\\gamma&\delta&0\\0&0&1\end{bmatrix}^{-1}. \] Clearly we have an isomorphism between $\ker(b)/\mathrm{im}(a)$ and $\ker(b')/\mathrm{im}(a')$ respecting the trivializations on $\ell_\infty$. \end{proof}
As an immediate consequence of Proposition~\ref{prop:barth-equiv}, the isomorphism $\Theta$ of Proposition~\ref{prop:barth} restricts to an isomorphism between fixed-point subvarieties \begin{equation} \label{eqn:barth-gamma} \mathfrak{M}^{\mathrm{reg}}(V,W)^\Gamma\simto\mathrm{Bun}_G^n(\mathbb{A}^2/\Gamma), \end{equation} for any subgroup $\Gamma\subseteq\mathrm{GL}(2)$, where $\mathrm{Bun}_G^n(\mathbb{A}^2/\Gamma):=\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)\cap\mathrm{Bun}_G^n(\mathbb{A}^2)$. (We continue to rely on Proposition~\ref{prop:free} to identify $\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)$ with $\mathrm{Bun}_G(\mathbb{A}^2)^\Gamma$.) Our aim now is to analyse the left-hand side of~\eqref{eqn:barth-gamma}.
\subsection{Reductive subgroups of $\mathrm{SL}(2)$} \label{ss:subgroups}
We need to digress briefly to recall some aspects of the McKay correspondence, and how they extend from the well-known case of finite subgroups of $\mathrm{SL}(2)$ to the (slightly) more general case of reductive subgroups of $\mathrm{SL}(2)$.
Let $\Gamma$ be a reductive subgroup of $\mathrm{SL}(2)$. Then $\Gamma$ is either finite (hence cyclic or binary di-, tetra-, octa- or icosahedral), conjugate to the diagonal $\mathbb{G}_m$ or to $N=N_{\mathrm{SL}(2)}(\mathbb{G}_m)$, or equal to $\mathrm{SL}(2)$ itself.
We define the \emph{doubled McKay quiver} of $\Gamma$ as follows: its vertex set $I_\Gamma$ is in bijection with the isomorphism classes of irreducible representations $\{S_i\,|\,i\in I_\Gamma\}$ of $\Gamma$, and the number of directed edges from vertex $i$ to vertex $j$ is the multiplicity $\dim \Hom_\Gamma(S_i\otimes\mathbb{C}^2,S_j)$, where $\Gamma$ acts on $\mathbb{C}^2$ via the embedding $\Gamma\hookrightarrow\mathrm{SL}(2)$. Since $\mathbb{C}^2$ is a self-dual representation of $\Gamma$, the number of edges from $j$ to $i$ equals that from $i$ to $j$. Moreover, when $\Gamma\neq\{1\}$ we have no edges from a vertex to itself. Hence the doubled McKay quiver is always obtained from an undirected graph, the \emph{McKay graph}, by replacing each undirected edge with two directed edges in opposite directions. The McKay graph is a simple graph unless $|\Gamma|\in\{1,2\}$. In fact: \begin{itemize} \item When $\Gamma=\{1\}$, the McKay graph is the `affine Dynkin diagram of type $\widetilde{\mathrm{A}}_0$', i.e.\ it has a single vertex and a single edge which is a loop joining that vertex to itself. \item (The \emph{McKay correspondence} of~\cite{mckay}.) When $\Gamma$ is cyclic of order $k+1$ for some $k\geq 1$, respectively binary dihedral of order $4(k-2)$ for some $k\geq 4$, respectively binary tetrahedral, binary octahedral or binary icosahedral, the McKay graph is the affine Dynkin diagram of type $\widetilde{\mathrm{A}}_k$, respectively $\widetilde{\mathrm{D}}_k$, respectively $\widetilde{\mathrm{E}}_6$, $\widetilde{\mathrm{E}}_7$ or $\widetilde{\mathrm{E}}_8$. (The affine Dynkin diagram of type $\widetilde{\mathrm{A}}_1$ has two vertices and two edges joining them.) One says that $\Gamma$ itself is of type $\mathrm{A}_k$, respectively $\mathrm{D}_k$, respectively $\mathrm{E}_6$, $\mathrm{E}_7$ or $\mathrm{E}_8$. 
\item When $\Gamma=\mathbb{G}_m$, the McKay graph is the Dynkin diagram of type $\mathrm{A}_\infty$ (infinite in both directions), with vertex set $\mathbb{Z}$. Here $S_i$, $i\in\mathbb{Z}$, is the one-dimensional representation $z\mapsto z^i$. \item When $\Gamma=N$, the McKay graph is the Dynkin diagram of type $\mathrm{D}_\infty$, with vertex set $\{(0,+),(0,-)\}\cup\mathbb{Z}^+$ and edges as follows: \[ \vcenter{ \xymatrix@R=2ex{ (0,+)\ar@{-}[dr]\\ &1\ar@{-}[r]&2\ar@{-}[r]&3\ar@{-}[r]&\cdot\cdots\cdot\\ (0,-)\ar@{-}[ur] } } \] Here $S_{0,+}$ is the trivial representation, $S_{0,-}$ is the nontrivial one-dimensional representation (in which $\mathbb{G}_m$ acts trivially and $[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]$ acts by $-1$), and $S_i$ for $i>0$ is the two-dimensional irreducible representation whose restriction to $\mathbb{G}_m$ is the direct sum of the representations labelled by $i$ and $-i$. \item When $\Gamma=\mathrm{SL}(2)$, the McKay graph is the Dynkin diagram of type $\mathrm{A}_{+\infty}$ (infinite in one direction), with vertex set $\mathbb{N}$. Here $S_i$, $i\in\mathbb{N}$, is the $(i+1)$-dimensional irreducible representation. \end{itemize}
\begin{rmk} It is an observation going back to Slodowy~\cite[Section 6.2, Appendix III]{slod} that certain inclusions of finite subgroups of $\mathrm{SL}(2)$ induce particularly nice relationships between their McKay graphs. For example, in the terminology introduced by the author and A.~Licata in~\cite{hl}, the fact that a binary dihedral group of order $4(k-2)$ contains an index-$2$ cyclic subgroup of order $2(k-2)$ is reflected in the fact that the affine Dynkin diagram of type $\widetilde{\mathrm{D}}_k$ can be obtained by applying the \emph{split-quotient} construction to a suitable graph involution of the affine Dynkin diagram of type $\widetilde{\mathrm{A}}_{2k-5}$; see~\cite[Example 2.7]{hl}. The same principles also apply to the infinite reductive subgroups of $\mathrm{SL}(2)$: the fact that $N$ contains $\mathbb{G}_m$ as an index-$2$ subgroup is reflected in the fact that the diagram of type $\mathrm{D}_\infty$ can be obtained by applying the split-quotient construction to the involution $i\mapsto -i$ of the diagram of type $\mathrm{A}_\infty$ (infinite in both directions). \end{rmk}
In the discussion of quiver varieties that follows, we will need to use certain results that were originally stated only for finite subgroups $\Gamma$ of $\mathrm{SL}(2)$ (for example in~\cite{hl,nak1,nak2,vv}). The relevant results all extend to the case of infinite reductive subgroups $\Gamma$ of $\mathrm{SL}(2)$ with the same proof.
\subsection{Quiver varieties} \label{ss:quiver-varieties}
Continue to let $\Gamma$ be a reductive subgroup of $\mathrm{SL}(2)$. Suppose that we are given a representation $\rho:\Gamma\to\mathrm{GL}(V)$ of $\Gamma$ on $V=\mathbb{C}^n$. We could also consider a representation of $\Gamma$ on $W=\mathbb{C}^r$, but for simplicity we assume that this representation is trivial, which agrees with the $\mu=0$ case we have considered elsewhere in the paper (see Remark~\ref{rmk:slice}).
Let $i:\Gamma\hookrightarrow\mathrm{GL}(2)$ be the inclusion. Then $\Gamma$ acts on $\Lambda(V,W)$ via the homomorphism $(\rho,i):\Gamma\to\mathrm{GL}(V)\times\mathrm{GL}(2)$, and we can consider the fixed-point subvariety $\Lambda(V,W)^{\Gamma,\rho}$. This carries an action of $\mathrm{GL}_{\Gamma,\rho}(V)\times G$, where $\mathrm{GL}_{\Gamma,\rho}(V)\subseteq\mathrm{GL}(V)$ is the automorphism group of the representation $\rho$. Let $\Lambda(V,W)^{s,\Gamma,\rho}$, $\Lambda(V,W)^{sc,\Gamma,\rho}$ denote the intersections of $\Lambda(V,W)^{\Gamma,\rho}$ with $\Lambda(V,W)^{s}$, $\Lambda(V,W)^{sc}$. We define \[ \begin{split} \mathfrak{M}^{\Gamma,\rho}(V,W)&:=\Lambda(V,W)^{s,\Gamma,\rho}/\mathrm{GL}_{\Gamma,\rho}(V),\\ \mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)&:=\Lambda(V,W)^{sc,\Gamma,\rho}/\mathrm{GL}_{\Gamma,\rho}(V). \end{split} \] Then $\mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)$ is an open subvariety of $\mathfrak{M}^{\Gamma,\rho}(V,W)$; for general $\rho$, either the former variety or both varieties could be empty (that is, $\Lambda(V,W)^{\Gamma,\rho}$ could have no intersection with $\Lambda(V,W)^{sc}$ or even with $\Lambda(V,W)^{s}$).
One can also define $\mathfrak{M}_0^{\Gamma,\rho}(V,W):=\Lambda(V,W)^{\Gamma,\rho}/\mathrm{GL}_{\Gamma,\rho}(V)$ and a projective morphism $\pi:\mathfrak{M}^{\Gamma,\rho}(V,W)\to \mathfrak{M}_0^{\Gamma,\rho}(V,W)$, in the same way as in the $\Gamma=\{1\}$ case which was considered in \S\ref{ss:1-case}. The comments in \S\ref{ss:1-case} about $\mathfrak{M}^{\mathrm{reg}}(V,W)$ versus $\mathfrak{M}_0^{\mathrm{reg}}(V,W)$ apply equally well to $\mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)$.
The varieties $\mathfrak{M}^{\Gamma,\rho}(V,W)$ are examples of \emph{Nakajima quiver varieties}; for short, we will refer to the varieties $\mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)$ as quiver varieties also. As Nakajima observed in~\cite{nak1}, by splitting up the representation $(V,\rho)$ into its isotypic components, one can regard the $B$ components of a point $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)^{\Gamma,\rho}$ as a configuration of linear maps $(B_h)$ assigned to the edges $h$ of the doubled McKay quiver of $\Gamma$, and an element of $\mathrm{GL}_{\Gamma,\rho}(V)$ as a tuple of invertible linear transformations assigned to the vertices of the McKay graph of $\Gamma$. Thus the above definition of $\mathfrak{M}^{\Gamma,\rho}(V,W)$, which comes from~\cite[Section 2]{vv}, becomes a special case of the usual definition of quiver variety from~\cite{nak1,nak2}. We will make the latter definition explicit in the $\Gamma=\mathbb{G}_m$ and $\Gamma=N$ cases in \S\ref{ss:Gm-case} and \S\ref{ss:N-case} respectively.
The following is a special case of a general result about quiver varieties, due to Nakajima and Crawley-Boevey:
\begin{prop} \label{prop:connected} Let $\rho:\Gamma\to\mathrm{GL}(V)$ be a representation. If $\mathfrak{M}^{\Gamma,\rho}(V,W)$ is nonempty, it is nonsingular and connected. The same holds for $\mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)$. \end{prop}
\begin{proof} Since $\mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)$ is an open subvariety of $\mathfrak{M}^{\Gamma,\rho}(V,W)$, it suffices to prove the statements about $\mathfrak{M}^{\Gamma,\rho}(V,W)$. The nonsingularity statement is~\cite[Corollary 3.12]{nak2}, and the connectedness statement is proved in~\cite[Section 1]{c-b}. \end{proof}
Because of the isomorphism~\eqref{eqn:barth-gamma}, we are interested in the fixed-point subvariety $\mathfrak{M}(V,W)^\Gamma$, whose definition does not involve $\rho$ (only the inclusion $i:\Gamma\hookrightarrow\mathrm{GL}(2)$). The following result was implicit in~\cite{nak1,vv} and appeared explicitly, but without any details of the proof, as~\cite[equation (3.40)]{hl}.
\begin{prop} \label{prop:disconnection-full} The inclusion maps $\Lambda(V,W)^{s,\Gamma,\rho}\hookrightarrow\Lambda(V,W)^s$ induce a $G$-equivariant isomorphism \[ \coprod_{\rho}\, \mathfrak{M}^{\Gamma,\rho}(V,W) \simto \mathfrak{M}(V,W)^\Gamma, \] where the domain is a disconnected union over the finite set of equivalence classes of representations $\rho:\Gamma\to\mathrm{GL}(V)$. Hence the nonempty varieties $\mathfrak{M}^{\Gamma,\rho}(V,W)$ constitute the connected components of $\mathfrak{M}(V,W)^\Gamma$. \end{prop}
\noindent As we mentioned in~\cite{hl}, one can give an `elementary' proof of Proposition~\ref{prop:disconnection-full}, which is analogous to (but simpler than) the proof of~\cite[Theorem 3.9]{hl}. The first step, analogous to~\cite[Lemma 3.17]{hl}, is to note that the inclusion $\Lambda(V,W)^{s,\Gamma,\rho}\hookrightarrow\Lambda(V,W)^s$ does indeed induce an injection $\mathfrak{M}^{\Gamma,\rho}(V,W)\hookrightarrow\mathfrak{M}(V,W)^\Gamma$, which follows immediately from~\eqref{eqn:stability-freeness}. The next step, analogous to~\cite[Lemma 3.18]{hl}, is to show that every point of $\mathfrak{M}(V,W)^\Gamma$ belongs to the image of $\mathfrak{M}^{\Gamma,\rho}(V,W)\hookrightarrow\mathfrak{M}(V,W)^\Gamma$ for a unique $\rho$ (up to equivalence). Then the rest of the argument is essentially the same as in the proof of~\cite[Theorem 3.9]{hl}: the union is disconnected because the reductive group $\Gamma$ has a discrete classification of representations, and the bijection must be an isomorphism because $\mathfrak{M}(V,W)$ is nonsingular, so every connected component of $\mathfrak{M}(V,W)^\Gamma$ is also nonsingular.
However, as we also mentioned in~\cite{hl}, it is more enlightening to interpret Proposition~\ref{prop:disconnection-full} using moduli spaces. Since~\eqref{eqn:barth-gamma} involved $\mathfrak{M}^{\mathrm{reg}}(V,W)^\Gamma$ rather than the whole of $\mathfrak{M}(V,W)^\Gamma$, we will restrict ourselves to recalling the moduli-space interpretation of the version of Proposition~\ref{prop:disconnection-full} concerning $\mathfrak{M}^{\mathrm{reg}}(V,W)^\Gamma$ (which would be a corollary of the full statement Proposition~\ref{prop:disconnection-full}).
\subsection{$\Gamma$-equivariant vector bundles} \label{ss:gamma-equiv}
If $(\mathcal{E},\Phi)$ is a pair in $\mathrm{Bun}_G(\mathbb{A}^2)$, then the second Chern class of $\mathcal{E}$ is $\dim H^1(\mathbb{P}^2,\mathcal{E}(-\ell_\infty))$. Moreover, the proof of Proposition~\ref{prop:barth} in~\cite[Chapter 2]{nak3} identifies $H^1(\mathbb{P}^2,\mathcal{E}(-\ell_\infty))$ with the vector space $V=\mathbb{C}^n$ in the definition of $\mathfrak{M}(V,W)$. Hence we have a disconnected union \begin{equation} \label{eqn:disconnection} \mathrm{Bun}_G^n(\mathbb{A}^2/\Gamma)=\coprod_\rho\, \mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma), \end{equation} where the union is over equivalence classes of representations $\rho:\Gamma\to\mathrm{GL}(V)$. Namely, $\mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma)$ is the subvariety of $\mathrm{Bun}_G^n(\mathbb{A}^2/\Gamma)$ parametrizing $\Gamma$-equivariant pairs $(\mathcal{E},\Phi)$ where the induced representation of $\Gamma$ on $V\cong H^1(\mathbb{P}^2,\mathcal{E}(-\ell_\infty))$ is equivalent to $\rho$. As in the above sketched proof of Proposition~\ref{prop:disconnection-full}, the union must be disconnected because $\Gamma$ is reductive.
The following result of Varagnolo and Vasserot is part of~\cite[Theorem 1]{vv}.
\begin{prop} \label{prop:barth-rho} For each $\rho:\Gamma\to\mathrm{GL}(V)$, the isomorphism $\Theta$ of Proposition~\ref{prop:barth} restricts to an isomorphism \[ \mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)\simto \mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma). \] Here we use the identification of $\mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)$ with a subvariety of $\mathfrak{M}^{\mathrm{reg}}(V,W)$ induced by the inclusion map $\Lambda(V,W)^{sc,\Gamma,\rho}\hookrightarrow\Lambda(V,W)^{sc}$. As a consequence, $\mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma)$ is connected if it is nonempty. \end{prop}
Combining~\eqref{eqn:disconnection} and Proposition~\ref{prop:barth-rho}, we get the desired variant of Proposition~\ref{prop:disconnection-full}: \begin{equation} \label{eqn:reg-isom} \mathfrak{M}^{\mathrm{reg}}(V,W)^\Gamma= \coprod_{\rho}\, \mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W). \end{equation} On the other hand, combining Proposition~\ref{prop:connected} and Proposition~\ref{prop:barth-rho}, we see that in the disconnected union~\eqref{eqn:disconnection}, the nonempty varieties $\mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma)$ on the right-hand side are exactly the connected components of $\mathrm{Bun}_G^n(\mathbb{A}^2/\Gamma)$. Hence the nonempty varieties $\mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma)$, as not just $\rho$ but also $n$ varies, are the connected components of $\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)$.
As we have already observed in the $\Gamma=\mathbb{G}_m$ and $\Gamma=N$ cases (in \S\ref{ss:proof} and \S\ref{ss:n-equiv} respectively), the moduli space $\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)$ has another natural decomposition as a disconnected union, where one considers the action of $\Gamma$ on the fibre of the vector bundle at the $\Gamma$-fixed point $[1:0:0]\in\mathbb{A}^2$: \begin{equation} \label{eqn:disconnection2} \mathrm{Bun}_G(\mathbb{A}^2/\Gamma)=\coprod_\tau\, \mathrm{Bun}_G^\tau(\mathbb{A}^2/\Gamma). \end{equation} Here the union is over $G$-conjugacy classes of homomorphisms $\tau:\Gamma\to G$, i.e.\ equivalence classes of determinant-$1$ representations of $\Gamma$ on $W$ (\emph{not} on $V$ as above). The relationship between~\eqref{eqn:disconnection} and~\eqref{eqn:disconnection2} is pinned down by the next result, which is due to Nakajima.
\begin{prop} \label{prop:fundamental} Let $\rho:\Gamma\to\mathrm{GL}(V)$ be a representation. \begin{enumerate} \item The variety $\mathfrak{M}^{\Gamma,\rho,\mathrm{reg}}(V,W)\cong\mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma)$ is nonempty if and only if there is a homomorphism $\tau:\Gamma\to G$ such that we have the following equivalence of representations of $\Gamma$: \begin{equation} \label{eqn:fundamental} (V,\rho)\oplus (V,\rho) \oplus (W,\tau) \cong \left( (V,\rho)\otimes(\mathbb{C}^2,i) \right) \oplus (W,\mathrm{triv}), \end{equation} where $i:\Gamma\hookrightarrow\mathrm{GL}(2)$ is the inclusion and $\mathrm{triv}:\Gamma\to\mathrm{GL}(W)$ is the trivial homomorphism. It is clear that $\tau$ is unique up to $G$-conjugacy if it exists. \item If $\mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma)$ is nonempty, then, in the disconnected union~\eqref{eqn:disconnection2}, it is contained in $\mathrm{Bun}_G^\tau(\mathbb{A}^2/\Gamma)$ where $\tau$ is as in part \textup{(1)}. \end{enumerate} \end{prop}
\begin{proof} Part (1) follows from the nonemptiness criterion given in~\cite[Corollary 10.8]{nak2} (the ``only if'' direction was proved earlier in~\cite[Lemma 8.1]{nak1}), after translating from the setting of general quivers to the specific case of the McKay graph of $\Gamma$. Note that the assumption of~\cite[Proposition 10.5]{nak2} does hold in our setting because $r\geq 2$. Part (2) amounts to saying that if $(\mathcal{E},\Phi)\in\mathrm{Bun}_G(\mathbb{A}^2/\Gamma)$, and $\Gamma$ acts via $\rho$ on $V\cong H^1(\mathbb{P}^2,\mathcal{E}(-\ell_\infty))$, then the action of $\Gamma$ on the fibre of $\mathcal{E}$ at $[1:0:0]\in\mathbb{A}^2$ is via the representation $\tau:\Gamma\to G$ defined by~\eqref{eqn:fundamental}. This follows from the description $\mathcal{E}=\ker(b)/\mathrm{im}(a)$ given in Proposition~\ref{prop:barth}; we need Proposition~\ref{prop:barth-equiv} to identify the $V\oplus V$ in the domain of $b$ with $V\otimes\mathbb{C}^2$ as in~\eqref{eqn:fundamental}. \end{proof}
As is well known from~\cite{nak1,nak2}, one can translate~\eqref{eqn:fundamental} into very concrete terms. The equivalence class of a representation $\rho:\Gamma\to\mathrm{GL}(V)$ may be encoded in the \emph{multiplicity vector} $\mathbf{v}=(v_i)_{i\in I_\Gamma}$ indexed by the vertices of the McKay graph of $\Gamma$, where $v_i$ is the multiplicity of the irreducible representation $S_i$ in $(V,\rho)$. Note that even if $\Gamma$ is infinite, $\mathbf{v}$ is finitely-supported. By definition, the multiplicity vector of $(V,\rho)\otimes(\mathbb{C}^2,i)$ is $A_\Gamma\mathbf{v}$ where $A_\Gamma=(a_{ij})_{i,j\in I_\Gamma}$ is the adjacency matrix of the McKay graph (for the non-simple graphs, we define $A_\Gamma=(2)$ when $\Gamma=\{1\}$ and $A_\Gamma=(\begin{smallmatrix}0&2\\2&0\end{smallmatrix})$ when $|\Gamma|=2$). The multiplicity vector of $(W,\mathrm{triv})$ is $r\delta_0$, where $\delta_0=(\delta_{0i})$ and $0\in I_\Gamma$ is the vertex corresponding to the trivial representation. Define the \emph{Cartan matrix} $C_\Gamma=(c_{ij})_{i,j\in I_\Gamma}$ by $c_{ij}=2\delta_{ij}-a_{ij}$. Then~\eqref{eqn:fundamental} is equivalent to saying that: \begin{equation} \label{eqn:fundamental1} \text{The multiplicity vector of $(W,\tau)$ is $r\delta_0-C_\Gamma \mathbf{v}$.} \end{equation} The existence of $\tau:\Gamma\to G$ satisfying~\eqref{eqn:fundamental} is therefore equivalent to the condition \begin{equation} \label{eqn:fundamental2} r\delta_0-C_\Gamma \mathbf{v} \geq 0\ \text{ (componentwise nonnegativity).} \end{equation} Here we have used the fact that if $\tau$ is a representation of $\Gamma$ on $W$ satisfying~\eqref{eqn:fundamental}, then automatically $\tau(\Gamma)\subset\mathrm{SL}(W)=G$.
When $\Gamma$ is finite, $C_\Gamma$ is the Cartan matrix of the corresponding affine Dynkin diagram. It is then well known that $C_\Gamma$ has corank 1, and its kernel is generated by the vector $(\dim S_i)_{i\in I_\Gamma}$ giving the dimensions of the irreducible representations of $\Gamma$. This means that if $(V,\rho)$ gives rise to $(W,\tau)$ as in Proposition~\ref{prop:fundamental}(1), then so does $(V,\rho)\oplus(\mathbb{C}\Gamma,\mathrm{reg})^{\oplus m}$ for any $m\in\mathbb{N}$, where $\mathrm{reg}$ denotes the regular representation of $\Gamma$; but $(W,\tau)$ determines $(V,\rho)$ up to this ambiguity. Thus each nonempty $\mathrm{Bun}_G^\tau(\mathbb{A}^2/\Gamma)$ has infinitely many connected components $\mathrm{Bun}_G^\rho(\mathbb{A}^2/\Gamma)$, of which, however, there is at most one with any fixed value of the second Chern class $n=\dim V$, as observed in~\cite[Remark 4.6(2)]{bf}.
When $\Gamma$ is infinite, there is no finitely-supported nonzero vector in the kernel of $C_\Gamma$, so there is at most one $(V,\rho)$ giving rise to a given $(W,\tau)$, and each nonempty $\mathrm{Bun}_G^\tau(\mathbb{A}^2/\Gamma)$ is connected. We will now see how the correspondence between $\rho$ and $\tau$ works explicitly in the $\Gamma=\mathbb{G}_m$ and $\Gamma=N$ cases.
\subsection{The $\Gamma=\mathbb{G}_m$ case} \label{ss:Gm-case}
When $\Gamma=\mathbb{G}_m$, we continue to label $G$-conjugacy classes of homomorphisms $\tau:\mathbb{G}_m\to G$ by dominant coweights $\lambda\in\Lambda^+$, which in the present $G=\mathrm{SL}(r)$ case we identify with weakly decreasing $r$-tuples $(\lambda_1,\lambda_2,\cdots,\lambda_r)\in\mathbb{Z}^r$ such that $\lambda_1+\cdots+\lambda_r=0$.
As in the previous subsection, we encode an equivalence class of representations $\rho:\mathbb{G}_m\to\mathrm{GL}(V)$ by its multiplicity vector $\mathbf{v}=(v_i)_{i\in\mathbb{Z}}$. The condition~\eqref{eqn:fundamental2} becomes \begin{equation} \label{eqn:fund-gm} r\delta_{0i}-2v_i+v_{i-1}+v_{i+1}\geq 0\text{ for all }i\in\mathbb{Z}, \end{equation} and if~\eqref{eqn:fund-gm} holds, the resulting coweight $\lambda=(\lambda_1,\lambda_2,\cdots,\lambda_r)$ is determined by the property that the number $m_i(\lambda)$ of occurrences of a given $i\in\mathbb{Z}$ among $\lambda_1,\cdots,\lambda_r$ equals the left-hand side of~\eqref{eqn:fund-gm}. All dominant coweights arise in this way: for a given $\lambda\in\Lambda^+$, the vector $\mathbf{v}$ giving rise to it is defined by \begin{equation} \label{eqn:v-gm} v_i=\begin{cases} \displaystyle\sum_{s=1}^r\max\{\lambda_s-i,0\},&\text{ if $i\geq 0$,}\\ \displaystyle\sum_{s=1}^r\max\{i-\lambda_s,0\},&\text{ if $i<0$.} \end{cases} \end{equation} Of course, $\mathbf{v}$ is finitely supported because $v_i=0$ unless $\lambda_1>i>\lambda_r$. Note that \begin{equation} \label{eqn:n-gm} n=\sum_{i\in\mathbb{Z}} v_i=\frac{1}{2}\sum_{s=1}^r \lambda_s^2, \end{equation} in accordance with~\cite[Theorem 5.2(1)]{bf}.
Hence, in the $\Gamma=\mathbb{G}_m$ case, Proposition~\ref{prop:fundamental} says that for any $\lambda\in\Lambda^+$ we have $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)=\mathrm{Bun}_G^\rho(\mathbb{A}^2/\mathbb{G}_m)$ where $\rho:\mathbb{G}_m\to\mathrm{GL}(V)$ is a representation of $\mathbb{G}_m$ with multiplicity vector $\mathbf{v}$ given by~\eqref{eqn:v-gm} (and degree $n$ given by~\eqref{eqn:n-gm}).
Recall that Proposition~\ref{prop:barth-rho} gives us an isomorphism between $\mathrm{Bun}_G^\rho(\mathbb{A}^2/\mathbb{G}_m)$ and the quiver variety $\mathfrak{M}^{\mathbb{G}_m,\rho,\mathrm{reg}}(V,W)=\Lambda(V,W)^{sc,\mathbb{G}_m,\rho}/\mathrm{GL}_{\mathbb{G}_m,\rho}(V)$. We now recall how to pass to the more usual description of this as a quiver variety of type $\mathrm{A}$.
The decomposition of $(V,\rho)$ into its isotypic components for $\mathbb{G}_m$ constitutes a $\mathbb{Z}$-grading $V=\bigoplus_{i\in\mathbb{Z}} V_i$. Since we are using the trivial representation of $\mathbb{G}_m$ on $W$, the analogous $\mathbb{Z}$-grading of $W$ simply has $W_0=W$ and $W_i=\{0\}$ for $i\neq 0$.
By definition of the $\mathbb{G}_m$-action on $\Lambda(V,W)$, a quadruple $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)$ is fixed by $\mathbb{G}_m$ if and only if the following conditions are satisfied: \begin{itemize} \item $B_1$ and $B_2$ lower and raise the $\mathbb{Z}$-gradings respectively, i.e.\ $B_1(V_i)\subseteq V_{i-1}$ and $B_2(V_i)\subseteq V_{i+1}$ for all $i\in\mathbb{Z}$; \item $\mathbf{i}:W\to V$ respects the $\mathbb{Z}$-gradings, i.e.\ $\mathbf{i}(W)\subseteq V_0$; \item $\mathbf{j}:V\to W$ respects the $\mathbb{Z}$-gradings, i.e.\ $\mathbf{j}(V_i)=0$ unless $i=0$. \end{itemize} Thus an element of $\Lambda(V,W)^{\mathbb{G}_m,\rho}$ consists of a configuration of linear maps: \[ \vcenter{ \xymatrix@R=15pt{ &&&W\ar@/_/[dd]_(.4){\mathbf{i}}&&& \\ \\ &V_{-2}\ar@/^/[r]^-{B_{2}}\ar@{.}[l]&V_{-1}\ar@/^/[l]^-{B_{1}}\ar@/^/[r]^-{B_{2}}&V_{0}\ar@/^/[l]^-{B_{1}}\ar@/^/[r]^-{B_{2}}\ar@/_/[uu]_(.6){\mathbf{j}}&V_{1}\ar@/^/[l]^-{B_{1}}\ar@/^/[r]^-{B_{2}}&V_2\ar@{.}[r]\ar@/^/[l]^-{B_{1}}& } } \]
where we have written simply $B_1$ for each component $B_1|_{V_i}$, and similarly for $B_2$. The equation~\eqref{eqn:adhm}, when broken into its various components, is equivalent to the following equations: \begin{equation} \begin{split} B_1B_2+\mathbf{i}\mathbf{j}&=B_2B_1\text{ on }V_0,\\ B_1B_2&=B_2B_1\text{ on $V_i$ for all $i\neq 0$.} \end{split} \end{equation}
Of course, since $V_i=\{0\}$ for sufficiently large $|i|$, the above configuration of linear maps is effectively finite.
We can obviously identify $\mathrm{GL}_{\mathbb{G}_m,\rho}(V)$ with $\prod_{i\in\mathbb{Z}} \mathrm{GL}(V_i)$, and its action on the above configurations of linear maps is the obvious one. So, after making a suitable choice of orientation of the underlying graph of type $\mathrm{A}$, we recover the usual set-up of quiver varieties of (finite) type $\mathrm{A}$ as in~\cite{nak1,nak2}. In this context it is more common to use the notations $\mathfrak{M}(\mathbf{v},\mathbf{w})$ for $\mathfrak{M}^{\mathbb{G}_m,\rho}(V,W)$ and $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})$ for $\mathfrak{M}^{\mathbb{G}_m,\rho,\mathrm{reg}}(V,W)$, where $\mathbf{v}=(v_i)_{i\in\mathbb{Z}}$ is the dimension vector of the $\mathbb{Z}$-graded vector space $V$, and $\mathbf{w}$ is defined likewise; in our case, $\mathbf{v}$ is given by~\eqref{eqn:v-gm} and $\mathbf{w}=r\delta_0$.
Combining Proposition~\ref{prop:barth-rho} with Proposition~\ref{prop:identification}, we obtain the following special case of a result of Mirkovi\'c and Vybornov~\cite[Theorem 3.1]{mvy-cr}. Their proof involved a comparison with nilpotent orbits; the moduli-space proof below was suggested in~\cite{bf} and~\cite{nak4}.
\begin{prop} \label{prop:mv} For any $\lambda\in\Lambda^+$, with $\mathbf{v}$ defined by~\eqref{eqn:v-gm} and $\mathbf{w}=r\delta_0$, we have a $G$-equivariant isomorphism \[ \Psi\circ\Theta:\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})\simto\mathsf{Gr}_0^\lambda \] which sends the orbit of $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)^{sc,\mathbb{G}_m,\rho}$ to \[ \gamma:=1+(\mathbf{j}\mathbf{i})t^{-1}+(\mathbf{j} B_2B_1\mathbf{i})t^{-2}+(\mathbf{j} B_2^2B_1^2 \mathbf{i})t^{-3}+\cdots \in G[t^{-1}]_1=\mathsf{Gr}_0. \] \end{prop}
\begin{proof} We have already seen in Proposition~\ref{prop:barth-rho} that we have a $G$-equivariant isomorphism $\Theta:\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})=\mathfrak{M}^{\mathbb{G}_m,\rho,\mathrm{reg}}(V,W)\simto\mathrm{Bun}_G^\rho(\mathbb{A}^2/\mathbb{G}_m)=\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$, and Proposition~\ref{prop:identification} gave a $G$-equivariant isomorphism $\Psi:\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)\simto\mathsf{Gr}_0^\lambda$. All that remains to be proved is the explicit formula for the composition $\Psi\circ\Theta$.
To a configuration of maps $(B_1,B_2,\mathbf{i},\mathbf{j})$ as above that is stable and costable, the isomorphism $\Theta$ assigns a $\mathbb{G}_m$-equivariant pair $(\mathcal{E},\Phi)\in\mathrm{Bun}_G(\mathbb{A}^2)$ by the rule of Proposition~\ref{prop:barth}; that is, $\mathcal{E}=\ker(b)/\mathrm{im}(a)$ with $\Phi$ being the trivialization on $\ell_\infty$ explained after Proposition~\ref{prop:barth}. We want to show that $\Psi(\mathcal{E},\Phi)\in\mathsf{Gr}_0^\lambda$ equals the element $\gamma$ in the statement, and we will use the description of $\mathrm{Bun}_G(\mathbb{A}^2)$ and of the bijection $\Psi:\mathrm{Bun}_G(\mathbb{A}^2/\mathbb{G}_m)\to\mathsf{Gr}_0$ given in \S\ref{ss:p2}.
Consider the restriction of $\mathcal{E}$ to the open set $U_1$ defined by $z_1\neq 0$. Since $B_1$ lowers the $\mathbb{Z}$-grading on $V$ as seen above, it is nilpotent; hence, on $U_1$, the linear transformation $z_0 B_1 - z_1\mathrm{id}_V$ appearing in the definitions of $a$ and $b$ is invertible. So we can extend $\Phi^{-1}|_{U_1\smallsetminus U_0}$ to a trivialization of $\mathcal{E}$ on $U_1$, namely the isomorphism $W\otimes{\mathcal{O}}_{U_1}\simto \mathcal{E}|_{U_1}$ induced by the injective map \[ [0\quad -z_0(z_0 B_1-z_1 \mathrm{id}_V)^{-1}\mathbf{i}\quad \mathrm{id}_W]:W\otimes{\mathcal{O}}_{U_1}\to (V\oplus V\oplus W)\otimes{\mathcal{O}}_{U_1}, \]
whose image clearly lies in $\ker(b|_{U_1})$ and is transverse to $\mathrm{im}(a|_{U_1})$.
In the same way, we can extend $\Phi^{-1}|_{U_2\smallsetminus U_0}$ to a trivialization of $\mathcal{E}$ on $U_2$, namely the isomorphism $W\otimes{\mathcal{O}}_{U_2}\simto \mathcal{E}|_{U_2}$ induced by the injective map \[ [z_0(z_0 B_2-z_2 \mathrm{id}_V)^{-1}\mathbf{i}\quad 0\quad \mathrm{id}_W]:W\otimes{\mathcal{O}}_{U_2}\to (V\oplus V\oplus W)\otimes{\mathcal{O}}_{U_2}, \]
whose image clearly lies in $\ker(b|_{U_2})$ and is transverse to $\mathrm{im}(a|_{U_2})$.
The transition function on $U_1\cap U_2$ relating these trivializations of $\mathcal{E}$ on $U_1$ and $U_2$ is the following element of $\ker(G[z_0/z_1,z_2/z_1,z_1/z_2]\to G[z_2/z_1,z_1/z_2])$: \[ \begin{split} g_2^1&=\mathrm{id}_W+\mathbf{j} z_0(z_0B_2-z_2\mathrm{id}_V)^{-1}z_0(z_0 B_1-z_1 \mathrm{id}_V)^{-1}\mathbf{i}\\ &=\mathrm{id}_W+(z_0^2/z_1z_2)\,\mathbf{j}(\mathrm{id}_V-(z_0/z_2)B_2)^{-1}(\mathrm{id}_V-(z_0/z_1)B_1)^{-1}\mathbf{i}\\ &=\mathrm{id}_W+(z_0^2/z_1z_2)\,\mathbf{j}\left(\sum_{i,j=0}^\infty (z_0/z_2)^i(z_1/z_2)^j B_2^i B_1^j\right)\mathbf{i}\\ &=\mathrm{id}_W+(z_0^2/z_1z_2)\,\mathbf{j}\left(\sum_{i=0}^\infty (z_0/z_2)^i(z_1/z_2)^i B_2^i B_1^i\right)\mathbf{i}\\
&=\gamma|_{t\mapsto z_1z_2/z_0^2}, \end{split} \] where the fourth equality holds because of the known effect of $B_1,B_2,\mathbf{i},\mathbf{j}$ on the $\mathbb{Z}$-gradings.
Hence, as an element of $\mathrm{Bun}_G(\mathbb{A}^2)=Z'/H'$, $(\mathcal{E},\Phi)$ is the $H'$-orbit of some triple $(g_1^0,g_2^0,g_2^1)\in Z'$ with $g_2^1=\gamma|_{t\mapsto z_1z_2/z_0^2}$. On the other hand, if $\Psi(\mathcal{E},\Phi)=\gamma'\in\mathsf{Gr}_0$, then Proposition~\ref{prop:p2-psi} says that $(\mathcal{E},\Phi)$ is the $H'$-orbit of a triple $(\tilde{g}_{1}^{0},\tilde{g}_{2}^{0},\tilde{g}_{2}^{1})\in Z'$ with $\tilde{g}_2^1=\gamma'|_{t\mapsto z_1z_2/z_0^2}$. We conclude that \begin{equation} \label{eqn:endgame}
\gamma'|_{t\mapsto z_1z_2/z_0^2}=h_2\left(\gamma|_{t\mapsto z_1z_2/z_0^2}\right)h_1^{-1} \end{equation} for some elements \[ \begin{split} h_1&\in \ker(G[z_0/z_1,z_2/z_1]\to G[z_2/z_1]), \\ h_2 &\in \ker(G[z_0/z_2,z_1/z_2]\to G[z_1/z_2]). \end{split} \]
We claim that it follows from~\eqref{eqn:endgame} that $\gamma'=\gamma$ as desired. The reason is that $(\gamma'|_{t\mapsto z_1z_2/z_0^2})h_1(\gamma|_{t\mapsto z_1z_2/z_0^2})^{-1}$ belongs to the group $G[z_0/z_1,z_2/z_1,z_0^2/z_1z_2]$, whose intersection with $\ker(G[z_0/z_2,z_1/z_2]\to G[z_1/z_2])$ is trivial. So~\eqref{eqn:endgame} forces $h_2=1$, and similarly $h_1=1$. \end{proof}
\begin{rmk} \label{rmk:mv} The result of Mirkovi\'c and Vybornov~\cite[Theorem 3.1]{mvy-cr} is considerably more general than Proposition~\ref{prop:mv}. Firstly, it involves $\mathsf{Gr}_\mu^\lambda$ for general $\mu$, as in Remark~\ref{rmk:slice}; this corresponds to taking more general choices of $\mathbf{w}$. Secondly, it relates not just $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})$ with $\mathsf{Gr}_\mu^\lambda$, but the whole affine variety $\mathfrak{M}_0(\mathbf{v},\mathbf{w})$ with the closure $\overline{\mathsf{Gr}}_\mu^\lambda$, and the whole quiver variety $\mathfrak{M}(\mathbf{v},\mathbf{w})$ with a certain desingularization of $\overline{\mathsf{Gr}}_\mu^\lambda$. At least the first of these variants can be proved by an argument similar to the above proof of Proposition~\ref{prop:mv}, using the Uhlenbeck closure of the moduli space $\mathrm{Bun}_G^\lambda(\mathbb{A}^2/\mathbb{G}_m)$ as in Remark~\ref{rmk:uhlenbeck}; for this, combine~\cite[Theorem 5.2(2)]{bf},~\cite[Theorem 5.12]{bfg} and~\cite[Theorem 1]{vv}. \end{rmk}
Recall that the involution $\iota$ of $\mathsf{Gr}_0$ satisfies $\iota(\mathsf{Gr}_0^\lambda)=\mathsf{Gr}_0^{-w_0\lambda}$. In terms of $r$-tuples, $-w_0(\lambda_1,\cdots,\lambda_r)=(-\lambda_r,\cdots,-\lambda_1)$. If $\mathbf{v}$ is the dimension vector corresponding to $\lambda$ as above, then the dimension vector corresponding to $-w_0\lambda$ is $\mathbf{v}^\dagger$ where $v_i^\dagger=v_{-i}$. Indeed, the representation $\rho^\dagger:\mathbb{G}_m\to\mathrm{GL}(V)$ for $-w_0\lambda$ can be chosen to be the composition of $\rho:\mathbb{G}_m\to\mathrm{GL}(V)$ with the inverse map on $\mathbb{G}_m$, so that the $i$th isotypic component of $V$ for $\rho^\dagger$ is exactly $V_{-i}$.
\begin{prop} \label{prop:mv-iota} Let $\lambda\in\Lambda^+$ and define $\mathbf{v}$ and $\mathbf{w}$ as above. Under the isomorphisms of Proposition~\ref{prop:mv}, the map $\iota:\mathsf{Gr}_0^\lambda\simto\mathsf{Gr}_0^{-w_0\lambda}$ corresponds to the map $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})\simto\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v}^\dagger,\mathbf{w})$ which sends the orbit of $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)^{sc,\mathbb{G}_m,\rho}$ to the orbit of $(-B_2,B_1,\mathbf{i},\mathbf{j})\in\Lambda(V,W)^{sc,\mathbb{G}_m,\rho^\dagger}$. \end{prop}
\begin{proof} Using the formula for the isomorphism in Proposition~\ref{prop:mv}, this claim amounts to saying that for any $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)^{sc,\mathbb{G}_m,\rho}$ we have the following equality in $G[t^{-1}]_1$: \begin{equation} \left(1-\sum_{i=0}^\infty (\mathbf{j} B_1^i B_2^i\mathbf{i})\,t^{-i-1}\right)^{-1} =1+\sum_{i=0}^\infty (\mathbf{j} B_2^i B_1^i\mathbf{i})\,t^{-i-1}. \end{equation} This equality was proved by direct computation in~\cite[Lemma 4.10]{hl}. But we now have a more conceptual explanation: the claim follows by combining Theorem~\ref{thm:normalizer} with the $\mathrm{GL}(2)$-equivariance of $\Theta$ proved in Proposition~\ref{prop:barth-equiv}. \end{proof}
If $\lambda\in\Lambda_1^+$, i.e.\ $\lambda_{r+1-s}=-\lambda_s$ for all $s$, then the corresponding $\mathbf{v}$ satisfies $\mathbf{v}^\dagger=\mathbf{v}$. In this case, Proposition~\ref{prop:mv-iota} says that the involution $\iota$ of $\mathsf{Gr}_0^\lambda$ corresponds under the isomorphism of Proposition~\ref{prop:mv} to a \emph{diagram involution} of $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})$, in the sense of~\cite[Section 3.2]{hl} (specifically, set $\sigma_n=\mathrm{id}$ in~\cite[Example 3.8]{hl}; one must take into account that in~\cite{hl} we used a slightly different form of the ADHM equation). In the next subsection we will effectively consider the fixed-point subvarieties of these involutions. On the quiver variety side, such a fixed-point subvariety is described in great generality by~\cite[Theorem 3.9]{hl}. However, there will be no need to invoke that result; the reason we can bypass it is that, in the special case of diagram involutions of type-$\mathrm{A}$ quiver varieties, one can deduce~\cite[Theorem 3.9]{hl} easily from Proposition~\ref{prop:disconnection-full}, as outlined in~\cite[Section 3.7]{hl}.
\subsection{The $\Gamma=N$ case} \label{ss:N-case}
Recall from \S\ref{ss:n-equiv} the notation $\Xi$ for the set of $G$-conjugacy classes of homomorphisms $\tau:N\to G$, i.e.\ equivalence classes of determinant-$1$ representations of $N$ on $W=\mathbb{C}^r$, and the disjoint union $\Xi=\bigsqcup_{\lambda\in\Lambda_1^+}\Xi(\lambda)$.
Suppose that $\lambda\in\Lambda_1^+$, and let $\xi\in\Xi(\lambda)$. By definition, we can choose $\tau\in\xi$ such that the restriction of $\tau$ to $\mathbb{G}_m$ is $\lambda$. Recall the labelling of the irreducible representations of $N$ given in \S\ref{ss:subgroups}, and note that a representation of $N$ has determinant $1$ if and only if the sum of the multiplicities of $S_{0,-},S_2,S_4,\cdots$ is even. For $i>0$, the multiplicity of the irreducible representation $S_i$ in $(W,\tau)$ is $m_i(\lambda)=m_{-i}(\lambda)$, and the sum of the multiplicities of $S_{0,+}$ and $S_{0,-}$ is $m_0(\lambda)$. Thus, given $\lambda\in\Lambda_1^+$, the choice of $\xi\in\Xi(\lambda)$ is equivalent to the choice of an ordered pair of nonnegative integers $(m_{0,+},m_{0,-})$ satisfying \begin{equation} \label{eqn:ordered-pair} m_{0,+}+m_{0,-}=m_0(\lambda),\quad m_{0,-}\equiv m_2(\lambda)+m_4(\lambda)+\cdots\ (\text{mod}\ 2), \end{equation} where $m_{0,\pm}$ specifies the multiplicity of $S_{0,\pm}$.
Let $\rho:N\to\mathrm{GL}(V)$ be a representation, and let $\mathbf{v}=(v_{0,+},v_{0,-},v_1,v_2,\cdots)$ be its multiplicity vector. For convenience set $v_0:=v_{0,+}+v_{0,-}$. The multiplicity vector of the restriction of $\rho$ to $\mathbb{G}_m$ is $\mathbf{v}^{\mathrm{A}}:=(\cdots,v_2,v_1,v_0,v_1,v_2,\cdots)$. The condition~\eqref{eqn:fundamental2} becomes the conjunction of the following inequalities: \begin{equation} \label{eqn:fund-n} \begin{split} r-2v_{0,+}+v_1&\geq 0,\\ -2v_{0,-}+v_1&\geq 0,\\ -2v_i+v_{i-1}+v_{i+1}&\geq 0\text{ for all }i\geq 1, \end{split} \end{equation} and if~\eqref{eqn:fund-n} is satisfied, the resulting element $\xi\in\Xi$ is the one with multiplicity vector $(m_{0,+},m_{0,-},m_1,m_2,\cdots)$ given by the left-hand sides of~\eqref{eqn:fund-n}. Note that \[ m_{0,-}+m_2+m_4+\cdots = -2v_{0,+}+2v_1-2v_2+2v_3-2v_4+\cdots \] is indeed even. Set $m_0:=m_{0,+}+m_{0,-}=r-2v_0+2v_1$. Thus $\xi\in\Xi(\lambda)$ where $\lambda\in\Lambda_1^+$ has multiplicity vector $(\cdots,m_2,m_1,m_0,m_1,m_2,\cdots)$. Clearly this coweight $\lambda$ is the one associated to $\mathbf{v}^{\mathrm{A}}$ as in \S\ref{ss:Gm-case}.
Conversely, given $\lambda\in\Lambda_1^+$, we can define $\mathbf{v}^{\mathrm{A}}$ by~\eqref{eqn:v-gm}, and it will automatically have the symmetric form $\mathbf{v}^{\mathrm{A}}=(\cdots,v_2,v_1,v_0,v_1,v_2,\cdots)$. Note that $v_0$ is the sum of the positive entries in the $r$-tuple $\lambda$, and $v_0-v_1$ is the number of these positive entries, implying that $v_1\equiv m_2(\lambda)+m_4(\lambda)+\cdots\ (\text{mod}\ 2)$. Suppose that $\xi\in\Xi(\lambda)$ corresponds to the ordered pair $(m_{0,+},m_{0,-})$ satisfying~\eqref{eqn:ordered-pair}; in terms of $\mathbf{v}^{\mathrm{A}}$, this latter condition becomes \begin{equation} \label{eqn:ordered-pair2} m_{0,+}+m_{0,-}=r-2v_0+2v_1,\quad m_{0,-}\equiv v_1\ (\text{mod}\ 2). \end{equation} Then $\xi$ arises from some representation $\rho:N\to\mathrm{GL}(V)$ if and only if we can write \begin{equation} \label{eqn:prelim} m_{0,+}=r-2v_{0,+}+v_1,\quad m_{0,-}=-2v_{0,-}+v_1 \end{equation} for some nonnegative integers $v_{0,+},v_{0,-}$ (whose sum is then necessarily $v_0$). Rearranging~\eqref{eqn:prelim}, we see that this condition is equivalent to requiring that the following are nonnegative integers: \begin{equation} \label{eqn:v-N} v_{0,+}:=\frac{1}{2}(r-m_{0,+}+v_1),\quad v_{0,-}:=\frac{1}{2}(-m_{0,-}+v_1), \end{equation} and this in turn, in view of~\eqref{eqn:ordered-pair2}, is equivalent to requiring simply that
$m_{0,-}\leq v_1$.
To sum up, if $m_{0,-}\leq v_1$, then Proposition~\ref{prop:fundamental} tells us that $\mathrm{Bun}_G^\xi(\mathbb{A}^2/N)=\mathrm{Bun}_G^\rho(\mathbb{A}^2/N)$ is nonempty, and Proposition~\ref{prop:barth-rho} tells us that it is connected. If $m_{0,-}>v_1$, then Proposition~\ref{prop:fundamental} tells us that $\mathrm{Bun}_G^\xi(\mathbb{A}^2/N)$ is empty.
Recall that Proposition~\ref{prop:barth-rho} gives us an isomorphism between $\mathrm{Bun}_G^\rho(\mathbb{A}^2/N)$ and $\mathfrak{M}^{N,\rho,\mathrm{reg}}(V,W)=\Lambda(V,W)^{sc,N,\rho}/\mathrm{GL}_{N,\rho}(V)$. We will now see how to express the latter variety as a quiver variety of type $\mathrm{D}$ in the usual sense. The idea is much the same as in the type-$\mathrm{A}$ case considered in \S\ref{ss:Gm-case}.
The decomposition of $(V,\rho)$ into its isotypic components for $N$ is closely related to the $\mathbb{Z}$-grading $V=\bigoplus_{i\in\mathbb{Z}} V_i$ obtained from the restriction of $\rho$ to $\mathbb{G}_m$. Namely, we have a direct sum decomposition $V_0=V_{0,+}\oplus V_{0,-}$ into isotypic components corresponding to the irreducible representations $S_{0,+}$ and $S_{0,-}$, and the isotypic component corresponding to $S_i$ for $i>0$ is $V_i\oplus V_{-i}\cong V_i\otimes S_i$. Note that $\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])$ maps $V_i$ isomorphically onto $V_{-i}$ for all $i\neq 0$, and acts as the identity on $V_{0,+}$ and as minus the identity on $V_{0,-}$. Since we are using the trivial representation of $N$ on $W$, the analogous decomposition of $W$ has only one nonzero term, $W=W_{0,+}$.
A quadruple $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)$ is fixed by $N$ if and only if it is fixed by $\mathbb{G}_m$ and also fixed by $[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]$. In \S\ref{ss:Gm-case} we have already seen how to translate the condition of being fixed by $\mathbb{G}_m$ in terms of the $\mathbb{Z}$-gradings on $V$ and $W$. By definition of the $N$-action on $\Lambda(V,W)$, the additional condition of being fixed by $[\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}]$ amounts to the following extra constraints: \begin{itemize}
\item $B_2=\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])B_1\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])^{-1}$, which means that the maps $B_1|_{V_i}$ for $i\leq 0$ are uniquely determined by the maps $B_2|_{V_i}$ for $i\geq 0$ and the maps $B_2|_{V_i}$ for $i<0$ are uniquely determined by the maps $B_1|_{V_i}$ for $i>0$; \item $\mathbf{i}(W)\subseteq V_{0,+}$; \item $\mathbf{j}(V_{0,-})=0$. \end{itemize} Thus an element of $\Lambda(V,W)^{N,\rho}$, after removing the redundant data, consists of a configuration of linear maps of the form \[ \vcenter{ \xymatrix@R=15pt{ W\ar@/_/[dd]_(.4){\mathbf{i}}&&&& \\ \\ V_{0,+}\ar@/^/[dr]^(.6){B_{2,+}}\ar@/_/[uu]_(.6){\mathbf{j}}&&&&\\ &V_{1}\ar@/^/[ul]^(.6){B_{1,+}}\ar@/^/[dl]^(.4){B_{1,-}}\ar@/^/[r]^-{B_{2}}&V_2\ar@/^/[l]^-{B_{1}}\ar@/^/[r]^-{B_{2}}&V_3\ar@{.}[r]\ar@/^/[l]^-{B_{1}}&\\ V_{0,-}\ar@/^/[ur]^(.4){B_{2,-}}&&&& } } \]
where $B_{1,+}$ and $B_{1,-}$ are the components of $B_1|_{V_1}$, and $B_{2,+}$ and $B_{2,-}$ are the components of $B_2|_{V_0}$, relative to the direct sum decomposition $V_0=V_{0,+}\oplus V_{0,-}$. The equation~\eqref{eqn:adhm}, when broken into its various components and simplified using the rule $B_2=\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])B_1\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])^{-1}$, is equivalent to the following equations: \begin{equation} \label{eqn:type-d} \begin{split} 2B_{1,+}B_{2,+}+\mathbf{i}\mathbf{j}&=0\text{ on $V_{0,+}$,}\\ B_{1,-}B_{2,-}&=0\text{ on $V_{0,-}$,}\\ B_1B_2&=B_{2,+}B_{1,+}+B_{2,-}B_{1,-}\text{ on $V_1$,}\\ B_1B_2&=B_2B_1\text{ on $V_i$ for all $i>1$.} \end{split} \end{equation} Again, since $V_i=\{0\}$ for sufficiently large $i$, the above configuration of maps is effectively finite.
We can obviously identify $\mathrm{GL}_{N,\rho}(V)$ with $\mathrm{GL}(V_{0,+})\times\mathrm{GL}(V_{0,-})\times\prod_{i>0}\mathrm{GL}(V_i)$, and its action on the above configurations of linear maps is the obvious one. So, after making a suitable choice of orientation of the underlying graph of type $\mathrm{D}$, we recover the usual set-up of quiver varieties of (finite) type $\mathrm{D}$ as in~\cite{nak1,nak2}. (We can account for the factor of $2$ in the first equation of~\eqref{eqn:type-d} by scaling $\mathbf{i}$, as in~\cite[Example 3.12]{hl}.) In this context it is more common to use the notations $\mathfrak{M}(\mathbf{v},\mathbf{w})$ for $\mathfrak{M}^{N,\rho}(V,W)$ and $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})$ for $\mathfrak{M}^{N,\rho,\mathrm{reg}}(V,W)$, where $\mathbf{v}=(v_{0,+},v_{0,-},v_1,v_2,\cdots)$ is the vector of dimensions of $V_{0,+},V_{0,-},V_1,V_2,\cdots$ and $\mathbf{w}$ is defined likewise; in our case, $\mathbf{v}$ is given by~\eqref{eqn:v-gm} and~\eqref{eqn:v-N}, and $\mathbf{w}=(r,0,0,0,\cdots)$.
We can now state a more specific version of Theorem~\ref{thm:intro-glr}:
\begin{prop} \label{prop:glr} Let $\lambda=(\lambda_1,\cdots,\lambda_r)\in\Lambda_1^+$ and $\xi\in\Xi(\lambda)$. Then $(\mathsf{Gr}_0)^{\iota,\xi}$ is nonempty if and only if the pair $(m_{0,+},m_{0,-})$ determined by $\xi$, which by definition satisfies~\eqref{eqn:ordered-pair}, also satisfies \[ m_{0,-}\leq \sum_{s=1}^r \max\{\lambda_s-1,0\}. \] Assume henceforth that this condition holds. Define $\mathbf{v}=(v_{0,+},v_{0,-},v_1,v_2,\cdots)$ by~\eqref{eqn:v-gm} and~\eqref{eqn:v-N} and set $\mathbf{w}=(r,0,0,0,\cdots)$. Then we have a $G$-equivariant isomorphism \[ \Psi\circ\Theta:\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})\simto(\mathsf{Gr}_0)^{\iota,\xi} \] which sends the orbit of a configuration of linear maps as above to the following element of $G[t^{-1}]_1$: \[ 1+(\mathbf{j}\mathbf{i})t^{-1}+\sum_{i=0}^\infty (-1)^{i+1} (\mathbf{j} B_{1,+}B_1^i B_2^iB_{2,+}\mathbf{i})\,t^{-i-2}. \] In particular, $(\mathsf{Gr}_0)^{\iota,\xi}$ is nonsingular and connected when it is nonempty. \end{prop}
\begin{proof} Recall that the $G$-equivariant bijection $\Psi:\mathrm{Bun}_G(\mathbb{A}^2/N)\to(\mathsf{Gr}_0)^\iota$ restricts to an isomorphism $\mathrm{Bun}_G^\xi(\mathbb{A}^2/N)\simto(\mathsf{Gr}_0)^{\iota,\xi}$. So the nonemptiness criterion for $(\mathsf{Gr}_0)^{\iota,\xi}$ follows from that for $\mathrm{Bun}_G^\xi(\mathbb{A}^2/N)$, seen above. Assume that these varieties are indeed nonempty. Proposition~\ref{prop:barth-rho} tells us that the $G$-equivariant isomorphism $\Theta:\mathfrak{M}^{\mathrm{reg}}(V,W)\simto\mathrm{Bun}_G^n(\mathbb{A}^2)$ restricts to an isomorphism $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})=\mathfrak{M}^{N,\rho,\mathrm{reg}}(V,W)\simto\mathrm{Bun}_G^\rho(\mathbb{A}^2/N)=\mathrm{Bun}_G^\xi(\mathbb{A}^2/N)$. Recall that $\mathfrak{M}^{N,\rho,\mathrm{reg}}(V,W)$ is nonsingular and connected by Proposition~\ref{prop:connected}.
All that remains to be proved is that the formula for the composition $\Psi\circ\Theta$ on $\mathfrak{M}^{\mathbb{G}_m,\rho|_{\mathbb{G}_m},\mathrm{reg}}(V,W)$ given in Proposition~\ref{prop:mv} restricts to the stated formula on $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v},\mathbf{w})=\mathfrak{M}^{N,\rho,\mathrm{reg}}(V,W)$. This follows from the fact that, for any quadruple $(B_1,B_2,\mathbf{i},\mathbf{j})\in\Lambda(V,W)^{N,\rho}$ and any $i\geq 0$, \[ \begin{split} \mathbf{j} B_2^{i+1} B_1^{i+1} \mathbf{i} &=\mathbf{j} \left(\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])B_1\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])^{-1}\right)^{i+1} \left(-\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])B_2\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])^{-1}\right)^{i+1} \mathbf{i}\\ &=(-1)^{i+1} \mathbf{j} B_1^{i+1}B_2^{i+1} \mathbf{i}=(-1)^{i+1} \mathbf{j} B_{1,+} B_1^i B_2^i B_{2,+} \mathbf{i}. \end{split} \] Here the second equality holds because $\rho([\begin{smallmatrix}0&1\\-1&0\end{smallmatrix}])$ acts as the identity on $V_{0,+}$. \end{proof}
\begin{ex} \label{ex:disconnected} Take $r=4$ and $\lambda=(3,0,0,-3)\in\Lambda_1^+$. The corresponding $\mathbb{Z}$-tuple $\mathbf{v}^{\mathrm{A}}$ is $(\cdots,0,0,1,2,3,2,1,0,0,\cdots)$ with $v_0=3$. The relevant ordered pairs $(m_{0,+},m_{0,-})$ are $(2,0)$ and $(0,2)$; let $\xi_1$ and $\xi_2$ be the corresponding elements of $\Xi(\lambda)$. Then $(\mathsf{Gr}_0)^{\iota,\xi_1}$ and $(\mathsf{Gr}_0)^{\iota,\xi_2}$ are the connected components of $(\mathsf{Gr}_0^\lambda)^\iota$, and by Proposition~\ref{prop:glr} they are isomorphic to the following quiver varieties respectively: \[ \mathfrak{M}_0^{\mathrm{reg}}((2,1,2,1),(4,0,0,0))\text{ and } \mathfrak{M}_0^{\mathrm{reg}}((3,0,2,1),(4,0,0,0)), \] where we have put the vertices of $\mathrm{D}_\infty$ in the order $(0,+),(0,-),1,2,3,\cdots$ and truncated all vertices where $v_i=0$, leaving quiver varieties of type $\mathrm{D}_4$ (except that the second is really a quiver variety of type $\mathrm{A}_3$, since it happens to have $v_{0,-}=0$). \end{ex}
\begin{rmk} A general formula for the dimension of a Nakajima quiver variety is given in~\cite[Corollary 3.12]{nak2}. Applied to the quiver variety of type $\mathrm{A}$ in Proposition~\ref{prop:mv}, this recovers the well-known formula $\dim\mathsf{Gr}_0^\lambda=\langle\lambda,2\rho\rangle$ for any $\lambda\in\Lambda^+$, where $2\rho$ is the sum of the positive roots of $\mathrm{SL}(r)$. Applied to the quiver variety of type $\mathrm{D}$ in Proposition~\ref{prop:glr}, it gives \begin{equation} \dim (\mathsf{Gr}_0)^{\iota,\xi} = \langle\lambda,\rho\rangle + \frac{1}{4}(r^2-(m_{0,+}-m_{0,-})^2), \end{equation} where $\lambda\in\Lambda_1^+$ and $\xi\in\Xi(\lambda)$ corresponds to the pair $(m_{0,+},m_{0,-})$. \end{rmk}
\subsection{Proof of Theorem~\ref{thm:intro-gl2}} \label{ss:proof-gl2}
Now suppose that $G=\mathrm{SL}(2)$ and that $\lambda=m\alpha=(m,-m)$ for $m\in\mathbb{Z}^+$. The corresponding $\mathbb{Z}$-tuple $\mathbf{v}^{\mathrm{A}}$ is \[ (\cdots,0,0,1,2,\cdots,m-1,m,m-1,\cdots,2,1,0,0,\cdots) \] with $v_0=m$. Hence Proposition~\ref{prop:mv} says that \begin{equation} \label{eqn:special-mv} \mathsf{Gr}_0^{m\alpha}\cong\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v}^{\mathrm{A}},2\delta_0), \end{equation} a quiver variety of type $\mathrm{A}_{2m-1}$.
There is another isomorphism involving this quiver variety: \begin{equation} \label{eqn:kronheimer} \mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v}^{\mathrm{A}},2\delta_0)\cong{\mathcal{O}}_{(2m)}\cap\mathcal{S}_{(m,m)}, \end{equation} where ${\mathcal{O}}_{(2m)}$ is the regular nilpotent orbit in $\mathfrak{sl}(2m)$ and $\mathcal{S}_{(m,m)}$ is the Slodowy slice to the orbit ${\mathcal{O}}_{(m,m)}$. The isomorphism~\eqref{eqn:kronheimer} is a special case of Nakajima's result~\cite[Theorem 8.4]{nak1}, a reformulation of Kronheimer's result~\cite[Theorem 1]{kron}. Thus, it implicitly treats the graph of type $\mathrm{A}_{2m-1}$ as part of the McKay graph of $\mathrm{SL}(2)$, \emph{not} the McKay graph of $\mathbb{G}_m$ as in the proof of~\eqref{eqn:special-mv}; this distinction was highlighted in~\cite[Section 2(v)]{nak4}. Philosophically, this is why we obtain an interesting isomorphism by composing~\eqref{eqn:special-mv} and~\eqref{eqn:kronheimer}, namely \begin{equation} \label{eqn:tworow} \mathsf{Gr}_0^{m\alpha}\cong {\mathcal{O}}_{(2m)}\cap\mathcal{S}_{(m,m)}. \end{equation}
\begin{rmk} We do not know an explicit formula for the isomorphism~\eqref{eqn:kronheimer}. Instead, there is a recursive procedure for passing from a quadruple $(B_1,B_2,\mathbf{i},\mathbf{j})$ to an element of $\mathfrak{sl}(2m)$, which is due to Maffei~\cite[Theorem 8]{maffei} in greater generality, and is rephrased in~\cite[Lemma 4.8]{hl} in the case relevant for~\eqref{eqn:kronheimer}. Note that Mirkovi\'c and Vybornov~\cite[Section 3.2]{mvy-cr} provided explicit versions of~\eqref{eqn:kronheimer} and~\eqref{eqn:tworow} where the Slodowy slice $\mathcal{S}_{(m,m)}$ is replaced by a different transverse slice to the orbit ${\mathcal{O}}_{(m,m)}$. However, their transverse slice is not stable under the negative-transpose involution considered below. \end{rmk}
As a special case of Proposition~\ref{prop:mv-iota}, the involution $\iota$ of $\mathsf{Gr}_0^{m\alpha}$ corresponds under the isomorphism~\eqref{eqn:special-mv} to a diagram involution of $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v}^{\mathrm{A}},2\delta_0)$. As a special case of~\cite[Theorem 4.4]{hl} (and this is where the assumption $r=2$ is vital), this diagram involution of $\mathfrak{M}_0^{\mathrm{reg}}(\mathbf{v}^{\mathrm{A}},2\delta_0)$ corresponds, under the isomorphism of~\eqref{eqn:kronheimer}, to an involution of ${\mathcal{O}}_{(2m)}\cap\mathcal{S}_{(m,m)}$ that is the restriction of a Lie algebra involution of $\mathfrak{sl}(2m)$, namely the negative transpose map with respect to a nondegenerate form on $\mathbb{C}^{2m}$ that is symmetric if $m$ is even and skew-symmetric if $m$ is odd. See~\cite[Section 4]{hl} for the detailed definitions.
We now consider the fixed-point subvarieties of these involutions. From~\eqref{eqn:ordered-pair} we see that $\Xi(m\alpha)$ is empty if $m$ is even, and has a unique element, corresponding to the ordered pair $(0,0)$, if $m$ is odd. Recall that we already showed that $(\mathsf{Gr}_0^{m\alpha})^\iota$ is empty for $m$ even and positive in Lemma~\ref{lem:even-empty}; on the other side of~\eqref{eqn:tworow}, it is well known that ${\mathcal{O}}_{(2m)}$ does not intersect $\mathfrak{so}(2m)$.
Assume henceforth that $m$ is odd. By Proposition~\ref{prop:glr}, taking fixed points on both sides of~\eqref{eqn:special-mv} gives the isomorphism \begin{equation} \label{eqn:explicit} (\mathsf{Gr}_0^{m\alpha})^\iota\cong\mathfrak{M}_0^{\mathrm{reg}}((\tfrac{m+1}{2},\tfrac{m-1}{2},m-1,\cdots,2,1),(2,0,\cdots,0)), \end{equation} where the quiver variety is of type $\mathrm{D}_{m+1}$, with the vertices labelled in the same order as in Example~\ref{ex:disconnected}. (In the $m=1$ case, we have to interpret $\mathrm{D}_2$ as $\mathrm{A}_1\times\mathrm{A}_1$.) Taking fixed points on both sides of~\eqref{eqn:kronheimer} gives the isomorphism \begin{equation} \label{eqn:implicit} \mathfrak{M}_0^{\mathrm{reg}}((\tfrac{m+1}{2},\tfrac{m-1}{2},m-1,\cdots,2,1),(2,0,\cdots,0))\cong {\mathcal{O}}_{(2m)}^{\mathrm{C}_m}\cap\mathcal{S}_{(m,m)}^{\mathrm{C}_m}, \end{equation} which is a special case of~\cite[Theorem 1.2]{hl}. Finally, Theorem~\ref{thm:intro-gl2} follows by combining~\eqref{eqn:explicit} and~\eqref{eqn:implicit}, or in other words by taking fixed points on both sides of~\eqref{eqn:tworow}.
\begin{rmk} The isomorphisms~\eqref{eqn:special-mv} and~\eqref{eqn:kronheimer} extend to isomorphisms between the natural closures of these varieties: \begin{equation} \label{eqn:closures} \overline{\mathsf{Gr}}_0^{m\alpha}\cong\mathfrak{M}_0(\mathbf{v}^{\mathrm{A}},2\delta_0)\cong\mathcal{N}\cap\mathcal{S}_{(m,m)}, \end{equation} where $\mathcal{N}$ is the nilpotent cone of $\mathfrak{sl}(2m)$. The first isomorphism in~\eqref{eqn:closures} follows from~\cite[Theorem 5.2]{bf} and~\cite[Theorem 5.12]{bfg}, and the second isomorphism in~\eqref{eqn:closures} is a special case of~\cite[Theorem 8]{maffei}. The involutions of the open subvarieties considered above all extend to the closures. If $m$ is odd, we obtain the following by taking fixed points throughout~\eqref{eqn:closures}: \begin{equation} (\overline{\mathsf{Gr}}_0^{m\alpha})^\iota\cong\mathfrak{M}_0((\tfrac{m+1}{2},\tfrac{m-1}{2},m-1,\cdots,2,1),(2,0,\cdots,0))\cong\mathcal{N}^{\mathrm{C}_m}\cap\mathcal{S}_{(m,m)}^{\mathrm{C}_m}, \end{equation} where the quiver variety is again of type $\mathrm{D}_{m+1}$ and $\mathcal{N}^{\mathrm{C}_m}$ is the nilpotent cone of $\mathfrak{sp}(2m)$. For the claim about the fixed-point subvariety of $\mathfrak{M}_0(\mathbf{v}^{\mathrm{A}},2\delta_0)$, see the proof of~\cite[Theorem 1.2]{hl}. \end{rmk}
\end{document} | arXiv | {
"id": "1512.04254.tex",
"language_detection_score": 0.6035203337669373,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Neural Topological Ordering for Computation Graphs}
\begin{abstract}
Recent works on machine learning for combinatorial optimization have shown that learning based approaches can outperform heuristic methods in terms of speed and performance.
In this paper, we consider the problem of finding an optimal topological order on a directed acyclic graph with focus on the memory minimization problem which arises in compilers. We propose an end-to-end machine learning based approach for topological ordering using an encoder-decoder framework. Our encoder is a novel attention based graph neural network architecture called \emph{Topoformer} which uses different topological transforms of a DAG for message passing. The node embeddings produced by the encoder are converted into node priorities which are used by the decoder to generate a probability distribution over topological orders. We train our model on a dataset of synthetically generated graphs called layered graphs. We show that our model outperforms, or is on-par, with several topological ordering baselines while being significantly faster on synthetic graphs with up to 2k nodes. We also train and test our model on a set of real-world computation graphs, showing performance improvements.
\end{abstract}
\section{Introduction \label{sec:intro}}
Many problems in computer science amount to finding the best sequence of objects consistent with some precedence constraints. An intuitive example comes from routing problems, where we would like to find the shortest route between cities but we have requirements (for example, to pick up and subsequently deliver a package) on the order in which the cities should be visited~\cite{vrp_book}. Another case is found in compiler pipelines, wherein the ``cities'' become operations to be executed and the constraints come from the data dependencies between these operations, such as when the result of an operation is an operand in a subsequent one. In this case, the metric to be optimized can be the run time of the compiled program, or the memory required to execute the program~\cite{ahn2020ordering}. Common across this class of problems is their formulation in terms of finding the optimal topological order of the Directed Acyclic Graph (DAG) that encodes the precedence constraints, which induces a Combinatorial Optimization~\cite{co_book} (CO) problem which is in general computationally hard~\cite{sharp_p_complete}.
Already from the two examples above, one can immediately grasp the relevance of such problems for industrial Operations Research, which has prompted various actors to invest in the development of efficient CO solvers; these solvers usually encapsulate heuristic methods whose design typically requires extensive use of domain-specific and problem-specific knowledge, across decades of development. In recent years, considerable interest has emerged in the possibility of replacing such handcrafted heuristics with ones learned by deep neural nets~\cite{bengio2021machine} (machine learning for combinatorial optimization, MLCO). As a matter of fact, both of our two examples of DAG-based CO problems have indirectly been object of study in the Machine Learning literature. References~\cite{kool2018TSP,xin2021neurolkh,joshi2021learning,correia2022neural} take into consideration Routing Problems, especially the Traveling Salesperson Problem (TSP) which, on account of its richness, complexity and long history of mathematical study~\cite{tsp_book}, has attained the status of a standard benchmark for MLCO~\cite{joshi2021learning}. Conversely, less attention has been devoted to operations sequencing likely due to the proprietary and sensitive nature of compiler workflows, which hampers the definition of public benchmarks. References~\cite{regalpaper, gopaper} both consider the task of optimizing the run time of a neural network's forward pass by optimizing the ordering and device assignment of its required operations. However, in this last case the sequencing stage is only one part of a larger compiler pipeline, and as a result of this both the performance metrics and the datasets employed cannot be made available for reproduction by third parties. This makes it both hard to assess the results therein, and to draw general conclusions and guidelines for the advancement of MLCO, which still suffers from a lack of commonly accepted and standard datasets and benchmarks.
In this work, we address the problem of finding optimal topological orders in a DAG using deep learning, focusing on the compiler task of optimizing the peak local memory usage during execution. We make the following contributions:
\begin{itemize}[leftmargin=4.2mm]
\item We present a neural framework to optimize sequences on directed acyclic graphs. Mindful of the need for scalability, we consider a non-auto-regressive (NAR) scheme for parametrizing the probability distribution of topological orders. This allows our method to attain an extremely favorable performance vs. run time tradeoff: it always outperforms fast baselines, and is only matched or outperformed by those requiring a much longer (in one case 4000x more) run time.
\item We address the problem of how to perform meaningful message-passing on DAGs, a graph type which has received comparatively less attention in the literature on Graph Neural Networks. We introduce \emph{Topoformer}, a flexible, attention-based architecture wherein messages can be passed between each and every pair of nodes, with a different set of learnable parameters depending on the topological relation between the nodes.
\item To test our method, we introduce an algorithm for the generation of \emph{synthetic}, layered, Neural Net-like computation graphs, allowing any researcher to generate a dataset of \emph{as many as desired} graphs of \emph{any desired size}. These graphs are a more faithful model of real NN workflows, and allow us to prove our method on a much larger and varied dataset, than previous efforts~\cite{regalpaper}. To our knowledge, this is the first public algorithm of this kind. Nevertheless, we also test our method on proprietary graphs to illustrate its relevance to realistic compiler workflows.
\end{itemize}
\section{Related work \label{sec:related}}
\paragraph{Machine Learning for Combinatorial Optimization:} Combinatorial optimization as a use case for deep learning poses interesting technical challenges. First, the combinatorial nature of the problem conflicts with the differentiable structure of modern deep neural networks; and second, the models need to be run at large scale to solve real world instances, exacerbating the challenges in training deep learning models. Given the discrete nature of CO problems, a natural approach is to pose them as reinforcement learning (RL) problems~\cite{rl_book}. The aim is then to learn a policy that selects the best actions to maximize a reward directly related to the optimization objective. Algorithms then differ in the way the policy is parameterized: either in an end-to-end manner where the actions directly correspond to solutions of the optimization problem~\cite{gopaper, kool2018TSP, joshi2021learning, khalil2017learning}, or in a hybrid manner, where the policy augments parts of a traditional solver, e.g. by replacing heuristics used in setting parameters of an algorithm, see e.g. \cite{regalpaper, correia2022neural, xin2021neurolkh, ahn2020ordering}. Our approach follows an end-to-end design philosophy, which, not having to rely on an external algorithm, affords better control of post-compile run time and facilitates application on edge devices~\cite{ahn2020ordering}. Furthermore, RL has the advantage of being useful as a black box optimizer, when no handcrafted heuristics can be designed.\\ \textbf{Sequence Optimization via ML:} Within MLCO, much effort has been devoted to the task of predicting optimal sequences~\cite{bello2016neural,kool2018TSP,mena2018learning,linderman2018,gadetsky2020low}. The end-to-end nature of our method places it close to the one proposed in \cite{kool2018TSP}, although to the best of our knowledge, our work is the first to tackle the challenge of enforcing precedence constraints in the network predictions. 
As we shall see in more detail below, this generalization is non-trivial: already counting the number of topological orders belongs to the hardest class of computational problems \cite{sharp_p_complete}. This has to be contrasted with the fact that the number of sequences without topological constraints is simply $n!$ for $n$ objects. Besides, as pointed out in \cite{joshi2021learning}, no MLCO method has so far been able to convincingly tackle TSPs of sizes above a few hundred nodes, when it comes to \emph{zero-shot} generalization to unseen problem instances, i.e.~when no fine tuning on the test set is done. It is also therein pointed out how an auto-regressive parametrization of the sequence (which was the method used in ref.~\cite{kool2018TSP}) appears to be necessary to achieve acceptable performance even at those small sizes. Conversely, in the present work we show compelling zero-shot performance on DAGs of sizes up to \emph{thousands} of nodes, while nonetheless generating our sequences in a fully non-auto-regressive (NAR) way and maintaining a strong run time advantage over classical sequencing algorithms. Our results can then also be interpreted as cautioning against the idea of using the TSP as the sole, paradigmatic test-bed for MLCO research, as \cite{joshi2021learning} remarks.\\ \textbf{ML for Compiler Optimization:} The DAG sequencing task we consider is an omnipresent stage in compiler workflows, which usually also include such tasks as device assignment and operations fusion~\cite{gopaper}. In such a setting, jointly optimizing these tasks to reduce the \emph{run time} of a certain workflow (such as the forward pass of a Neural Net) is a common objective, which in refs~\cite{regalpaper, gopaper, steiner2021value} is tackled with ML methods. 
In this work we focus on the task of minimizing the peak local memory usage during execution, which does not require a performance model or simulator as well as being relevant to applications on edge devices~\cite{ahn2020ordering}. In~\cite{regalpaper}, the ML solution leans on an existing genetic algorithm, whilst our solution is end-to-end, much like that proposed in~\cite{gopaper}. Another characteristic of the solution proposed in~\cite{gopaper} is the idea of interpolating between AR and NAR via an \emph{iterative refinement} scheme, in which sequences are generated in one pass but subsequently refined during a user-defined number of subsequent passes; conversely, we generate all our sequences in a single pass.\\ While in~\cite{regalpaper} the run time optimization is studied on both real-world and synthetic random graphs -- the latter being relatively small (up to about 200 nodes), the peak memory optimization is studied only on a proprietary dataset augmented via perturbation of the node attributes. In~\cite{gopaper} the authors train and test their method on a relatively small set of six proprietary workflows which are not disclosed to the reader, and out of those six, only the size of the largest instance is mentioned.\\
\textbf{Deep Graph Neural Networks:} Given that our problem is specified as a DAG, it is a logical choice to parametrize our sequence-generation policy with a Graph Neural Network architecture~\cite{gnn_survey}. The basic idea of every GNN architecture is to update graph and edge representations by passing messages between the graph nodes along the graph edges~\cite{battaglia2018relational}. However, this can be too restrictive when it comes to sequence generation on DAGs. For example, nodes that come after each other in the sequence might not be linked by an edge in the graph, and therefore are unable to directly influence each other's representation. Notice how this difficulty is another consequence of the presence of precedence constraints in our problem, which conversely was not an issue in e.g.~\cite{kool2018TSP} where the graph is fully connected and no constraints are present. Relatively few efforts (see e.g.~\cite{thost2020directed, zhang2019dvae, bianchini2002recursive}) have been devoted to devise a way to perform meaningful message passing on DAGs. As a matter of fact, the quest for expressive GNN architectures is at the center of intense theoretical investigation \cite{graphomer, review-expressive-gnn}.
\section{Background \label{sec:backg}}
\subsection{Topological Orders and DAGs} We here introduce the mathematical background, starting with a few definitions. A partial order is an irreflexive transitive relation $<$ between certain pairs of a set $V$. We call a pair $(x,y)\in V\times V$ that is related by $<$ comparable, and \emph{incomparable} otherwise. A Directed Acyclic Graph (DAG) $G=(V,E)$ is a directed graph with no directed loops. We can map a DAG $G=(V,E)$ to a partially ordered set $(V,<)$ where $x<y$ if there is a directed path from node $x$ to node $y$. Multiple DAGs map to the same partial order. For example, the DAGs with vertex set $\{x,y,z\}$ and edge sets $E=\{x\to y, y\to z\}$ and $E'=\{x\to y, y\to z, x\to z\}$, where $s\to t$ denotes a directed edge from $s$ to $t$, correspond to the same partial order $x<y<z$. We define the \emph{transitive closure} (TC) of a DAG as the graph with most edges that has the same underlying partial order, so that there exists a directed edge $(x,y)$ whenever $x<y$. Conversely, the \emph{transitive reduction} (TR) is the graph with \emph{least} edges that results in the same partial order. We denote the order induced by a DAG by $<_G$.
A topological order or sorting of a DAG $G$ is a bijection $\sigma \colon V \to \{1,\dots,|V|\}$ such that $\sigma(x)<\sigma(y)$ whenever $x<_Gy$. The set $\mathcal{T}_G$ of topological orders of $G$ is a subset of the permutation group of the vertices and coincides with total orders on $V$ that respect $<_G$, called \emph{linear extensions} of the partial order. While there are several well-known algorithms to compute a topological order of a DAG, e.g. breadth first search and depth first search, counting the number of topological orders is one of the hardest computational problems, being $\#$P-complete~\cite{sharp_p_complete}.
In this work we develop a general machine learning method to find a topological order that minimizes a given cost function on a DAG, which we define in the next section.
\subsection{Peak Memory Minimization}
Deciding the best way to schedule operations in a computational graph representing a neural network is a central problem in compilers~\cite{regalpaper, gopaper, ahn2020ordering}. We can associate a DAG to a computational graph in such a way that nodes represent operations (``ops''), and incoming/outgoing edges represent operands/results of these operations. Every time one executes an op, the inputs\footnote{We use ``inputs'' and ``operands'' interchangeably throughout the paper.} to that op need to be in memory, and memory for the outputs needs to be allocated. Therefore, each node of the DAG carries a label $m \colon V \to \mathbb{N}$ specifying the memory required to store the output of that op. A typical first step in scheduling a DAG is to identify topological orders to execute operations.
Compilers for edge devices, which have limited memory, aim at choosing the optimal topological order that minimizes the peak memory footprint \cite{ahn2020ordering}. We focus therefore on the peak local memory usage minimization task, which can be formulated as the following combinatorial optimization problem on a labeled DAG $G=(V,E,m)$: \begin{equation}
\min_{\sigma \in \mathcal{T}_G} \mathcal{C}(\sigma),\qquad \mathcal{C}(\sigma) =
\max(M_1(\sigma), \dots, M_{|V|}(\sigma)), \end{equation} with the definitions \begin{align}
M_t &= I_{t-1} + m(\sigma_t) \label{eq:mem_cost},\\
I_t &= M_t - \sum_{i \in S_t}m_i, \qquad S_t = \left\{i: i \notin \bigcup_{l=0}^{t-1} S_{l} ~~ \text{ and }~~ \forall (i,j) \in E,\, j\in \sigma_{1:t} \right\} \label{eq:I_t}, \end{align} i.e. the memory usage at time $t$ is given by the memory usage $I_{t-1}$ of the outputs which have not yet been consumed, at time $t-1$, by downstream operations, plus the memory requirement of the output of operation $\sigma_t$. $I_t$ is in turn obtained by subtracting from $M_t$ the memory costs of nodes whose outgoing edges only connect to already scheduled nodes, i.e. nodes whose output was only required by already scheduled operations. Naturally, $I_0 = 0, S_0 = \emptyset$.
\section{Method}\label{sec:method}
\begin{figure}
\caption{Our complete architecture for neural topological ordering. The shades of gray in the MHA boxes are to highlight how attentions heads operate separately on the forward and backward version of the first three graphs.
The priorities $(y_i)_{i=1}^{|V|}$ are represented by the red bars on the original DAG and decoded into a sequence with its associated probability. }
\label{fig:e2e_pipeline}
\end{figure}
We use an encoder-decoder architecture whose schematic is shown in Figure~\ref{fig:e2e_pipeline}. Our encoder is \emph{Topoformer}, a novel GNN architecture, which derives an embedding for each node of the graph. The embeddings are used by the decoder which generates a distribution in the sequence space and finally the distribution can be converted to a sequence via different inference methods like sampling, greedy inference or beam search. Next, we describe each of the components in detail.
\subsection{Topoformer: Topologically Masked Attention\label{sec:topoformer}}
A Graph Neural Network (GNN) is a natural choice to encode our scheduling problem via embedding of the DAG nodes. All canonical GNN architectures operate by updating these embeddings via the aggregation of "messages" sent from the other nodes, usually in the form of some function of their own embedding~\cite{gnn_survey}. Architectures mainly differ in how the set of sender nodes is constructed and the aggregation function is chosen. In a Graph Convolutional Network~\cite{kipf2017semi}, the senders are the first neighbors of a node and the aggregation function is a weighted average, whilst in a vanilla Graph Attention Network~\cite{GAT}, the senders are all the other nodes, but their contributions are aggregated via averaging with \emph{learned} weights so as to account for their degree of relevance. When trying to apply such mechanisms on DAGs, a common point of contention is whether, and how in practice, the partial ordering encoded by it should reflect in the direction of travel of the messages~\cite{wang2021bilevel, bianchini2002recursive, thost2020directed}. While disregarding the DAG structure entirely (as one would do in a vanilla GAT), does not appear wise, it might be too restrictive when it comes to our task. For example, nodes that are next to each other in the sequence might well be incomparable, and thus lack a path for messages between them. The combinatorial nature of the task also poses requirements; it is known~\cite{graphomer, bengio2021machine} that reasoning about CO problems on a graph requires the capacity to reason about the \emph{global structure} of it, whilst architectures such as those proposed in~\cite{wang2021bilevel, bianchini2002recursive, thost2020directed} limit the set of sender nodes to a \emph{local} neighborhood of the receiver node. In summary, our architecture must strike a compromise between accounting for \emph{global} structure and \emph{local} partial ordering information.
Our \emph{Topoformer} architecture meets these requirements. A vector $\boldsymbol{x}_i$ of input features (see the appendix for details about its definition and dimensionality) is first turned into an initial node embedding $\boldsymbol{h}_i^{(0)}$ via a node-wise linear transformation, $\boldsymbol{h}_i^{(0)} = W\boldsymbol{x}_i + \boldsymbol{b}$. Subsequently, a succession of $L$ attention layers, each of them consisting of a Multi-Head Attention (MHA)~\cite{GAT} sub-layer followed by one more node-wise MLP, updates these embeddings, similar to a vanilla Transformer~\cite{vaswani2017attention}; however, we confer a topological inductive bias to these updates by having a separate group of attention heads masked by each of the following graphs induced by the original DAG: \begin{itemize}[leftmargin=4.2mm,noitemsep]
\item Its transitive reduction (TR).
\item The directed graph obtained by removing the TR edges from the DAG: $G\backslash E_{\textrm{TR}(G)}$.
\item The directed graph obtained by removing the edges of the DAG from its TC: $\textrm{TC}(G)\backslash E$.
\item The backwards versions (i.e. with flipped edges) of each of the three above.
\item The undirected graph obtained by joining all incomparable node pairs. \end{itemize} By adding together these graphs, one would obtain the fully connected graph relative to the node set $V$, whereupon all nodes would attend to all nodes. Then effectively, the propagation rules of Topoformer are the same as those of a vanilla transformer encoder, \begin{align}
\hat{\boldsymbol{h}}_i^{(\ell)} =& \boldsymbol{h}_{i}^{(\ell-1)} + \texttt{concat}_{j}\left[\textrm{MHA}_i^{\ell, j}\left(\boldsymbol{h}_1^{(\ell-1)},\dots,\boldsymbol{h}_{|V|}^{(\ell-1)}; M^j\right)\right], \\
\boldsymbol{h}^{(\ell)}_i=& \hat{\boldsymbol{h}}_i^{(\ell)} + \textrm{MLP}^{(\ell)}\left(\hat{\boldsymbol{h}}_i^{(\ell)}\right) \label{eq:mlptopo}, \end{align} save for the presence of the \emph{mask} $M^j$, which ensures that head $j$ only attends to its assigned graph among the seven listed above. Following~\cite{transf_layernorm}, we also apply layer normalization~\cite{layernorm} to the MHA and MLP inputs. The number of heads assigned to each graph can be chosen independently (setting it to zero means to not message-pass along the edges of the respective graph), or parameters can be tied among different MHAs. One should also remark how the MLP sub-layer allows the flow of information between different attention heads. All nodes are then able to influence each other's representation, while anyway injecting a strong inductive bias based on the DAG structure. Information about the Topoformer configurations used in our experiments is provided in the appendix.
\subsection{Decoder} \label{sec:ar_nar}
Once the embeddings of the nodes are generated, the decoder's task is to derive a stochastic policy $p(\sigma|G)$ over the valid topological orders of the graph. The most straightforward way is to take advantage of the chain rule of conditional probability to decompose the policy as a product
\begin{equation} p(\sigma|G) = \prod_{t=2}^{|V|}p_\theta(\sigma_t|\sigma_{1:t-1},\boldsymbol{h}, G) \times p_\theta(\sigma_1|\boldsymbol{h}, G).
\end{equation} We could then sample a complete sequence by autoregressively choosing a new node at each step as done e.g. in~\cite{kool2018TSP}. This scheme is the most principled and expressive; however, when a NN is used as a function approximator for $p_\theta$, it also requires that $|V|$ calls to this NN be performed, which limits its feasibility to relatively small graphs due to the amount of computation required.
In order to scale to large graphs, we employ a Non-Auto-Regressive (NAR) scheme which decouples the number of NN calls from the graph size. Similar to the approach of~\cite{gopaper}, we assign scheduling \emph{priorities} $y_i \in \mathbb{R}$ to the nodes, rather than scheduling probabilities. The priority for node $i$ is derived by passing its final embedding through an MLP: \begin{equation} y_i = \textrm{MLP}\left(\boldsymbol{h}^{(L)}_i\right). \label{eq: mlp_priority} \end{equation} These priorities are assigned with a \emph{single} NN inference. The sequence itself is subsequently constructed by adding a new node at each step. Given the partial sequence $\sigma_{1:t-1}$, the next node can only be selected from a subset $\mathcal{S}(\sigma_{1:t-1}, G)$ of schedulable nodes, due to both the graph topology and choices made earlier in the sequence. Then, the distribution of the next node to be added at step $t$ is given as follows:
\begin{equation} p(\sigma_t|\sigma_{1:t-1}, \boldsymbol{h}, G) = \begin{cases} \dfrac{\exp(y_{\sigma_t})}{\sum_{j\in \mathcal{S}(\sigma_{1:t-1}, G)} \exp(y_j)}, ~~ &\text{if } \sigma_t \in \mathcal{S}(\sigma_{1:t-1}, G), \\ 0, ~~~ &\text{otherwise.} \end{cases} \end{equation}
\textbf{Decoding Methods}:
We use the following three methods to obtain the next node in the partial sequence from the distribution $p(\sigma_t|\sigma_{1:t-1}, \boldsymbol{h}, G)$: \begin{enumerate}[leftmargin=4.5mm]
\item \emph{Greedy}: At each step $t$, select the node with the highest probability i.e. $\sigma_t = \argmax_{\Tilde{\sigma}_t} p(\Tilde{\sigma}_t|\sigma_{1:t-1}, \boldsymbol{h}, G)$
\item \emph{Sampling}: At each step $t$, sample from the next node distribution i.e. $\sigma_t \sim p(\cdot|\sigma_{1:t-1}, \boldsymbol{h}, G)$
\item \emph{Beam search with state-collapsing}: We can also expand the partial sequences by using a beam search method where the score function is total probability of the partial sequence. We improve our beam search routine by making the following observation: suppose there are two partial sequences in consideration, $\sigma_{1:t}$ and $\Tilde{\sigma}_{1:t}$, such that both have scheduled the same set of nodes so far (but different order), and $\mathcal{C}(\sigma_{1:t}) < \mathcal{C}(\Tilde{\sigma}_{1:t})$. Then, we can ignore the partial sequence $\Tilde{\sigma}_{1:t}$ and only keep $\sigma_{1:t}$ in the beam search. This is because both partial sequences must schedule the same set of remaining nodes, and hence the set of future memory costs are identical for both $\sigma_{1:t}$ and $\Tilde{\sigma}_{1:t}$, but the current peak memory cost is higher for $\Tilde{\sigma}_{1:t}$. Thus, $\sigma_{1:t}$ dominates $\Tilde{\sigma}_{1:t}$ in terms of achievable minimal peak memory usage. \end{enumerate}
\subsection{Training}
Our encoder-decoder architecture induces a distribution $p_{\theta} (\sigma|G)$ on the set of topological orders for a given DAG $G$. The expected cost incurred is given by $J(\theta | G) = \mathbb{E}_{p_\theta(\sigma|G)}\left[\mathcal{C}(\sigma(\theta))\right]$. We minimize the cost $J(\theta) = \mathbb{E}_{G} \left[J(\theta | G)\right]$ via gradient descent using the REINFORCE gradient estimator~\cite{williams1992,rl_book} as follows \begin{equation}
\nabla J(\theta) = \mathbb{E}_{G, p_\theta(\sigma|G)}\left[\left(\mathcal{C}(\sigma) - b(G)\right)\nabla_\theta\log p_\theta(\sigma|G)\right], \end{equation} where $b(G)$ is a \emph{baseline} meant to reduce the variance of the estimator. We follow~\cite{kool2018TSP} in setting it equal to the cost of a \emph{greedy rollout} of a baseline policy on the graph $G$ \begin{equation}
b(G) = \mathcal{C}(\argmax_\sigma p_\theta(\sigma|G)). \end{equation}
\section{Experiments}\label{sec:experiments}
We conduct experiments on a synthetic dataset of graphs which we refer to as ``layered graphs'', as well as a set of real-world computation graphs. We compare our approach with the following classic topological ordering baselines: \begin{itemize}[leftmargin=3.2mm,noitemsep]
\item\emph{Depth/Breadth first sequencing}: Find the topological order by traversing the graph in depth/breadth first manner according to the layout of the graph generated using pygraphviz.
\item\emph{Depth-first dynamic programming (DP)}: Depth-first DP is a global depth-first method for searching the optimal sequence, with automatic backtracking when equivalent partial sequences are found; it retains the full sequence with minimum cost so far, and returns it if search does not complete before the prescribed timeout.
\item\emph{Approximate DP}: In approximate DP, a beam of partial sequences are considered at each step. For each beam in the subsequent step, the top-$K$ partial sequences with the lowest costs are retained. This DP is also able to find the optimal sequence given enough memory and compute resources (with unlimited beam size $K$), but we only consider an approximate version with $K=10^5$ in this work. Note that approximate DP uses the state-collapsing and parallelism to improve its efficiency.
\item\emph{Random order}: We generate $100$ random topological orders, and pick the one with the smallest cost. \end{itemize} Please see the appendix for a more detailed description of the baselines. Therein, we also report some ablation studies on various neural baselines including ablation studies on the decoder by considering other neural architectures, as well as a comparison with an end-to-end baseline adapted from ref.~\cite{kool2018TSP}. Neural topo order greedy, sample and BS denote the performance of our model in greedy, sampling and beam search inference mode respectively. We use a sample size and beam size of $16$ sequences, of which the best one is subsequently picked, for all our experiments. Next, we describe in detail the results of the two experiments.
\subsection{Layered Graphs}
In order to generate a large corpus of training data we come up with a way to synthetically generate graphs of a given size which have similar structure to the computation graphs of feed-forward neural networks. We call our synthetic graph family \emph{layered graphs}, as these graphs consist of well-defined layers of nodes. The nodes in a layer have connections to the nodes in the subsequent layer and can also have skip connections with nodes in layers farther down. The number of layers, number of nodes per layer, number of edges between subsequent layers, number of skip connections and memory utilization of the nodes are all generated randomly, and can be controlled by setting appropriate parameters. We refer the reader to the appendix for more details on layered graphs, including their generation algorithm and some visual examples.
We train our model on $500$-node layered graphs for $325$ epochs, where in each epoch we generate a training set of $1000$ new graphs. We test the performance of our model on a set of 300 unseen graphs of the same size, generated with the same method. We also evaluate the cross-size generalization performance of our trained model by testing it on graphs of size $1000$ and $2000$. We refer the reader to the appendix for a comprehensive description of the training algorithm and model configuration.
Figure~\ref{fig:perf_time_layered} shows the performance vs. run time plot on layered graphs of size $|V|=500, 1000$, and $2000$. We report the performance in terms of the \% gap of peak memory utilization from the peak memory obtained via approximate DP, which we consistently observed to be the best-performing baseline. Note that the run time is plotted on a log-scale. We can observe that for $500$-node graphs, our model beats all the baselines except approximate DP in terms of both the memory usage and run time. Our model is slightly worse than approximate DP from the memory usage perspective, but it runs 100x faster. We also observe that our model generalizes well to larger-sized graphs. For the case of $2000$-node graphs our model performs better than approximate DP in terms of peak memory usage, while being $4000$x faster. This shows that while approximate DP performs more poorly as graph size increases, our model is able to generalize to larger graphs by learning meaningful embeddings of the topological structure thanks to Topoformer, and to be extremely fast thanks to our NAR decoding scheme. \begin{figure}
\caption{Average \% gap from approximate DP vs average run time comparison on the test set of 300 layered graphs. Lower is better for both \% gap and run time. }
\label{fig:perf_time_layered}
\end{figure} \begin{table}[h!tb] \begin{adjustwidth}{-0.25cm}{}
\centering
\caption{Comparison of methods on the synthetic layered graph test set.}
\begin{center}
\begin{tabular}{lrrrrrr}
\toprule
\multicolumn{1}{c}{\multirow{3}{*}{Algorithm}} &
\multicolumn{2}{c}{{500-node graphs}} &
\multicolumn{2}{c}{{1000-node graphs}} &
\multicolumn{2}{c}{{2000-node graphs}} \\
\cmidrule{2-3} \cmidrule{4-5} \cmidrule{6-7}
& \% gap from & run time & \% gap from & run time & \% gap from & run time \\
& approx. DP & [$s$] & approx. DP & [$s$] & approx. DP & [$s$] \\
\midrule
Approximate DP & 0 & 264.88 & 0 & 1561.17 & 0 & 8828.86 \\
\midrule
Depth-First DP & 5.76 & 3600 & 3.84 & 3600 & 2.40 & 3600 \\
(max. run time=1H) & & & & & & \\
Random order & 6.86 & 1.38 & 2.62 & 7.13 & 0.31 & 36.87 \\
Depth-first seq. & 12.9 & 2.45 & 7.1 & 10.91 & 3.57 & 51.32 \\
Breadth-first seq. & 20.94 & 2.43 & 11.31 & 10.87 & 6.42 & 51.52 \\
\midrule
Neural Topo Order & & & & & & \\
\checkmark Greedy & 4.32 & 0.6 & 0.48 & 1.19 & -1.47 & 2.44 \\
\checkmark Sample & 3.49 & 0.72 & \textbf{0.03} & 1.41 & \textbf{-1.68} & 2.87 \\
\checkmark Beam search & \textbf{3.21} & 2.68 & 0.08 & 5.92 & -1.66 & 14.74 \\
\bottomrule
\end{tabular}
\end{center}
\label{tab:decoding_complexity} \end{adjustwidth} \end{table}
\subsection{Real-World Graphs}
While our synthetic layered graphs are convenient for experimentation, we see value in also presenting results obtained from neural computation graphs used for commercial development of our artificial intelligence hardware and software products. Here we sample 115 representative graphs that have diverse architectures (classifiers, language processors, denoisers, etc.) and size (from a few dozen to 1k nodes). We split this dataset into a training set and test set via a random $80-20$ split. We train our model for $500$ epochs and report the performance on the unseen test set at the end of training in table \ref{tab:real_graph}. In order to ensure fair comparison of run times, we stratify the test set into 3 categories based on the graph size. Figure \ref{fig:perf_time_real} shows the performance vs run time plot on the test set of real graphs. We observe that for real graphs the performance gap between the best baseline (approximate DP) and our model is remarkable. We can obtain sequences which are $50\%$ better than approximate DP on average while also being almost 1000x faster on average. This proves the capability of our model to generalize and perform well on real-world computation workflows.
\begin{figure}
\caption{Performance vs run time comparison for different approaches on test set of real computation graphs. Performance is measured in average \% gap from approximate DP.}
\label{fig:perf_time_real}
\end{figure}
\begin{table}[h!tb] \begin{adjustwidth}{-0.25cm}{}
\centering
\caption{Comparison of methods on the real graph test set. Smaller \% gap is better}
\begin{tabular}{lrrrrrr}
\toprule
\multicolumn{1}{c}{\multirow{3}{*}{Algorithm}} &
\multicolumn{2}{c}{{200 - 500-node graphs}} &
\multicolumn{2}{c}{{500 - 700-node graphs}} &
\multicolumn{2}{c}{{700 - 1000-node graphs}} \\
\cmidrule{2-3} \cmidrule{4-5} \cmidrule{6-7}
& \% gap from & run time & \% gap from & run time & \% gap from & run time \\
& approx. DP & [$s$] & approx. DP & [$s$] & approx. DP & [$s$] \\
\midrule
Approximate DP & 0 & 113.54 & 0 & 517.60 & 0 & 1131.61 \\
\midrule
Depth-First DP & 62.18 & 3600 & 102.76 & 3600 & 50.57 & 3600 \\
(max. run time=1H) & & & & & & \\
Random order & 469.34 & 0.25 & 376.16 & 1.24 & 116.24 & 2.40 \\
Depth-first seq. & 506.21 & 0.70 & 394.93 & 2.26 & 123.21 & 4.49 \\
Breadth-first seq. & 348.77 & 0.75 & 149.81 & 2.31 & -35.55 & 4.86 \\
\midrule
Neural Topo Order & & & & & & \\
\checkmark Greedy & -17.57 & 0.42 & -51.23 & 0.6 & -68.97 & 0.83 \\
\checkmark Sample & \textbf{-21.53} & 0.44 & -40.51 & 0.68 & -61.46 & 0.97 \\
\checkmark Beam search & -19.5 & 1.22 & \textbf{-57.34} & 2.58 & \textbf{-73.45} & 3.86 \\
\bottomrule
\end{tabular}
\label{tab:real_graph} \end{adjustwidth} \end{table}
\subsection{Encoder Ablation Study}
\begin{table}[h!tb] \begin{adjustwidth}{-0.25cm}{}
\centering
\caption{Comparison of different encoder architectures. Topoformer with MP (message passing) on DAG corresponds to forward and backward message passing only on the input DAG using topoformer.}
\begin{center}
\begin{tabular}{lrrrrrr}
\toprule
\multicolumn{1}{c}{\multirow{3}{*}{Algorithm}} &
\multicolumn{2}{c}{{500-node graphs}} &
\multicolumn{2}{c}{{1000-node graphs}}\\
\cmidrule{2-3} \cmidrule{4-5}
& \% gap from & run time & \% gap from & run time \\
& approx. DP & [$s$] & approx. DP & [$s$] \\
\midrule
MLP & & & & \\
\checkmark Greedy & 8.31 $\pm$ 0.76 & 0.58 $\pm$ 0.0 & 2.95 $\pm$ 0.48 & 1.52 $\pm$ 0.01 \\
\checkmark Sample & 4.41 $\pm$ 0.50 & 0.67 $\pm$ 0.0 & 0.68 $\pm$ 0.35 & 1.84 $\pm$ 0.02 \\
\checkmark Beam search & 6.5 $\pm$ 0.69 & 2.47 $\pm$ 0.01 & 2.43 $\pm$ 0.49 & 7.62 $\pm$ 0.07 \\
\midrule
Fully Connected Transformer & & & & \\
\checkmark Greedy & 8.46 $\pm$ 0.72 & 0.69 $\pm$ 0.01 & 3.09 $\pm$ 0.46 & 1.3 $\pm$ 0.01 \\
\checkmark Sample & 4.72 $\pm$ 0.52 & 0.8 $\pm$ 0.01 & 0.85 $\pm$ 0.37 & 1.55 $\pm$ 0.02 \\
\checkmark Beam search & 6.52 $\pm$ 0.72 & 2.98 $\pm$ 0.03 & 2.09 $\pm$ 0.47 & 6.49 $\pm$ 0.07 \\
\midrule
GAT (forward only) & & & & \\
\checkmark Greedy & 5.94 $\pm$ 0.61 & 0.49 $\pm$ 0.01 & 1.33 $\pm$ 0.38 & 1.24 $\pm$ 0.01 \\
\checkmark Sample & 4.19 $\pm$ 0.56 & 0.64 $\pm$ 0.01 & 0.48 $\pm$ 0.36 & 1.54 $\pm$ 0.02 \\
\checkmark Beam search & 4.22 $\pm$ 0.60 & 2.22 $\pm$ 0.02 & 0.60 $\pm$ 0.38 & 5.94 $\pm$ 0.04 \\
\midrule
GAT (forward+backward) & & & & \\
\checkmark Greedy & 4.84 $\pm$ 0.55 & 0.63 $\pm$ 0.01 & 0.90 $\pm$ 0.37 & 1.37 $\pm$ 0.02 \\
\checkmark Sample & 3.55 $\pm$ 0.53 & 0.80 $\pm$ 0.01 & 0.23 $\pm$ 0.36 & 1.67 $\pm$ 0.02 \\
\checkmark Beam search & 3.55 $\pm$ 0.54 & 2.89 $\pm$ 0.01 & 0.39 $\pm$ 0.36 & 6.50 $\pm$ 0.05 \\
\midrule
Topoformer (forward+backward) (Ours) & & & & \\
\checkmark Greedy & 4.82 $\pm$ 0.55 & 0.73 $\pm$ 0.01 & 0.76 $\pm$ 0.36 & 1.62 $\pm$ 0.02 \\
\checkmark Sample & 3.67 $\pm$ 0.52 & 0.85 $\pm$ 0.01 & 0.21 $\pm$ 0.36 & 1.99 $\pm$ 0.02 \\
\checkmark Beam search & 3.68 $\pm$ 0.57 & 3.03 $\pm$ 0.03 & 0.35 $\pm$ 0.37 & 8.1 $\pm$ 0.08 \\
\midrule
Full Topoformer (Ours) & & & & \\
\checkmark Greedy & 4.31 $\pm$ 0.56 & 1.04 $\pm$ 0.01 & 0.47 $\pm$ 0.36 & 1.51 $\pm$ 0.01 \\
\checkmark Sample & 3.35 $\pm$ 0.52 & 1.21 $\pm$ 0.01 & \textbf{-0.01} $\pm$ 0.35 & 1.8 $\pm$ 0.02 \\
\checkmark Beam search & \textbf{3.08} $\pm$ 0.51 & 4.15 $\pm$ 0.02 & 0.05 $\pm$ 0.36 & 7.4 $\pm$ 0.07 \\
\bottomrule
\end{tabular}
\end{center}
\label{tab:encoder_ablation} \end{adjustwidth} \end{table}
We conduct experiments by using an MLP, fully connected transformer and GAT as an encoder architecture to quantify the effectiveness of our topoformer architecture. We test a vanilla version of GAT (referred as GAT forward only) which does message passing only on the edges of the DAG. We also consider GAT encoder which does message passing on the augmented graph having reverse edges corresponding to all the edges of the DAG and refer to this setting as GAT forward+backward.
We train each model on the layered graph dataset of 500 node graphs. We evaluate the performance of the trained model on the test set (300 graphs) of 500 node and 1000 node graphs. We use a sample size and beam width of 16 for evaluation on both 500 and 1000 node graphs. The MLP and transformer use the same number of layers and hidden dimension as the topoformer specified in appendix \ref{append:architecture_details}. We run the inference on our test set of $300$ graphs $10$ times for each model to be more precise in our run time calculations. We report the mean \% gap from approximate DP and the mean run time across all the graphs and trials along with their 95\% confidence interval.
Table \ref{tab:encoder_ablation} shows the performance of different encoder architectures. It can be observed that both versions of our topoformer architecture and GAT have a superior performance than MLP and fully connected transformer for both graph sizes. Moreover, full topoformer (message passing on all the seven graphs listed in section \ref{sec:topoformer}) has a better performance than GAT and topoformer with message passing only the forward and backward edges of the DAG. This shows the benefit of global message passing between all the nodes which is enabled by the full topoformer.
\section{Conclusion}
In this work we propose an end-to-end machine learning method for the task of optimizing topological orders in a directed acyclic graph. Two key elements in our design are: (1) an attention-based GNN architecture named Topoformer that employs message passing that is both global and topologically-aware in directed acyclic graphs, (2) a non-autoregressive parametrization of the distribution on topological orders that enables fast inference. We demonstrated, for both synthetic and real-world graphs, the effectiveness of the method in tackling the problem of minimizing peak local memory usage for a compute graph -- a canonical task in compiler pipelines. Said pipelines also include other tasks~\cite{gopaper}, chief amongst them the one of assigning operations to devices for execution. At the present stage, our method and dataset cannot be leveraged for solving these, or for end-to-end optimization of a whole pipeline. Extending our method to this more challenging setting is therefore a natural direction for future research.
\appendix
\doparttoc \addcontentsline{toc}{section}{Appendix} \part{Appendix} \parttoc
\section{Layered graphs dataset} We report here the details of the generation algorithm we use to create our dataset. It is not the first time that a synthetic dataset of graphs is used to train and test an ML framework on a compiler task, as this was already done in ref.~\cite{regalpaper}. However, the models therein used were generic random graph models (e.g.\ Erd\H{o}s--R\'enyi), rather than a model explicitly tailored to reproduce NN-like computation graphs. We develop such a model, and we release its details with the intent of both ensuring reproducibility of our results, as well as of providing a tool that we hope will be picked up by researchers interested in compiler problems, as well as more general sequence optimization tasks on DAGs.
The algorithm builds a graph by organizing a fixed number $|V|$ of nodes into well-defined layers, and then placing edges between subsequent layers, as well as skip connections that skip at least one layer. While the number of nodes is fixed by the user, the target number of layers $L$ depends on the \emph{width factor} $\mathcal{W}$ of the graph. A width factor of 0 would result in a one-dimensional chain graph, whilst a width factor of 1 in a graph with a single, wide layer,
\begin{equation} L = \left\lceil\sqrt{|V|\left(\frac{1}{\mathcal{W}}-1\right)}\right\rceil, \end{equation} where $\lceil\cdot\rceil$ is the ceiling function. In order to promote architectural variability within the dataset, we choose to randomly draw a new width factor, $\mathcal{W}\sim U\left(0.25,0.5\right)$, for each graph, with $U(a,b)$ denoting the uniform distribution in the $[a,b]$ interval. Subsequently, the number of nodes to assign to each layer $\ell$ is also an integer randomly drawn from a uniform distribution \begin{equation}
\mathcal{N}_\ell \sim U\left(\lceil|V|/L\left(1-\sigma_\mathcal{N}\right)\rceil,\lfloor|V|/L\left(1+\sigma_\mathcal{N}\right)\rfloor\right),
\end{equation} with $\sigma_\mathcal{N}$ being a user-defined variability parameter, and $\lfloor\cdot\rfloor$ is the floor function. We stress that both $L$ and $\mathcal{N}_\ell$ are just target values, since we wish to keep $|V|$ fixed: this layer-by-layer node addition process is stopped as soon as the graph has the number of nodes $|V|$ required, which might lead to the number of layers and nodes per layer being ultimately different from their respective targets. The pseudocode for this procedure is reported in algorithm~\ref{algo:nodes}. \begin{algorithm}[htb!] \SetAlgoLined \KwOut{A layered graph $G=(V,)$ without edges}
\KwIn{Total number of nodes $|V|$, number-of-nodes-per-layer variability $\sigma_\mathcal{N}$} \KwData{layer index $\ell$, node index $n$, node counter $N$, target number $\mathcal{N}_\ell$ of nodes for layer $\ell$} $\ell \leftarrow$ 0\; $N \leftarrow$ 0\; \While{True}{
$\mathcal{N}_\ell \sim U\left(\lceil|V|/L\left(1-\sigma_\mathcal{N}\right)\rceil,\lfloor|V|/L\left(1+\sigma_\mathcal{N}\right)\rfloor\right)$\; \For{$n\in[1, \mathcal{N}_\ell]$}{
\uIf{$N\geq |V|$}{ break\; } add node $n$ to graph $G$\; add node $n$ to layer $\ell$\; $N \leftarrow N+1$\; } $\ell \leftarrow \ell + 1$ } \caption{Node-assignment algorithm for layered graphs. \label{algo:nodes}} \end{algorithm}
After the layers are set up, the algorithm proceeds to assign edges between adjacent layers. As an example, let us assume that $\mathcal{N}_1$ and $\mathcal{N}_2$ are the numbers of nodes for two adjacent layers, with $\mathcal{N}_2<\mathcal{N}_1$. The maximal number of edges between these two layers, corresponding to a fully-connected, MLP-like topology, would be $\mathcal{N}_1\times\mathcal{N}_2$. Since we want each node to have at least one ingoing and one outgoing connection (except for those in the first and last layers), the minimal number of connections must be $\max(\mathcal{N}_1,\mathcal{N}_2) = \mathcal{N}_1$. The user can interpolate between these two extrema by tuning the \emph{edge density} parameter $\rho_E$, with the number of edges to place between the two layers being ultimately equal to \begin{equation}
|E|_{(\ell_i,\ell_{i+1})} = (\mathcal{N}_{\ell_i}\times\mathcal{N}_{\ell_{i+1}})\rho_E + (1-\rho_E)\max(\mathcal{N}_{\ell_i},\mathcal{N}_{\ell_{i+1}}). \end{equation} This budget of edges is subsequently distributed among the nodes in the larger layer (layer 1 in our example), with them being assigned to the node with the smallest number of so-far-assigned edges (ties are broken randomly), until it is exhausted. What then remains to do is to connect all the so-assigned edges to nodes in the other layer (layer 2 in our example above). We choose these destination nodes in such a way that, if the layers were visualized as being centered one above the other, with the larger layer at the top, the edges assigned to a node end up more or less equally spaced in a 2-$d$ cone below it. This procedure is repeated for every pair of adjacent layers, as we report in algorithm~\ref{algo:edges}. \begin{algorithm}[htb!] \SetAlgoLined \KwOut{A layered graph $G=(V,E)$ with edges but no skip connections.} \KwIn{A layered graph $G=(V,)$ without edges, edge density $\rho_E$} \caption{Edge-assignment algorithm for layered graphs. \label{algo:edges}}
\KwData{Number $|E|_{(\ell_i,\ell_j)}$ of edges between layers $\ell_i$ and $\ell_j$. $c_n$ is a counter of edges incoming or outgoing from node $n$} \For{$\ell_1 \in$ graph layers}{ $\ell_2 = \ell_1+1$\;
$|E|_{(\ell_1,\ell_2)} = (\mathcal{N}_{\ell_1}\times\mathcal{N}_{\ell_2})\rho_E + (1-\rho_E)\max(\mathcal{N}_{\ell_1},\mathcal{N}_{\ell_2})$ (rounded to the closest integer)\; \uIf{$\mathcal{N}_1\geq\mathcal{N}_2$}{ $\ell_s \leftarrow \ell_1$, $\ell_t \leftarrow \ell_2$\; } \Else{ $\ell_s \leftarrow \ell_2$, $\ell_t \leftarrow \ell_1$\; } \For{$n \in \ell_s$}{ $c_n \leftarrow 0$\; }
\While{$\sum_{n\in \ell_s}c_n < |E|_{(\ell_1,\ell_2)}$}{ $\mathcal{S} \leftarrow \argmin c_n$\; Pick $i$ randomly from set $\mathcal{S}$\; $c_i \leftarrow c_i + 1$\;
} \For{$n \in [0, \mathcal{N}_{\ell_s}-1]$}{ \uIf{$\mathcal{N}_{\ell_s}=1$}{ $n_c$ = 0\; }\Else{ $n_c = n \times \frac{\mathcal{N}_{\ell_t}-1}{{\mathcal{N}_{\ell_s}}-1}$ set "center node," rounded to the nearest integer } \For{$i \in[0,c_n-1]$}{
$n_t = (n_c - (c_n - 1)//2) + [0, c_n-1]$ (a range centered at $n_c$)\; Shift the range $n_t$ up/down such that no index is less than $0$ or greater than $\mathcal{N}_{\ell_t}-1$ \; \For{$j \in n_t$}{ add one edge between node $n$ of layer $\ell_s$ and node $j$ of layer $\ell_t$ }
} } } \end{algorithm}
Skip connections, i.e. edges skipping at least one layer, which are often found in modern NN architectures, are then added to the graph. The total number of skip connections to add is fixed as \begin{equation}
\mathcal{N}_S = |E|\frac{\rho_S}{(1 - \rho_S)},
\end{equation} where $|E|$ is the total number of edges in the graph so far, and $\rho_S$ a user-defined skip connection density. For each skip connection, we randomly draw a source layer among those between the first and the third-to-final ones (since skip connections must skip at least one layer). The target layer number is then also drawn at random between the source layer number $+2$, and the final layer (both included). One must then assign a source and a target \emph{node} within each of these layers. We just select the source node at random within the source layer, and then assign the target node in such a way that it would be more or less directly below the source node if the graph were visualized on a 2-$d$ plane. The pseudocode of this procedure is reported in algorithm~\ref{algo:skip}, and figure \ref{fig:layered_graph} shows three example instances of layered graphs created with our algorithm. \begin{algorithm}[htb!] \SetAlgoLined \KwOut{A layered graph $G=(V,E)$ with both connections between adjacent layers, and skip connections} \KwIn{A layered graph $G=(V,E)$ with edges between adjacent layers but no skip connections, skip connection density $\rho_S$}
\KwData{number of layers $L$, number of edges $|E|$} \uIf{L<3}{ break; ~~\tcc{cannot have skip connections with fewer than 3 layers}\ }
$\mathcal{N}_S = \lceil|E|\frac{\rho_S}{(1 - \rho_S)}\rceil$\; \For{$i\in [0,\mathcal{N}_S)$}{ $\ell_s \leftarrow$ a layer at random between the first and third-to-last (both included)\; $\ell_t \leftarrow$ a layer at random between layer number $\ell_s + 2$ and the last (both included)\; $x_s\sim U(0,1)$\; $y\sim U(0,1)$\; $x_t = x_s + 0.2\times y$\; $x_t = \min(x_t, 0.999)$; ~~\tcc{ensure that $x_t\in[0,1)$}\
add an edge between node $\lfloor x_s \mathcal{N}_{\ell_s}\rfloor$ of layer $\ell_s$ and node $\lfloor x_t \mathcal{N}_{\ell_t}\rfloor$ of layer $\ell_t$ } \caption{Skip connection-assignment algorithm for layered graphs. \label{algo:skip}} \end{algorithm}
\begin{figure}
\caption{Three example graphs from the layered graph family with (from left) 25, 50, and 100 nodes, generated using the algorithm we describe in the text. One can clearly make out the layered structure and easily remark the presence of skip connections.}
\label{fig:layered_graph}
\end{figure}
Finally, we specify the assignment of memory costs to the nodes. In the layered graph model, we have both output memory costs $\left(m_i\right)_{i=1}^{|V|}$ and parameter costs $\left(p_i\right)_{i=1}^{|V|}$, where the output cost is the memory usage of the output of an operation, and the parameter cost the one of a variable necessary to execute the operation; for example, if the operation at node $i$ were a matrix multiplication, $\boldsymbol{y}=M\boldsymbol{x}$, $m_i$ would be the memory usage of $\boldsymbol{y}$ and $p_i$ the one of the matrix $M$. The parameter cost of operation $\sigma_t$ during a sequence is added to the memory usage at time $t$, but not to the cost at subsequent steps, since the memory associated with it can be de-allocated as soon as the operation has been executed. In particular, the memory utilization cost $M_t$ in \eqref{eq:mem_cost} gets modified to the following: \begin{equation}
M_t = I_{t-1} + m(\sigma_t) + p(\sigma_t) \end{equation} where $I_t$ is defined in \eqref{eq:I_t}. Both costs are randomly drawn from a simple mixture of Gaussians $\text{GMM}(\mathbf{w}, \mathbf{\mu}, \mathbf{\sigma}) \equiv \sum_{i=1}^4 w_i\mathcal{N}(\mu_i,\sigma_i)$ projected onto the positive reals,
\begin{equation} m_i \sim \text{GMM}(\mathbf{w}, \mathbf{\mu}, \mathbf{\sigma})\big|_{\mathbb{R}_+}, ~~~~ p_i \sim \text{GMM}(\mathbf{w}, \mathbf{\mu}, \mathbf{\sigma})\big|_{\mathbb{R}_+}. \end{equation}
To align the costs assignment with the real world computation graphs, instead of sampling the memory costs for each node $n$, we sample one output cost $m_l$ and parameter cost $p_l$ for each layer $l$ and assign the costs $m_l, p_l$ to each node in layer $l$. This is because many real world computation graphs are a tiled version of the original precedence graph of compute nodes where each node is broken down into a layer of nodes with similar shape and parameter requirements. This concludes the description of our dataset generation algorithm. For the sake of reproducibility, we report below the value we took for all the user-defined parameters mentioned in this section: \begin{itemize} \item Variability of number of nodes per layer $\sigma_\mathcal{N} = 0.75$ \item Edge density $\rho_E = 0.2$ \item Skip connection density $\rho_S = 0.14$ \item Means of the Gaussian mixture $(\mu_1,\mu_2,\mu_3,\mu_4) = (0.5, 1, 3, 5)$ \item Standard deviations of the Gaussian mixture $(\sigma_1,\sigma_2,\sigma_3, \sigma_4) = (0.5, 1, 1, 1)$ \item Weights of the Gaussian mixture $(w_1,w_2,w_3,w_4) = (0.3, 0.3, 0.3, 0.1)$ \end{itemize}
\section{Decoder Ablation study}
In order to measure the effectiveness of our architecture we perform ablation experiments to study the effect of changing the decoder to an auto-regressive decoder and changing both the encoder and the decoder (Table~\ref{tab:decoder_ablation}).
We compare the performance of our architecture with the model which uses topoformer as an encoder but uses an auto-regressive decoder. We adapt the decoder designed for the TSP problem \cite{kool2018TSP} for our memory-minimization problem. The decoder of \cite{kool2018TSP} uses the notion of a context node for decoding: at each decoding step, it applies a series of multi-head attention operations with the context node to arrive at the distribution of the next node to be selected for the order. We modify the masking procedure in the decoder of \cite{kool2018TSP} to mask out all the nodes which are not present in the set of feasible next nodes $\mathcal{S}(\sigma_{1:t-1}, G)$.
We also conduct an experiment by changing both the encoder and decoder by adapting the model of \cite{kool2018TSP} to our problem. We adapt the auto-regressive decoder of \cite{kool2018TSP} as described above. \cite{kool2018TSP} uses a fully connected transformer as an encoder since the underlying graph in TSP is a fully connected graph. We modify the encoder of \cite{kool2018TSP} to do message passing only on the edges of our input DAG so that it can exploit the topological structure of the graph in the encoding stage. We refer to this model as "GNN encoder + AR decoder" in table \ref{tab:decoder_ablation}.
\begin{table}[h!tb] \begin{adjustwidth}{-0.25cm}{}
\centering
\caption{Comparison with Auto-regressive decoding}
\begin{center}
\begin{tabular}{lrrrrrr}
\toprule
\multicolumn{1}{c}{\multirow{3}{*}{Algorithm}} &
\multicolumn{2}{c}{{500-node graphs}} &
\multicolumn{2}{c}{{1000-node graphs}}\\
\cmidrule{2-3} \cmidrule{4-5}
& \% gap from & run time & \% gap from & run time \\
& approx. DP & [$s$] & approx. DP & [$s$] \\
\midrule
GNN encoder + AR decoder & & & & \\
\checkmark Greedy & 6.13 $\pm$ 0.58 & 1.66 $\pm$ 0.01 & 1.84 $\pm$ 0.39 & 3.34 $\pm$ 0.02 \\
\checkmark Sample & 4.71 $\pm$ 0.56 & 1.76 $\pm$ 0.01 & 1.38 $\pm$ 0.37 & 3.59 $\pm$ 0.02 \\
\checkmark Beam search & 4.87 $\pm$ 0.61 & 4.01 $\pm$ 0.02 & 2.09 $\pm$ 0.41 & 7.90 $\pm$ 0.05 \\
\midrule
Topoformer + AR decoder & & & & \\
\checkmark Greedy & 4.43 $\pm$ 0.55 & 1.53 $\pm$ 0.01 & 0.53 $\pm$ 0.35 & 3.05 $\pm$ 0.02 \\
\checkmark Sample & 3.33 $\pm$ 0.51 & 1.7 $\pm$ 0.01 & \textbf{0.05} $\pm$ 0.35 & 3.38 $\pm$ 0.02 \\
\checkmark Beam search & 3.14 $\pm$ 0.52 & 4.27 $\pm$ 0.04 & 0.13 $\pm$ 0.36 & 7.90 $\pm$ 0.05 \\
\midrule
Topoformer + NAR decoder (Ours) & & & & \\
\checkmark Greedy & 4.31 $\pm$ 0.56 & 1.04 $\pm$ 0.01 & 0.47 $\pm$ 0.36 & 1.53 $\pm$ 0.01 \\
\checkmark Sample & 3.35 $\pm$ 0.52 & 1.21 $\pm$ 0.01 & 0.09 $\pm$ 0.35 & 1.78 $\pm$ 0.01\\
\checkmark Beam search & \textbf{3.08} $\pm$ 0.51 & 4.15 $\pm$ 0.02 & 0.2 $\pm$ 0.36 & 5.57 $\pm$ 0.05 \\
\bottomrule
\end{tabular}
\end{center}
\label{tab:decoder_ablation} \end{adjustwidth} \end{table}
We train both the models: "GNN encoder + AR decoder" and "Topoformer + AR decoder" on the layered graph dataset of 500 node graphs. We evaluate the performance of the trained model on the test set (300 graphs) of 500 node and 1000 node graphs. We use a sample size and beam width of 16 for 500 node graphs and a sample size and beam width of 8 for 1000 node graphs. We use a smaller sample size for 1000 node graphs due to GPU memory issues with the auto-regressive decoder approaches.
Table \ref{tab:decoder_ablation} shows the mean and the 95\% confidence interval of the \% gap from approximate DP and run time for the three approaches on 500 and 1000 node graphs. We note that the performance of topoformer with AR decoder is quite close to our model for both 500 and 1000 node graphs. However, our model can run inference 2x faster than topoformer with AR decoder on 1000-node graphs (in greedy mode). Also, our model outperforms the adaptation of the attention-based GNN encoder and AR decoder of \cite{kool2018TSP} to our problem both in terms of memory cost of the sequence and run time. This shows the merit of our topoformer architecture over using a traditional GNN architecture which does message passing only on the input graph.
\section{Training and Model details}\label{append:architecture_details}
\subsection{Training} We train our model using the ADAM optimizer with the initial learning rate of $10^{-4}$ and learning rate decay factor of $0.996$ per epoch. We use a batch size of $8$ for training our model. The training and testing of our model is done on a single GPU (Nvidia Tesla V-100) with $32$ GB memory. We trained our model for 326 epochs on the synthetic graph dataset where in each epoch we provide 1000 training graphs. Also, we provide a new training set in each epoch so that we do not overfit our model on a fixed training set. We found the training to be fairly stable, and it converged in about 1-2 days.
\subsection{Model architecture}
We use topoformer with number of layers $n_{layers} = 4$, embedding dimension $d = 256$, number of heads $n_{heads} = 10$ for each MHA operation on the seven graphs listed in section \ref{sec:topoformer} and the query and value dimension of $64$ for each head of MHA. The MLP used in \eqref{eq:mlptopo} consists of a linear layer ($d_{input} = d_{output} = 256$) with GELU activation followed by another linear layer ($d_{input} = d_{output} = 256$). The MLP used in \eqref{eq: mlp_priority} to generate the node priorities consists of a linear layer ($d_{input} = d_{output} = 256$) with RELU activation followed by another linear layer ($d_{input} = 256, d_{output} = 1$). In order to restrict the range of priority values, we also normalize the priorities of the nodes used for the decoding as follows: \begin{equation}
\Tilde{y}_i = \alpha \times \frac{y_i - \text{mean}(\mathbf{y})}{\text{std} (\mathbf{y})} \end{equation}
where $\mathbf{y} = \left[y_1, y_2, \ldots, y_{|V|} \right]$ and $\alpha$ is a hyperparameter. We set $\alpha=5$ for our experiments.
\subsection{Baselines} We provide more details about the dynamic programming baselines used in our experiments to compare the performance of our model.
\begin{itemize}
\item
\textbf{Depth-First Dynamic Programming (DP).}
Topological orders are generated in a depth-first manner (with backtracking) where the next node is picked randomly among available candidates. Branch exploration is terminated if 1) the same set of nodes are in the partial sequence as a branch that has been already explored --- only the lowest-cost partial sequence is retained (dynamic programming approach), and 2) if the current partial cost is already higher than the lowest cost of any full sequence already found (cost increases monotonically). This algorithm will eventually find the globally optimal order, though the run time for doing so is expected to be at least exponential in $|V|$~\cite{ahn2020ordering}; it is however able to return at least one complete sequence in time $O(|V| + |E|)$~\cite{kahn_dfs} in the worst case, same as DFS.
In our implementation, we set a wall time of one hour and pick the best complete path found. We observe that for our synthetic layered graphs, if the graph size is as small as $|V|=100$, we can actually find the optimal sequence in most cases within the one-hour budget. We ran this algorithm on a CPU machine with an Intel(R) Xeon(R) W-2123 CPU @ 3.60~GHz.
\item
\textbf{Approximate DP.}
We define the state space $S$ as the space including a set of all nodes for each partial sequence (which \emph{ignores} the ordering information) and the action space for each state as the space of all possible next-node choices at that state (based on the topological structure). As an example for the state representation, if there is a partial sequence $5\rightarrow 2 \rightarrow 4 \rightarrow 3 \rightarrow 1$, the corresponding state is $\{1, 2, 3, 4, 5\}$.
With the empty set $\emptyset$ being an initial state (meaning that no node has been added), we consider a state transition model that adds an action (a node) to a state and creates a successor state. Specifically, we can partition $S$ into $S_0\cup S_1\cup \cdots \cup S_{|V|}$, where $S_t$ is the space including a set of all nodes for each length-$t$ partial sequence (note that $S_0=\{\emptyset\}$). At every iteration $t=0, 1, ..., |V|-1$, the algorithm takes $S_t$ and assumes that we have \emph{(1) the minimum cost} and \emph{(2) the best partial sequence} for each state in $S_t$, where the minimum cost is over all feasible partial sequences corresponding to the state. Then, for each successor state in $S_{t+1}$, the algorithm computes the minimum cost and the best partial sequence for reaching out that state.
It should be noted that the algorithm gives an \emph{exact} solution if the amount of time and memory resource is sufficient, e.g., an exact solution can be found for 100-node graphs. However, due to the practical resource limitation, we only keep top-$K$ elements of $S_{t+1}$ for each iteration $t$ based on costs. We use the beam size $K=100,000$ for all experiments, and Nvidia Tesla V-100 is used for parallel computation across multiple states for each iteration.
\end{itemize}
\subsection{Baseline policy}
The baseline $b(G)$ used in the policy gradient update is generated using the greedy rollout of the baseline policy. The baseline policy is also an instance of our model which is updated regularly during the course of training. At the end of each epoch, if the performance of the model being trained becomes better than the baseline model (in greedy inference mode) on a set of validation graphs then we copy the weights of the trained mode to the baseline model.
\subsection{Input features and initial node embedding} We use the following as the input features $x_j$ for node $j$: \begin{enumerate}
\item Output memory cost $m_j$ and parameter memory cost $p_j$
\item In-degree and out-degree of the node
\item Minimum and maximum distance (in terms of hop count) of the node from the source and target node \end{enumerate}
We normalize each entry of the input node feature across the nodes so that the features lie between $0$ and $1$, making them invariant with respect to the graph size. To be precise, the $i^{th}$ entry of the normalized input feature of node $j$ is given as $\bar{x}^i_j = \frac{x^i_j}{\max_{n} x^i_n}$. We also augment the node features with the Laplacian positional encodings (PE) \cite{dwivedi2020generalization} of dimension 20. We compute the Laplacian PE using the Laplacian matrix of the undirected DAG where all the directed edges are converted to undirected edges. Finally, the initial embedding $h^0_j$ for node $j$ is obtained by passing $\bar{x}_j$ through a linear layer.
\end{document} | arXiv | {
"id": "2207.05899.tex",
"language_detection_score": 0.8333956003189087,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Estimating the density of a set of primes with applications to group theory} \date{October 19, 2018} \author{Carlos Esparza} \address{Technical University of Munich} \email{carlos.esparza@tum.de} \author{Lukas Gehring} \address{University of Bonn} \email{lukas.gehring@uni-bonn.de}
\subjclass[2010]{
11N05 }
\begin{abstract}
We estimate the asymptotic density of the set $\widebar{A}$ of primes $p$ satisfying the constraint that $p+1$ and $p-1$ have only one prime divisor larger than 3. We also estimate the density of a maximal subset $\widebar{B} \subset \widebar{A}$ such that for $p_1, p_2 \in \widebar{B}$ no common prime divisor of $p_1(p_1 + 1)(p_1 - 1)$ and $p_2 (p_2 + 1)(p_2 - 1)$ is larger than $3$. Assuming a generalized Hardy--Littlewood conjecture, we prove that for both $\widebar{A}$ and $\widebar{B}$ the number of elements less than $x$ is asymptotically equal to a constant times $ x / (\log x)^3$. \end{abstract}
\maketitle
\begin{notation}
We consider $0 \not\in \mathbb{N}$.
In the bounds of a sum or a product, $p$ always denotes a prime.
$(a,b)$ without any function name is the greatest common divisor of $a$ and $b$.
For a set $S \subset \mathbb{N}_0$ and $x \in \mathbb{R}$ we define $S_{\leq x} = \{n \in S : n \leq x\}$.
$\log$ denotes the natural logarithm (unless subscripted with another base).
We write $f(x) \ll g(x)$ with the same meaning as $f(x) = \OO{g(x)}$
and subscript the $O$ or the $\ll$ with the quantities on which the implicit constants depend (no subscript means the implicit constant is absolute).
Finally, $f(x) \sim g(x)$ means that $\lim_{x \to \infty} f(x) / g(x) = 1$ or equivalently $f(x) = g(x) + o(g(x))$. \end{notation}
\section{Introduction}
The authors of \cite{graph} work on bounding the number of vertices $V(G)$ of the \emph{character degree graph} \footnote{\label{note:1} The definition can be found in \cite{graph} but it will not be relevant for this paper.} (or simply \emph{degree graph}) of a finite group $G$ in terms of its \emph{clique number}\footnoteref{note:1} $\omega(G)$.
In \cite[Theorem A]{graph} they prove the bound $
|V(G)| \leq 3\omega(G) - 4 \label{eq:graph} $ for $\omega(G) \geq 5$ and conjecture that it can be attained for every value of $\omega(G)$.
In order to provide such a $G$ for all $\omega(G) \geq 5$, they choose a suitable set of primes $\Pi = \{p_1, \dots, p_n\}$ and construct $G_\Pi = \PSL{2}{p_1}~\times~\dots~\times~\PSL{2}{p_n}$.
This group satisfies $\omega(G_\Pi) = n + 2$ and $|V(G_\Pi)| = 3n + 2$. Therefore it provides the desired equality for $\omega(G) \leq N + 2$ where $N$ is the cardinality of a maximal suitable set of primes.
We now specify which sets of primes are suitable. To this end, let
\begin{align} \widebar{A} = \{ p \text{ prime} : ~& p \equiv 2 \pmod{3}, \quad \exists \alpha, \beta, \gamma, \mu, \nu, q, r \in \mathbb{N}: \notag\\ &\quad p+1 = 2^\alpha 3^\beta q^\mu, \quad p-1 = 2^\gamma r^\nu , \quad q, r \text{ primes with } (6,qr)=1 \} \label{eq:AAdef} ~,\ \end{align}
so $\widebar{A}$ is the set of all primes $p$ such that $p+1$ and $p-1$ have only one prime divisor larger than $3$. Further set $\widebar{B}$ to be a maximal subset of $\widebar{A}$ such that $\forall p_1, p_2 \in \widebar{B}$ with $p_1 \neq p_2$ the only common divisors of $p_1(p_1 + 1)(p_1 - 1)$ and $p_2(p_2 + 1)(p_2 - 1)$ are products of powers of $2$ and powers of $3$. A suitable set of primes $\Pi$ is a finite subset of $\widebar{B}$.
Assuming the infinitude of $\widebar{B}$ the authors of \cite{graph} therefore come to the conclusion that their upper bound on $V(G_\Pi)$ is optimal.
In this paper we will achieve some estimates about the sets $\widebar{A}$ and $\widebar{B}$ using a generalized form of the Hardy-Littlewood conjecture (\cref{thm:hlc}), which will be presented in \cref{sect:ana}. It is a special case of the generalized Hardy-Littlewood conjecture used in \cite{greentao}, except for the fact that we impose some additional hypotheses concerning the uniformity of the error term.
In fact, we will not only deal with the topic of infinitude but will also evaluate the density of those sets in the natural numbers. We also give some unconditional upper bound on this density based on the work of \cite{halb}. Therefore this paper can be seen as the number theoretical contribution to the work in \cite{graph}.
\section{Main results} \label{sect:main}
We first derive a formula for the density of primes in $\widebar{A}$ using the generalized Hardy--Littlewood conjecture \begin{thm} \label{thm:bound}
Assuming \cref{thm:hlc} we have
\begin{equation}
|\widebar{A}_{\leq x}| \sim \frac{\mathfrak{S}_{\!A}}{2} \frac{x}{(\log x)^3}~,
\end{equation} \end{thm} \noindent where $\mathfrak{S}_{\!A} = 9\prod_{p\geq 5}\left(1-\frac{3}{p}\right)\left(1-\frac{1}{p}\right)^{-3}\approx 5.71649719\text{\textellipsis}$~.
We also prove an unconditional upper bound, using the same value of $\mathfrak{S}_{\!A}$: \begin{thm}\label{thm:uncon}
In the situation of \cref{thm:bound} we have, without assuming any conjectures
\begin{equation*}
\limsup_{x \to \infty} \frac{\abs{\widebar{A}_{\leq x}}}{x / (\log x)^3} \leq 24 \mathfrak{S}_{\!A} ~.
\end{equation*}
\end{thm} Finally, we give an estimate on the density of the set $\widebar{B}$: \begin{thm} \label{thm:A0}
Assuming \cref{thm:bound} we have
\begin{equation*}
\abs{\widebar{B}_{\leq x}} \sim \abs{\widebar{A}_{\leq x}} \sim \frac{\mathfrak{S}_{\!A}}{2} \frac{x}{(\log x)^3} ~.
\end{equation*}
\end{thm}
\section{Elementary reductions} \label{sect:elem}
We will not deal with $\widebar{A}$ and $\widebar{B}$ directly here. Instead we define two slightly modified sets $A$ and $B$ and will prove in \cref{lem:same} that the theorems about $A$ and $B$ presented in \ref{sect:main} also hold for $\widebar{A}$ and $\widebar{B}$.
The important result of this section will be \cref{lem:irr}, which tells us that the Hardy--Littlewood conjecture is applicable to the sets $A^{\alpha,\beta,\gamma}$.
\begin{defi} \label{def:AB} Let \begin{align}
A = \{ p \text{ prime} : ~& p \equiv 2 \pmod{3}, \quad \exists \alpha, \beta, \gamma,q,r \in \mathbb{N}: \notag\\
&\quad p+1 = 2^\alpha 3^\beta q, \quad p-1 = 2^\gamma r,\quad q, r \text{ primes with } (6,qr)=1 \} \label{eq:Adef} ~. \end{align}
and set $B$ to be a maximal subset of $A$ such that $\forall p_1, p_2 \in B$ with $p_1 \neq p_2$ the only common divisors of $p_1(p_1 - 1)(p_1 + 1)$ and $p_2(p_2 - 1)(p_2 + 1)$ are products of powers of $2$ and powers of $3$.
For $\alpha,\beta,\gamma\in\mathbb{N}$, $x\in\mathbb{R}_{\geq 1}$ we set
\begin{align*} A_{\leq x}^{\alpha,\beta,\gamma}=\{p \in A_{\leq x} : \quad&\exists q,r\in\mathbb{N}:\\ &\quad p+1 = 2^\alpha 3^\beta q, \quad p-1 = 2^\gamma r,\quad q, r \text{ primes with } (6,qr)=1\} ~. \end{align*} \end{defi}
\begin{lem}\label{lem:exp}
If $A^{\alpha,\beta,\gamma}$ is not empty then
\begin{equation}
\beta\geq 1, \quad \min(\alpha,\gamma) = 1, \quad \max(\alpha,\gamma)\geq 2. \label{eq:ok}
\end{equation}
\end{lem} \begin{proof}
Let $p \in A^{\alpha,\beta,\gamma}$. By $p \equiv 2 \pmod{3}$ we get $3\mid p+1$, thus $\beta\geq 1$. Furthermore $p$ is odd (because $2\notin A$), so $p-1$ and $p+1$ are two consecutive even numbers. Therefore both are divisible by $2$, but exactly one of them is divisible by $4$. This yields the desired $\min(\alpha,\gamma) = 1$, $\max(\alpha,\gamma)\geq 2$. \end{proof} \cref{lem:exp} gives us two kinds of primes in $A$ which we will mostly consider separately. The calculations are similar in both cases $\alpha = 1$ and $\gamma = 1$, so we skip some of them for one of the cases.
The properties of the primes in $A$ give us some relations between $p$ and the corresponding big prime divisors $q$ of $p+1$ and $r$ of $p-1$, respectively. \begin{lem}\label{lem:gamma}
If $\gamma = 1$ and $p\in A$, then $p$ is of the form
\begin{equation*}
p=2^\alpha 3^\beta n - 1
\end{equation*}
for some $n\in \mathbb{N}_0$. The corresponding $q$ and $r$ in \cref{eq:Adef} are then given by
\begin{align*}
q&=n,\\
r&=2^{\alpha - 1}3^\beta n - 1 ~.
\end{align*} \end{lem} \begin{proof}
By $p+1=2^\alpha 3^\beta q$ we immediately get the desired $n$ by setting $n=q$. The given form for $r$ results from
\begin{equation*}
r = \frac{p-1}{2}=\frac{2^\alpha 3^\beta n -2}{2}=2^{\alpha - 1}3^\beta n - 1 ~.
\end{equation*} \end{proof} In the case $\gamma=1$, these linear functions will from now on be denoted by $p(n),q(n),r(n)$. \begin{lem}\label{lem:alpha}
Assume that $\beta,\gamma\in\mathbb{Z}$ satisfy $\beta\geq 1$, $\gamma\geq 2$. Then the system of congruences
\begin{align*}
s &\equiv -1 \pmod{3^\beta} ~,\\
s &\equiv 1 \pmod{2^\gamma}
\end{align*}
has a unique solution $k\in \mathbb{Z}$ with $0<k<3^\beta2^\gamma$.
If $\alpha = 1$ and $p\in A$, then $p$ is of the form
\begin{equation*}
p=3^\beta 2^\gamma n + k
\end{equation*}
for some $n\in \mathbb{N}_0$.
The corresponding $q$ and $r$ in \cref{eq:Adef} are then given by
\begin{align*}
q&=2^{\gamma-1} n + \frac{k+1}{2\cdot 3^\beta} ~,\\
r&=3^\beta n + \frac{k-1}{2^\gamma} ~.
\end{align*}
Furthermore, $\frac{k+1}{2\cdot 3^\beta}$ is an odd integer and $\frac{k-1}{2^\gamma}$ is an integer not divisible by $3$. \end{lem} \begin{proof}
We know $p = 2\cdot3^\beta q -1 =2^\gamma r+1$. Thus we get
\begin{align*}
p &\equiv -1 \pmod{2\cdot 3^\beta} ~,\\
p &\equiv 1 \pmod{2^\gamma} ~.
\end{align*}
But as $\gamma\geq 2$, $p \equiv 1 \pmod{2^\gamma}$ implies $p\equiv -1 \pmod{2}$. By the Chinese Remainder Theorem, $p\equiv -1 \pmod{2}$ and $p\equiv -1 \pmod{3^\beta}$ yield $p \equiv -1 \pmod{2\cdot 3^\beta}$, so we can simplify the first of the two given congruences for $p$ and replace it by $p\equiv -1 \pmod{3^\beta}$. Now the two moduli are coprime, so we can use the Chinese Remainder Theorem again. Therefore we get the desired unique $k$ and the given form of $p$.
The given forms for $q$ and $r$ result from:
\begin{align*}
q &=\frac{p+1}{2\cdot 3^\beta}=\frac{3^\beta 2^\gamma n + k+1}{2\cdot 3^\beta}=2^{\gamma-1} n + \frac{k+1}{2\cdot 3^\beta} ~,\\
r &=\frac{p-1}{2^\gamma}=\frac{3^\beta 2^\gamma n + k-1}{2^\gamma}=3^\beta n + \frac{k-1}{2^\gamma} ~.
\end{align*}
As $k$ fulfills
\begin{align*}
k &\equiv -1 \pmod{3^\beta} ~,\\
k &\equiv 1 \pmod{2^\gamma} ~,
\end{align*}
both of $\frac{k+1}{2\cdot 3^\beta}$ and $\frac{k-1}{2^\gamma}$ are integers. By \cref{lem:exp} we get $\beta\geq 1$ and $\gamma\geq 2$, thus $3\mid k+1$ and $4\mid k-1$. But this yields $3\nmid k-1$ and $4\nmid k+1$, so $\frac{k-1}{2^\gamma}$ can not be divisible by 3 and $\frac{k+1}{2\cdot 3^\beta}$ can not be divisible by 2. \end{proof} In the case $\alpha=1$, these linear functions will from now on be denoted by $p(n), q(n), r(n)$. \begin{lem}\label{lem:irr}
In the situation of both Lemma~\ref{lem:gamma} and \ref{lem:alpha} the linear functions $p(n),q(n),r(n)$ describing $p, q, r$ are irreducible and primitive polynomials (i.\,e.\ of the form $an+b$ with $(a,b)=1$). For all odd primes $\ell$, there is no $n\in\mathbb{N}_0$ such that more than one of $p(n),q(n),r(n)$ is divisible by $\ell$. \end{lem} \begin{proof}
Primitivity is obvious in the situation of \cref{lem:gamma} and follows immediately from the remarks on $\frac{k+1}{2\cdot 3^\beta}$ and $\frac{k-1}{2^\gamma}$ in the situation of \cref{lem:alpha}. Since they are linear, primitivity implies irreducibility.
By Lemmas~\ref{lem:gamma} and \ref{lem:alpha}, there exist $c_1,c_2\in \mathbb{N}_0$ such that $c_1q(n)-1=p(n)=c_2r(n)+1$. This means that if $\ell\mid p(n)$ and $\ell \mid q(n)$ or $\ell \mid r(n)$ then also $\ell \mid 1$ (because $p(n)$ and $q(n)$ or $p(n)$ and $r(n)$, respectively, have multiples which differ by 1) which is a contradiction. The same argument yields $(q(n),r(n))\leq 2$.
Thus there can not be any odd prime number dividing more than one of $p(n),q(n),r(n)$. In fact, we can even get $(q(n),r(n))=1$ as in both Lemmas~\ref{lem:gamma} and \ref{lem:alpha} one polynomial out of $q$ and $r$ has only odd values. \end{proof}
\section{Analytic estimates} \label{sect:ana}
In this section we will first present and give evidence for the generalized Hardy--Littlewood conjecture and then move on to prove Theorems~\ref{thm:bound}, \ref{thm:uncon} and \ref{thm:A0} for $A$ and $B$ instead of $\widebar{A}$ and $\widebar{B}$. Finally we will show in \cref{lem:same} that the estimates from these theorems also hold for $\widebar{A}$ and $\widebar{B}$.
We now want to estimate the density of $A$ in $\mathbb{N}$ using \cref{thm:hlc}. First we need to define a value $\mathfrak{S}$ appearing in the conjecture. \begin{defi} \label{def:SS} Let $R \in \mathbb{N}$ and $a_i, b_i \in \mathbb{Z}$ for $1 \leq i \leq R$ with $(a_i, b_i) = 1$. The \emph{Hardy--Littlewood singular series} $\mathfrak{S}$ corresponding to the $a_i, b_i$ is defined as
\begin{equation} \mathfrak{S} = \prod_{p\text{ prime}} \left(1-\frac{\rho(p)}{p}\right)\left(1-\frac{1}{p}\right)^{-R} \label{eq:sing} ~, \end{equation}
where we write $\rho(p)$ for the number of solutions of $\prod_{i=1}^{R}(a_in+b_i)=0$ for $n \in \mathbb{Z} / p\mathbb{Z}$. \end{defi}
\begin{conj}[Generalized Hardy--Littlewood conjecture] \label{thm:hlc}
Fix $R \in \mathbb{N}$ and $C > 0$. Let $a_i, b_i \in \mathbb{Z}$ with $(a_i, b_i) = 1$, $a_i>0$ for $1 \leq i \leq R$.
If the product for $\mathfrak{S}$ in \cref{eq:sing} converges
\footnote{In particular this implies $a_i \neq a_j$ or $b_i \neq b_j$ for $i \neq j$.}
, we conjecture that
\begin{equation*}
\abs{\set{n \leq x : \forall i\colon a_i n + b_i \mathrm{~prime}}} = \left( 1 + o_{R, C}(1) \right) \mathfrak{S} \frac{x}{ (\log x)^R}
+ o_{R, C} \! \left( \frac{x}{(\log x)^{R}} \right)
\end{equation*}
for $x$ satisfying $(\log x)^C \geq |a_i|, |b_i|$ for $1 \leq i \leq R$. \end{conj}
\begin{rem}
If $\mathfrak{S} = \OO[R, C]{1}$ holds in \cref{thm:hlc} (as will be our case), we can absorb the first error term in the second one. \end{rem}
This conjecture is a quantitative generalization of the well-known $k$-tuples conjecture (due to Hardy and Littlewood, \cite{hardylittlew}), which is precisely the special case $a_i = 1$.
\cref{thm:hlc} is in turn almost a special case ($d = 1$) of the generalized Hardy--Littlewood conjecture presented by Green and Tao in \cite{greentao}. The difference is that we expect a certain uniformity of the error term. Specifically we conjecture that the error term does not depend on the specific $a_i$ and $b_i$ as long as $(\log x)^A \geq |a_i|, |b_i|$. This is done in analogy to the Siegel--Walfisz theorem, which is (except differing error terms) equivalent to the $R = 1$ case of the conjecture.
It is also worthy to note that the conjecture is in perfect accordance with the heuristic \emph{Cramér-model} which asserts that the ``probability'' of a number $n$ being prime is $\prod_{p \leq n} \lfrac1{p} \sim \lfrac1{\log(n)}$. The singular series is then introduced as a correction factor accounting for the fact that the probability of different numbers with certain gaps being prime is not ``independent''.
\begin{lem}\label{lem:sing}
For all choices of $\alpha,\beta,\gamma$ satisfying the conditions (\ref{eq:ok}) the singular series $\mathfrak{S}_{\!A}$ corresponding to the linear functions $p(n),q(n),r(n)$ is the same. \end{lem} \begin{proof}
As $R=3$ for all these choices, we just need to show that $\rho(p)$ is independent from $\alpha,\beta,\gamma$ for all primes $p$.
For $p\in\{2,3\}$ we notice that for all choices of $\alpha,\beta,\gamma$ satisfying (\ref{eq:ok}), two of the three polynomials are never divisible by $p$, while the third one gives us exactly one solution of $\prod_{i=1}^{R}(a_in+b_i)=0$~modulo~$p$. Thus $\rho(2)=\rho(3)=1$.
For $p\geq 5$ we find $(p,a_i)=1$ for $i=1,2,3$ and all allowed choices of $\alpha,\beta,\gamma$. By Bezout's identity, each of the polynomials yields us exactly one solution of $\prod_{i=1}^{3}(a_in+b_i)=0$~modulo~$p$. But by \cref{lem:irr}, these have to be pairwise distinct. Thus $\rho(p)=3$. \end{proof}
This allows us to calculate the singular series
\begin{align*}
\mathfrak{S}_{\!A} &=\frac{1-\frac{1}{2}}{\left(1-\frac{1}{2}\right)^3}\cdot\frac{1-\frac{1}{3}}{\left(1-\frac{1}{3}\right)^3} \prod_{p\geq 5\text{ prime}}\left(1-\frac{3}{p}\right)\left(1-\frac{1}{p}\right)^{-3} \\
&=9\prod_{p\geq 5\text{ prime}}\left(1-\frac{3}{p}\right)\left(1-\frac{1}{p}\right)^{-3}\approx 5.71649719\text{\textellipsis} ~. \end{align*}
The convergence of the product is guaranteed because the sum over $\left( 1 - \lfrac3{p} \right) ( 1 - \lfrac1{p})^{-3} - 1 = (1 - 3p)(p-1)^{-3}$ converges.
We first prove the statement analogous to \cref{thm:bound} \begin{thm} \label{thm:bound_}
Assuming \cref{thm:hlc} we have
\begin{equation}
|A_{\leq x}| \sim \frac{\mathfrak{S}_{\!A}}{2} \frac{x}{(\log x)^3}~,
\end{equation} \end{thm} \begin{proof} By \cref{lem:exp}, we have \[
|A_{\leq x}| = \sum_{\beta \geq 1, \gamma \geq 2} A_{\leq x}^{1, \beta, \gamma} + \sum_{\alpha \geq 2, \beta \geq 1} A_{\leq x}^{\alpha, \beta, 1} ~. \] We start with the sum for $\alpha = 1$. In this case all values of $p$ are of the form $p(n) = 3^\beta 2^\gamma n + k$ with $n \in \mathbb{N}_0$ and $0 \leq k < 3^\beta2^\gamma$. For $p \in A$ we must also have $q(n) = 2^{\gamma-1} n + \frac{k+1}{2 \cdot 3^\beta}, r(n) = 3^\beta n + \frac{k-1}{2^\gamma}$ prime.
We set $y = (\log x)^6$ and split the sum into two parts (The lower bound for $\beta$ and $\gamma$ is implicit in all sums):
\begin{equation}
\sum_{\beta \geq 1, \gamma \geq 2} A_{\leq x}^{1, \beta, \gamma} = \sum_{3^\beta 2^\gamma \leq y} A_{\leq x}^{1, \beta, \gamma}
+ \sum_{y < 3^\beta 2^\gamma \leq x} A_{\leq x}^{1, \beta, \gamma} \label{eq:part} \end{equation}
We bound the second sum trivially by
\begin{align*}
\sum_{y < 3^\beta 2^\gamma \leq x} A_{\leq x}^{1, \beta, \gamma} &=
\sum_{y < 3^\beta 2^\gamma \leq x} \left|\left\{0 \leq n \leq \frac{x-k}{3^\beta 2^\gamma} : p(n), q(n), r(n) \text{ prime} \right\}\right| \\
&\leq \sum_{y < 3^\beta 2^\gamma \leq x} \left( \frac{x-k}{3^\beta 2^\gamma} + 1 \right)
\leq \log_2(x) \log_3(x) \left( \frac{x}{y} + 1 \right)\\
&\ll \frac{x}{(\log x)^4} ~. \end{align*}
For the first sum in \cref{eq:part} we have
\begin{align*}
3^\beta 2^\gamma \leq (\log x)^6
& \leq \left( \log\!\left(x - (\log x)^6 \right) - 6 \log\log x \right)^7 \\
&\leq \left(\log \frac{x-k}{3^\beta 2^\gamma} \right)^7 \end{align*}
for $x$ large enough, allowing us (together with \cref{lem:irr}) to invoke the generalized Hardy--Littlewood-Conjecture (\cref{thm:hlc}) for $C = 7$:
\begin{align}
\sum_{3^\beta 2^\gamma \leq y} A_{\leq x}^{1, \beta, \gamma} &=
\sum_{3^\beta 2^\gamma \leq y} \left(
\mathfrak{S}_{\!A} \frac{x-k}{3^\beta 2^\gamma}\frac1{\left( \log\frac{x-k}{3^\beta 2^\gamma} \right)^3}
+ o \left( \frac{x-k}{3^\beta 2^\gamma} \frac1{\left( \log\frac{x-k}{3^\beta 2^\gamma} \right)^3} \right)
\right)
\label{eq:invoke}\\
&= \mathfrak{S}_{\!A} \frac{x}{(\log x)^3}\sum_{3^\beta 2^\gamma \leq y} \frac1{3^\beta 2^\gamma} + o \left( \frac{x}{(\log x)^3} \right) \notag \end{align}
For the summation of the main term we used
\begin{equation*} \frac{x}{(\log x)^3} \sim \frac{x}{\left( \log(x - (\log x)^6) - 6 \log\log x \right)^3}
\geq \frac{x - k}{\left( \log \frac{x-k}{3^\beta 2^\gamma}\right)^3}
\geq \frac{x}{(\log x)^3} - (\log x)^6
\sim \frac{x}{(\log x)^3} ~. \end{equation*}
We can sum all the error terms because the implied constants are absolute (i.\,e.\ independent of $\beta$ and $\gamma$) and furthermore \[
o\!\left(\frac{x-k}{\left( \log\frac{x-k}{3^\beta 2^\gamma} \right)^3} \right) = o\!\left(\frac{x}{( \log x )^3} \right) ~. \]
Note that $\sum_{\beta \geq 1, \gamma \geq 2} 3^{-\beta} 2^{-\gamma} = \frac14 < \infty$.
It is obvious that the same argument (replacing $\gamma$ by $\alpha$) also works for $\sum_{\alpha \geq 2, \beta \geq 1} A_{\leq x}^{\alpha, \beta, 1}$ (since the polynomial for $p$ has leading coefficient $2^\alpha 3^\beta$ instead of $3^\beta 2^\gamma$). Putting all together, we get
\begin{equation}
|A_{\leq x}| = 2 \mathfrak{S}_{\!A} \frac{x}{(\log x)^3} \sum_{\substack{\beta \geq 1, \sigma \geq 2\\3^\beta 2^\sigma \leq y}}\frac1{3^\beta 2^\sigma} +
o\!\left(\frac{x}{(\log x)^3} \right) \label{eq:almost} ~. \end{equation}
Taking a closer look at the sum in the last equation, we notice that
\begin{align*}
\frac14 \geq \sum_{3^\beta 2^\sigma \leq y} \frac1{2^\sigma 3^\beta}
&\geq \sum_{3^\beta,\,2^\sigma \leq \sqrt{y}} \frac1{2^\sigma 3^\beta} \\
&= \left( \sum_{\beta \geq 1} \frac1{3^\beta} + \OO{\frac1{\sqrt{y}}}\right) \left( \sum_{\sigma \geq 2} \frac1{2^\sigma} + \OO{\frac1{\sqrt{y}}}\right)\\
&= \frac14 + \OO{\frac1{(\log x)^{5/2}}} ~, \end{align*}
which yield the claim when plugged into \cref{eq:almost}. \end{proof}
Halberstam and Richert \cite{halb} unconditionally prove an upper bound for the quantity of interest in \cref{thm:hlc}:
\begin{thm}[\cite{halb}, Theorem 5.7] \label{thm:halb} In the situation of \cref{thm:hlc} (although we no longer need that $(a_i, b_i) = 1$) set
\begin{equation*} E = \prod_{i=1}^{R} a_i \cdot \prod_{1\leq r < s \leq R} (a_r b_s - a_s b_r) ~. \end{equation*} If $E \neq 0$ we have
\begin{equation}
\abs{\set{n \leq x : \forall i\colon a_i n + b_i \mathrm{~prime}}} \leq 2^R R! \mathfrak{S} \frac{x}{(\log x)^R}
+ \OO[R]{x\frac{ \log\log 3x + \log\log 3|E|}{(\log x)^{R+1}}} \label{eq:halb} ~, \end{equation}
where $\mathfrak{S}$ is the singular series given in \cref{def:SS}. \end{thm} \begin{rem}
Note that if Inequality \ref{eq:halb} did not have the additional constant $2^R R!$ and were an equality this would imply \cref{thm:hlc}. This is due to the fact that the $\log\log 3\abs{E}$ is negligible in the error term given the hypotheses of the conjecture (we could even replace $\log\log 3|E|$ by the logarithm of some polynomial in $a_i, b_i$), so basically the error term of \cref{thm:halb} would be ``good enough'' for \cref{thm:hlc}. Since both the theorem and the conjecture deal with the same quantity, this provides some more evidence in favor of the error term in \cref{thm:hlc}. \end{rem}
This allows us to prove the unconditional version of \cref{thm:bound_} with a weakened upper bound which is the result analogous to \cref{thm:uncon}.
\begin{thm}\label{thm:uncon_}
In the situation of \cref{thm:bound_} we have, without assuming any conjectures
\begin{equation*}
\limsup_{x \to \infty} \frac{\abs{A_{\leq x}}}{x / (\log x)^3} \leq 24 \mathfrak{S}_{\!A} ~.
\end{equation*}
\end{thm}
\begin{proof} We only have to modify how we proceed at \cref{eq:invoke} in the proof of \cref{thm:bound}. Using the shorthand $\xi = \frac{x - k}{3^\beta 2^\gamma}$ we get from \cref{thm:halb} that \begin{equation*}
\sum_{3^\beta 2^\gamma \leq y} A_{\leq x}^{1, \beta, \gamma} \leq
\sum_{3^\beta 2^\gamma \leq y} \left(
48 \mathfrak{S}_{\!A} \frac{x-k}{3^\beta 2^\gamma} \frac1{(\log \xi)^3}
+ \frac1{3^\beta 2^\gamma} \OO[R]{(x-k) \frac{\log\log 3 \xi + \log\log 3|E|}{(\log \xi)^4 }}
\right) ~. \end{equation*} From the definition of $E$ it follows that $\abs{E} \leq 8 (3^\beta 2^\gamma)^9 \leq 8 y^9 \ll x$, so we can neglect it in the error term. And since we have $\lfrac{(\log\log \xi)}{(\log \xi)^4 } \ll \lfrac{(\log\log x)}{(\log x)^4 } $ for $3^\beta 2^\gamma \leq y$, we get \begin{equation*}
\sum_{3^\beta 2^\gamma \leq y} A_{\leq x}^{1, \beta, \gamma} \leq
48 \mathfrak{S}_{\!A} \frac{x}{(\log x)^3} \sum_{3^\beta 2^\gamma \leq y} \frac1{3^\beta 2^\gamma} + \OO{\frac{\log\log x}{(\log x)^4}} ~. \end{equation*} Thus we have the same result as before, just with $48\mathfrak{S}_{\!A}$ instead of $\mathfrak{S}_{\!A}$ and an (asymptotic) inequality instead of an (asymptotic) equality (and with a better error term). Proceeding as in \cref{thm:bound} therefore gives us the desired result. \end{proof}
We now move on and consider the density of the set $B$, proving a result analogous to \cref{thm:A0}.
\begin{thm} \label{thm:A0_}
Assuming \cref{thm:bound_} we have
\begin{equation*}
\abs{B_{\leq x}} \sim \abs{A_{\leq x}} \sim \frac{\mathfrak{S}_{\!A}}{2} \frac{x}{(\log x)^3} ~.
\end{equation*}
\end{thm}
\begin{rem}
Our proof shows that $|B_{\leq x}| \sim |A_{\leq x}|$ as long as $|A_{\leq x}| \sim K x / (\log x)^3$, even if $K$ is not the constant predicted by \cref{thm:hlc}. \end{rem}
\begin{proof} We show that $\abs{(A \setminus B)_{\leq x}} = o\!\left( \lfrac{x}{(\log x)^3} \right)$. For $p, p' \in A$ let $q, r, q', r'$ denote the corresponding primes from the definition of $A$ in \cref{eq:Adef}. Setting
\begin{equation}
S = \set{p \in A : \exists p' \in A_{> p},~~ r = r' \vee q = r' \vee p = r' \vee r = q' \vee q = q' \vee p = q'} \label{eq:S} ~, \end{equation}
we are guaranteed to have $A \setminus B \subseteq S$ (we only have to look at these six cases in \cref{eq:S} because $p' > p > r, q$).
In order to bound the size of $S$ we take a look at
\begin{align*}
\preS{1} &= \{p \in A: \exists \alpha', \beta' \in \mathbb{N} : p' = 2^{\alpha'} 3^{\beta'} r - 1 \text{ prime}\} \\
\preS{2} &= \{p \in A: \exists \alpha', \beta' \in \mathbb{N} : p' = 2^{\alpha'} 3^{\beta'} q - 1 \text{ prime}\} \\
\preS{3} &= \{p \in A: \exists \alpha', \beta' \in \mathbb{N} : p' = 2^{\alpha'} 3^{\beta'} p - 1 \text{ prime}\} \\
\preS{4} &= \{p \in A: \exists \gamma' \in \mathbb{N} : p' = 2^{\gamma'} r + 1 \text{ prime}\} \\
\preS{5} &= \{p \in A: \exists \gamma' \in \mathbb{N} : p' = 2^{\gamma'} q + 1 \text{ prime}\} \\
\preS{6} &= \{p \in A: \exists \gamma' \in \mathbb{N} : p' = 2^{\gamma'} p + 1 \text{ prime}\} ~. \end{align*}
Again, we allow superscripts like $\preS{1}^{\alpha, \beta, \gamma; \alpha', \beta'}$ or $\preS{4}^{\alpha, \beta, \gamma; \gamma'}$ to let $p$ range over $A^{\alpha, \beta, \gamma}$ and to specify choice of $\alpha', \beta'$ or a choice of $\gamma'$ in \cref{eq:S}. For conciseness we may write this as $\preS{i}^{\bm{\mu}}$, having set $\bm{\mu} = (\alpha, \beta, \gamma; \alpha', \beta')$ or respectively $\bm{\mu} = (\alpha, \beta, \gamma; \gamma')$.
Obviously we have the bound $\abs{S_{\leq x}} \leq \sum_{i=1}^{6} \abs{\preS{i}_{\leq x}}$.
For $i = 1, \dots, 6$ we now want to look at $\preS{i}$. In \cref{lem:gamma} and \cref{lem:alpha}, for fixed $\alpha, \beta, \gamma$ satisfying the conditions (\ref{eq:ok}), we expressed $p, q, r$ as linear integer polynomials in a variable $n$. Therefore $p'$ can also be expressed as a linear integer polynomial $f$ (whose coefficients depend on $\alpha, \beta, \gamma$ and, depending on $i$, on $\alpha', \beta'$ or on $\gamma'$) in $n$. One easily sees that the leading coefficient of $f$ only has prime factors $2$ and $3$ and that its constant coefficient is lesser than the leading one.
Now fix $\alpha, \beta, \gamma$ such that they satisfy the conditions (\ref{eq:ok}) and also fix $(\alpha', \beta')$ or $\gamma'$ and denote the corresponding tuple by $\bm{\mu}$. We set
\begin{equation*}
T = \prod_{\substack{\{g, h\} \subset \{p, q, r, f\} \\ g \neq h}} \res(g, h) ~, \end{equation*}
where $\res$ denotes the resultant of two polynomials. Since no two of the polynomials $p, q, r$ are equal up to sign, $T$ is zero if and only if any of $\res(f, p), \res(f, q)$ or $\res(f, r)$ is zero. This is equivalent to $f$ being an integer multiple of $p, q$ or $r$. Therefore $T = 0$ implies $\preS{i}^{\bm{\mu}} = \emptyset$, since for any $n \in \mathbb{N}_0$ either $p' = f(n)$ is not strictly larger than $p(n)$ or is composite.
Now assume $T \neq 0$. In order to apply \cref{thm:halb} to $\preS{i}$ we set $R = 4$ and look at the linear functions $\{p, q, r, f\}$. If for a prime $\ell \geq 5$ we have $\ell \nmid T$ then $p, q, r, f$ have 4 distinct zeros modulo $\ell$ (because all leading coefficients are coprime to $\ell$), so $\rho_{\{p, q, r, f\}}(\ell) = 4$. Together with the inequality $1 - R/p \leq (1 - 1/p)^R$ this allows us to give an estimate for the corresponding singular series:
\begin{align*}
\mathfrak{S}_{\{p, q, r, f\}} &= \prod_{\ell \text{ prime}} \frac{1 - \rho_{\{p, q, r, f\}}(\ell)/\ell}{(1 - 1/\ell)^4} \\
&\leq \frac1{(1 - 1/2)^4} \frac1{(1 - 1/3)^4} \prod_{\substack{\ell \text{ prime} \\ \ell \nmid T}} \frac{1 - 4/\ell}{(1 - 1/\ell)^4}
\prod_{\substack{\ell \text{ prime} \\ \ell \mid T}} \frac{1 - 1/\ell}{(1 - 1/\ell)^4} \\
&\ll \left(\frac{T}{\varphi(T)} \right)^3 \ll (\log\log T)^3 ~, \end{align*}
where $\varphi$ denotes the Euler totient function and we use the estimate $\varphi(n) \gg \lfrac{n}{\log\log n}$ from \cite[Theorem 5.6]{tenenbaum}.
In a similar fashion to the proof of \cref{thm:bound} we now proceed to sum over all exponents $\alpha, \beta, \gamma$ satisfying (\ref{eq:ok}) and over $\alpha', \beta'$ (for $i=1, 2, 3$) or over $\gamma'$ (for $i=4, 5, 6$). We denote by $M$ the largest (leading) coefficient in the polynomials $p, q, r, f$ and by $k$ the constant coefficient of the according polynomial ($M$ is supposed to depend on the exponents $\alpha, \beta, \dots$ over which we are going to sum).
We have the bounds $M \geq 2^\alpha 3^\beta$ or $M \geq 3^\beta 2^\gamma$, depending on which case we have in (\ref{eq:ok}) and $M \geq 2^{\alpha'} 3^{\beta'}$ or $M \geq 2^{\gamma'}$, depending on $i$.
For a given $x$ we set $y = (\log x)^{8}$ and write $\sum^*$ for a sum over all $\alpha, \beta, \gamma$ satisfying (\ref{eq:ok}) and over $\alpha', \beta'$ or $\gamma'$. We have
\begin{equation*} \sums{} \preS{i}^{\bm{\mu}}_{\leq x} = \sums{M \leq y} \preS{i}^{\bm{\mu}}_{\leq x} +
\sums{y < M \leq x} \preS{i}^{\bm{\mu}}_{\leq x} ~. \end{equation*}
Both sums range over either $(\alpha, \beta)$ or $(\beta, \gamma)$ as well as over $(\alpha', \beta')$ or over $\gamma'$. Using our bounds for $M$ we can therefore deduce that the second sum has at most $(\log_2 x)^2 (\log_3 x)^2 \ll (\log x)^4$ summands while the first sum has at most $(\log_2 y)^2 (\log_3 y)^2 \ll (\log \log x)^4$.
Again, we begin with the second sum:
\begin{equation*} \sums{y < M \leq x} \preS{i}^{\bm{\mu}}_{\leq x} \leq \sums{y < M \leq x} \left( \frac{x}{M} + 1 \right)
\leq (\log x)^4 \left( \frac{x}{y} + 1 \right) \ll \frac{x}{(\log x)^4} \end{equation*}
In the first sum we can bound the summands with $T = 0$ by any positive number. With this in mind we apply \cref{thm:halb} \footnote{$E = 0 \iff T = 0$, hence we only need to apply the theorem in the cases where $E \neq 0$} to get
\begin{equation*}
\sums{M \leq y} \preS{i}^{\bm{\mu}}_{\leq x}
\leq \sums{M \leq y} \left( 2^4 4! \mathfrak{S}_{\{p, q, r, f\}} \frac{x}{M (\log\xi)^4}
+ \OO{\xi \frac{\log\log(3\xi) + \log\log(3 \abs{E})}{(\log \xi)^5}}
\right) ~. \end{equation*}
Since $\abs{E} \leq M^4 \left(2 M^2 \right)^6 \ll (\log x)^{128}$ we can neglect its contribution to the error term. As $\xi \leq x$ and there are only $\OO{(\log \log x)^4}$ summands, the sum of all error terms is $\OO{x/(\log x)^4}$. Using the bound for the singular series together with $T \leq \left( 2M^2 \right)^{6} \ll x$ on the main terms therefore gives us
\begin{equation*}
\sums{M \leq y} \preS{i}^{\bm{\mu}}_{\leq x} \ll \frac{x (\log\log x)^3}{(\log x)^4} \sums{M \leq y} 1 + \OO{\frac{x}{(\log x)^4}}
\ll \frac{x (\log\log x)^7 }{(\log x)^4} ~. \end{equation*}
Here again we have $\log \xi \sim \log x$ because of
\begin{equation*} x \geq \xi \geq \left( x - (\log x)^8 \right)/(\log x)^8 \sim x ~. \end{equation*}
Having a bound of $o\!\left( x/(\log x)^3 \right)$ for every $\preS{i}_{\leq x}$ separately, we get such a bound for $S_{\leq x}$, which proves the claim. \end{proof}
As announced in \cref{sect:main} we finally show that all our main results can be applied to $\widebar{A}$ and $\widebar{B}$ \begin{lem} \label{lem:same}
The asymptotic estimates from \cref{thm:bound_}, \cref{thm:uncon_} and \cref{thm:A0_} also hold for $\widebar{A}$ and $\widebar{B}$, respectively.
This proves Theorems~\ref{thm:bound}, \ref{thm:uncon} and \ref{thm:A0}. \end{lem}
\begin{proof} First note that the maximality of $B$ implies $\widebar{B} \setminus B \subset \widebar{A} \setminus A$. We now prove the claim by showing that
$| (\widebar{A} \setminus A)_{\leq x} | = o\!\left( \lfrac{x}{(\log x)^3} \right)$. Since all our density estimates were of the order $x/(\log x)^3$ this is enough to prove our claim.
According to the definition of $\widebar{A}$ in \cref{eq:AAdef}, to every $p \in \widebar{A} \setminus A$ correspond two primes $p, r$ and two exponents $\mu, \nu \in \mathbb{N}$. The number of $p \in (\widebar{A} \setminus A)_{\leq x}$ with $\mu \geq 2$ is bounded by $\sqrt{x} \log_2(x) \log_3(x)$ while the number of $p$ with $\nu \geq 2$ is bounded by $\sqrt{x} \log_2(x)$. So $ | (\widebar{A} \setminus A)_{\leq x} | = \OO{\sqrt{x} (\log x)^2}$, which yields the claim.
\end{proof}
\appendix \section{Empirical data}
We calculated $A_{\leq x}$ for $x = 25 \cdot\!10^{12}$. Some results are displayed in \cref{tb:emp}. In addition to $x/(\log x)^3$ we also compare $|A_{\leq x}|$ to $\operatorname{Li}_3 (x) = \int_{2}^{x} \frac{\mathrm{d}t}{(\log t)^3}$, which is asymptotically equivalent to $x/(\log x)^3$, but expected to produce better approximations for ``small'' numbers.
\begin{table}[h] \begin{tabular}{rrrc}
\multicolumn{1}{c}{$x$} & \multicolumn{1}{c}{$| A_{\leq x} |$} & $\frac{| A_{\leq x} |}{x / (\log x)^3}$ &
$\frac{| A_{\leq x} |}{\operatorname{Li}_3 (x)}$\\ \hline
$10^{4\phantom{0}}$ & $114$ & $334.70$ & $4.70$ \\
$10^{6\phantom{0}}$ & $2192$ & $64.40$ & $4.33$ \\
$10^{8\phantom{0}}$ & $74531$ & $21.90$ & $3.84$ \\
$10^{10}$ & $3393108$ & $9.96$ & $3.57$ \\
$10^{12}$ & $183047288$ & $5.37$ & $3.42$ \\
$25 \cdot\! 10^{12}$ & $3174617502$ & $3.73$ & $3.35$ \end{tabular}
\caption{Values of $A_{\leq x}$ compared to the predicted approximations.} \label{tb:emp} \end{table} It seems plausible that the limit of the ratios is $\mathfrak{S}_{\!A}/2 \approx 2.86$. However, due to the slow speed of convergence, this data alone does not allow us to make any useful predictions about the limit of any of the ratios.
\end{document} | arXiv | {
"id": "1810.08679.tex",
"language_detection_score": 0.6358082294464111,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Scheduling in the Secretary Model\footnote{Work supported by the European Research Council, Grant Agreement No. 691672, project APEG.}}
\begin{abstract} This paper studies Makespan Minimization in the random-order model. Formally, jobs, specified by their processing times, are presented in a uniformly random order. An online algorithm has to assign each job permanently and irrevocably to one of~$m$ parallel and identical machines such that the expected time it takes to process them all, the makespan, is minimized.
We give two deterministic algorithms. First, a straightforward adaptation of the semi-online strategy $\mathrm{LightLoad}$~\cite{albers_semi-online_2012} provides a very simple algorithm retaining its competitive ratio of $1.75$. A new and sophisticated algorithm is $1.535$-competitive. These competitive ratios are not only obtained in expectation but, in fact, for all but a very tiny fraction of job orders.
Classically, online makespan minimization only considers the worst-case order. Here, no competitive ratio below $1.885$ for deterministic algorithms and $1.581$ using randomization is possible. The best randomized algorithm so far is $1.916$-competitive. Our results show that classical worst-case orders are quite rare and pessimistic for many applications. They also demonstrate the power of randomization when compared to much stronger deterministic reordering models.
We complement our results by providing first lower bounds. A competitive ratio obtained on nearly all possible job orders must be at least $1.257$. This implies a lower bound of~$1.043$ for both deterministic and randomized algorithms in the general model. \end{abstract}
\section{Introduction} We study one of the most basic scheduling problems, the classic problem of makespan minimization. For the classic makespan minimization problem one is given an input set $\ensuremath{\mathcal{J}}\xspace$ of $n$ jobs, which have to be scheduled onto $m$ identical and parallel machines. Preemption is not allowed. Each job $J\in\ensuremath{\mathcal{J}}\xspace$ runs on precisely one machine. The goal is to find a schedule minimizing the \emph{makespan}, i.e.\ the last completion time of a job. This problem admits a long line of research and countless practical applications in both its offline variant, see e.g.\ \cite{graham_bounds_1966, hochbaum_using_1987} and references therein, and the online setting studied in this paper.
In the online setting jobs are revealed one by one and each has to be scheduled by an online algorithm $A$ immediately and irrevocably without knowing the sizes of future jobs. The makespan of online algorithm $A$, denoted by $A(\ensuremath{\mathcal{J}}\xspace^\sigma)$, may depend on both the job set $\ensuremath{\mathcal{J}}\xspace$ and the job order~$\sigma$. The optimum makespan ${\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$ only depends on the former. Traditionally, one measures the performance of $A$ in terms of competitive analysis. The input set $\ensuremath{\mathcal{J}}\xspace$ as well as the job order~$\sigma$ are chosen by an adversary whose goal is to maximize the ratio $\frac{A(\ensuremath{\mathcal{J}}\xspace^\sigma)}{{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)}$. The maximum ratio, $c=\sup_{\ensuremath{\mathcal{J}}\xspace,\sigma} \frac{A(\ensuremath{\mathcal{J}}\xspace^\sigma)}{{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)}$, is the \emph{(adversarial) competitive ratio}. The goal is to find online algorithms obtaining small competitive ratios.
In the classical secretary problem the goal is to hire the best secretary out of a linearly ordered set $S$ of candidates. Its size $n$ is known. Secretaries appear one by one in a uniformly random order. An online algorithm can only compare secretaries it has seen so far. It has to decide irrevocably for each new arrival whether this is the single one it wants to hire. Once a candidate is hired, future ones are automatically rejected even if they are better. The algorithm fails unless it picks the best secretary. Similar to makespan minimization this problem has been long studied, see \cite{ dynkin_optimum_1963, feldman_simple_2014, ferguson_who_1989, kaplan_competitive_2020, kleinberg_multiple-choice_2005, lachish_o_2014, lindley_dynamic_1961} and references therein.
This paper studies a makespan minimization under the input model of the secretary problem. The adversary determines a job set of known size $n$. Similar to the secretary problem, these jobs are presented to an online algorithm $A$ one by one in a uniformly random order. Again, $A$ has to schedule each job without knowledge of the future. The expected makespan is considered. The \emph{competitive ratio in the secretary (or random-order) model} $c=\sup_{\ensuremath{\mathcal{J}}\xspace} \ensuremath{\mathbf{E}}\xspace_\sigma\left[ \frac{A(\ensuremath{\mathcal{J}}\xspace^\sigma)}{{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)}\right]$ is the maximum ratio between the expected makespan of~$A$ and the optimum makespan. The goal is again to obtain small competitive ratios.
We propose the term \emph{secretary model}, first used in \cite{molinaro_online_2017}, to set this model apart from the model studied by the same authors in~\cite{albers_scheduling_2020} where~$n$, the number of jobs, is not known in advance. Not knowing $n$ is quite restrictive and has never been considered in any other work on scheduling with random-order arrival~\cite{gobel2015online, molinaro_online_2017, osborn_lists_2008}. For the adversarial model such information is useless.
Similar frameworks received a lot of recent attention in the research community sparking the area of random-order analysis. Random-order analysis has been successfully applied to numerous problems such as matching \cite{goel_online_2008, karande_online_2011, karp_optimal_1990, mahdian_online_2011}, various generalizations of the secretary problem \cite{babaioff_matroid_2018, feldman_simple_2014, ferguson_who_1989, kaplan_competitive_2020, kleinberg_multiple-choice_2005, lachish_o_2014}, knapsack problems \cite{babaioff_knapsack_2007}, bin packing \cite{kenyon_best-fit_1996}, facility location \cite{meyerson_online_2001}, packing LPs \cite{kesselheim_primal_2014}, convex optimization \cite{gupta_maximizing_2018}, welfare maximization \cite{korula_online_2018}, budgeted allocation \cite{mirrokni_simultaneous_2012} and recently scheduling \cite{albers_scheduling_2020, gobel2015online, molinaro_online_2017, osborn_lists_2008}.
For makespan minimization the role of randomization is poorly understood. The lower bound of $1.581$ from~\cite{chen_lower_1994,sgall_lower_1997} is considered pessimistic and exhibits quite a big gap towards the best randomized ratio of $1.916$ from~\cite{albers_randomized_2002}.
The upper bound of $1.535$ in this paper demonstrates surprising power when it comes to randomization in the input order. The power of reordering has been studied by Englert et~al.~\cite{englert_power_2008}. Their lower bound considers online algorithms, which are able to look-ahead and rearrange almost all of the input sequence in advance. Their only disadvantage is that such rearrangement is deterministic. Englert et~al.\ show that these algorithms cannot be better than $1.466$-competitive for general~$m$. This is quite close to our upper bound of $1.535$, given that the algorithm involved has neither look-ahead nor control over the arrangement of the sequence.
A main consequence of the paper is that random-order arrival allows to beat the lower bound of $1.581$ for randomized adversarial algorithms. This formally sets this model apart from the classical adversarial setting even if randomization is involved.
\subparagraph*{Previous work:} Online makespan minimization and variants of the secretary problem have been studied extensively. We only review results most relevant to this work beginning with the traditional adversarial setting. For $m$ identical machines, Graham \cite{graham_bounds_1966} showed in 1966 that the greedy strategy, which schedules each job onto a least loaded machine, is $\left(2-\frac{1}{m}\right)$-competitive. This was subsequently improved in a long line of research \cite{galambos_-line_1993, bartal_new_1992, karger_better_1996, albers_better_1999} leading to the currently best competitive ratio by Fleischer and Wahl \cite{fleischer_-line_2000}, which approaches $1.9201$ for $m\rightarrow\infty$. Chen et al.\ \cite{chen_approximating_2015} presented an algorithm whose competitive ratio is at most $(1+\varepsilon)$-times the optimum one, though the actual ratio remains to be determined. For general~$m$, lower bounds are provided in \cite{faigle_performance_1989, bartal_better_1994, gormley_generating_2000, rudin_improved_2001}. The currently best bound is due to Rudin III \cite{rudin_improved_2001} who shows that no deterministic online algorithm can be better than $1.88$-competitive.
The role of randomization in this model is not well understood. The currently best randomized ratio of $1.916$ \cite{albers_randomized_2002} barely beats deterministic guarantees. In contrast, the best lower bound approaches $\frac{e}{e-1}> 1.581$ for $m\rightarrow\infty$ \cite{chen_lower_1994,sgall_lower_1997}. There has been considerable research interest in tightening the~gap.
Recent results for makespan minimization consider variants where the online algorithm obtains extra resources. There is the semi-online setting where additional information on the job sequence is given in advance, like the optimum makespan \cite{kellerer_efficient_2013} or the total processing time of jobs \cite{albers_semi-online_2012, cheng_semi--line_2005, kellerer_semi_1997,kellerer2015efficient}. In the former model the optimum competitive ratio lies in the interval $[1.333,1.5]$, see~\cite{kellerer_efficient_2013}, while for the latter the optimum competitive ratio is known to be~$1.585$~\cite{albers_semi-online_2012,kellerer2015efficient}. Taking this further, the advice complexity setting allows the algorithm to receive a certain number of advice bits from an offline oracle \cite{albers_online_2017, dohrau_online_2015, kellerer_semi_1997}.
The work of Englert~et.~al.~\cite{englert_power_2008} is particularly relevant as they, too, study the power of reordering. Their algorithm has a buffer, which can reorder the sequence 'on the fly'. They prove that a buffer size linear in $m$ suffices to be $1.466$-competitive. Their lower bound shows that this result cannot be improved for any sensible buffer size.\footnote{A buffer size of $n$ would not be sensible since it reverts to the offline problem, which admits a PTAS~\cite{hochbaum_using_1987}. Their lower bound holds for any buffer size $b(n)$, depending on the input size $n$, if $n-b(n)$ is unbounded. Such a buffer can already hold almost all, say any fraction, of the input sequence.}
The secretary problem is even older than scheduling \cite{ferguson_who_1989}. Since the literature is vast, we only summarize the work most relevant to this paper. Lindley \cite{lindley_dynamic_1961} and Dynkin \cite{dynkin_optimum_1963} first show that the optimum strategy finds the best secretary with probability $1/e$ for $n\rightarrow\infty$. Recent research focusses on many variants, among others generalizations to several secretaries \cite{albers2020new, kleinberg_multiple-choice_2005} or even matroids~\cite{babaioff_matroid_2018, feldman_simple_2014, lachish_o_2014}. A modern version considers adversarial orders but allows prior sampling \cite{correa2021secretary, kaplan_competitive_2020}. Related models are prophet inequalities and the game of googol \cite{correa2020two,correa2019prophet}.
So far, little is known for scheduling in the secretary model. Osborn and Torng \cite{osborn_lists_2008} prove that Graham's greedy strategy is still not better than $2$-competitive for~$m\rightarrow\infty$. In \cite{albers_scheduling_2020} we studied the very restricted variant where~$n$, the number of jobs, is not known in advance and provide a $1.8478$-competitive algorithm and first lower bounds. Here, most common techniques, e.g.\ sampling, do not work. We are the only ones who ever considered this restriction. Molinaro \cite{molinaro_online_2017} studies a very general scheduling problem. His algorithm has expected makespan $(1+\varepsilon){\mathrm{OPT}}+O(\log(m)/\varepsilon)$, but its random-order competitive ratio is not further analyzed. G\"obel~et~al.~\cite{gobel2015online} study a scheduling problem on a single machine where the goal is to minimize weighted completion times. Their competitive ratio is $O(\log(n))$ whereas they show that the adversarial model allows no sublinear competitive ratio.
\subparagraph*{Our contribution:} We study makespan minimization for the secretary (or random-order) model in depth. We show that basic sampling ideas allow us to adapt a fairly simple algorithm from the literature \cite{albers_semi-online_2012} to be $1.75$-competitive. A more sophisticated algorithm vastly improves this competitive ratio to $1.535$. This beats all lower bounds for adversarial scheduling, including the bound of $1.581$ for randomized algorithms.
Our main results focus on a large number of machines, $m\rightarrow\infty$. This is in line with most recent adversarial results \cite{albers_randomized_2002, fleischer_-line_2000} and all random-order scheduling results \cite{albers_scheduling_2020, gobel2015online, molinaro_online_2017, osborn_lists_2008}. While adversarial guarantees are known to improve for small numbers of machines, nobody has ever, to the best of our knowledge, explored guarantees for random-order arrival on a small number of machines. We prove that our simple algorithm is $\left(1.75+O\left(\frac{1}{\sqrt{m}}\right)\right)$-competitive. Explicit bounds on the hidden term are given as well as simulations, which indicate good performance in practice. This shows that the focus of contemporary analyses on the limit case is sensible and does not hide unreasonably large additional terms.
All results in this paper abide to the stronger measure of \emph{nearly competitiveness} from~\cite{albers_scheduling_2020}. An algorithm is required to achieve its competitive ratio not only in expectation but on nearly all input permutations. Thus, input sequences where it is not obtained can be considered extremely rare and pathological. Moreover, we require worst-case guarantees even for such pathological inputs. This seems quite relevant to practical applications, where we do not expect fully random inputs. Both algorithms in this paper hold up to this stronger measure of nearly competitiveness.
A basic approach in random-order models relies on sampling; a small part of the input is used to predict the rest. Sampling allows us to include techniques from semi-online and advice scheduling with two further challenges. On the one hand, the advice is imperfect and may be, albeit with low probability, totally wrong. On the other hand, the advice has to be learned, rather than being available right from the start. In the beginning 'mistakes' cannot be avoided. This makes it impossible to adapt better semi-online algorithms than $\mathrm{LightLoad}$, namely~\cite{cheng_semi--line_2005, kellerer_semi_1997,kellerer2015efficient} to our model. These algorithms need to know the total processing volume right from the start. Note that the advanced algorithm in this paper out-competes the optimum competitive ratio of~$1.585$ these semi-online algorithms can achieve~\cite{albers_better_1999,kellerer2015efficient}. We conjecture that this is not possible for algorithms that solely use sampling.
Algorithms that can only use sampling are studied in a modern variant of the secretary problem \cite{correa2021secretary, kaplan_competitive_2020}. First, a random sample is observed, then the sequence is treated in adversarial order. The analysis of $\mathrm{LightLoad}$ carries over to such a model without changes. The $1.535$-competitive algorithm does not maintain its competitive ratio in such a model.
The $1.535$-competitive main algorithm is based on a modern point of view, which, analogous to kernelization, reduces complex inputs to sets of critical jobs. A set of critical jobs is estimated using sampling. Critical jobs impose a lower bound on the optimum makespan. If the bound is high, an enhanced version of Graham's greedy strategy suffices; called the Least-Loaded-Strategy. Else, it is important to schedule critical jobs correctly. The Critical-Job-Strategy, based on sampling, estimates the critical jobs and schedules them ahead of time. An easy heuristic suffices, due to uncertainty involved in the estimates. Uncertainty poses not only the main challenge in the design of the Critical-Job-Strategy. On a larger scale, it also makes it hard to decide, which of the two strategies to use. Sometimes the Critical-Job-Strategy is chosen wrongly. These cases comprise the crux of the analysis and require using random-order arrival in a novel way beyond sampling.
The analyses of both algorithms follow three steps. First, adversarial analyses give worst-case guarantees and take care of \emph{simple job sets}, which lack structure to be exploited via random reordering. Intuitively, random sequences have certain properties, like being not 'ordered'. A second step formalizes this, introducing stable orders. Non-stable orders are rare and negligible. Reducing to stable orders yields a natural semi-online setting. Third, we analyze our algorithm in this semi-online setting. See \Cref{fig:simplestable} for a lay of the land.
The paper concludes with lower bounds for the secretary~model. No algorithm, deterministic or randomized, is better than nearly $1.257$-competitive. This immediately yields a lower bound of $1.043$ in the general secretary model, too.
\section{Notation} Almost all notations relevant in scheduling depend on the input set $\ensuremath{\mathcal{J}}\xspace$ or on the ordered input sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$. We use the notation $[\ensuremath{\mathcal{J}}\xspace]$ and $[\ensuremath{\mathcal{J}}\xspace^\sigma]$ to indicate such dependency, for example $L[\ensuremath{\mathcal{J}}\xspace]$ and $L_\varphi[\ensuremath{\mathcal{J}}\xspace^\sigma]$. If such dependency needs not be mentioned, for example if the sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ is fixed, we drop this appendage, simply writing $L$ and $L_\varphi$. Similarly, we write ${\mathrm{OPT}}$ for ${\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. If we focus on the job order $\sigma$ whilst the dependency of the job set $\ensuremath{\mathcal{J}}\xspace$ does not deserve mention, the notation $[\sigma]$ instead of $[\ensuremath{\mathcal{J}}\xspace^\sigma]$ is used. We could for example write $L_\varphi[\sigma]$.
\section{A strong measure of random-order competitiveness}
Consider a set of $n$ jobs $\ensuremath{\mathcal{J}}\xspace=\{J_1,\ldots, J_n\}$ with non-negative sizes $p_1,\ldots, p_n$ and let $S_n$ be the group of permutations of the integers from $1$ to $n$. We consider $S_n$ a probability space under the uniform distribution, i.e.\ we pick each permutation with probability $1/n!$. Each permutation $\sigma\in S_n$, called an \emph{order}, gives a \emph{job sequence} $\ensuremath{\mathcal{J}}\xspace^\sigma=J_{\sigma(1)},\ldots,J_{\sigma(n)}$. Recall that traditionally an online algorithm $A$ is called \emph{$c$-competitive} for some $c\ge 1$ if we have for all job sets $\ensuremath{\mathcal{J}}\xspace$ and job orders $\sigma$ that $A(\ensuremath{\mathcal{J}}\xspace^\sigma)\le c{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. We call this the \emph{adversarial model}.
In the secretary~model we consider the expected makespan of $A$ under a uniformly chosen job order, i.e.\ $A^{\mathrm{rom}}=\ensuremath{\mathbf{E}}\xspace_{\sigma\sim S_n}[A(\ensuremath{\mathcal{J}}\xspace^\sigma)]=\frac{1}{n!}\sum_{\sigma\in S_n} A(\ensuremath{\mathcal{J}}\xspace^\sigma)$, rather than the makespan achieved in a worst-case order. The algorithm $A$ is \emph{$c$-competitive in the secretary model} if $A^{\mathrm{rom}}(\ensuremath{\mathcal{J}}\xspace)\le c{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$ for all input sets $\ensuremath{\mathcal{J}}\xspace$.
This model tries to lower the impact of particularly badly ordered sequences by looking at competitive ratios only in expectation. Interestingly, the scheduling problem allows for a stronger measure of random-order competitiveness for large $m$, called \emph{nearly competitiveness}~\cite{albers_scheduling_2020}. One requires the given competitive ratio to be obtained on nearly all sequences, not only in expectation, as well as a bound on the adversarial competitive ratio. We recall the definition and the main fact, that an algorithm is already $c$-competitive in the secretary model if it is nearly $c$-competitive.
\begin{definition}\label{def:comp} A deterministic online algorithm $A$ is called {\em nearly $c$-competitive\/} if the following two conditions hold. \begin{itemize} \setlength{\parskip}{0pt} \setlength{\itemsep}{0pt plus 1pt} \item The algorithm $A$ achieves a constant competitive ratio in the adversarial model. \item For every $\varepsilon >0$, we can find $m(\varepsilon)$ such that for all machine numbers $m \geq m(\varepsilon)$ and all job sequences ${\cal J}$ there holds $\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n} [A({\cal J}^\sigma) \geq (c+\varepsilon) OPT({\cal J})] \leq \varepsilon$. \end{itemize} \end{definition}
\begin{lemma}\label{le.nearly} If a deterministic online algorithm is nearly $c$-competitive, then it is $c$-competitive in the random-order model as $m\rightarrow \infty$. \end{lemma}
\begin{proof} Let $C$ be the constant adversarial competitive ratio of $A$. Given $\delta>0$ we need to show that we can choose $m$ large enough such that our algorithm is $(c+\delta)$-competitive in the random-order model. For $\varepsilon=\frac{\delta}{C-c+1}$ choose $m$ large enough such that $P_\varepsilon(\ensuremath{\mathcal{J}}\xspace)=\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[C_A(\ensuremath{\mathcal{J}}\xspace^\sigma)\ge (c+\varepsilon){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)\right] \le \varepsilon$ holds for every input sequence $\ensuremath{\mathcal{J}}\xspace$. Then we have for every input sequence $\ensuremath{\mathcal{J}}\xspace$ that \begin{align*}C^{\mathrm{rom}}(\ensuremath{\mathcal{J}}\xspace)&\le \left(1-P_\varepsilon(\ensuremath{\mathcal{J}}\xspace)\right)\cdot (c+\varepsilon){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace) +P_\varepsilon(\ensuremath{\mathcal{J}}\xspace)\cdot C\cdot{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)\le ((1-\varepsilon)(c+\varepsilon)+\varepsilon C){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace) \\ &\le(c+\delta(C-c+1)){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)=(c+\delta){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace). &\qedhere \end{align*} \end{proof}
\section{Basic properties}\label{sec.basic} Given an input sequence $\ensuremath{\mathcal{J}}\xspace^\sigma=J_{\sigma(1)},\ldots J_{\sigma(n)}$ and $0<\varphi\le 1$, we consider the \emph{load estimate} $L_\varphi=L_\varphi[\ensuremath{\mathcal{J}}\xspace^\sigma]=\frac{1}{\varphi m}\sum_{\sigma(t)\le\varphi n} p_{t}$, which is $\varphi^{-1}$-times the average load (in any schedule) after the first $\varphi n$ jobs have been assigned. We are particularly interested in the \emph{average load} $L=L[\ensuremath{\mathcal{J}}\xspace]=L_1[\ensuremath{\mathcal{J}}\xspace^\sigma]$, which is a lower bound for ${\mathrm{OPT}}$. The value $L_\varphi$ for smaller $\varphi$ is a guess for $L$, which can be made by an online algorithm after a $\varphi$-fraction of the input-sequence has been observed. Given $t> 0$, let $p^t_{\mathrm{max}}=\max(p_{t'}\mid t'< t+1)$ be the size of the largest among the first $\lceil t\rceil$ jobs. In particular, $p_{\mathrm{max}}=p^n_{\mathrm{max}}$, the \emph{size of the largest jobs}, is again an important lower bound for ${\mathrm{OPT}}$.
\begin{proposition}\label{pro.bounds} We have the following lower bounds for the optimum makespan:
\begin{itemize} \item $p_{\mathrm{max}} \le {\mathrm{OPT}}$
\item $L \le {\mathrm{OPT}}$ \end{itemize} \end{proposition}
\begin{proof}The first bound follows from observing that any schedule must, in particular, schedule the largest job on some machine whose load thus is at least $p_{\mathrm{max}}$. For the second bound one observes that the makespan, the maximum load of a machine in any given schedule, cannot be less than $L$, the average load of all machines. \end{proof}
Let us consider any fixed (ordered) job sequence $\ensuremath{\mathcal{J}}\xspace^\sigma=J_{\sigma(1)},\ldots J_{\sigma(n)}$ and any (deterministic) algorithm that assigns these jobs to machines. We begin with some fundamental observations.
\begin{lemma}\label{le.avglb} Let $\varphi>0$ and $t\le \varphi n$. Then the $k$-th least loaded machine at time $t$ has load at most $\frac{m}{m-k+1}\varphi L_{\varphi}$. In particular, its load is at most $\frac{m}{m-k+1} L$. \end{lemma}
\begin{proof} Let $L^t$ be the sum of all loads at time $t$. Since this is the same as the sum of all processing times of jobs arriving up to time $t$, we have $L^t\le\sum_{\sigma(t')\le t}p_{t'} \le \varphi m \frac{1}{\varphi m}\sum\limits_{\sigma(t')\le\varphi n} p_{t'}= \varphi m L_\varphi$. Let $l$ be the load of the $k$-th least loaded machine at time $t$. By definition $m-k+1$ machines had at least that load. Thus $(m-k+1)l\le L^t \le \varphi m L_\varphi$ or, equivalently, $l\le\frac{m}{m-k+1}\varphi L_{\varphi}$. \end{proof}
Consider the value $R ({\cal J}) =\min\{{L\over p_{\max}},1\}$, which measures the complexity of the input set independent of its order. Informally, a smaller value $R(\ensuremath{\mathcal{J}}\xspace)$ makes the job set easier to be scheduled but less suited to reordering arguments. Later, sets with a small value $R(\ensuremath{\mathcal{J}}\xspace)$ need to be treated separately. The following proposition is both interesting for its implication on general sequences and, particularly, simple sequences with $R ({\cal J})$ small.
\begin{figure}
\caption{A surprisingly difficult sequence for random-order arguments. The big job carries most of the processing volume. Other jobs are negligible. Thus, all permutations look basically the same. Note that for such a 'simple' sequence $R ({\cal J}) $ is small.
}
\label{fig:counterx}
\end{figure}
\begin{proposition}\label{prop1} If any job $J$ is scheduled on the $k$-th least loaded machine, the load of said machine does not exceed $\left(\left(\frac{m}{m-k+1}\right)R(\ensuremath{\mathcal{J}}\xspace)+1 \right)OPT({\cal J})$ afterwards. \end{proposition}
\begin{proof} Let $l$ be the load of the $k$-th least loaded machine before $J$ is scheduled. Then $l \le \frac{m}{m-k+1}L$ by \Cref{le.avglb}. Since $J$ had size at most $p_\mathrm{max}$, the load of the machine it was scheduled on won't exceed $l+p_\mathrm{max} \le \frac{m}{m-k+1}L + p_\mathrm{max}\le \frac{m}{m-k+1}\frac{L}{\max(L,p_\mathrm{max})}{\mathrm{OPT}}+{\mathrm{OPT}} = \left(\left(\frac{m}{m-k+1}\right)R(\ensuremath{\mathcal{J}}\xspace)+1 \right)OPT$. \end{proof}
Thus, if an algorithm avoids a constant fraction of most loaded machines, its competitive ratio is bounded and approaches $1$ as~$R ({\cal J})\rightarrow 0$.
We call a vector $(\tilde l_M^t)$ indexed over all machines~$M$ and all times $t=0,\ldots, n$ a \emph{pseudo-load} if $\tilde l_M^t\ge l_M^t$ for any time $t$ and machine $M$. We introduce such a pseudo-load in the analysis of our main algorithm. Let $\tilde L = \sup_t \frac{1}{m} \sum_M \tilde l_M^t$ be the \emph{maximum average pseudo-load} and, again, consider $\tilde R ({\cal J}) = \min\{ {\tilde L\over p_{\max}},{\tilde L\over L}\}=R(\ensuremath{\mathcal{J}}\xspace) {\tilde L\over L} $. The following observation is immediate. \begin{lemma}\label{le.sensibletildeL} We have $\tilde L\ge L$ and $R({\cal J}) \le \tilde R({\cal J})$. \end{lemma} \begin{proof} Indeed, $L=\frac{1}{m}\sum_M l^n_M\le \frac{1}{m}\sum_M \tilde l^n_M \le \tilde L$. This already implies $R({\cal J}) \le \tilde R({\cal J})$. \end{proof}
It will be important to note that \Cref{le.avglb} and Proposition~\ref{prop1} generalize to pseudo-loads. Since the proofs stay almost the same, we do not include them in the main body of the paper but leave them to \Cref{sec.basic.p} for completeness.
\begin{restatable}{lemma}{leavglbII}\label{le.avglb2} Let $\varphi>0$ and $t\le \varphi n$. Then the machine with the $k$-th least pseudo-load at time $t$ had pseudo-load at most $\frac{m}{m-k+1}\tilde L$. \end{restatable}
\begin{restatable}{proposition}{propII}\label{prop2} If job $J_{\sigma(t+1)}$ is scheduled on the machine M with $i$-th smallest pseudo-load~$\tilde l_M^t$ at time $t$, then, afterwards, its load $l_M^{t+1}$ does not exceed $\left(1+\left(\frac{m}{m+1-i}\right)\tilde R(\ensuremath{\mathcal{J}}\xspace)\right)OPT({\cal J})$. \end{restatable}
\subsection{Sampling and the Load Lemma}\label{sec.sampling}
Our model is particularly suited to sampling. Given a job set $\ensuremath{\mathcal{J}}\xspace$, we call a subset $\ensuremath{\mathcal{C}}\xspace\subseteq \ensuremath{\mathcal{J}}\xspace$ a \emph{job class}. Consider any job order $\sigma\in S_n$. For $0<\varphi\le 1$, let $n_{\ensuremath{\mathcal{C}}\xspace,\varphi}[\sigma]$ denote the number of jobs in $\ensuremath{\mathcal{C}}\xspace$ arriving till time $\varphi n$, i.e.\ $n_{\ensuremath{\mathcal{C}}\xspace,\varphi}[\sigma]=|\{J_{\sigma(i)}\mid J_{\sigma(i)}\in \ensuremath{\mathcal{C}}\xspace \land\sigma(i) \le \varphi n\}|$. Let $n_{\ensuremath{\mathcal{C}}\xspace}=n_{\ensuremath{\mathcal{C}}\xspace,1}[\sigma]=|\ensuremath{\mathcal{C}}\xspace|$ be the total number of jobs in $\ensuremath{\mathcal{C}}\xspace$. The following is a consequence of Chebyshev's inequality. The proof is left to \Cref{sec.basic.p}.
\begin{restatable}{proposition}{prosample}\label{pro.sample} Let $\ensuremath{\mathcal{C}}\xspace\subset\ensuremath{\mathcal{J}}\xspace$ be a job class for a job set $\ensuremath{\mathcal{J}}\xspace$ of cardinality at least $m$. Given $\varphi>0$ and $E\ge 0$ we have
\[\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[\left|\varphi^{-1} n_{\ensuremath{\mathcal{C}}\xspace,\varphi}[\sigma]-n_{\ensuremath{\mathcal{C}}\xspace}\right|\ge E\right]\le \frac{n_\ensuremath{\mathcal{C}}\xspace}{\varphi(E-1/m)^2}.\] \end{restatable}
A basic lemma in random-order scheduling is the Load Lemma from~\cite{albers_scheduling_2020}, which allows a good estimate of the average load under very mild assumptions on the job set. Here, we introduce a more general version. It is all we need to adapt the semi-online algorithm $\mathrm{LightLoad}$ from the literature to the secretary model.
\begin{figure}
\caption{A graphic depicting the average load over time on the classical lower bound sequence from \cite{albers_better_1999} for $40$, $400$ and $4000$ machines. The dashed line corresponds to the original adversarial order. The three solid lines corresponding to random permutations clearly approximate a straight line. Thus, sampling allows one to predict the (final) average load.
}
\label{fig.ratios}
\end{figure}
\begin{restatable}{lemma}{Loadlemma}[Load Lemma~\cite{albers_scheduling_2020}]\label{Loadlemma}
Let $R_\mathrm{low}=R_\mathrm{low}(m)>0$, $1\ge \varphi=\varphi(m)>0$ and $\varepsilon=\varepsilon(m)>0$ be three functions such that $\varepsilon^{-4}\varphi^{-1}R_\mathrm{low}^{-1}=o(m)$. Then there exists a threshold $m(R_\mathrm{low},\varphi,\varepsilon)$ such that we have for all $m\ge m(R_\mathrm{low},\varphi,\varepsilon)$ and all job sets $\ensuremath{\mathcal{J}}\xspace$ with $R(\ensuremath{\mathcal{J}}\xspace)\ge R_\mathrm{low}$ and $|\ensuremath{\mathcal{J}}\xspace|\ge m$:
\[ \ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n} \left[\left|\frac{L_\varphi[\ensuremath{\mathcal{J}}\xspace^\sigma]}{L[\ensuremath{\mathcal{J}}\xspace]}-1\right|\ge\varepsilon \right]<\varepsilon.\] \end{restatable}
We sketch the proof, leaving the details to \Cref{sec.basic.p} since it is technical and a slight generalization of the one found in~\cite{albers_scheduling_2020}. We use geometric rounding so that we only have to deal with countably many possible job sizes. Now, jobs of any given size $p$ form a job class $\ensuremath{\mathcal{C}}\xspace=\ensuremath{\mathcal{C}}\xspace_p$. Using \Cref{pro.sample}, we can relate their actual cardinality $n_\ensuremath{\mathcal{C}}\xspace$ with the $\varphi$-estimate $n_{\ensuremath{\mathcal{C}}\xspace,\varphi}$. Putting everything together yields the Load Lemma, which compares the load $L$ and the load estimate $L_\varphi$. The lemma relies intrinsically on the lower bound~$R_\mathrm{low}$ for $R(\ensuremath{\mathcal{J}}\xspace)$. Consider a job set $\ensuremath{\mathcal{J}}\xspace$ like the one in \Cref{fig:counterx}, only one job carries all the load while there are lots of other jobs with size zero (or negligible size $\epsilon>0$). Then $R(\ensuremath{\mathcal{J}}\xspace)=\frac{1}{m}$ and a statement as in \Cref{Loadlemma} could not be true for $\varepsilon< \min(1,\varphi^{-1}-1)$ since $L_\varphi\in\{0,\varphi^{-1}L\}$.
\Cref{fig.ratios} shows the behavior of the average load on three randomly chosen permutations of a classical input sequence. As predicted, this average load approaches a straight line for a large number of machines. The Load Lemma is an important theoretical tool but only provides asymptotic guarantees. In \Cref{sec.Lunderest} we explore practical guarantees for small numbers of machines.
\subsection{A simple $\boldsymbol{1.75}$-competitive algorithm}\label{sec.1.75} We modify the semi-online algorithm $\mathrm{Light Load}$ from the literature to obtain a very simple nearly $1.75$-competitive algorithm. For any $0\le t\le n$ let $M_{\mathrm{mid}}^{t}$ be a machine having the $\lfloor m/2 \rfloor$-lowest load at time $t$, i.e.\ right before job $J_{t+1}$ is scheduled. Let $l_{\mathrm{mid}}^{t}$ be its load and let $l_{\mathrm{low}}^{t}$ be the smallest load of any machine. We recall the algorithm $\LL{L_{\mathrm{guess}}}$ from Albers and Hellwig~\cite{albers_semi-online_2012}, where the parameter $L_{\mathrm{guess}}$ is a guess for $L$.
\begin{algorithm}[H] \caption{The (semi-online) algorithm $\LL{L_{\mathrm{guess}}}$~\cite{albers_semi-online_2012}.}\label{alg.exceptional2} \begin{algorithmic}[1] \State \textit{Let $J_t$ be the job to be scheduled and let $p_t$ be its size.} \If{$l_{\mathrm{low}}^{t-1} \le 0.25 L_{\mathrm{guess}}$ \textbf{or} $l_{\mathrm{mid}}^{t-1} + p_t > 1.75 L_{\mathrm{guess}}$} \State Schedule $J_t$ on any least loaded machine; \Else { }schedule $J_t$ on $M_{\mathrm{mid}}^{t-1}$; \EndIf \end{algorithmic} \end{algorithm}
$\LL{L}$ has been analyzed in the setting where the average load $L$ is known in advance, i.e.\ with fixed parameter $L_{\mathrm{guess}}=L$. Albers and Hellwig obtain the following: \begin{theorem}[\cite{albers_semi-online_2012}]\label{te.ll} $\LL{L}$ is adversarially $1.75$-competitive, i.e.\ for every job sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ with average load $L=L[\ensuremath{\mathcal{J}}\xspace]$ there holds $\LL{L}(\ensuremath{\mathcal{J}}\xspace^\sigma)\le 1.75{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. \end{theorem}
The proof from~\cite{albers_semi-online_2012} is complicated and not repeated in this paper. We need to deal with more general guesses $L_{\mathrm{guess}}$ that are slightly off. The following corollary is derived from \Cref{te.ll} by enlarging the input sequence.
\begin{restatable}{corollary}{coll}\label{co.ll} Let $\ensuremath{\mathcal{J}}\xspace^\sigma$ be any (ordered) input sequence and let $L_{\mathrm{guess}} \ge L[\ensuremath{\mathcal{J}}\xspace]$. Then the makespan of $\LL{L_{\mathrm{guess}}}$ is at most $1.75 \cdot \max (L_{\mathrm{guess}},{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace))$. \end{restatable}
The idea of the proof is rather simple. We can add jobs to the end of the sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ such that for the resulting sequence $\ensuremath{\mathcal{J}}\xspace'^{\sigma'}$ there holds ${\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace')=\max(L_{\mathrm{guess}},{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace))$. We then apply \Cref{te.ll} to see that $\LL{L_{\mathrm{guess}}}$ has cost at most $1.75 \cdot \max (L_{\mathrm{guess}},{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace))$ on this sequence. Passing over to the prefix $\ensuremath{\mathcal{J}}\xspace^\sigma$ of $\ensuremath{\mathcal{J}}\xspace'^{\sigma'}$ cannot increase this cost. A technical proof is left to \Cref{sec.1.75.p} for completeness.
We also need to deal with guesses $L_{\mathrm{guess}}$ that are totally off. Since $\LL{L_{\mathrm{guess}}}$ only considers the least or the $\lfloor m/2\rfloor$-th least loaded machine we get by \Cref{prop1}: \begin{corollary}\label{co.ll2} For any (ordered) sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ and any value $L_{\mathrm{guess}}$ the makespan of $\LL{L_{\mathrm{guess}}}$ is at most $(1+2R(\ensuremath{\mathcal{J}}\xspace)){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. In particular, it is at most $3{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. \end{corollary}
\subsubsection*{Adapting LightLoad to the random-order model} Let $\delta=\delta(m)=1/\log(m)$ be the \emph{margin of error our algorithm allows}. We will see that our algorithm is $(1.75+O(\delta))$-competitive. In fact, any function with $\delta(m)\in \omega(m^{-1/4})$ and $\delta(m)\in o_m(1)$ would do. Given an input sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ let $\hat L_{\mathrm{pre}}=\hat L_{\mathrm{pre}}[\ensuremath{\mathcal{J}}\xspace^\sigma]=\frac{L_{1/4}[\ensuremath{\mathcal{J}}\xspace^\sigma]}{1-\delta}$ be our \emph{guess for $L$}. We use the index 'pre' since our main algorithm later will use a slightly different guess $\hat L$. In this section we consider the algorithm $\mathrm{Light Load ROM}=\LL{\hat L_{\mathrm{pre}}}$. Let us observe first that this is indeed an online algorithm, not only a semi-online algorithm as one might expect since the $\textbf{if}$-clause uses the guess $\hat L_{\mathrm{pre}}$ before it is known.
\begin{lemma}\label{le.llromon} The algorithm $\mathrm{Light Load ROM}$ can be implemented as an online algorithm. \end{lemma}
\begin{proof} It suffices to note that the $\textbf{if}$-clause always evaluates to \textsc{true} for $t<n/4$, i.e.\ before $L_{\mathrm{guess}}=\hat L_{\mathrm{pre}}$ is known. Indeed, in this case $l_{\mathrm{low}} \le 0.25L_{1/4} < 0.25 \hat L_{\mathrm{pre}}$ by \Cref{le.avglb}. \end{proof}
We now prove the main theorem. \Cref{co.1.75} follows immediately from \Cref{le.nearly}.
\begin{theorem}\label{te.1.75} The algorithm $\mathrm{Light Load ROM}$ is nearly $1.75$-competitive. \end{theorem} \begin{corollary}\label{co.1.75} $\mathrm{Light Load ROM}$ is $1.75$-competitive in the secretary model for $m\rightarrow\infty$. \end{corollary}
\begin{figure}
\caption{The lay of the land of the analysis. The algorithm is $(c+O(\delta))$-competitive on simple and proper stable sequences. Only the small unstable remainder (hashed) is problematic. Dashed lines mark orbits under the action of the permutation group $S_n$. Simple sequences stay simple under permutation. Non-simple orbits have at most a $\delta$-fraction, which is unstable (hashed). Thus, the algorithm is $(c+O(\delta))$-competitive with probability at least $1-\delta$ after random permutation.}
\label{fig:simplestable}
\end{figure}
\begin{proof}[Proof of \Cref{te.1.75}]Our analysis forms a triad, which outlines how we are going to analyze our more sophisticated $1.535$-competitive algorithm later on as well.
\noindent{\bf Analysis basics:} By \Cref{co.ll2} the algorithm $\mathrm{Light Load ROM}$ is adversarially $3$-competitive. We call the input set $\ensuremath{\mathcal{J}}\xspace$ \emph{simple} if $|\ensuremath{\mathcal{J}}\xspace|\le m$ or $R[\ensuremath{\mathcal{J}}\xspace]<\frac{3}{8}$. If $|\ensuremath{\mathcal{J}}\xspace|\le m$ every job is scheduled onto an empty machine, which is optimal. If $R[\ensuremath{\mathcal{J}}\xspace]<\frac{3}{8}$, \Cref{co.ll2} bounds the competitive ratio by $1+2R[\ensuremath{\mathcal{J}}\xspace]<1.75$. We thus are left to consider non-simple, so-called \emph{proper}, job sets.
\noindent{\bf Stable job sequences:} We call a sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ \emph{stable} if $L\le \hat L_{\mathrm{pre}} \le \frac{1+\delta}{1-\delta}L$ holds true. By the Load Lemma, \Cref{Loadlemma}, the probability of the sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ being stable is at least $1-\delta$ if we choose $m$ large enough and $\ensuremath{\mathcal{J}}\xspace$ proper. Here we use that $\delta(m) = 1/\log(m) \in \omega(m^{-1/4})$.
\noindent{\bf Adversarial Analysis:} By \Cref{co.ll}, the makespan of $\mathrm{Light Load ROM}$ on stable sequences is at most $1.75 \max (\hat L_{\mathrm{pre}}(\ensuremath{\mathcal{J}}\xspace),{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace))\le 1.75\frac{1+\delta}{1-\delta}{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)=\big(1.75+\frac{3.5\cdot\delta}{1-\delta}\big){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace).$
\noindent{\bf Conclusion:} Let $\varepsilon>0$. Since $\delta(m)\rightarrow 0$, we can choose $m$ large enough such that $\frac{3.5\delta(m)}{1-\delta(m)}\le \varepsilon$. In particular $\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n} [\mathrm{Light Load ROM}({\cal J}^\sigma) \geq (c+\varepsilon) OPT({\cal J})] \leq \delta(m) \le \varepsilon$ since the only sequences where the inequality does not hold are proper but not stable. This establishes the second condition of being nearly competitive. \end{proof}
\subsubsection*{Why underestimating $L$ is actually not as bad as one may think.}\label{sec.Lunderest} So far we were careful to choose our guess $\hat L_\mathrm{pre}$ in such a way that it is unlikely to underestimate~$L$ since this allowed us to prove results in a self-contained fashion, using \Cref{te.ll} from~\cite{albers_semi-online_2012} only as a black box. One should note that their analysis also allows us to tackle guesses $L_{\mathrm{guess}}<L$.
\begin{lemma}\label{le.underest} Let $\ensuremath{\mathcal{J}}\xspace^\sigma$ be any (ordered) input sequence and let $L_{\mathrm{guess}} = (1-\delta)L[\ensuremath{\mathcal{J}}\xspace]$ for some $\delta \ge 0$. Then the makespan of $\LL{L_{\mathrm{guess}}}$ is at most $1.75(1+\delta) {\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. \end{lemma}
Showing this lemma requires carefully rereading the analysis of Albers and Hellwig~\cite{albers_semi-online_2012}. We next describe how their analysis has to be adapted to derive \Cref{le.underest}.
\begin{proof}[How to adapt the proof from~\cite{albers_semi-online_2012}] Consider any input sequence $J_1,\ldots J_n$. Using induction we may assume the result of the lemma to hold on the prefix $J_1,\ldots,J_{n-1}$. In~\cite{albers_semi-online_2012} they argue that the algorithm remains $1.75$-competitive if the least loaded machine had load at most $0.75L$ upon arrival of $J_n$. By a similar reasoning the less strict statement of \Cref{le.underest} holds if the least loaded machine had load at most $0.75L+1.75\delta L$ at that time. Thus we are left to consider the case that its load is $0.75L+1.75\delta L +\epsilon L$ for some $0<\epsilon<0.25-1.75\delta$. Following the arguments in~\cite{albers_semi-online_2012}, it suffices to show that every machine received a job of size $0.5L+\epsilon L$. The statement of Lemma 1 in~\cite{albers_semi-online_2012} needs to be weakened to 'At time $t_{j_0}$ the least loaded machine had load at most $(0.25+1.75\delta)L$.' The proof of the lemma remains mostly the same. The only change occurs in the induction step. Here, the size of a job causing a machine to reach load $0.75L+1.75\delta L +\epsilon L$, and hence the corresponding decrease in potential, is only $(1-1.75\delta)L$. Similarly, the statement of Lemma 2 needs to be refined to 'the $j_0$-th least loaded machine had load at most $(1.25+1.75\delta)L-\epsilon L=1.75L_{\mathrm{guess}}-0.5L-\epsilon L$.' The proof of Lemma 2 stays the same. Using these modifications, the rest of the analysis of~\cite{albers_semi-online_2012} can be applied to conclude the proof. \end{proof}
\begin{theorem}\label{th.fulllightloadcapM}
Let $\ensuremath{\mathcal{J}}\xspace^\sigma$ be any (ordered) input sequence. The makespan of $\mathrm{Light Load ROM}$ on $\ensuremath{\mathcal{J}}\xspace^\sigma$ is at most $1.75\cdot\left(1+\frac{|\hat L_{\mathrm{pre}}[\ensuremath{\mathcal{J}}\xspace^\sigma]-L|}{L}\right){\mathrm{OPT}}$. \end{theorem}
\begin{proof}
On input permutation $\ensuremath{\mathcal{J}}\xspace^\sigma$ the competitive ratio of our algorithm is at most $1.75+\frac{\left|L_{1/4}-L\right|}{L}$ by \Cref{co.ll} if $L_{1/4} \ge L$. Else, recall that ${\mathrm{OPT}}\ge L$ and apply \Cref{le.underest}. \end{proof}
Let us assume for simplicity that the input length $n$ is divisible by $4$. We can always add up to three jobs of size $0$ to obtain such a result. This 'adding' can be simulated by an online algorithm.
Recall that the \emph{absolute mean deviation} of a random variable $X$ that has nonzero expectation is defined as $\mathrm{MD}[X]=\ensuremath{\mathbf{E}}\xspace\left[\left|X-\ensuremath{\mathbf{E}}\xspace[X]\right|\right]$ and its \emph{normalized absolute mean deviation} is $\mathrm{NMD}[X]=\frac{\ensuremath{\mathbf{E}}\xspace\left[\left|X-\ensuremath{\mathbf{E}}\xspace[X]\right|\right]}{\ensuremath{\mathbf{E}}\xspace[X]}$. In particular, $\mathrm{NMD}[L_{1/4}]=\frac{\ensuremath{\mathbf{E}}\xspace_{\sigma\sim S_n}\left[\left|L_{1/4}-L\right|\right]}{L}.$ From the previous theorem we obtain:
\begin{theorem}\label{th.reduc} On input set $\ensuremath{\mathcal{J}}\xspace$ the competitive ratio of $\mathrm{Light Load ROM}$ in the random-order model is at most $1.75(1+\mathrm{NMD}(\hat L_{\mathrm{pre}}))$. If $n\le m$ or $R(\ensuremath{\mathcal{J}}\xspace)\le \frac{3}{8}$, then $\LL{L_{1/4}}$ is already $1.75$-competitive. \end{theorem} \begin{proof} The first statement follows from \Cref{th.fulllightloadcapM} by taking expected values. If $n\le m$, the algorithm $\LL{L_{1/4}}$ places every job on a separate machine and is thus optimal. If $R(\ensuremath{\mathcal{J}}\xspace)\le \frac{3}{8}$, it is $1.75$-competitive by \Cref{co.ll2}. \end{proof}
We will now provide estimates on $\mathrm{NMD}(\hat L_{\mathrm{pre}})$. \Cref{fig.NAMD} depicts practical estimates, while our analysis will focus on theoretical bounds.
\begin{figure}\label{fig.NAMD}
\end{figure}
Given any job set $\ensuremath{\mathcal{J}}\xspace$ of size $n>m$ and $R(\ensuremath{\mathcal{J}}\xspace) \ge \frac{3}{8}$ we are left to estimate the normalized absolute mean deviation of $L_{1/4}$. One observes that $\mathrm{NMD}[L_{1/4}]$ does not change if we scale all jobs by a common factor $\lambda>0$. By choosing $\lambda=L^{-1}$ we may wlog.\ assume that $L=1$. In particular, $\mathrm{NMD}[L_{1/4}]=\mathrm{MD}[L_{1/4}]$. Now $R(\ensuremath{\mathcal{J}}\xspace)\ge \frac{3}{8}$ implies that all jobs have size at most $8/3$. The following lemma allows us to reduce ourselves to particularly easy job instances.
\begin{lemma} Consider two jobs $J_a,J_b\in \ensuremath{\mathcal{J}}\xspace$ of sizes $p_a\le p_b$ and $0\le\varepsilon\le p_a$. If we set the size of $J_a$ to $p_a-\varepsilon$ and the size of $J_b$ to $p_b+\varepsilon$, then $ \mathrm{MD}(L_{1/4})$ does not decrease. \end{lemma}
\begin{proof}
Consider $\mathrm{MD}(L_{1/4})(p_1,\ldots,p_n)=\ensuremath{\mathbf{E}}\xspace[|L_{1/4}-L|](p_1,\ldots,p_n)$ as a function on the job sizes $p_1,\ldots,p_n$. This function is convex since it is a convex combination of the convex functions $|L_{1/4}[\sigma]-L|$ for all~$\sigma\in S_n$. Moreover, it is invariant under exchanging the sizes of $J_a$ and $J_b$. The transformation moves the pair $(p_a,p_b)$ along a line segment away from the symmetric point where both jobs have size $\frac{p_a+p_b}{2}$; a function that is convex along this segment and symmetric about its midpoint is non-decreasing in the direction away from the midpoint, so $\mathrm{MD}(L_{1/4})$ does not decrease.
\end{proof}
We apply the previous lemma to any pair of $J_a,J_b\in \ensuremath{\mathcal{J}}\xspace$ of sizes $0<p_a\le p_b<8/3$ to set either $p_a=0$ or $p_b=8/3$. We then repeat this process until all but one job have either size $0$ or $8/3$. So far at most $\left\lfloor\frac{3m}{8}\right\rfloor$ jobs have size~$8/3$ since by assumption $L=1$. Using again the fact that $\mathrm{MD}[L_{1/4}]$ is convex in the size of jobs, setting the size $p$ of this remaining job to at least one of the values $0$ or $8/3$ cannot decrease $\mathrm{MD}[L_{1/4}]$. Let us do so. This breaks the assumption that $L=1$, which is why we consider $\mathrm{MD}$ instead of $\mathrm{NMD}$. Let $K=K(n)$ be the number of jobs of size~$8/3$. Then $K$ is either $\left\lfloor\frac{3m}{8}\right\rfloor$ or $\left\lfloor\frac{3m}{8}\right\rfloor+1$. Let $X\sim\mathrm{HyperGeom}(n,K,n/4)$; in other words, $X$ corresponds to drawing $n/4$ elements without replacement from a population of size $n$ that contains precisely $K$ successes. Then $L_{1/4}=\frac{8X}{3m}$ for our modified job set. Since none of the modifications caused $\mathrm{MD}[X]$ to decrease we have shown so far:
\begin{lemma}\label{le.Ldevbound} Let $\ensuremath{\mathcal{J}}\xspace$ be a job set of size $n>m$ with $R(\ensuremath{\mathcal{J}}\xspace)\ge \frac{3}{8}$. Then we can choose $K$ as either $\left\lfloor\frac{3m}{8}\right\rfloor$ or $\left\lfloor\frac{3m}{8}\right\rfloor+1$ such that for $X\sim\mathrm{HyperGeom}(n,K,n/4)$ there holds $\mathrm{NMD}[L_{1/4}]\le \frac{8}{3m}\mathrm{MD}[X]$. \end{lemma}
It is possible to evaluate the absolute mean deviation of $X\sim\mathrm{HyperGeom}(n,K,n/4)$ directly using the techniques in~\cite{diaconis1991closed}. Since such an analysis is quite complex, we present a simpler proof, which yields somewhat worse bounds.
\begin{lemma}\label{le.XYcomp} Let $X\sim\mathrm{HyperGeom}(n,K,n/4)$ and $Y\sim\mathrm{Bin}(K,1/4)$ then $\mathrm{MD}(X)\le\mathrm{MD}(Y)$. \end{lemma}
\begin{proof}
Indeed, both random variables correspond to $K$ draws from a population of size $n$ that contains $n/4$ successes. For $X$ these draws occur without replacement, while for $Y$ these are draws with replacement. Let $X_i$ respectively $Y_i$ be the respective random variable, which only considers the first $i$ draws for $0\le i\le K$. We can show via induction that the random variable $|Y_i-i/4|$ dominates $|X_i-i/4|$ by a case distinction on the possible values of $Y_{i-1}-(i-1)/4$ and $X_{i-1}-(i-1)/4$. Thus, the random variable $|Y-\ensuremath{\mathbf{E}}\xspace[Y]|$ dominates $|X-\ensuremath{\mathbf{E}}\xspace[X]|$. This implies $\mathrm{MD}(Y)=\ensuremath{\mathbf{E}}\xspace[|Y-\ensuremath{\mathbf{E}}\xspace[Y]|]\ge\ensuremath{\mathbf{E}}\xspace[|X-\ensuremath{\mathbf{E}}\xspace[X]|]=\mathrm{MD}(X)$. \end{proof}
Given $Y\sim\mathrm{Bin}(K,p)$ (we are interested in $p=1/4$), let $\mathrm{bin}(k,K,p)=P[Y=k]=\binom{K}{k}p^k(1-p)^{K-k}$. Then we can evaluate the absolute mean deviation of $Y$ using de Moivre's theorem.
\begin{theorem}[de Moivre] $\mathrm{MD}(Y)=2\lfloor pK+1\rfloor (1-p)\mathrm{bin}(\lfloor pK+1\rfloor,K,p)$ for $Y\sim\mathrm{Bin}(K,p)$. \end{theorem}
A proof of the theorem can be found in \cite{diaconis1991closed}. Consider $Y\sim\mathrm{Bin}(K,1/4)$ and set $k=\lfloor K/4+1\rfloor$. Using Stirling's Approximation we derive \begin{align*} \mathrm{MD}(Y)&=\lfloor 2k\rfloor \cdot 3/4 \cdot \mathrm{bin}\left(k,K,1/4\right)\\ &=(1+o_K(1)) \frac{3}{8}K \frac{K!}{k!(K-k)!} (1/4)^k(3/4)^{K-k} \\ &=(1+o_K(1)) \frac{3}{8}K \frac{\sqrt{2\pi K}\left(\frac{K}{e}\right)^K}{\sqrt{2\pi k}\left(\frac{k}{e}\right)^k\sqrt{2\pi (K-k)}\left(\frac{K-k}{e}\right)^{K-k}} \left(\frac{1}{4}\right)^k\left(\frac{3}{4}\right)^{K-k} \\ &=(1+o_K(1)) \frac{3}{8}K \sqrt\frac{K}{2\pi k(K-k)} \left(\frac{K}{4k}\right)^k\left(\frac{3}{4}\frac{K}{K-k}\right)^{K-k} . \end{align*} Recall $K/4<k\le K/4+1$. Thus $\left(\frac{K}{4k}\right)^k\le 1$ and $\left(\frac{3}{4}\frac{K}{K-k}\right)^{K-k} \le \left(1+\frac{1}{3K/4-1}\right)^{3K/4}=e+o_K(1)$. We get that $\mathrm{MD}(Y)\le (1+o_K(1)) \frac{3e}{8}K \sqrt\frac{K}{2\pi k(K-k)} $. Recall that $K=\left\lfloor\frac{3m}{8}\right\rfloor$ or $K=\left\lfloor\frac{3m}{8}\right\rfloor+1$. In particular, $K=(1+o_m(1))\frac{8}{3}m$ and $k=(1+o_m(1))\frac{2}{3}m$ . Thus \[\mathrm{MD}(Y)= (1+o_m(1)) \frac{3e}{8}\frac{8m}{3} \sqrt\frac{8m/3}{2\pi 2m/3\cdot(1m/3)} = (1+o_m(1))\sqrt\frac{6e^2m}{\pi}.\]
Consider any job set $\ensuremath{\mathcal{J}}\xspace$ of size $n$ with $R(\ensuremath{\mathcal{J}}\xspace)\ge \frac{3}{8}$. Combining the previous bound with \Cref{le.Ldevbound} and \Cref{le.XYcomp} yields \[\mathrm{NMD}[L_{1/4}]\le \frac{8}{3m}\mathrm{MD}[X] = (1+o_m(1))\frac{8}{3m}\sqrt\frac{6e^2m}{\pi}<\frac{10.02+o_m(1)}{\sqrt{m}}.\]
This bound allows us to establish a competitive ratio in the random-order model.
\begin{theorem}\label{th.fulllightloadcap2} The competitive ratio of $\mathrm{Light Load ROM}$ in the random-order model is $1.75+\frac{18}{\sqrt{m}}+O\left(\frac{1}{m}\right)$. \end{theorem} \begin{proof} This is a consequence of \Cref{th.reduc} and the prior bound on $\mathrm{NMD}[L_{1/4}]$. \end{proof}
\begin{remark} The constant in the previous theorem is far from optimal. As mentioned before, a first improvement can be derived by estimating the absolute mean deviation of the hypergeometric distribution directly using the techniques from~\cite{diaconis1991closed}. A much stronger improvement results from filtering out huge jobs, reducing essentially to the case that $R(\ensuremath{\mathcal{J}}\xspace)=1$. Given any guess $L_{\mathrm{guess}}$ let $\delta(L_{\mathrm{guess}})$ be $L-L_{\mathrm{guess}}$ if $L_{\mathrm{guess}} \le L$; $L_{\mathrm{guess}}-{\mathrm{OPT}}$ if $L_{\mathrm{guess}}>{\mathrm{OPT}}$; and $0$ else. Combining \Cref{co.ll} and \Cref{le.underest} yields that $\LL{L_{\mathrm{guess}}}$ is $1.75(1+\delta(L_{\mathrm{guess}}))$-competitive. So far, we picked for $L_{\mathrm{guess}}$ an estimator for $L$. But besides $L$ we could also try to estimate the lower bound for ${\mathrm{OPT}}$, that is we could consider $B_\mathrm{pre}[\ensuremath{\mathcal{J}}\xspace^\sigma]=\max\left(L_{1/4}[\ensuremath{\mathcal{J}}\xspace^\sigma],p^{n/4}_{\mathrm{max}}[\ensuremath{\mathcal{J}}\xspace^\sigma]\right)$, which is similar to how we estimate ${\mathrm{OPT}}$ in our main algorithm. The nature of the guess $B_\mathrm{pre}[\ensuremath{\mathcal{J}}\xspace^\sigma]$ ensures that only few jobs have size exceeding $B_\mathrm{pre}$; only $4$ in expectation. A more careful analysis reveals that the competitive ratio of $\LL{B_\mathrm{pre}}$ is in fact $1.75+\frac{4.4}{\sqrt{m}}+\frac{7}{m}+O\left(m^{-3/2}\right)$ in the random-order model. \end{remark}
\section{The new, nearly 1.535-competitive algorithm}\label{sec.algorithm}
\begin{figure}
\caption{The 1.535-competitive algorithm. First, a few jobs are sampled. Then, the algorithm decides between two strategies. The Critical-Job-Strategy tries to schedule critical jobs ahead of time. The Least-Loaded-Strategy follows a greedy approach, which reserves some machines for large jobs. Sometimes, we realize very late that the Critical-Job-Strategy does not work and we have to switch to the Least-Loaded-Strategy `on the fly'. We never switch in the other direction. }
\label{fig:inputsequence}
\end{figure}
Our new algorithm achieves a competitive ratio of $c=\frac{1+\sqrt{13}}{3}\approx 1.535$. Let $\delta=\delta(m)=\frac{1}{\log(m)}$ be the \emph{margin of error our algorithm allows}. Throughout the analysis it is mostly sensible to treat $\delta$ as a constant and forget about its dependency on $m$. Our algorithm maintains a certain set $\ensuremath{\mathcal{M}}\xspace_{\mathrm{res}}$ of $\lceil\delta m\rceil$ \emph{reserve machines}. Their complement, the \emph{principal machines}, are denoted by~$\ensuremath{\mathcal{M}}\xspace$. Let us fix an input sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$. Let $\hat L=\hat L[\ensuremath{\mathcal{J}}\xspace^\sigma]=L_{\delta^2}[\ensuremath{\mathcal{J}}\xspace^\sigma]$. For simplicity, we hide the dependency on $\ensuremath{\mathcal{J}}\xspace^\sigma$ whenever possible. Our online algorithm uses $ B=\max\left(p^{\delta^2 n}_{\mathrm{max}},\hat L\right)$ as an \emph{estimated lower bound for ${\mathrm{OPT}}$}, which is known after the first $\lfloor\delta^2 n\rfloor$ jobs are treated. Our algorithm uses geometric rounding implicitly. Given a job $J_t$ of size $p_t$ let $f(p_t)=(1+\delta)^{\left\lfloor \log_{1+\delta}p_t\right\rfloor}$ be its \emph{rounded size}. We also call $J_t$ an $f(p_t)$-job. Using rounded sizes, we introduce job classes. Let $p_{\mathrm{small}} =c-1=\frac{\sqrt{13}-2}{3}\approx 0.535$ and $p_{\mathrm{big}}=\frac{c}{2}=\frac{1+\sqrt{13}}{6}\approx 0.768$. Then we call job $J_t$ \begin{itemize} \setlength{\parskip}{0pt} \setlength{\itemsep}{0pt plus 1pt} \item \emph{small} if $f(p_t)\le p_{\mathrm{small}} B$ and \emph{critical} else, \item \emph{big} if $f(p_t)>p_{\mathrm{big}} B$, \item \emph{medium} if $J$ is neither small nor big, i.e.\ $p_{\mathrm{small}} B\le f(p_t)\le p_{\mathrm{big}} B$, \item \emph{huge} if its (not-rounded) size exceeds $B$, i.e.\ $B<p_t$, and \emph{normal} else. \end{itemize}
Consider the sets $\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}=\{(1+\delta)^i \mid (1+\delta)^{-1}p_{\mathrm{small}} B < (1+\delta)^i \le p_{\mathrm{big}} B\}$ and $\ensuremath{\mathcal{P}}\xspace_{\mathrm{big}}=\{(1+\delta)^i \mid p_{\mathrm{big}} B < (1+\delta)^i \le B\}$ corresponding to all possible rounded sizes of medium respectively big jobs, excluding huge jobs. Let $\ensuremath{\mathcal{P}}\xspace=\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}\cup\ensuremath{\mathcal{P}}\xspace_{\mathrm{big}}$. This subdivision gives rise to a \emph{weight function}, which will be important later. Let $w(p)=1/2$ for $p\in \ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}$ and $w(p)=1$ for $p\in \ensuremath{\mathcal{P}}\xspace_{\mathrm{big}}$. The elements $p\in\ensuremath{\mathcal{P}}\xspace$ define job classes $\ensuremath{\mathcal{C}}\xspace_p\subseteq\ensuremath{\mathcal{J}}\xspace$ consisting of all $p$-jobs, i.e.\ jobs of rounded size $p$. By some abuse of notation, we call the elements in $\ensuremath{\mathcal{P}}\xspace$ 'job classes', too. Using the notation from \Cref{sec.sampling} we set $n_p=n_{\ensuremath{\mathcal{C}}\xspace_p}=|\ensuremath{\mathcal{C}}\xspace_p|$ and $\hat n_{p}=n_{\ensuremath{\mathcal{C}}\xspace_p,\delta^2}=|\{J_{\sigma(j)}\mid \sigma(j)\le \delta^2 n \land J_{\sigma(j)} \textrm{ is a $p$-job}\}|$. We want to use the values $\hat n_{p}$, which are available to an online algorithm quite early, to estimate the values $n_{p}$, which accurately describe the set of critical jobs. First, $\delta^{-2}\hat n_{p}$ comes to mind as an estimate for $n_p$. Yet, we need a more complicated guess: $c_p=\max\left(\left\lfloor\left(\delta^{-2}\hat n_{p}-m^{3/4}\right)w(p)\right\rfloor,\hat n_{p}\right) w(p)^{-1}$. It has three desirable advantages. First, for every $p\in\ensuremath{\mathcal{P}}\xspace$ the value $c_p$ is close to $n_p$ with high probability, but, opposed to $\delta^{-2}\hat n_{p}$, unlikely to exceed it. 
Overestimating $n_p$ turns out to be far worse than underestimating it. Second, $w(p)c_p$ is an integer and, third, we have $c_p\ge \hat n_{p}w(p)^{-1}$. A fundamental fact regarding the values $(c_p)_{p\in\ensuremath{\mathcal{P}}\xspace}$ and $B$ is, of course, that they are known to the online algorithm once $\lfloor\delta^2 n\rfloor$ jobs are scheduled.
\subparagraph*{Statement of the algorithm:} If there are fewer jobs than machines, i.e.\ $n\le m$, it is optimal to put each job onto a separate machine. Else, a short sampling phase greedily schedules each of the first $\lfloor \delta^2 n\rfloor$ jobs to the least loaded principal machine $M\in\ensuremath{\mathcal{M}}\xspace$. Now, the values $B$ and $(c_p)_{p\in\ensuremath{\mathcal{P}}\xspace}$ are known. Our algorithm has to choose between two strategies, the Least-Loaded-Strategy and the Critical-Job-Strategy, which we will both introduce subsequently. It maintains a variable $\textsc{strat}$, initialized to $\textsc{Critical}$, to remember its choice. If it chooses the Critical-Job-Strategy, some additional preparation is required. It may at any time discover that the Critical-Job-Strategy is not feasible and switch to the Least-Loaded-Strategy, but it never switches the other way around.
\begin{algorithm}[H] \caption{The complete algorithm: How to schedule job $J_t$.}\label{alg.complete} \begin{algorithmic}[1] \State \textit{\textsc{strat} is initialized to \textsc{Critical}, $J_t$ is the job to be scheduled.} \If{$n\le m$}{ }Schedule $J_t$ on any empty machine; \ElsIf{$t \le \varphi n$}{ }schedule $J_t$ on a least loaded machine in $\ensuremath{\mathcal{M}}\xspace$;\Comment{\textit{Sampling phase}} \Else \If{we have $t=\lfloor \varphi n\rfloor+1$}
\If{$\sum_{p\in\ensuremath{\mathcal{P}}\xspace} w(p)c_p>m$}
{ }$\textsc{strat}\gets \textsc{Least-Loaded}$
\Else
{ }proceed with the Preparation for the Critical-Job-Strategy (\Cref{alg.preparation});
\EndIf \EndIf \If{$\textsc{strat}=\textsc{Critical}$} proceed with the Critical-Job-Strategy (\Cref{alg.main}); \Else { }proceed with the Least-Loaded-Strategy (\Cref{alg.exceptional}); \EndIf \EndIf \end{algorithmic} \end{algorithm}
\begin{figure}
\caption{The Least-Loaded-Strategy schedules jobs greedily. A few machines are reserved for unexpected huge jobs, such as the largest job, which is unlikely to arrive in the sampling phase.
}
\label{fig:leastloadedstrategy}
\end{figure}
The \textbf{Least-Loaded-Strategy} places any normal job on a least loaded principal machine. Huge jobs are scheduled on any least loaded reserve machine. This machine will be empty, unless we consider rare worst-case orders.
\begin{algorithm}[H] \caption{The Least-Loaded-Strategy: How to schedule job $J_t$.}\label{alg.exceptional} \begin{algorithmic}[1] \If{$J_t$ is huge}{ }schedule $J_t$ on any least loaded reserve machine; \Else{ }schedule $J_t$ on any least loaded principal machine; \EndIf \end{algorithmic} \end{algorithm}
For the Critical-Job-Strategy we introduce \emph{$p$-placeholder-jobs} for every size $p\in\ensuremath{\mathcal{P}}\xspace$. Sensibly, the size of a $p$-placeholder-job is $p$. During the Critical-Job-Strategy we treat placeholder-jobs similarly to \emph{real} jobs. The \emph{anticipated load} $\tilde l_M^t$ of a machine $M$ at time $t$ is the sum of all jobs on it, including placeholder-jobs, as opposed to the common load $l_M^t$, which does not take the latter into account. Note that $\tilde l_M^t$ defines a pseudo-load as introduced in \Cref{sec.basic}.
\begin{figure}
\caption{The Critical-Job-Strategy. Each machine gets either two medium, one large or no critical job. Placeholder jobs (dotted) are assigned during the Preparation and reserve space for critical jobs yet to come. Processing volume of small jobs (dark) ``on the bottom'' arrived during the sampling phase. Reserve machines accommodate huge jobs or, possibly, jobs without matching placeholders.
}
\label{fig:CriticalJobStrategy}
\end{figure}
During the \textbf{Preparation for the Critical-Job-Strategy} the algorithm maintains a counter $c_p'$ of all $p$-jobs scheduled so far (including placeholders). A job class $p\in\ensuremath{\mathcal{P}}\xspace$ is called \emph{unsaturated} if $c_p'\le c_p$. First, we add unsaturated medium placeholder-jobs to any principal machine that already contains a medium real job from the sampling phase. We will see in \Cref{le.prep.welldef} that such an unsaturated medium job class always exists. Now, let $m_{\mathrm{empty}}$ be the number of principal machines which do not contain critical jobs. We prepare a set $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ of cardinality at most $m_{\mathrm{empty}}$, which we will then schedule onto these machines. The set $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ may contain single big placeholder-jobs or pairs of medium placeholder-jobs. We greedily pick any unsaturated job class $p\in\ensuremath{\mathcal{P}}\xspace$ and add a $p$-placeholder-job to $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$. If $p$ is medium, we pair it with a job belonging to any other, not necessarily different, unsaturated medium job class. Such a job class always exists by \Cref{le.prep.welldef}. We stop once all job classes are saturated or if $|\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}|=m_{\mathrm{empty}}$. We then assign the elements in $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ to machines. We iteratively pick the element $e\in\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ of maximum size and assign the corresponding jobs to the least loaded principal machine, which does not contain critical jobs yet. Sensibly, the size of a pair of jobs in $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ is the sum of their individual sizes. We repeat this until all jobs and job pairs in $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ are assigned to some principal machine.
\begin{algorithm}[H] \caption{Preparation for the Critical-Job-Strategy.}\label{alg.preparation} \begin{algorithmic}[1]
\While{there is a machine $M$ containing a single medium job}
\State Add a placeholder $p$-job for an unsaturated size class $p\in\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}$ to $M$; $c_p'\gets c_p'+1$;
\EndWhile
\While{there is an unsaturated size class $p\in\ensuremath{\mathcal{P}}\xspace$ and $|\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}|< m_{\mathrm{empty}}$}
\State Pick an unsaturated size class $e=p\in \ensuremath{\mathcal{P}}\xspace$ with $c_p'$ minimal; $w(e)\gets p$;
$c_p'\gets c_p'+1$;
\If{$p$ is medium}{ }pick $q\in \ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}$ unsaturated. $e\gets(p,q)$; $w(e)\gets p+q$; $c_q'\gets c_q'+1$;
\EndIf
\State Add $e$ to $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$;
\EndWhile
\While{$\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}\neq \emptyset$}
\State Pick a least loaded machine $M\in\ensuremath{\mathcal{M}}\xspace$, which does not contain a critical job yet;
\State Pick $e\in \ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ of maximum size $w(e)$ and add the jobs in $e$ to $M$; \State $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}\gets\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}\setminus\{e\}$;
\EndWhile \end{algorithmic} \end{algorithm}
\begin{lemma}\label{le.prep.welldef} In line 2 and 5 of \Cref{alg.preparation} there is always an unsaturated medium size class available. Thus, \Cref{alg.preparation}, the Preparation for the Critical-Job-Strategy, is well defined. \end{lemma}
\begin{proof} Concerning line 2, there are precisely $\sum_{p\in\ensuremath{\mathcal{P}}\xspace_\mathrm{med}} \hat n_p$ machines with critical jobs while there are at least $\sum_{p\in\ensuremath{\mathcal{P}}\xspace_\mathrm{med}} (c_p-\hat n_p) \ge \sum_{p\in\ensuremath{\mathcal{P}}\xspace_\mathrm{med}} \hat n_p$ placeholder-jobs available to fill them. Here we make use of the fact that for medium jobs $p\in\ensuremath{\mathcal{P}}\xspace_\mathrm{med}$ we have $c_p\ge \hat n_p w(p)^{-1}=2\hat n_p$.
Concerning line 5, observe that so far every machine and every element in $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ contains an even number of medium jobs. If the placeholder picked in line 4 was the last medium job remaining, $\sum_{p\in\ensuremath{\mathcal{P}}\xspace_\mathrm{med}}c_p$ would be odd. But this is not the case since every $c_p$ for $p\in\ensuremath{\mathcal{P}}\xspace_\mathrm{med}$ is even. \end{proof}
After the Preparation is done, the \textbf{Critical-Job-Strategy} becomes straightforward. Each small job is scheduled on a principal machine with least anticipated load, i.e.\ taking placeholders into account. Critical jobs of rounded size $p\in\ensuremath{\mathcal{P}}\xspace$ replace $p$-placeholder-jobs whenever possible. If no such placeholder exists anymore, critical jobs are placed onto the reserve machines. Again, we try to pair up medium jobs whenever possible. If no suitable machine can be found among the reserve machines, we have to switch to the Least-Loaded-Strategy. We say that the algorithm \emph{fails} if it ever reaches this point. In this case, it should rather have chosen the Least-Loaded-Strategy to begin with. Since all reserve machines are filled at this point, the Least-Loaded-Strategy is impeded, too. The most difficult part of our analysis shows that, excluding worst-case orders, this is not a problem on job sets that are prone to cause failing.
\begin{algorithm}[H] \caption{The Critical-Job-Strategy.}\label{alg.main} \begin{algorithmic}[1]
\If{$J_t$ is medium or big}
\textit{let $p$ denote its rounded size;}
\If{there is a machine $M$ containing a $p$-placeholder-job $J$}
\State Delete the $p$-placeholder-job $J$ and assign $J_t$ to $M$;
\ElsIf{$J_t$ is medium and there exists $M\in\ensuremath{\mathcal{M}}\xspace_{\mathrm{res}}$ containing a single medium job}
\State Schedule $J_t$ on $M$;
\ElsIf{there exists an empty machine $M\in\ensuremath{\mathcal{M}}\xspace_{\mathrm{res}}$}{ }schedule $J_t$ on $M$;
\Else{ }$\textsc{strat}\gets\textsc{Least-Loaded}$; \Comment{We say the algorithm \emph{fails}.} \State\textbf{use} the Least-Loaded-Strategy (\Cref{alg.exceptional}) from now on;
\EndIf
\Else
{} assign $J_t$ to the least loaded machine in $\ensuremath{\mathcal{M}}\xspace$ (take placeholder jobs into account);
\EndIf \end{algorithmic} \end{algorithm}
\section{Analysis of the algorithm}\label{sec.ana} \Cref{te.main} is the main result of the paper. \Cref{co.main} follows immediately by \Cref{le.nearly}.
\begin{theorem}\label{te.main} Our algorithm is nearly $c$-competitive. Recall that $c=\frac{1+\sqrt{13}}{3}\approx 1.535$. \end{theorem}
\begin{corollary}\label{co.main} Our algorithm is $c$-competitive in the secretary~model as $m\rightarrow\infty$. \end{corollary}
The analysis of our algorithm proceeds along the same three reduction steps used in the proof of \Cref{te.1.75}. First, we assert that our algorithm has a bounded adversarial competitive ratio, which approaches $1$ as $R(\ensuremath{\mathcal{J}}\xspace)\rightarrow 0$. Not only does this lead to the first condition of nearly competitiveness, it also enables us to introduce \emph{simple} job sets on which we perform well due to basic considerations resulting from \Cref{sec.basic}. \begin{definition} A job set $\ensuremath{\mathcal{J}}\xspace$ is called \emph{simple} if $R(\ensuremath{\mathcal{J}}\xspace)\le \frac{(1-\delta)\delta^3}{2(\delta^2+1)}(2-c)$ or if it consists of at most $m$ jobs. Else, we call it \emph{proper}. We call any ordered input sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ \emph{simple} respectively \emph{proper} if the underlying set $\ensuremath{\mathcal{J}}\xspace$ has this property. \end{definition}
Next we are going to sketch our main proof, introducing three Main Lemmas. These follow the three proof steps introduced in the proof of \Cref{co.1.75}.
\begin{mainlemma}\label{le.main.basic} In the adversarial model our algorithm has competitive ratio $4+O( \delta)$ on general input sequences and $c+O(\delta)$ on simple sequences.
\end{mainlemma}
The proof is discussed later. We are thus reduced to treating \emph{proper} job sets. In the second reduction we introduce \emph{stable} sequences. These have many desirable properties. Most notably, they are suited to sampling. We leave the formal definition to \Cref{sec.ana.stable} since it is rather technical. The second reduction shows that stable sequences arise with high probability if one orders a proper job set uniformly randomly.
Formally, for $m$ the number of machines, let $P(m)$ be the maximum probability by which the permutation of any proper sequence may not be stable, i.e.\ \[P(m)=\sup\limits_{\ensuremath{\mathcal{J}}\xspace \textrm{ proper}}\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[\ensuremath{\mathcal{J}}\xspace^\sigma \textrm{ is not stable}\right].\] The second main lemma asserts that this probability vanishes as $m\rightarrow \infty$. \begin{mainlemma}\label{le.main.stable} $\lim\limits_{m\rightarrow\infty}P(m)=0$. \end{mainlemma}
In other words, non-stable sequences are very rare and of negligible impact in random-order analyses. Thus, we only need to consider stable sequences. In the final, third, step we analyze our algorithm on these. This analysis is quite general. In particular, it does not rely further on the random-order model. Instead, we work with worst-case stable input sequences, i.e.\ we allow the adversary to present any (ordered) stable input sequence. \begin{mainlemma}\label{le.conclusion} Our algorithm is adversarially $(c+O(\delta))$-competitive on stable sequences. \end{mainlemma}
These three main lemmas allow us to conclude the proof of \Cref{te.main}.
\begin{proof}[Proof of \Cref{te.main}] By \Cref{le.main.basic}, the first condition of nearly competitiveness holds, i.e.\ our algorithm has a constant competitive ratio. Moreover, by \Cref{le.main.basic} and \Cref{le.conclusion}, given $\varepsilon>0$, we can pick $m_0(\varepsilon)$ such that our algorithm is $(c+\varepsilon)$-competitive on all sequences that are stable or simple if there are at least $m_0(\varepsilon)$ machines. Here, we need that $\delta(m)\rightarrow 0$ for $m\rightarrow\infty$. This implies that for $m\ge m_0(\varepsilon)$ the probability of our algorithm not being $(c+\varepsilon)$-competitive is at most $P(m)$, the maximum probability with which a random permutation of a proper, i.e.\ non-simple, input sequence is not stable. By \Cref{le.main.stable}, we can find $m(\varepsilon)\ge m_0(\varepsilon)$ such that this probability is less than $\varepsilon$. This satisfies the second condition of nearly competitiveness. \end{proof}
\subsection{The adversarial case. Proof of \Cref{le.main.basic}}\label{sec.ana.basics} Recall that the \emph{anticipated load} $\tilde l_M^t$ of a machine $M$ at time $t$ denotes its load including placeholder-jobs. It satisfies the definition of a pseudo-load as introduced in \Cref{sec.basic}. We obtain the following two bounds on the average anticipated load $\tilde L=\sup_t \frac{1}{m} \sum_M \tilde l_M^t$.
\begin{lemma}\label{le.tildeR.bound} We have $\tilde L \le L+2p_\mathrm{max}$. In particular $\tilde R(\ensuremath{\mathcal{J}}\xspace)\le 3$. \end{lemma}
\begin{proof} First note that every placeholder-job has at most the size of some job encountered during the sampling phase. In particular, the size of any placeholder-job is at most $p_\mathrm{max}$. Since there are at most two placeholder-jobs on each machine, the total processing time of all placeholder-jobs is at most $2mp_\mathrm{max}$. The total processing time of real jobs is at most $mL$. Thus the total processing time of all placeholder and real jobs scheduled at any time cannot exceed $m(L+2p_\mathrm{max})$. In particular, at any time $t$, we have $\frac{1}{m} \sum_M \tilde l_M^t\le \frac{1}{m}m(L+2p_\mathrm{max})$. Thus, $\tilde L \le L+2p_\mathrm{max}$. For the second part we conclude that $\tilde R(\ensuremath{\mathcal{J}}\xspace)\le \min\left(\frac{\tilde L}{p_\mathrm{max}},\frac{\tilde L}{L}\right) \le \min\left(\frac{L}{p_\mathrm{max}}+2,2\frac{p_\mathrm{max}}{L}+1\right)\le 3.$ \end{proof}
\begin{lemma}\label{le.2ndRtildeBound} We have $\tilde L \le \left(1+ \frac{1}{\delta^2}\right)L$, in particular $\tilde R(\ensuremath{\mathcal{J}}\xspace) \le \left(1+ \frac{1}{\delta^2}\right)R(\ensuremath{\mathcal{J}}\xspace)$. \end{lemma}
Let us first show the following stronger lemma. \begin{lemma}\label{le.placeholdersize} The total size of the placeholder-jobs is at most $m\hat L$. In particular, $\tilde L\le L+\hat L$. \end{lemma}
\begin{proof}[Proof of \Cref{le.placeholdersize}] For every $p\in\ensuremath{\mathcal{P}}\xspace$ we schedule at most $c_p\le\delta^{-2}\hat n_p$ placeholder-jobs of type $p$. Thus, the total size of the placeholder-jobs is at most $\sum_{p\in\ensuremath{\mathcal{P}}\xspace} \delta^{-2}\hat n_p \le m\hat L.$ The total size of all real jobs is precisely $mL$. Since $\tilde L$ is at most $\frac{1}{m}$-times the total processing time of all jobs, we have $\tilde L \le \frac{1}{m}(mL+m\hat L)$. \end{proof}
\begin{proof}[Proof of \Cref{le.2ndRtildeBound}] Observe that $\hat L=L_{\delta^2} \le \delta^{-2} L$. Then, the bound follows from \Cref{le.placeholdersize}. \end{proof}
We call a machine \emph{critical} if it receives a critical job from the Critical-Job-Strategy but no small job after the sampling phase. Else, we call it \emph{general}. General machines can be analyzed using \Cref{prop1} and~\ref{prop2}. \emph{Critical} machines need more careful arguments. \begin{lemma}\label{le.basic.bound} At any time, the load of any general machine is at most $\left(\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}+1 +2\delta\right){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. \end{lemma}
\begin{proof} For sequences of length $n\le m$ our algorithm is optimal. Hence assume $n>m$.
During the sampling phase and the Least-Loaded-Strategy, our algorithm always uses either a least loaded machine or a least loaded principal machine. Both lie among the $\lfloor \delta m\rfloor+1$ least loaded machines. By \Cref{prop1} this cannot cause any load to exceed $\left(\frac{m}{m-\lfloor\delta m\rfloor}R(\ensuremath{\mathcal{J}}\xspace)+1\right){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)\le \left(1+\frac{R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}\right){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. Observing that $\tilde R(\ensuremath{\mathcal{J}}\xspace)\ge R(\ensuremath{\mathcal{J}}\xspace)$, see \Cref{le.sensibletildeL}, the lemma holds for every machine that does not receive its last job during the Critical-Job-Strategy.
Now consider a general machine $M$, which received its last job during the Critical-Job-Strategy. Since it is a general machine, it also received a small job during the Critical-Job-Strategy. Let $J$ be the last small job it received. Right before receiving $J$ machine $M$ must have been a principal machine of least anticipated load. In total, it had at most the $(\lfloor \delta m\rfloor+1)$-smallest anticipated load. By \Cref{prop2} its anticipated load was at most $\left(\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}+1\right){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$ after receiving $J$. Afterwards machine $M$ may have received up to two critical jobs, which replaced placeholder-jobs. Since these jobs had at most $(1+\delta)$-times the size of the job they replaced, the load-increase is at most $\delta p_\mathrm{max}\le \delta {\mathrm{OPT}}$ for each of these two jobs. \end{proof}
We can now consider critical machines.
\begin{lemma}\label{le.reserve.bound} The load of a reserve machine is at most $\min(\max((1+\delta)c B,p_{\mathrm{max}}), 2p_{\mathrm{max}})$ till it receives a job from the Least-Loaded-Strategy. Critical reserve machines in particular fulfill this condition. \end{lemma}
\begin{proof} Every critical reserve machine receives either one big job or at most two medium ones, until the Least-Loaded-Strategy is applied. The second bound, $2p_{\mathrm{max}}$, follows immediately from that. The first bound follows from the fact that a single big job has size at most $p_\mathrm{max}$, while two medium jobs have size at most $2(1+\delta)p_{\mathrm{big}} B=(1+\delta)c B$. The $(1+\delta)$-factor comes from using rounded sizes in the definition of medium jobs. \end{proof}
The following lemma uses similar arguments to \Cref{le.nocritjob} only for the adversarial model.
\begin{lemma}\label{le.critical.bound} The load of a critical machine is at most $\min((1+\delta)cB+2\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{(1-\delta)}{\mathrm{OPT}},\frac{L}{1-\delta}+3p_\mathrm{max})$ if it was a principal machine. \end{lemma}
\begin{proof} Consider any critical principal machine $M$. Let $J$ be the last job received in the sampling phase. Before $J$ was scheduled on $M$ it was a least loaded principal machine and thus had load at most $\frac{L}{1-\delta}$ by \Cref{prop1}. After $J$ machine $M$ received at most two more jobs and thus its load cannot exceed $\frac{L}{1-\delta}+3p_\mathrm{max}$, the second term in the min-term.
If $J$ was critical, this implies that the load on $M$ of non-critical jobs was at most $\frac{L}{1-\delta}$, while the load of critical jobs on $M$ cannot exceed $2(1+\delta)p_{\mathrm{big}} B =(1+\delta) cB$. The first term in the min-term follows. We are left to consider the case that $M$ did not receive a critical job in the sampling phase, which means that it receives an element of $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$, else $M$ would not be critical. In fact, assume that $M$ was the $i$-th machine to receive an element from $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$.
First consider the case $i\le m/2-1$. Right before the while loop in line 7 of \Cref{alg.preparation} machine~$M$ had the $i$-th least anticipated load among the principal machines. By \Cref{le.avglb2} its anticipated load was at most $\frac{m}{m-i-\delta m+1}\tilde L\le \frac{2\tilde L}{1-\delta} \le 2\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}{\mathrm{OPT}}$ before receiving placeholder jobs of processing volume at most $cB$. The processing volume of the placeholder jobs increases by at most a factor $(1+\delta)$ once they are replaced by real jobs. Thus the bound of the lemma follows if $i\le m/2-1$.
Finally, consider the case $i\ge m/2$. Recall that $\delta^{-2} L_{\delta^2} = \hat L \le B$. Since $M$ did not receive a critical job in the sampling phase it follows from \Cref{le.avglb} that its load was at most $\frac{\delta^{-2} L_{\delta^2}}{1-\delta}+(c-1)B \le (c-\frac{\delta}{1-\delta})B\le (1+\delta)\cdot cB$ after the sampling phase. Let $p$ be the processing volume machine $M$ receives from $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$. Since the algorithm assigns the elements of $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ in decreasing order at least $i$ machines received processing volume at least $p$ from $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$. Thus $i\cdot p \le m\cdot \tilde L$ and, using that $i\ge m/2$, we derive that $p\le \frac{m}{i}\tilde L \le 2\tilde L\le 2\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}{\mathrm{OPT}}$. Again the first term of the min-term follows. \end{proof}
From these lemmas the two statements of \Cref{le.main.basic} follow.
\begin{corollary}\label{co.goodongeneral} Our algorithm is adversarially $\left(1+\frac{3}{1-\delta}+2\delta\right)$-competitive. \end{corollary}
\begin{proof}
By \Cref{le.tildeR.bound} we have $\tilde R(\ensuremath{\mathcal{J}}\xspace)\le 3$, also recall that $L,p_\mathrm{max}\le{\mathrm{OPT}}$. By \Cref{le.basic.bound}, \ref{le.reserve.bound} and~\ref{le.critical.bound} the makespan of the algorithm is thus at most $$\max\left(\frac{3}{1-\delta}+1+2\delta,2,\frac{1}{1-\delta}+3\right){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)=\left(1+\frac{3}{1-\delta}+2\delta\right){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace).\qedhere$$ \end{proof}
\begin{corollary}\label{co.goodonsimple} Our algorithm has makespan at most $(c+2\delta){\mathrm{OPT}}$ on simple sequences~$\ensuremath{\mathcal{J}}\xspace^\sigma$. \end{corollary}
\begin{proof} By \Cref{le.basic.bound}, \ref{le.reserve.bound} and~\ref{le.critical.bound} we see that the makespan of our algorithm is at most \[\max\left(\bigg(\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}+1 +2\delta\bigg){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace),p_\mathrm{max}, (1+\delta)cB+2\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}{\mathrm{OPT}}\right).\] Now, by \Cref{le.2ndRtildeBound} and the definition of simple sequences, there holds $\tilde R(\ensuremath{\mathcal{J}}\xspace)\le \left(1+ \frac{1}{\delta^2}\right)R(\ensuremath{\mathcal{J}}\xspace) \le (1-\delta)\frac{\delta}{2}(2-c)$. In particular, $\left(\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}+1 +2\delta\right){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)\le (c+2\delta){\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)$. The second term $p_\mathrm{max}$ is always smaller than ${\mathrm{OPT}}$. Concerning the third bound in the max-term observe using \Cref{le.placeholdersize} that there holds \begin{align*} B & = \max\left(p_\mathrm{max}^{\delta^2n}, \hat L\right) \\ &\le \max\left(p_\mathrm{max}, \delta^{-2}L\right)\\ &\le \max\left(p_\mathrm{max}, \delta^{-2}R(\ensuremath{\mathcal{J}}\xspace)p_\mathrm{max}\right)\\ &\le \max\left(p_\mathrm{max},\delta^{-2}\frac{(1-\delta)\delta^3}{2(\delta^2+1)}(2-c)p_\mathrm{max}\right)\\&\le p_\mathrm{max}\\ &\le {\mathrm{OPT}}.\end{align*} Since $2\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}\le (2-c)\delta$ we have $(1+\delta)cB+2\frac{\tilde R(\ensuremath{\mathcal{J}}\xspace)}{1-\delta}{\mathrm{OPT}} \le (c+2\delta){\mathrm{OPT}}$. \end{proof}
\begin{proof}[Proof of \Cref{le.main.basic}] \Cref{le.main.basic} follows immediately from \Cref{co.goodongeneral} and \Cref{co.goodonsimple}. \end{proof}
\subsection{Stable job sequences. Proof sketch of \Cref{le.main.stable}}\label{sec.ana.stable} We introduce the class of \emph{stable} job sequences. The first two conditions state that all estimates our algorithm makes are \emph{accurate}, i.e.\ sampling works. By the third condition there are less huge jobs than reserve machines and the fourth condition states that these jobs are distributed evenly. The final condition is a technicality. Stable sequences are useful since they occur with high probability if we randomly order a proper job set.
\begin{definition}\label{def.stable} A job sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ is \emph{stable} if the following conditions hold: \begin{itemize} \setlength{\parskip}{0pt} \setlength{\itemsep}{0pt plus 1pt} \item The estimate $\hat L$ for $L$ is accurate, i.e.\ $(1-\delta)L \le \hat L \le (1+\delta)L$. \item The estimate $c_p$ for $n_p$ is accurate, i.e.\ $c_p\le n_p\le c_p+2m^{3/4}$ for all $p\in \ensuremath{\mathcal{P}}\xspace$. \item There are at most $\lceil \delta m \rceil$ huge jobs in $\ensuremath{\mathcal{J}}\xspace^\sigma$.
\item Let $\tilde t$ be the time the last huge job arrived and let $n_{p,\tilde t}$ be the number of $p$-jobs scheduled at that time for a given $p\in \ensuremath{\mathcal{P}}\xspace$. Then $n_{p,\tilde t}\le \left(1-\delta^3\right)n_{p}$ for every $p\in \ensuremath{\mathcal{P}}\xspace$ with $n_p>\left\lfloor\frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor$.
\item $\delta^3 \left\lfloor \left(1-\delta-2\delta^2\right)m / |\ensuremath{\mathcal{P}}\xspace| \right\rfloor \ge 2|\ensuremath{\mathcal{P}}\xspace|m^{3/4}$. \end{itemize} \end{definition}
\begin{proof}[Proof sketch of \Cref{le.main.stable}] The first two conditions are covered by arguments following \Cref{sec.sampling}. Here, we require that only proper sequences are considered. The third condition is equivalent to demanding one of the $\lceil \delta m \rceil$ largest jobs to occur during the sampling phase. This is extremely likely. The expected rank of the largest job occurring in the sampling phase is $\delta^{-2}$, a constant. The fourth condition states that, for any $p\in\ensuremath{\mathcal{P}}\xspace$, the huge jobs are evenly spread throughout the sequence when compared to any sizable class of $p$-jobs. Again, this is expected of a random sequence and corresponds to how one would view randomness statistically. For the final condition it suffices to choose the number of machines $m$ large enough. One technical problem arises since the class $\ensuremath{\mathcal{P}}\xspace=\ensuremath{\mathcal{P}}\xspace[\ensuremath{\mathcal{J}}\xspace^\sigma]$ is defined using the value $B[\ensuremath{\mathcal{J}}\xspace^\sigma]$. It thus highly depends on the input permutation $\sigma$. We rectify this by passing over to a larger class $\hat\ensuremath{\mathcal{P}}\xspace$ such that $\ensuremath{\mathcal{P}}\xspace\subset\hat\ensuremath{\mathcal{P}}\xspace$ with high probability. \end{proof} The formal proof of \Cref{le.main.stable} is simple but very technical. That is, we consider the underlying ideas to be rather simple but in order to give a rigorous proof many cases have to be considered. We leave it to \Cref{sec.ana.stable.p}. The definition of stable sequences is suited for our future algorithmic arguments. To make probabilistic arguments, we introduce \emph{probabilistically stable} sequences and prove that probabilistically stable sequences are always stable. Their definition is more convenient, as it avoids certain problems such as $\ensuremath{\mathcal{P}}\xspace$ being dependent on the job permutation. 
We then prove \Cref{le.main.stable} for all six conditions of probabilistically stable sequences separately.
\subsection{Adversarial analysis on stable sequences. Proof sketch of \Cref{le.conclusion}} \subsubsection*{General observations}\label{sec.ana.critical.p} This section is devoted to some general observations needed several times throughout the analysis. Recall that $\tilde L=\sup_t \frac{1}{m} \sum_M\tilde l_M^t$ denotes the maximum average load taking placeholder jobs into account. We will see that this does, in fact, not overestimate the total load $L$ if the sequence is stable.
\begin{lemma}\label{le.tildeL=L} For every stable sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ there holds $\tilde L=L$. \end{lemma} \begin{proof} By \Cref{le.sensibletildeL} we have $\tilde L\ge L$ for any pseudo-load. Recall that $\tilde L=\sup_t \frac{1}{m} \sum_M\tilde l_M^t$. Thus it suffices to show that $\frac{1}{m} \sum_M\tilde l_M^t\le L$ for any given time $t$. Consider the schedule of our algorithm at time $t$ including placeholder-jobs. If it contains $p$-placeholder-jobs for some $p\in\ensuremath{\mathcal{P}}\xspace$ it contains at most $c_p$ many $p$-jobs in total. By the second property of stable sequences there holds $c_p\le n_p$. Thus, we can find real $p$-jobs not scheduled yet and replace the $p$-placeholder-jobs by them. This way the load of each machine can only increase. In particular, the resulting schedule has average load at least $\frac{1}{m} \sum_M\tilde l_M^t$. But since it contains only real jobs, its average load will be at most $L$. Therefore $\frac{1}{m} \sum_M\tilde l_M^t\le L$. \end{proof}
The following lemma is a basic but very useful observation describing the load of any machine after the sampling phase.
\begin{lemma}\label{pro.observationalphasespec} Let $M$ be any machine after the sampling phase and $p$ be the size of the largest job scheduled on it. Then the load of $M$ is at most $\frac{\delta^2}{1-\delta} B+p.$ \end{lemma} \begin{proof} Let $l$ be the load of $M$ before the last job $J$ was scheduled on it. Using \Cref{le.avglb} we see that $l \le \frac{m}{m-\lfloor \delta m\rfloor}\delta^2\hat L \le \frac{\delta^2}{1-\delta} B.$ The last inequality uses $L_{\delta^2}=\hat L\le B$. Since $J$ had size at most $p$ the lemma follows. \end{proof}
\begin{lemma}\label{co.critjobs} Till the Least-Loaded-Strategy is employed (or till the end of the sequence) there is at most one reserve machine $M$ whose only critical job is medium. Every other machine contains either no critical job, one big job or two medium jobs (including placeholder jobs). \end{lemma} \begin{proof} First consider the situation right before the Critical-Job-Strategy is employed. Let $M$ be a machine containing a critical job. By \Cref{pro.observationalphasespec} the total size of all jobs besides the largest one on $M$ is at most $\delta^2 \hat L\le \delta^2 B$. Since this is less than $p_{\mathrm{small}} B$ only the largest job could have been critical. Now observe that the algorithm adds a second medium placeholder-job to precisely every machine that contained a (necessarily single) medium job after the sampling phase. Afterwards, medium placeholder-jobs are always scheduled in pairs onto machines which do not contain critical jobs. While the Critical-Job-Strategy is employed, the number of medium jobs does not change for principal machines. We only replace placeholders with real jobs. Moreover the algorithm ensures that at most one reserve machine $M$ has a single medium job. \end{proof}
Finally let us make the following technical observation, which will be necessary later.
\begin{lemma}\label{le.memptybound} There are at most $2\delta^2m$ machines which contain (real) critical jobs before the Preparation for the Critical-Job-Strategy. In particular $m_{\mathrm{empty}}\le \left(1-\delta-2\delta^2\right)m$. \end{lemma} \begin{proof}
Assume the lemma does not hold. Since each critical job has size at least $p_{\mathrm{small}} B$ this implies that $ B \ge \hat L > \frac{1}{\delta^{2}m} \cdot 2\delta^2m \cdot p_{\mathrm{small}} B=2p_{\mathrm{small}} B> B$. A contradiction. In particular, at most $2\delta^2m$ machines received critical jobs during the sampling phase. Thus $m_{\mathrm{empty}}\le |\ensuremath{\mathcal{M}}\xspace|-2\delta^2m\le (1-\delta-2\delta^2)m$. \end{proof}
\subsubsection*{Before the Least-Loaded-Strategy is employed.}\label{sec.ana.critical.p} The goal of this section is to analyze every part of the algorithm but the Least-Loaded-Strategy. Formally we want to show the following proposition and its important \Cref{co.noll}.
\begin{restatable}{proposition}{lecmake}\label{le.cmake} The makespan of our algorithm is at most $(c+O(\delta))\max\left( B,L,p_\mathrm{max}\right)$ on stable sequences till it employs the Least-Loaded-Strategy (or till the end of the sequence). \end{restatable}
For a formal proof we need to consider many cases where the statement of the lemma could go wrong. Let us first give a sketch of the full proof, which will be fleshed out subsequently.
\begin{proof}[Proof sketch] Let us only consider critical jobs at any time the Least-Loaded-Strategy, \Cref{alg.exceptional}, is not employed. Our algorithm ensures that a machine contains either one big job or at most two medium jobs. Formally, this is shown in \Cref{co.critjobs}. In the first case, we simply bound the size of this big, possibly huge, job by $p_{\mathrm{max}}$. Else, if the machine contains up to two medium jobs their total size is at most $2(1+\delta)p_{\mathrm{big}} B = (1+\delta)c B$. The factor $(1+\delta)$ arises since we use rounded sizes in the definition of medium jobs. Thus, critical jobs may cause a load of at most $\max(p_{\mathrm{max}},(c+O(\delta))B)$.
Analyzing the load increase by small, i.e.\ non-critical, jobs relies on \Cref{prop1} and~\ref{prop2} depending on whether these jobs were assigned during the sampling phase or during the Critical-Job-Strategy. \end{proof}
Note that for stable sequences $\hat L \le (1+\delta) L \le (1+\delta){\mathrm{OPT}}$, in particular $\max\left( B,L,p_\mathrm{max}\right)=\max\left(p^{\delta^2 n}_{\mathrm{max}},\hat L, L , p_\mathrm{max}\right)\le (1+\delta){\mathrm{OPT}}$. This proves the following important corollary to \Cref{le.cmake}.
\begin{restatable}{corollary}{collI}\label{co.noll} Till the Least-Loaded-Strategy is used the makespan of our algorithm is at most $(c+O(\delta)){\mathrm{OPT}}$ on stable sequences. \end{restatable}
We first need to assert that the statement holds after the preparation for the Critical-Job-Strategy, namely we prove the following proposition.
\begin{proposition}\label{le.prepan} After the Preparation for the Critical-Job-Strategy the anticipated load of no machine exceeds $ (c+O(\delta)) B$. \end{proposition}
There are three types of machines we need to consider. First, there are machines which contain a real critical job after the Preparation for the Critical Job Strategy. Second, there are machines, which only receive placeholder jobs. Finally there are machines that only receive critical jobs during sampling. The following two lemmas concern themselves with the first two types of machines. Afterwards we prove \Cref{le.prepan}.
\begin{lemma}\label{le.critjob} If a machine contains a real critical job, its anticipated load is at most $( (1+\delta)c+\frac{\delta^2}{1-\delta}) B\le(c+O(\delta)) B$ after the Preparation for the Critical-Job-Strategy. \end{lemma}
\begin{proof} After the Preparation for the Critical-Job-Strategy a machine contains either a big job of size at most $B<(1+\delta)cB$ or two medium jobs. Each medium job has size at most $(1+\delta)p_{\mathrm{big}}B$, where the factor $(1+\delta)$ is due to rounding. Thus the total size of critical jobs is at most $2(1+\delta)p_{\mathrm{big}} B= (1+\delta)cB$. \Cref{pro.observationalphasespec} bounds the size of all non-critical jobs by $\frac{\delta^2}{1-\delta}B$. \end{proof}
\begin{lemma}\label{le.nocritjob} Let $M$ be the $i$-th last machine that received a job from $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ for $i\le m_{\mathrm{empty}}$. After the Preparation for the Critical-Job-Strategy its anticipated load is at most \[\min\left(p_\mathrm{small}+\frac{\delta^2}{1-\delta},\frac{m}{i}\delta^2\right) B +\min\left(c,\frac{m}{m_{\mathrm{empty}}-i+1}\right) B\le(c+O(\delta)) B.\] \end{lemma}
\begin{proof} Let $l$ be the load of $M$ before the Preparation for the Critical-Job-Strategy and let $p$ be the sum of all the placeholder-jobs assigned to it. Then the load of $M$ after the preparation is precisely $l+p$. We bound both summands separately.
To see that $l\le \min\left(p_\mathrm{small}+\frac{\delta^2}{1-\delta},\frac{m}{i}\delta^2\right) B$ observe that the largest job on $M$ has size at most $p_\mathrm{small} B$ since $M$ contains no critical jobs. In particular, by \Cref{pro.observationalphasespec}, $l \le (p_\mathrm{small}+\frac{\delta^2}{1-\delta})B$. Consider the schedule right before placeholder jobs were assigned. By definition this schedule had average load $\frac{\delta^2}{1-\delta}\hat L \le \frac{\delta^2}{1-\delta}B$ and $M$ was at most the $i$-th most loaded machine. The second bound then follows from \Cref{le.avglb}.
We have $p\le \min\left(c,\frac{m}{m_{\mathrm{empty}}-i+1}\right) B$. The first bound holds since we either assign two medium placeholder-jobs of size at most $p_\mathrm{big} B$ each or one big job of size at most $ B$ to any machine. Thus the sum of the placeholder-jobs assigned is at most $\max(1,2p_\mathrm{big}) B=c B$. For the second term recall that \Cref{le.placeholdersize} shows that the total size of all placeholder-jobs is at most $m \hat L\le m B$. Prior to $M$ precisely $m_{\mathrm{empty}}-i$ machines received placeholder job of total size at least $p$. Thus, $(m_{\mathrm{empty}}-i+1)p \le m B$, or, equivalently, $p\le \frac{m}{m_{\mathrm{empty}}-i+1} B$.
Altogether we derive that the anticipated load of $M$ is $l+p\le \min\left(p_\mathrm{small}+\frac{\delta^2}{1-\delta},\frac{m}{i}\delta^2\right) B +\min\left(c,\frac{m}{m_{\mathrm{empty}}-i+1}\right) B$. We need to see that this term is in $(c+O(\delta)) B$. Consider two cases. For $i\ge \delta m$ the term is at most $\frac{m}{i}\frac{\delta^2}{1-\delta} B+c B \le (c+\delta) B$. Else, for $i\le \delta m$, it is at most $(p_\mathrm{small}+\delta^2) B+\frac{m}{m_{\mathrm{empty}}-i+1} B\le \left(p_\mathrm{small}+\delta^2+\frac{m}{m-2\delta m-\delta^2m}\right) B=\left(c+\delta^2+\frac{2\delta-\delta^2}{1-2\delta+\delta^2}\right) B=(c+O(\delta)) B$. The first inequality uses \Cref{le.memptybound}, the second equality uses that $p_{\mathrm{small}}=c-1$. \end{proof}
\begin{proof}[Proof of \Cref{le.prepan}] There are three cases to consider. If the machine contains a real critical job, the proposition follows from \Cref{le.critjob}. If it contains critical placeholder jobs, the proposition follows from \Cref{le.nocritjob}. Finally, if it does not receive placeholder jobs, \Cref{pro.observationalphasespec} bounds its load by $\frac{\delta^2}{1-\delta}B+p_\mathrm{max}^{\delta^2 n}\le \left(1+\frac{\delta^2}{1-\delta}\right)B$. \end{proof}
We now come to the main result of this section.
\lecmake*
\begin{proof} By \Cref{le.prepan} the statement of the lemma holds after the Preparation for the Critical-Job-Strategy. We have to show that it still holds afterwards. There are three cases to consider.
First, consider reserve machines. By \Cref{le.reserve.bound} their load is at most $\max((1+\delta)c B,p_\mathrm{max})$ till the Least-Loaded-Strategy is employed.
Second, consider the case that a small job $J$ is scheduled. The job $J$ will be scheduled on a principal machine $M$ with smallest anticipated load. By \Cref{le.avglb2} said smallest anticipated load is at most $\frac{1}{1-\delta}\tilde L$. Since $J$ has size at most $p_{\mathrm{small}} B$, the anticipated load of $M$ won't exceed $\frac{1}{1-\delta}\tilde L+p_\mathrm{small} B\le (c+\frac{\delta}{1-\delta})\max\left( B,L\right)$ after $J$ is scheduled. The last inequality makes use of the fact that $\tilde L=L$ for stable sequences, \Cref{le.tildeL=L}, and that $p_{\mathrm{small}}=c-1$.
Finally, we consider critical jobs that are scheduled onto principal machines. They replace placeholder-jobs. Such a critical job can have at most $(1+\delta)$-times the size of the placeholder-job it replaces. Thus it may cause the load of a machine to increase by at most $\delta B$. Since each principal machine receives at most two critical jobs the increase on principal machines due to critical jobs is at most $2\delta B$ and the lemma follows.\footnote{A more careful analysis shows that the total increase is in fact at most $c\delta B$.} \end{proof}
\collI*
\begin{proof}Use \Cref{le.cmake} and the fact that the conditions for stable sequences imply that $B=\max\left(p^{\delta^2 n}_{\mathrm{max}},\hat L\right)\le (1+\delta){\mathrm{OPT}}$. \end{proof}
\paragraph*{Concerning the case that the algorithm \textsc{fails}.} We need to assert certain structural properties if the algorithm \textsc{fails}, i.e.\ reaches line 7 in \Cref{alg.main}. This is done in this section. The first important finding shows that we do not have to deal with huge jobs anymore.
\begin{restatable}{proposition}{lefailsbound}\label{le.fails.bound} If the algorithm \textsc{fails}, every huge job has been scheduled. \end{restatable}
The second proposition will help us obtain a lower bound on the optimum makespan.
\begin{restatable}{proposition}{lefailsboundl}\label{le.fails.bound.l} If the algorithm \textsc{fails} at time $t$ we have $\sum_{p\in\ensuremath{\mathcal{P}}\xspace} \tilde n_{p,t}w(p)>m$. \end{restatable}
For any job class $p\in\ensuremath{\mathcal{P}}\xspace$ let $c_p'$ denote the number of $p$-jobs scheduled after the Preparation for the Critical-Job-Strategy, including placeholder-jobs. This is consistent with our notation from \Cref{sec.algorithm} if we consider the values of $c_p'$ after the execution of \Cref{alg.preparation}. We call a job class $p\in\ensuremath{\mathcal{P}}\xspace$ \emph{unsaturated} if $c_p'\le c_p$. Given $p\in\ensuremath{\mathcal{P}}\xspace$, let $\tilde n_{p,t}$ denote the number of $p$-jobs scheduled at any time $t$ including placeholder-jobs. After the sampling phase $\tilde n_{p,t}=\max(c_p',n_{p,t})$.
The most important technical ingredient in this chapter is to establish that if the algorithm \textsc{fails}, there is one job class of which a sizable fraction of jobs has not been scheduled even if we take placeholder jobs into account. The next four lemmas prove this by looking at unsaturated job classes.
\begin{lemma}\label{le.saturatedjobclass} If the algorithm \textsc{fails} on a stable sequence, there exists an unsaturated job class $p\in\ensuremath{\mathcal{P}}\xspace$. In particular, during the Critical-Job-Strategy every principal machine contains either one big or two medium jobs. \end{lemma}
\begin{proof}
Let us assume that every job class is saturated. This implies that at least $n_p-c_p$ jobs of every job class $p\in\ensuremath{\mathcal{P}}\xspace$ fit onto the principal machines. By the properties of stable sequences, at most $n_p-c_p\le 2m^{3/4}$ jobs of each class thus need to be scheduled onto the reserve machines; that is at most $|\ensuremath{\mathcal{P}}\xspace|\cdot 2m^{3/4}$ in total. By the last condition of stable sequences this is less than $ \lfloor\delta m\rfloor$, the number of reserve machines. A contradiction. The algorithm could not have failed.
If there was an unsaturated job class after the Preparation for the Critical-Job-Strategy, $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ must have contained precisely $m_{\mathrm{empty}}$ elements after the second while-loop in \Cref{alg.preparation}. Else, another iteration of this loop would have added further elements. Thus, every principal machine that did not already contain real critical jobs received (critical) placeholder-jobs. By \Cref{co.critjobs} every principal machine in fact received either one big or two medium jobs. \end{proof}
\begin{lemma}\label{le.unsatured.cpsize}
For every unsaturated job class $p\in\ensuremath{\mathcal{P}}\xspace$, there holds $c_p'\ge\left\lfloor\frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor$. \end{lemma}
\begin{proof}
Note that $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ actually attains cardinality $m_\mathrm{empty}$ in \Cref{alg.preparation}, otherwise there could not have been an unsaturated job class. Every time we add an element to $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ in line~4 the value~$c_p'$ increases for an unsaturated job class $p$ that currently has minimum value $c_p'$. In particular, whenever we add $|\ensuremath{\mathcal{P}}\xspace|$-many elements to $\ensuremath{\mathcal{J}}\xspace_{\mathrm{rep}}$ the value $\min\limits_{p\in\ensuremath{\mathcal{P}}\xspace \text{ unsaturated}} c_p'$ increases by at least $1$. In total it increases at least $\left\lfloor\frac{m_\mathrm{empty}}{|\ensuremath{\mathcal{P}}\xspace|}\right\rfloor$ times. The lemma follows since $m_\mathrm{empty}\ge\left(1-\delta-2\delta^2\right)m$, see \Cref{le.memptybound}. \end{proof}
\begin{lemma}\label{le.cp'sum} There holds $\sum_{p\in\ensuremath{\mathcal{P}}\xspace} c_p'w(p)\le m-\lceil\delta m\rceil$. \end{lemma}
\begin{proof}
Let $n_{\mathrm{med}}$ be the number of medium jobs and $n_{\mathrm{big}}$ be the number of big jobs after the Preparation for the Critical-Job-Strategy, then $\sum_{p\in\ensuremath{\mathcal{P}}\xspace} c_p'w(p)=n_{\mathrm{big}} + \frac{n_{\mathrm{med}}}{2}$. But by \Cref{co.critjobs} every principal machine contains either one big job, two medium jobs or no critical jobs at all after the Preparation for the Critical-Job-Strategy. Reserve machines are empty. Thus, $\sum_{p\in\ensuremath{\mathcal{P}}\xspace} c_p'w(p)\le |\ensuremath{\mathcal{M}}\xspace| \le m-\lceil\delta m\rceil$. \end{proof}
We now prove one main proposition of this paragraph.
\lefailsboundl*
\begin{proof} Let $J=J_t$ be the job that caused the algorithm to fail. Consider the schedule right before job $J$ was scheduled. As a thought experiment, let us assume that job $J$ resided on some fictional $(m+1)$-th machine $\tilde M$ at that time. We award any machine $\frac{1}{2}$ points for each medium job on it and $1$ point for each big job on it. This includes placeholder-jobs. Then $\sum_{p\in\ensuremath{\mathcal{P}}\xspace} \tilde n_{p,t}w(p)$ is exactly the total number of points scored by all machines including $\tilde M$.
By \Cref{le.saturatedjobclass} every principal machine scores one point. There was no empty reserve machine, since otherwise $J$ could have been scheduled onto it. Thus every reserve machine scores at least half a point. We call a machine \emph{bad} if it scored only $1/2$ point. There cannot be two bad reserve machines, since our algorithm would have scheduled any medium job onto such a bad machine rather than creating a second one. Moreover, if there exists a bad machine, job $J$ cannot be medium, i.e.\ $\tilde M$ cannot be bad as well. We conclude that there is at most one bad machine amongst the $m+1$ machines, which include the fictional machine $\tilde M$. All other machines score one point. This implies that $\sum_{p\in\ensuremath{\mathcal{P}}\xspace} \tilde n_{p,t}w(p)\ge m+\frac{1}{2}$. \end{proof}
\begin{lemma}\label{le.bigjobclass}
Assume that the algorithm \textsc{fails} at time $t$ on a stable sequence and that not all huge jobs are scheduled. Then there exists a job class $p\in\ensuremath{\mathcal{P}}\xspace$ with $\tilde n_{p,t}<n_p-2|\ensuremath{\mathcal{P}}\xspace|m^{3/4}$.
\end{lemma}
\begin{proof}
We first show that there needs to exist a job class $p\in\ensuremath{\mathcal{P}}\xspace$ with $\tilde n_{p,t}> c_p'+2m^{3/4}$. Assume for contradiction sake, that we had $\tilde n_{p,t}\le c_p'+2m^{3/4}$ for every job class $p\in\ensuremath{\mathcal{P}}\xspace$. Then we get a contradiction to \Cref{le.fails.bound.l}, namely $\sum_{p\in\ensuremath{\mathcal{P}}\xspace} \tilde n_{p,t}w(p)\le \sum_{p\in\ensuremath{\mathcal{P}}\xspace} c_p'w(p) + |\ensuremath{\mathcal{P}}\xspace|\cdot 2m^{3/4} \le m-\lceil\delta m\rceil+ \frac{\delta^3}{2} \left\lfloor \frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor \le m$. The second inequality uses \Cref{le.cp'sum} and the fifth condition on stable sequences.
Thus, let $p$ be such a job class satisfying $\tilde n_{p,t}> c_p'+2m^{3/4}$. Since $\tilde n_{p,t}=\max(n_{p,t},c_p')$, this implies that $n_{p,t}> c_p'+2m^{3/4}$. Moreover, since $n_{p,t}\le c_p+2m^{3/4}$ by the second property of stable sequences, we must have $c_p'<c_p$, i.e.\ the job class $p$ is unsaturated. \Cref{le.unsatured.cpsize} implies $c_p'\ge \left\lfloor\frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor$. In particular $n_p\ge c_p > c_p'\ge \left\lfloor\frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor$. We conclude that
$n_{p,t}\le n_p-\delta^3 n_p < n_p-\delta^3 \left\lfloor\frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor \le n_p-2|\ensuremath{\mathcal{P}}\xspace|m^{3/4}.$ The first inequality uses the fourth condition of stable sequences, recall that by assumption not all huge jobs are scheduled; the second inequality uses the bound on $n_p$ we just derived; the final inequality uses the fifth condition of stable sequences. \end{proof}
We finally prove \Cref{le.fails.bound}, the remaining main proposition of this paragraph.
\lefailsbound* \begin{proof}
Let $\tilde t$ be the time the algorithm fails. By \Cref{le.bigjobclass} there exists a job class $q$ such that \[w(q)n_{q,\tilde t}<w(q)n_q-w(q)2|\ensuremath{\mathcal{P}}\xspace|m^{3/4}\le w(q)n_q-|\ensuremath{\mathcal{P}}\xspace|m^{3/4}\] holds. In particular
\begin{equation}\label{eq.valuations}\sum_{p\in\ensuremath{\mathcal{P}}\xspace} \tilde n_{p,\tilde t}w(p) \le \sum_{p\in\ensuremath{\mathcal{P}}\xspace} n_pw(p)-|\ensuremath{\mathcal{P}}\xspace|m^{3/4}\le\sum_{p\in\ensuremath{\mathcal{P}}\xspace} (n_p -2m^{3/4})w(p)\le \sum_{p\in\ensuremath{\mathcal{P}}\xspace} c_p w(p).\end{equation} The first inequality uses the previous bound on $n_{q,\tilde t}$ and the fact that for stable sequences $\tilde n_{p,\tilde t}=\max(c_p',n_{p,\tilde t})\le n_p$. For the second inequality observe that $w(p)\le 1$ for all~$p\in\ensuremath{\mathcal{P}}\xspace$. For the last inequality use again the second condition of stable sequences.
Now \Cref{le.fails.bound.l} and the previous inequality imply that \[m< \sum_{p\in\ensuremath{\mathcal{P}}\xspace} \tilde n_{p,\tilde t}w(p)\le \sum_{p\in\ensuremath{\mathcal{P}}\xspace} c_p w(p).\] If this was the case, the algorithm would already have chosen the Least-Loaded-Strategy in \Cref{alg.preparation} line~6 and thus never failed, i.e.\ reached line~7 in \Cref{alg.main}. A contradiction. \end{proof}
\paragraph*{The Least-Loaded-Strategy.}\label{sec.ana.leastloaded} We now derive two important consequences from the previous section.
\begin{lemma}\label{le.ll.trivialbound} If the input sequence is stable, the Least-Loaded-Strategy schedules every huge job onto an empty machine. Thus, if the makespan increases due to the Least-Loaded-Strategy scheduling a huge job, it is at most $p_\mathrm{max}\le{\mathrm{OPT}}$. \end{lemma}
\begin{proof}[Proof of \Cref{le.ll.trivialbound}] By \Cref{le.fails.bound} if a huge job is scheduled using the Least-Loaded-Strategy, our algorithm already decided to do so during the Preparation for the Critical-Job-Strategy, \Cref{alg.preparation}. At this time all $\lfloor\delta m\rfloor$ reserve machines were empty. By the conditions of stable sequences there are at most $\lfloor\delta m\rfloor$ huge jobs and there will always be an empty reserve machine available once one arrives. \end{proof}
\begin{lemma}\label{le.ll.bound} If our algorithm schedules a normal job $J$ using the Least-Loaded-Strategy, the load of the machine the job is scheduled on will be at most $\frac{1}{1-\delta}L+ B$. For stable sequences this is at most $\left(2+\frac{2\delta-\delta^2}{(1-\delta)^2}\right) B=(2+O(\delta)) B$. \end{lemma}
\begin{proof}[Proof of \Cref{le.ll.bound}] Let $l$ be the load of the machine $M$ before job $J$ was scheduled on it. Since $M$ was the least loaded principal machine at that time $l\le \frac{m}{m-\lfloor\delta m\rfloor}L\le \frac{1}{1-\delta}L$ by \Cref{le.avglb}. Since $J$ was normal, its size was at most $ B$. The first part of the lemma follows. For the second part observe that the first condition on stable sequences implies that $L \le \frac{ B}{1-\delta}$ and thus $\frac{1}{1-\delta}L+ B\le \left(2+\frac{2\delta-\delta^2}{(1-\delta)^2}\right) B=(2+O(\delta)) B$. \end{proof}
In order to ameliorate this weaker general bound we need a better upper bound on $B$.
\begin{restatable}{lemma}{leOPTboundI}\label{le.OPT.bound1} If the Least-Loaded-Strategy is applied on a stable sequence, $B \le \frac{c}{2} {\mathrm{OPT}}$. \end{restatable}
\begin{proof} Let us first assert that $\sum_{p\in\ensuremath{\mathcal{P}}\xspace}n_pw(p)\ge \sum_{p\in\ensuremath{\mathcal{P}}\xspace}\tilde n_{p,t}w(p)>m$. There are two cases. If the algorithm chooses the Least-Loaded-Strategy in the Preparation for the Critical-Job-Strategy there holds $\sum_{p\in\ensuremath{\mathcal{P}}\xspace}c_p w(p)>m$. By the properties of stable sequences $c_p\le n_p$ and thus the inequality follows. Else, the algorithm \textsc{fails}, i.e.\ reaches line 7 in \Cref{alg.main}. Let $t$ be the time that happens. Then using \Cref{le.fails.bound.l} we have that $\sum_{p\in\ensuremath{\mathcal{P}}\xspace}n_pw(p)\ge \sum_{p\in\ensuremath{\mathcal{P}}\xspace}\tilde n_{p,t}w(p)>m$.
Consider any schedule. We say a machine scores $\frac{1}{2}$ points for every medium job and $1$ point for every big job. Then $\sum_{p\in\ensuremath{\mathcal{P}}\xspace}n_pw(p)>m$ is the number of points scored in total. Thus there existed a machine which scores strictly more than $1$ point. Such a machine must contain either three medium or one big and another critical job. In the former case its load will be at least $3p_\mathrm{small} B=3(c-1) B>\frac{2}{c} B$, in the latter case its load is at least $(p_{\mathrm{small}} B+p_{\mathrm{big}} B)=\left(\frac{3}{2}c-1\right) B=\frac{2}{c} B$. The last equality holds since $c=\frac{1+\sqrt{13}}{3}$. \end{proof}
\paragraph*{Final proof of \Cref{le.conclusion}}\label{sec.ana.leastloaded.p}
If the algorithm does not change its makespan while applying the Least-Loaded-Strategy, the result follows from \Cref{co.noll}. If the makespan of our algorithm is caused by a huge job while applying the Least-Loaded-Strategy, it is at most $p_\mathrm{max}\le{\mathrm{OPT}}$ by \Cref{le.ll.trivialbound}. Finally, if the makespan of our algorithm is caused by a normal job, it will be at most $(2+O(\delta)) B$ by \Cref{le.ll.bound}. On the other hand, \Cref{le.OPT.bound1} implies that ${\mathrm{OPT}} \ge \frac{2}{c} B$ in this case. The competitive ratio is thus at most $\frac{(2+O(\delta)) B}{\frac{2}{c} B}\le c+O(\delta)$.
\section{Lower bounds}\label{sec.lowerbound} We establish the following theorem using two lower bound sequences. \begin{restatable}{theorem}{temainlb}\label{te.main.lb} For every online algorithm $A$
there exists a job set $\ensuremath{\mathcal{J}}\xspace$ such that
\[\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[A(\ensuremath{\mathcal{J}}\xspace^\sigma)\ge \frac{\sqrt{73}-1}{6}{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)\right]\ge \frac{1}{6}.\] This result actually holds for randomized algorithms too if the random choices of the algorithm are included in the previous probability. \end{restatable}
\Cref{te.main.lb} implies the following lower bounds.
\begin{restatable}{corollary}{colbI} If an online algorithm $A$ is nearly $c$-competitive, $c\ge\frac{\sqrt{73}-1}{6}\approx 1.257$. \end{restatable}
\begin{restatable}{corollary}{colbII} The best competitive ratio possible in the secretary~model is at least $\frac{\sqrt{73}+29}{36}\approx 1.043$. \end{restatable}
Let us now prove these results. For this section let $c=\frac{\sqrt{73}-1}{6}$ be our main lower bound on the competitive ratio. We consider three types of jobs: \begin{enumerate} \setlength{\parskip}{0pt} \setlength{\itemsep}{0pt plus 1pt} \item \emph{negligible jobs} of size $0$ (or a tiny size $\varepsilon>0$ if one were to insist on positive sizes). \item \emph{big jobs} of size $1-\frac{c}{3}=\frac{19-\sqrt{73}}{18}\approx 0.581$. \item \emph{small jobs} of size $\frac{c}{3}=\frac{\sqrt{73}-1}{18}\approx 0.419$. \end{enumerate}
Let $\ensuremath{\mathcal{J}}\xspace$ be the job set consisting of $m$ jobs of each type.
\begin{lemma} There exists a schedule of $\ensuremath{\mathcal{J}}\xspace$ where every machine has load $1$. Every schedule that has a machine with smaller load has makespan at least $c$. \end{lemma}
\begin{proof} This schedule is achieved by scheduling a type~2 and a type~3 job onto each machine. The load of each machine is then $1$. Every schedule which allocates these jobs differently must have at least one machine $M$ which contains at least three jobs of type $2$ or $3$ by the pigeonhole principle. The load of $M$ is then at least $3\frac{c}{3}=c$. \end{proof}
Consider a permutation $\ensuremath{\mathcal{J}}\xspace^\sigma$ of $\ensuremath{\mathcal{J}}\xspace$ and an online algorithm $A$ that expects $3m+1$ jobs to arrive in total. Let $A(\ensuremath{\mathcal{J}}\xspace^\sigma,3m+1)$ denote its makespan after it processes~$\ensuremath{\mathcal{J}}\xspace^\sigma$ expecting yet another job to arrive. Let $P=\ensuremath{\mathbf{P}}\xspace[A(\ensuremath{\mathcal{J}}\xspace^\sigma,3m+1)=1]$ be the probability that $A$ achieves the optimal schedule where every machine has load $1$ under these circumstances. Depending on $P$ we pick one out of two input sets on which $A$ performs badly.
Let $j\in\{1,2\}$. We now consider the job set $\ensuremath{\mathcal{J}}\xspace_j$ consisting of $m$ jobs of each type plus one additional job of type $j$, i.e.\ a negligible job if $j=1$ and a big one if $j=2$. We call an ordering $\ensuremath{\mathcal{J}}\xspace_j^\sigma$ of $\ensuremath{\mathcal{J}}\xspace_j$ \emph{good} if it ends with a job of type $j$ or, equivalently, if its first $3m$ jobs are a permutation of $\ensuremath{\mathcal{J}}\xspace$. Note that the probability of $\ensuremath{\mathcal{J}}\xspace^\sigma$ being good is $\frac{m+1}{3m+1}\ge \frac{1}{3}$ for $\sigma\sim S_{3m+1}$.
\begin{lemma} We have \[\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[A(\ensuremath{\mathcal{J}}\xspace_1^\sigma)\ge c{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)\right]\ge \frac{1-P}{3}\] and \[\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[A(\ensuremath{\mathcal{J}}\xspace_2^\sigma)\ge c{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)\right]\ge \frac{P}{3}.\]
\end{lemma}
\begin{proof} Consider a good permutation of $\ensuremath{\mathcal{J}}\xspace_1$. Then with probability $1-P$ the algorithm $A$ has makespan at least $c$ even before the last job is scheduled. On the other hand ${\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace_1)=1$. Thus with probability $\frac{1-P}{3}$ we have $A(\ensuremath{\mathcal{J}}\xspace_1^\sigma)\ge c=c{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace_1)$.
Now consider a good permutation of $\ensuremath{\mathcal{J}}\xspace_2$. Then, with probability $P$, algorithm $A$ has to schedule the last job on a machine with load $1$. Its makespan is thus $2-\frac{c}{3}=c^2$ by our choice of $c$. The optimum algorithm may schedule two big jobs onto each of two machines, incurring load $2-\frac{2c}{3}<c$, three small jobs onto another one, incurring load $c$, and one big and one small job onto each of the remaining machines, causing load $1<c$; negligible jobs can be placed arbitrarily. Thus ${\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace_2)=c$. In particular we have with probability $\frac{P}{3}$ that $A(\ensuremath{\mathcal{J}}\xspace_2^\sigma)=c^2=c{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace_2)$. \end{proof}
We now conclude the main three lower bound results.
\temainlb* \begin{proof} By the previous lemma we get that \[\max_{j=1,2} \left(\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[A(\ensuremath{\mathcal{J}}\xspace_j^\sigma)\ge c{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace)\right]\right)=\max\left(\frac{1-P}{3},\frac{P}{3}\right) \ge \frac{1}{6}.\qedhere\] \end{proof}
\colbI*
\begin{proof} This is immediate by the previous theorem. \end{proof}
\colbII*
\begin{proof} Let $A$ be any online algorithm. Pick a job set $\ensuremath{\mathcal{J}}\xspace$ according to \Cref{te.main.lb}. Then \[ A^{\mathrm{rom}}(\ensuremath{\mathcal{J}}\xspace)=\ensuremath{\mathbf{E}}\xspace_{\sigma\sim S_n}[A(\ensuremath{\mathcal{J}}\xspace^\sigma)]\ge \frac{1}{6}\cdot \frac{\sqrt{73}-1}{6}{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace) +\frac{5}{6}{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace) = \frac{\sqrt{73}+29}{36}{\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace).\qedhere \] \end{proof}
\let\oldbibliography\thebibliography \renewcommand{\thebibliography}[1]{
\oldbibliography{#1}
\setlength{\itemsep}{0pt plus .3pt}
\setlength{\parsep}{0pt plus .3pt}
\setlength{\parskip}{0pt plus .3pt} }
\appendix \section{Missing proofs in \Cref{sec.basic}}\label{sec.basic.p}
\leavglbII* \begin{proof} Let $\tilde l$ be the $k$-th least pseudo-load at time $t$. This means that there are at least $m-k+1$ machines with pseudo-load $\tilde l_M^t\ge \tilde l$. In particular $(m-k+1)\tilde l \le \sum_M \tilde l_M^t \le m\tilde L$. \end{proof}
\propII* \begin{proof}Let $\tilde l$ be the pseudo-load of the $i$-th least loaded machine before $J$ is scheduled. We have $\tilde l \le \frac{m}{m-i+1}\tilde L$ by \Cref{le.avglb2}. Since $J$ had size at most $p_\mathrm{max}$, the load of the machine it was scheduled on will not exceed $\tilde l+p_\mathrm{max} \le \frac{m}{m-i+1}\tilde L + p_\mathrm{max}\le \frac{m}{m-i+1}\frac{\tilde L}{\max(L,p_\mathrm{max})}{\mathrm{OPT}}+{\mathrm{OPT}} = \left(\left(\frac{m}{m-i+1}\right)\tilde R(\ensuremath{\mathcal{J}}\xspace)+1 \right){\mathrm{OPT}}$.
\end{proof}
\textbf{Sampling and the Load Lemma:}
\prosample* \begin{proof} For $\sigma\sim S_n$ chosen uniformly randomly, the random variable $n_{\ensuremath{\mathcal{C}}\xspace,\varphi}[\sigma]$ is hypergeometrically distributed: It counts how many out of $\lfloor \varphi n\rfloor$ jobs, chosen randomly from the set of all $n$ jobs without replacement, belong to $\ensuremath{\mathcal{C}}\xspace$. The mean of $n_{\ensuremath{\mathcal{C}}\xspace,\varphi}$ is thus \[\ensuremath{\mathbf{E}}\xspace[n_{\ensuremath{\mathcal{C}}\xspace,\varphi}]=\frac{\lfloor \varphi n \rfloor}{n} n_\ensuremath{\mathcal{C}}\xspace ,\] in particular, using that $\ensuremath{\mathcal{J}}\xspace$ has size at least $n\ge m$, we have \begin{equation}0\le \varphi n_\ensuremath{\mathcal{C}}\xspace -\ensuremath{\mathbf{E}}\xspace[n_{\ensuremath{\mathcal{C}}\xspace,\varphi}] \le \frac{1}{n} \le \frac{1}{m}.\end{equation} Similarly, the variance of $n_{\ensuremath{\mathcal{C}}\xspace,\varphi}$ is at most \begin{align*}\textrm{Var}[n_{\ensuremath{\mathcal{C}}\xspace,\varphi}] &=\frac{n_\ensuremath{\mathcal{C}}\xspace\left(n-n_\ensuremath{\mathcal{C}}\xspace\right)\lfloor\varphi n \rfloor \left(n-\lfloor\varphi n \rfloor\right) }{n^2(n-1)}\le \varphi n_\ensuremath{\mathcal{C}}\xspace .\end{align*} By Chebyshev's inequality we have:
\begin{align*}&\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[\left|\varphi^{-1} n_{\ensuremath{\mathcal{C}}\xspace,\varphi}\left[\sigma\right]-n_{\ensuremath{\mathcal{C}}\xspace}[\ensuremath{\mathcal{J}}\xspace]\right|\ge E\right]\\
\le &\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[\left| n_{\ensuremath{\mathcal{C}}\xspace,\varphi}\left[\sigma\right]-\ensuremath{\mathbf{E}}\xspace\left[n_{\ensuremath{\mathcal{C}}\xspace,\varphi}\right]\right|\ge \varphi\left(E-1/m\right)\right]\\ \le &\frac{\textrm{Var}[n_{\ensuremath{\mathcal{C}}\xspace,\varphi}]}{\varphi^2(E-1/m)^2}\\ \le &\frac{n_\ensuremath{\mathcal{C}}\xspace}{\varphi (E-1/m)^2}&&\qedhere \end{align*} \end{proof}
\Loadlemma* \begin{proof} Let $\delta=\frac{\varepsilon}{2}$ and let $F(m)=\frac{\sqrt{mR_\mathrm{low}}\delta}{1+\delta}$. The assumption that $\varepsilon^{-4}\varphi^{-1}R_\mathrm{low}^{-1}=o(m)$ already implies $F\in\Theta\left(\sqrt{mR_\mathrm{low}}\varepsilon\right)\subset \omega\left(\frac{1}{\varepsilon\sqrt{\varphi}}\right)$.
Let us fix any input sequence $\ensuremath{\mathcal{J}}\xspace$. Given a non-negative integer $j\in\ensuremath{\mathbb{Z}}\xspace_{\ge 0}$ let $p_j=(1+\delta)^{-j}p_\mathrm{max}$. For $j>0$ let $\ensuremath{\mathcal{C}}\xspace_j$ denote the set of jobs in $\ensuremath{\mathcal{J}}\xspace$ that have size in the half-open interval $(p_{j},p_{j-1}]$. Note that every job belongs to precisely one job class $\ensuremath{\mathcal{C}}\xspace_j$. Using the notation from \Cref{sec.sampling} we set $n_{j}^\sigma=n_{\ensuremath{\mathcal{C}}\xspace_j,\varphi}[\sigma]$ and $n_j=n_{\ensuremath{\mathcal{C}}\xspace_j}$.
Now $L^\downarrow= \frac{1}{m}\sum\limits_{j=1}^\infty (1+\delta)^{-j}n_jp_\mathrm{max}$ is the average load if we round down the size of every job in job class $\ensuremath{\mathcal{C}}\xspace_j$ to $p_j$ for every $j\ge 1$. In particular, there holds $L^\downarrow\le L \le (1+\delta)L^\downarrow$. Similarly, let $L^\downarrow_\varphi[\sigma]= \frac{\varphi^{-1}}{m}\sum\limits_{j=1}^\infty (1+\delta)^{-j}n_j^\sigma p_\mathrm{max}$ be the rounded-down version of $L_\varphi$. Again, there holds $L^\downarrow_\varphi[\sigma]\le L_\varphi[\sigma] \le (1+\delta)L^\downarrow_\varphi[\sigma]$. Using these approximations, we see that
\begin{align*}\left|L_\varphi[\sigma]-L\right|\le \left|L^\downarrow_\varphi[\sigma]-L^\downarrow\right|+\max\left(\delta L^\downarrow_\varphi[\sigma],\delta L^\downarrow\right) \\
\le \left|L^\downarrow_\varphi[\sigma]-L^\downarrow\right|+\delta \left|L^\downarrow_\varphi[\sigma]-L^\downarrow\right| +\delta L^\downarrow \\
\le (1+\delta)\left|L^\downarrow_\varphi[\sigma]-L^\downarrow\right| +\delta L\end{align*}
We can bound the term in the statement of the lemma via
\begin{equation}\label{eq.Lcompare}\left|\frac{L_\varphi}{L}-1\right|\le (1+\delta)\frac{\left|L^\downarrow_\varphi[\sigma]-L^\downarrow\right|}{L}+\delta. \end{equation}
Now, consider the term $\left|L^\downarrow_\varphi[\sigma]-L^\downarrow\right|$. \Cref{pro.sample} with $E=(1+\delta)^{j/2}F(m)\sqrt{n_j}$, yields
\[\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[\left|\varphi^{-1}n_j^\sigma-n_j\right|\ge (1+\delta)^{j/2}F(m)\sqrt{n_j}\right]=O\left(\frac{(1+\delta)^{-j}}{\varphi F(m)^2}\right)\]
Consider the event that we have $\left|\varphi^{-1}n_j^\sigma-n_j\right|\le (1+\delta)^{j/2}F(m)\sqrt{n_j}$ for all $j$. By the union bound its probability is \[P(m)=1-O\left(\sum\limits_j\frac{(1+\delta)^{-j}}{\varphi F(m)^2}\right)= 1-O\left(\frac{1}{\delta\varphi F(m)^2}\right)=1-o(\varepsilon).\]
The first equality uses the union bound, the second the geometric sequence and the final one the fact, argued at the beginning of the proof, that $F\in \omega\left(\frac{1}{\varepsilon\sqrt{\varphi}}\right)$ and that $\delta=\Theta(\varepsilon)$. Now, if we have $\left|\varphi^{-1}n_j^\sigma-n_j\right|\le (1+\delta)^{j/2}F(m)\sqrt{n_j}$ for all $j\ge 1$, we get:
\begin{align*}\left|L^\downarrow_\varphi[\sigma]-L^\downarrow\right|&=\left|\frac{\varphi^{-1}}{m}\sum\limits_{j=0}^\infty (1+\delta)^{-j}n_j^\sigma p_\mathrm{max}-\frac{1}{m}\sum\limits_{j=0}^\infty (1+\delta)^{-j}n_jp_\mathrm{max}\right| \\
&\le \frac{1}{m}\sum\limits_{j=0}^\infty (1+\delta)^{-j}\left|\varphi^{-1}n_j^\sigma-n_j\right| p_\mathrm{max}\\ &\le \frac{\sqrt{p_\mathrm{max}}}{m}\sum\limits_{j=0}^\infty (1+\delta)^{-j/2}F(m)\sqrt{n_j}\sqrt{p_\mathrm{max}}\\ &\le\frac{\sqrt{p_\mathrm{max}}F(m)}{\sqrt{m}}\left(\frac{1}{m}\sum\limits_{j=0}^\infty (1+\delta)^{-j}n_jp_\mathrm{max}\right)^{1/2}\\ &\le\frac{\sqrt{L}F(m) }{\sqrt{R_\mathrm{low} m}}\sqrt{L^\downarrow}\\ &\le\frac{F(m)}{\sqrt{R_\mathrm{low} m}}L =\frac{\delta}{1+\delta}L. \end{align*} The first inequality is the triangle inequality, the second holds per assumption, the third is the Cauchy–Schwarz inequality, the fourth uses the definition of $L^\downarrow$ and the fact that $\sqrt{p_\mathrm{max}}=\sqrt{{L}/{R[\ensuremath{\mathcal{J}}\xspace]}}\le \sqrt{{L}/{R_\mathrm{low}}}$, the last inequality uses that $L^\downarrow\le L$ and the final equality is simply the definition of $F$.
Combining this with \Cref{eq.Lcompare} yields that we have with probability $P(m)\in 1-o(\varepsilon)$
\[\left|\frac{L_\varphi}{L}-1\right|\le (1+\delta)\frac{\left|L^\downarrow_\varphi[\sigma]-L^\downarrow\right|}{L}+\delta\le 2 \delta =\varepsilon.\] It thus suffices to choose $m(R_\mathrm{low},\varphi,\varepsilon)$ such that $P(m)\ge 1-\varepsilon$ for all $m\ge m(R_\mathrm{low},\varphi,\varepsilon)$. \end{proof} \section{Missing proofs in \Cref{sec.1.75}}\label{sec.1.75.p} \coll* \begin{proof}[Proof of \Cref{co.ll}] Let wlog.\ $\ensuremath{\mathcal{J}}\xspace^\sigma=J_1,\ldots, J_n$ and set ${\mathrm{OPT}}={\mathrm{OPT}}(\ensuremath{\mathcal{J}}\xspace^\sigma)={\mathrm{OPT}}(J_1,\ldots, J_n)$. We append a certain number of jobs $J_{n+1},\ldots, J_{n'}$ to the sequence such that the average load of $J_{1},\ldots, J_{n'}$ is $L_\mathrm{guess}$ and ${\mathrm{OPT}}(J_{1},\ldots, J_{n'})=\max(L_{\mathrm{guess}},{\mathrm{OPT}})$. We use the following procedure to construct the sequence: \begin{algorithm}[H] \caption{Appending a certain job sequence.}\label{alg.exceptional3} \begin{algorithmic}[1] \State Start with $n'=n$ and any optimal schedule of $J_{1},\ldots, J_n$. \While{$L_{n'}=\frac{1}{m}\sum\limits_{i=1}^{n'} p_{i} <L_{\mathrm{guess}}$} \State Let $M$ be a least loaded machine and $l$ be its load. \State Append job $J_{n'+1}$ of size $p_{n'+1}=\min(L_{\mathrm{guess}} - l, m(L_{\mathrm{guess}} - L_{n'}))$ to the sequence. \State Schedule $J_{n'+1}$ onto $M$. $n'\gets n'+1$. \EndWhile \end{algorithmic} \end{algorithm}
It is easy to see that the previous procedure outputs a job sequence of average load $L[J_1,\ldots, J_{n'}]=L_{n'}=L_{\mathrm{guess}}$, assuming it started with a sequence $J_1,\ldots, J_n$ of average load at most $L_{\mathrm{guess}}$. Furthermore it maintains a schedule with makespan at most $\max (L_{\mathrm{guess}},{\mathrm{OPT}})$. This is necessarily an optimal schedule, since both the average load $L_{\mathrm{guess}}$ as well as the optimum ${\mathrm{OPT}}={\mathrm{OPT}}(J_1,\ldots, J_n)$ of a prefix are lower bounds on the optimum makespan. We've thus shown that ${\mathrm{OPT}}(J_{1},\ldots, J_{n'})=\max(L_{\mathrm{guess}},{\mathrm{OPT}})$.
Since we can apply \Cref{te.ll} to $J_{1},\ldots, J_{n'}$ we get: \begin{align*}\mathrm{Light Load [L_{\mathrm{guess}}]}(J_1,\ldots, J_n)&\le \mathrm{Light Load [L[J_1,\ldots, J_{n'}]]}(J_{1},\ldots, J_{n'}) \\ &\le 1.75 {\mathrm{OPT}}(J_{1},\ldots, J_{n'})\\ &= 1.75\max(L_{\mathrm{guess}},{\mathrm{OPT}}). &\qedhere\end{align*} \end{proof}
\section{Second reduction. Full proof of \Cref{le.main.stable}}\label{sec.ana.stable.p} Throughout this proof we assume that all job sets $\ensuremath{\mathcal{J}}\xspace$ considered are proper. Many notations in this proof will depend on the job set $\ensuremath{\mathcal{J}}\xspace$, the number of machines $m$ and possibly the job order. For simplicity we omit these dependencies whenever possible. If needed, we include it using the notation $\ensuremath{\mathcal{P}}\xspace[\ensuremath{\mathcal{J}}\xspace^\sigma]$ or even $\ensuremath{\mathcal{P}}\xspace[\ensuremath{\mathcal{J}}\xspace^\sigma,m]$, $\hat n_p[\ensuremath{\mathcal{J}}\xspace^\sigma]$, etc. In particular, we write mostly $\delta$ for the function $\delta(m)=\frac{1}{\log(m)}$. It is very important to note for the arguments in this section, that this is a function in $m$ whose inverse grows sub-polynomially in $m$.
For every job set $\ensuremath{\mathcal{J}}\xspace$ we fix a set $S=S[\ensuremath{\mathcal{J}}\xspace,m]\subset\ensuremath{\mathcal{J}}\xspace$ consisting of the $\left\lceil\delta(m)^{-7/3}\right\rceil$ largest jobs. We break ties arbitrarily. Technically, we could choose any exponent other than $7/3$ in the open interval $(2,3)$, too. Let $s_\mathrm{min}=s_\mathrm{min}[\ensuremath{\mathcal{J}}\xspace,m]$ be the size of the smallest job in the set $S$. Recall the geometric rounding function $f(p)=(1+\delta)^{\left\lfloor \log_{1+\delta}p\right\rfloor}$ and consider the set $\ensuremath{\mathcal{P}}\xspace_\mathrm{glob}=\ensuremath{\mathcal{P}}\xspace_\mathrm{glob}[\ensuremath{\mathcal{J}}\xspace,m]=\{f(p_t)\mid p_t\text{ is the size of any job }J_t\in\ensuremath{\mathcal{J}}\xspace\}$ of all rounded sizes of jobs in $\ensuremath{\mathcal{J}}\xspace$. Then consider the subset $\hat\ensuremath{\mathcal{P}}\xspace=\hat\ensuremath{\mathcal{P}}\xspace[\ensuremath{\mathcal{J}}\xspace,m]=\{(1+\delta)^i\in\ensuremath{\mathcal{P}}\xspace_\mathrm{glob} \mid p_{\mathrm{small}} \max(s_\mathrm{min},(1-\delta)L)< (1+\delta)^i \}$. We will see that this set is likely a superset of $\ensuremath{\mathcal{P}}\xspace[\ensuremath{\mathcal{J}}\xspace^\sigma,m]$, which does not depend on the job order.
The following estimates of the sizes of $\ensuremath{\mathcal{P}}\xspace$ and $\hat \ensuremath{\mathcal{P}}\xspace$ will be relevant later.
\begin{lemma}\label{le.Pbound}
We have $|\ensuremath{\mathcal{P}}\xspace|\le 1-\lfloor\log_{1+\delta}(p_{\mathrm{small}})\rfloor \le O\left(\delta^{-1}\right)$. \end{lemma} \begin{proof}
First observe that $\ensuremath{\mathcal{P}}\xspace$ contains precisely one element for each power of $(1+\delta)$ in the half-open interval $((1+\delta)^{-1}p_{\mathrm{small}} B, B]$. In particular $|\ensuremath{\mathcal{P}}\xspace|\le 1-\lfloor\log_{1+\delta}(p_{\mathrm{small}})\rfloor \le O\left(\delta^{-1}\right)$. \end{proof}
\begin{lemma}\label{le.hatCPbound}
We have $|\hat \ensuremath{\mathcal{P}}\xspace|\le \delta(m)^{-7/3}-\lfloor\log_{1+\delta}(p_{\mathrm{small}})\rfloor\le O(\delta^{-7/3})$ \end{lemma}
\begin{proof}
Indeed, there are precisely $1-\lfloor\log_{1+\delta}(p_{\mathrm{small}})\rfloor$ powers of $(1+\delta)$ in the half-open interval $(p_{\mathrm{small}} s_\mathrm{min},s_\mathrm{min}]$, in particular $\hat\ensuremath{\mathcal{P}}\xspace$ contains at most that many elements of size lesser or equal to $s_\mathrm{min}$. Now all elements in $\hat\ensuremath{\mathcal{P}}\xspace$ which have size strictly greater than $s_\mathrm{min}$ need to be the rounded sizes of jobs in $S$ excluding the smallest job in $S$. Thus, there are at most $\delta(m)^{-7/3}-1$ elements in $\hat\ensuremath{\mathcal{P}}\xspace$ of size strictly greater than $s_\mathrm{min}$. In particular $|\hat \ensuremath{\mathcal{P}}\xspace|\le 1-\lfloor\log_{1+\delta}(p_{\mathrm{small}})\rfloor+ \delta(m)^{-7/3}-1$. \end{proof}
For every $p\in\hat\ensuremath{\mathcal{P}}\xspace[\ensuremath{\mathcal{J}}\xspace,m]$ we consider the job class $\ensuremath{\mathcal{C}}\xspace_p=\ensuremath{\mathcal{C}}\xspace_p[\ensuremath{\mathcal{J}}\xspace,m]\subset\ensuremath{\mathcal{J}}\xspace$ of jobs whose rounded size is $p$. Using the notation from \Cref{sec.sampling} we set $n_{p,\varphi}=n_{\ensuremath{\mathcal{C}}\xspace_p,\varphi}$ and $n_p=n_{\ensuremath{\mathcal{C}}\xspace_p}$ for every $p\in\hat\ensuremath{\mathcal{P}}\xspace$ and $0<\varphi<1$. We defined the property of being stable in a way that lends itself to algorithmic applications. We now give similar, in fact slightly stronger, conditions better suited for probabilistic arguments.
\begin{definition} We call a proper job sequence $\ensuremath{\mathcal{J}}\xspace^\sigma$ \emph{probabilistically stable} if the following holds: \begin{enumerate} \setlength{\parskip}{0pt} \setlength{\itemsep}{0pt plus 1pt}
\item The load estimate $\hat L=\hat L_{\delta^2}[\ensuremath{\mathcal{J}}\xspace^\sigma]$ for $L=L[\ensuremath{\mathcal{J}}\xspace]$ is good, i.e.\ $(1-\delta)L \le \hat L \le (1+\delta)L$. \item There is at least one job in $S$ among the $\lceil\delta^2 n\rceil$ first jobs in $\ensuremath{\mathcal{J}}\xspace^\sigma$.
\item For every $p\in\hat\ensuremath{\mathcal{P}}\xspace$ we have $\left|\delta^{-2} n_{p,\delta^2}-n_p\right|\le m^{3/4}-1$. \item For every $p\in\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}[\ensuremath{\mathcal{J}}\xspace^\sigma]$ we have $2n_{p,\delta^2}\le n_p$. \item Let $t_S=t_S[\ensuremath{\mathcal{J}}\xspace^\sigma,m]$ be the time the last job in $S$ arrived, then $t_S \le \left(1-\delta(m)^{8/3}\right)n$.
\item For every $p\in\hat \ensuremath{\mathcal{P}}\xspace$ with $n_p>\left\lfloor\frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor$ there holds $n_{p,1-\delta^{8/3}}\le \left(1-\delta^3\right)n_{p}$. \end{enumerate} We refer to these six conditions as \emph{probabilistic conditions}. \end{definition}
The following lemma shows that we can analyze the probability of a sequence being probabilistically stable instead of using the conditions from \Cref{sec.ana.stable}.
\begin{lemma}\label{le.stableprobal} There exists a number $m_0$ such that for all $m\ge m_0$ every probabilistically stable sequence is stable. \end{lemma}
The constant $m_0$ in the previous lemma comes from the following technical lemma.
\begin{lemma}\label{le.m0.bound}
There exists $m_0>0$, such that for all $m\ge m_0$ and all proper job sets $\ensuremath{\mathcal{J}}\xspace$ we have $\delta(m)^{-7/3}\le \lfloor\delta m\rfloor$ and $\delta(m)^3 \cdot \left\lfloor \frac{\left(1-\delta(m)-2\delta(m)^2\right)m}{|\ensuremath{\mathcal{P}}\xspace(m,\ensuremath{\mathcal{J}}\xspace)|} \right\rfloor \ge 2|\ensuremath{\mathcal{P}}\xspace(m,\ensuremath{\mathcal{J}}\xspace)|m^{3/4}$. \end{lemma}
\begin{proof} This comes down to asymptotic observations. For the first inequality use that $\delta(m)^{-7/3}=\log^{7/3}(m)=o(m)$ while $\lfloor\delta m\rfloor=\Theta(m)$.
For the second inequality observe that $|\ensuremath{\mathcal{P}}\xspace(m,\ensuremath{\mathcal{J}}\xspace)|=O(\delta(m)^{-1}) =O(\log(m))$ by \Cref{le.Pbound}. Then we can see that
$\delta(m)^3 \cdot \left\lfloor \frac{\left(1-\delta(m)-2\delta(m)^2\right)m}{|\ensuremath{\mathcal{P}}\xspace(m,\ensuremath{\mathcal{J}}\xspace)|} \right\rfloor=\Omega (\delta(m)^4m)=\Omega\left(\frac{m}{\log^4(m)}\right)$ while, on the other hand, $2|\ensuremath{\mathcal{P}}\xspace|m^{3/4}=O\left(\log(m)m^{3/4}\right)=o\left(\frac{m}{\log^4(m)}\right)$.
These asymptotic observations already imply the statement of the lemma. \end{proof}
\begin{proof}[Proof of \Cref{le.stableprobal}] We consider the five conditions of stable sequences separately
\textbf{1.} The first condition of stable sequences agrees with the first probabilistic condition.
\textbf{2.} First consider job classes $p\in\ensuremath{\mathcal{P}}\xspace[\ensuremath{\mathcal{J}}\xspace^\sigma,m]\setminus\ensuremath{\mathcal{P}}\xspace_\mathrm{glob}[\ensuremath{\mathcal{J}}\xspace,m]$. For these job classes there holds $n_p=0$. This already implies that $c_p=0$ holds, too. Thus, the second condition follows trivially for these job classes.
By the second probabilistic condition, we have that $p^{\varphi n}_\mathrm{max}[\ensuremath{\mathcal{J}}\xspace^\sigma]\ge s_\mathrm{min}$ and by the first probabilistic condition there holds $\hat L[\ensuremath{\mathcal{J}}\xspace^\sigma]\ge(1-\delta)L$. In particular $B[\ensuremath{\mathcal{J}}\xspace^\sigma] =\max\left(p^{\varphi n}_\mathrm{max},\hat L\right) \ge \max(s_\mathrm{min},(1-\delta)L)$ and thus $\ensuremath{\mathcal{P}}\xspace(\ensuremath{\mathcal{J}}\xspace^\sigma,m)\cap \ensuremath{\mathcal{P}}\xspace_\mathrm{glob}\subseteq \hat \ensuremath{\mathcal{P}}\xspace(\ensuremath{\mathcal{J}}\xspace,m)$. There are two cases to consider now. If $c_p=\left\lfloor\left(\delta^{-2}\hat n_{p}-m^{3/4}\right)w(p)\right\rfloor w(p)^{-1}$ we conclude, using the third probabilistic condition, that $|(c_p+m^{3/4})-n_p| \le |\delta^{-2}\hat n_{p}-n_p+1|\le m^{3/4}$, which already implies that $c_p\le n_p \le c_p+2m^{3/4}$. If $c_p=\hat n_{p} w(p)^{-1}\ge \left\lfloor\left(\delta^{-2}\hat n_{p}-m^{3/4}\right)w(p)\right\rfloor$ the second bound $n_p\le c_p+2m^{3/4}$ still holds. The first bound, $c_p=\hat n_{p} w(p)^{-1}\le n_p$, is trivial if $w(p)^{-1}=1$, or, equivalently, if $p\notin\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}[\ensuremath{\mathcal{J}}\xspace^\sigma]$. Else, it follows from the fourth probabilistic condition.
\textbf{3.} To conclude the third condition of stable sequences note that the second probabilistic condition implies that all huge jobs have size strictly greater than $s_\mathrm{min}$. This implies that they lie in $S$ and that there are at most $|S|=\delta(m)^{-7/3}$ many of those jobs. Since we only consider $m\ge m_0$ we have $\delta(m)^{-7/3}\le \lfloor \delta m\rfloor$ by \Cref{le.m0.bound}. Hence, the third condition of stable sequences follows.
\textbf{4.} Consider $p\in\ensuremath{\mathcal{P}}\xspace(\ensuremath{\mathcal{J}}\xspace^\sigma,m)$ with $n_p>\left\lfloor\frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor$. As we argued when proving the second condition $p\in \hat \ensuremath{\mathcal{P}}\xspace(\ensuremath{\mathcal{J}}\xspace,m)$. By the second probabilistic condition all huge jobs lie in $S$ since they have size strictly greater than $\hat B \ge p^{\varphi n}_\mathrm{max} \ge s_\mathrm{min}$. It thus suffices to show that at most $(1-\delta^3)n_p$ jobs arrived at time $t_S[\ensuremath{\mathcal{J}}\xspace^\sigma,m]$, the time the last job in $S$ arrived. But by the fifth probabilistic condition this value is at most $n_{p,1-\delta^{8/3}}$, which is less than $(1-\delta^3)n_p$ by the sixth probabilistic condition. The fourth condition for stable sequences follows.
\textbf{5.} Finally, the fifth condition of stable sequences is already a consequence of choosing $m\ge m_0$ and \Cref{le.m0.bound}. \end{proof}
Now, we analyze the probability of each probabilistic condition separately. Namely, we consider \[P_i(m)=\sup\limits_{\ensuremath{\mathcal{J}}\xspace \textrm{ proper}}\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[\ensuremath{\mathcal{J}}\xspace^\sigma \textrm{ does not fulfill the }i\text{-th condition}\right].\] Recall, that $P(m)$ similarly defines the worst probability with which a sequence may not be stable. It is the value we are interested in. The values $P_i(m)$ relate to $P(m)$ by the following corollary, which is an immediate consequence of \Cref{le.stableprobal} and the union bound.
\begin{corollary}\label{le.p1} We have that $P(m)\le \sum_{i=1}^6P_i(m)$ for all $m\ge m_0$ if we choose $m_0$ as in \Cref{le.m0.bound}. \end{corollary}
Thus, we are left to see that all the $P_i(m)$ vanish.
\begin{lemma}\label{le.p2} For every $i$ we have $\lim\limits_{m\rightarrow\infty} P_i(m)=0$. \end{lemma}
\begin{proof} We again consider every choice of $1\le i \le 6$ separately.
\textbf{1.} Apply the Load Lemma, \Cref{Loadlemma}, with $R_\mathrm{low}=\frac{(1-\delta)\delta^3}{2(\delta^2+1)}(2-c)$, $\varphi=\delta^2$ and $\varepsilon=\delta$. Then for $m$ large enough, there holds $ \ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n} \left[\left|\frac{L_{\delta^2}[\ensuremath{\mathcal{J}}\xspace^\sigma]}{L[\ensuremath{\mathcal{J}}\xspace]}-1\right|\ge\delta\right]<\delta$. Note that the condition $\left|\frac{L_{\delta^2}[\ensuremath{\mathcal{J}}\xspace^\sigma]}{L[\ensuremath{\mathcal{J}}\xspace]}-1\right|\le\delta$ is equivalent to $(1-\delta) L \le \hat L \le (1+\delta)L$. Thus $P_1(m)\le\delta(m)$ and in particular $\lim\limits_{m\rightarrow\infty}P_1(m)=0$.
\textbf{2.} Let $J\in S$. The probability that $J$ is not among the $\lceil \delta^2 n \rceil$ first jobs after the random permutation is at most $1-\delta^2$. In particular the probability $P_2(m)$ that none of the jobs in $S$ is among the first $\lceil \delta^2 n \rceil$ jobs can be bounded via $(1-\delta^2)^{|S|} \le \frac{1}{1+\delta^2|S|}$ using Bernoulli's inequality. Thus $P_2(m)\le \frac{1}{1+\delta(m)^2|S|} \le \frac{1}{1+\delta(m)^{-1/3}} \le \delta(m)^{1/3}$ which tends to $0$ for $m\rightarrow\infty$.
\textbf{3.} Fix $p\in\hat \ensuremath{\mathcal{P}}\xspace$. By \Cref{pro.sample} we have $\ensuremath{\mathbf{P}}\xspace_{\sigma\sim S_n}\left[\left|\delta^{-2} n_{p,\delta^2}-n_p\right|\ge m^{3/4}-1\right]\le\frac{n_p}{\delta^2(m^{3/4}-1-1/m)^2}$ and thus by the union bound $P_3(m)\le \frac{\sum_{p\in\hat\ensuremath{\mathcal{P}}\xspace} n_p}{\delta^2(m^{3/4}-1-1/m)^2}$. Now observe that there holds $L\ge \frac{1}{m}\sum_{p\in\hat\ensuremath{\mathcal{P}}\xspace} p\cdot n_p \ge \frac{1}{m}\sum_{p\in\hat\ensuremath{\mathcal{P}}\xspace}p_{\mathrm{small}} (1-\delta) L \cdot n_p$, thus $\sum_{p\in\hat\ensuremath{\mathcal{P}}\xspace} n_p \le \frac{m}{(1-\delta)p_{\mathrm{small}}}$. From this we conclude that $P_3(m) \le \frac{m}{(1-\delta)p_{\mathrm{small}}\delta^2(m^{3/4}-1-1/m)^2}=O\left(\frac{1}{\sqrt{m}}\right)$ which already shows that $\lim\limits_{m\rightarrow\infty} P_3(m)=0$.
\textbf{4.} Recall that $f$ is the geometric rounding function. Let $\hat B[\ensuremath{\mathcal{J}}\xspace^\sigma]=\max((1+\delta)L,f(B[\ensuremath{\mathcal{J}}\xspace^\sigma]))$ and let $\hat\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}[\ensuremath{\mathcal{J}}\xspace^\sigma]=\{p\in\ensuremath{\mathcal{P}}\xspace_\mathrm{glob}\mid (1+\delta)^{-3}p_{\mathrm{small}} \hat B[\ensuremath{\mathcal{J}}\xspace^\sigma] \le p \le p_{\mathrm{big}} \hat B[\ensuremath{\mathcal{J}}\xspace^\sigma]\}$. We have $\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}[\ensuremath{\mathcal{J}}\xspace^\sigma]\subset \hat\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}[\ensuremath{\mathcal{J}}\xspace^\sigma]$ if the first probabilistic condition holds, i.e.\ $(1-\delta)L\le \hat L[\ensuremath{\mathcal{J}}\xspace^\sigma] \le (1+\delta)L.$ Since the probability of the latter not being true is $P_1(m)$ and vanishes for $m\rightarrow\infty$ it suffices to consider the fourth probabilistic condition where we replace $\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}[\ensuremath{\mathcal{J}}\xspace^\sigma]$ with $\hat\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}[\ensuremath{\mathcal{J}}\xspace^\sigma]$.
Now let $\hat B_\mathrm{fix}$ be any possible value the variable $\hat B[\ensuremath{\mathcal{J}}\xspace^\sigma]$ may obtain. We want to condition ourselves on the case that $\hat B[\ensuremath{\mathcal{J}}\xspace^\sigma]=\hat B_\mathrm{fix}$ for $\hat B_\mathrm{fix}$ either $(1+\delta)L$ or the rounded size of some element in $S$ exceeding $(1+\delta)L$. Note, that $\hat B[\ensuremath{\mathcal{J}}\xspace^\sigma]$ may attain other values than the previously mentioned ones only if either the first or second probabilistic condition is not met. Since $P_1(m)+P_2(m)\rightarrow 0$ we may ignore these cases. Fixing $\hat B$ also fixes the set $\hat\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}$.
Consider $p\in\hat\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}$. We want an upper bound on $\ensuremath{\mathbf{E}}\xspace[n_{p,\delta^2}\mid\hat B=\hat B_\mathrm{fix}]$, the expected value of $n_{p,\delta^2}$ conditioned on our choice of $\hat B$. What does it mean to condition on $\hat B[\ensuremath{\mathcal{J}}\xspace^\sigma]=B_\mathrm{fix}$? It is simply equivalent to stating that no element in $S$ of rounded size strictly greater than $B_\mathrm{fix}$ occurs in the sampling phase and that either an element of rounded size $B_\mathrm{fix}$ occurs in the sampling phase or $B_\mathrm{fix}=(1+\delta)L$. Thus conditioning on $\hat B[\ensuremath{\mathcal{J}}\xspace^\sigma]=B_\mathrm{fix}$ just fixes the position of some of the $\lceil\delta(m)^{-7/3}\rceil$ elements in $S$. The expected value of $n_{p,\delta^2}$ is maximized if we consider the case where all elements of $S$ have to occur after the sampling phase. In this case, $n_{p,\delta^2}$ is hypergeometrically distributed. We sample $\delta^2 n$ jobs from a set of $n-|S|$ jobs and count the number of $p$ jobs. Thus, we see that $\ensuremath{\mathbf{E}}\xspace[n_{p,\delta^2}\mid\hat B=\hat B_\mathrm{fix}]\le \frac{\lceil\delta^2 n\rceil n_p}{n-\lceil\delta(m)^{-7/3}\rceil}\le 2\delta^2 n_p$. For the latter inequality we need to choose $m$ (and thus $n\ge m$) sufficiently large. Now, we get by Markov's inequality \[\ensuremath{\mathbf{P}}\xspace\left[n_{p,\delta^2}\ge\frac{n_p}{2}\right]\le \ensuremath{\mathbf{P}}\xspace\left[n_{p,\delta^2}\ge\frac{\ensuremath{\mathbf{E}}\xspace[n_{p,\delta^2}]}{4\delta^2}\right]\le 4\delta^2.\]
We have $|\hat\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}[\ensuremath{\mathcal{J}}\xspace^\sigma]|\le \log_{1+\delta}(p_{\mathrm{big}})-\log_{1+\delta}(p_{\mathrm{small}})+4 =O(\delta(m)^{-1})$. Thus, by the union bound $P_4(m)\le |\hat\ensuremath{\mathcal{P}}\xspace_{\mathrm{med}}|\cdot 4\delta^2 =O(\delta(m))$ if we condition ourselves on $\hat B[\ensuremath{\mathcal{J}}\xspace^\sigma]=\hat B_\mathrm{fix}$. Since this holds for all possible choices of $\hat B_\mathrm{fix}$ but few degenerate ones that occur with probability at most $P_1(m)+P_2(m)$ we get that $P_4(m)\le P_1(m)+P_2(m)+O(\delta(m))$. Since all these terms vanish for $m\rightarrow\infty$ we also have that $\lim\limits_{m\rightarrow\infty} P_4(m)=0$.
\textbf{5.} Let $l=\left\lceil\delta(m)^{-7/3}\right\rceil$, then there holds $P_5(m)=1- \prod_{i=1}^l \frac{\left\lfloor\left(1-\delta(m)^{8/3}\right)n\right\rfloor-i}{n-i}$. Indeed, if we choose any order $S=\{s_1,\ldots,s_l\}$ then the $i$-th term in the product denotes the probability that the $i$-th element $s_i$ is among the $\lceil(1-\delta(m)^{8/3})n\rceil$-last elements conditioned on the earlier elements already fulfilling this condition. But now we see using Bernoulli's inequality that $\prod_{i=1}^l \frac{\left\lfloor\left(1-\delta(m)^{8/3}\right)n\right\rfloor-i}{n-i} \ge \left(1-\delta(m)^{8/3}-\frac{l+1}{n}\right)^l \ge 1 -\delta(m)^{8/3}l-\frac{l(l+1)}{n}$. Since for proper job sets $n\ge m$, this implies $P_5(m)\le \delta(m)^{8/3}l+\frac{l(l+1)}{m}=O(\delta(m)^{1/3})$. In particular $\lim\limits_{m\rightarrow\infty} P_5(m)=0$.
\textbf{6.} Fix any $p\in\hat\ensuremath{\mathcal{P}}\xspace$ with $n_p>\left\lfloor\frac{\left(1-\delta-2\delta^2\right)m}{|\ensuremath{\mathcal{P}}\xspace|} \right\rfloor=\Omega(m)$. Then we have by \Cref{pro.sample} \begin{align*}\ensuremath{\mathbf{P}}\xspace\left[n_{p,1-\delta^{8/3}}\ge \left(1-\delta^3\right)n_{p}\right]&\le \ensuremath{\mathbf{P}}\xspace\left[\left|(1-\delta^{8/3})^{-1}n_{p,1-\delta^{8/3}}-n_p\right|\ge \frac{\delta^{8/3}(1-\delta^{1/3})n_p}{1-\delta^{8/3}}\right]\\ &\le \frac{n_p}{(1-\delta^{8/3})\left(\frac{\delta^{8/3}(1-\delta^{1/3})n_p}{1-\delta^{8/3}}-1/m\right)^2}\\ &=O\left(\frac{\delta^{-16/3}}{n_p}\right) =O\left(\frac{\delta^{-16/3}}{m}\right). \end{align*}
By the union bound and \Cref{le.hatCPbound} there holds that $P_6(m)=O(|\hat \ensuremath{\mathcal{P}}\xspace| \delta^{-16/3}/ m)=O(\delta^{-23/3}/m)$. Thus, $\lim\limits_{m\rightarrow\infty} P_6(m)=0$. \end{proof}
\begin{proof}[Proof of \Cref{le.main.stable}] By \Cref{le.p1} we have $P(m)\le\sum_{i=1}^6 P_i(m)$ for $m\ge m_0$ and by \Cref{le.p2} we have that $\lim\limits_{m\rightarrow\infty}\sum_{i=1}^6 P_i(m)=0$. Thus $\lim\limits_{m\rightarrow\infty} P(m)=0$. \end{proof}
\end{document} | arXiv | {
"id": "2103.16340.tex",
"language_detection_score": 0.6925239562988281,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\centerline{\Large\bf Mutant knots with symmetry }
\centerline{\sc H.R.Morton} \vglue 0.1truein
{\small\sl \centerline{Department of Mathematical Sciences,} \centerline{University of Liverpool,} \centerline{Peach Street, Liverpool L69 7ZL, UK.} }
\begin{abstract} Mutant knots, in the sense of Conway, are known to share the same Homfly polynomial. Their $2$-string satellites also share the same Homfly polynomial, but in general their $m$-string satellites can have different Homfly polynomials for $m>2$. We show that, under conditions of extra symmetry on the constituent $2$-tangles, the directed $m$-string satellites of mutants share the same Homfly polynomial for $m<6$ in general, and for all choices of $m$ when the satellite is based on a cable knot pattern.
We give examples of mutants with extra symmetry whose Homfly polynomials of some $6$-string satellites are different, by comparing their quantum $sl(3)$ invariants. \end{abstract}
\section{Introduction}
This paper has been inspired by recent observations of Ochiai and Jun Murakami about the Homfly skein theory of $m$-parallels of certain symmetrical $2$-tangles. In \cite{Ochiai} Ochiai remarks that the $3$-parallels of the tangle $AB$ in figure \ref{figone} and its mirror image $\overline{A}\overline{B}=BA$ are equal in the Homfly skein of $6$-tangles, in other words, in the Hecke algebra $H_6$, \cite{AistonMorton}. \begin{figure}\label{figone}
\end{figure}
As a consequence, the $3$-parallels of any mutant pair of knots given by composing the $2$-tangles $AB$ and $BA$ with any other $2$-tangle $C$ and then closing will share the same Homfly polynomial.
This is in contrast with the known fact that $3$-parallels of mutant knots in general can have different Homfly polynomials, \cite{MortonTraczyk, MortonCromwell}.
There is interest in the extent to which the Homfly polynomial of $m$-parallels or other $m$-string satellites can distinguish mutants which are closures of $ABC$ and $BAC$ with $A$ and $B$ as above. Ochiai has found that the $4$-parallels of $AB$ and $BA$ are different in the skein $H_8$.
The purpose of this paper is to show that if $A$ and $B$ are any two oriented $2$-tangles with symmetry \begin{center} $ A$\quad = \quad {\labellist \pinlabel $A$ at 129 709
\endlabellist \pica{mutantone} {.6}}\ \ , \quad $ B$\quad = \quad {\labellist \pinlabel $B$ at 129 709
\endlabellist \pica{mutantone} {.6}} \end{center}
then the $m$-parallels, and indeed any directed $m$-string satellite, of knots $\widehat{\phantom A}ABC$ and $\widehat{\phantom A}BAC$ shown in figure \ref{threeparts} share the same Homfly polynomial for $m<6$.
\begin{figure}
\caption{Tangle interchange}
\label{threeparts}
\end{figure} In contrast there exist examples of $A,B$ and $C$, including Ochiai's case with \begin{center} $A\quad =$\quad \pic{A}{.300} \ ,\quad $B\quad =$\quad \pic{B}{.300}\ ,\end{center}
for which the Homfly polynomials of the $6$-fold parallel are different.
As an unexpected extension of the main result we show that the Homfly polynomial of a genuine connected cable, based on the $(m,n)$ torus knot pattern, with $m$ and $n$ coprime, for any number of strings, $m$, will not distinguish mutants with symmetry above, although a more general connected satellite pattern can do so.
The examples which exhibit differences for the directly oriented $6$-parallel can also be used to show that the $4$-parallels with two pairs of reverse strands have distinct Homfly polynomials.
The proofs are based on the relation of the Homfly satellite invariants to quantum $sl(N)$ invariants, and the techniques are an extension of work with Cromwell \cite{MortonCromwell} and with H. Ryder \cite{MortonRyder}. The eventual calculations that exhibit the difference of invariants in the specific example depend on the $27$ dimensional irreducible module over $sl(3)$ corresponding to the partition $4,2$, and some Maple calculations following similar lines to those in \cite{MortonRyder}.
\section{Shared invariants of mutants}
The term \emph{mutant} was coined by Conway, and refers to the following general construction.
Suppose that a knot $K$ can be decomposed into two oriented $2$-tangles $F$ and $G$
\begin{center} $K$ \quad {=} \quad { \labellist \pinlabel{$F$} at 100 733 \pinlabel{$G$} at 181 733 \endlabellist \pic{mutantbox}{ .6}} \end{center}
A new knot $K'$ can be formed by replacing the tangle $F$ with the tangle $F'=\tau_i(F)$ given by rotating $F$ through $\pi $ in one of three ways,
\begin{center} $\tau_1(F)$\quad {=} \quad {\labellist \pinlabel{$F$} at 96 668 \endlabellist \pic{mutanttwo} {.6} }\ ,\quad $\tau_2(F)$\quad {=} \quad{\labellist \pinlabel{$F$} at 102 629 \endlabellist \pic{mutantthree} { .6}}\ ,\quad $\tau_3(F)$\quad {=} \quad {\labellist \pinlabel{$F$} at 129 709 \endlabellist \pica{mutantone} {.6} }\ , \end{center} reversing its string orientations if necessary. Any of the three knots \begin{center} $K'$\quad {=} \quad{\labellist \pinlabel{{$\tau_i(F)$}} at 102 735 \pinlabel{{$G$}} at 181 735 \endlabellist \pic{mutantbox}{ .6}} \end{center}
is called a
\emph{mutant} of $K$.
The two $11$-crossing knots, $C$ and $KT$, with trivial Alexander polynomial found by Conway and Kinoshita--Terasaka are the best-known example of mutant knots.
\begin{center}{ $C\ =\ $ \pic{Conway} { .400}\ \qquad
$KT\ =\ $\pic{KT} {.400}\ } \end{center} \subsection{Satellites}
A satellite of $K$ is determined by choosing a diagram $Q$ in the standard annulus, and then drawing $Q$ on the annular neighbourhood of $K$ determined by the framing, to give the satellite knot $K*Q$. We refer to this construction as {\em decorating $K$ with the pattern $Q$}, as shown in figure \ref{figsatellite}. \begin{figure}
\caption{Satellite construction}
\label{figsatellite}
\end{figure}
For fixed $Q$ the Homfly polynomial $P(K*Q)$ of the satellite is an invariant of the framed knot $K$. The invariants $P(K*Q)$ as $Q$ varies make up the {\em Homfly satellite invariants} of $K$. We use the alternate notation $P(K;Q)$ in place of $P(K*Q)$ when we want to emphasise the dependence on $K$.
The general symmetry result compares the invariants of two knots $K$ and $K'$ made up of $2$-tangles $A$, $B$ and $C$, by interchanging $A$ and $B$ as in figure \ref{threeparts}.
\begin{theorem} \label{symmetry} Suppose that
$A$ and $B$ are both symmetric under the half-twist $\tau_3$, so that \begin{center} $ A$\quad = \quad {\labellist \pinlabel $A$ at 129 709
\endlabellist \pica{mutantone} {.6}}\ \ , \quad $ B$\quad = \quad {\labellist \pinlabel $B$ at 129 709
\endlabellist \pica{mutantone} {.6}} \end{center} Let $K$ and $K'$ be knots which are the closure of $ABC$ and $BAC$ respectively for any tangle $C$, as in figure \ref{threeparts}. Then $P(K*Q)=P(K'*Q)$ for every closed braid pattern $Q$ on $m<6$ strings. \end{theorem}
\begin{remark} Our proof will apply equally to the case where $Q$ is the closure of a directly oriented $m$-tangle with $m<6$. \end{remark}
In order to prove the theorem we must rewrite the Homfly satellite invariants in terms of quantum $sl(N)$ invariants, so we now give a brief summary of the relations between these invariants, originally established by Wenzl. Further details can be found in \cite{AistonMorton} and the thesis of Lukac, \cite{Lukacthesis}, including details of variant Homfly skeins with a framing correction factor, $x$. These are isomorphic to the skeins used here but the parameter allows a careful adjustment of the quadratic skein relation to agree directly with the natural relation arising from use of the quantum groups $sl(N)$.
\subsection{Homfly skeins}
For a surface $F$ with some designated input and output boundary points the (linear) {Homfly skein} of $F$ is defined as linear combinations of oriented diagrams in $F$, up to Reidemeister moves II and III, modulo the skein relations \begin{center}\begin{enumerate} \item \qquad{$\pic{xor.eps}{.2500}\ -\ \pic{yor.eps} {.2500} \qquad =\qquad{(s-s^{-1})}\quad\ \pic{ior.eps} {.2500} \ ,$} \item \qquad {$ \pic{rcurlor.eps} {.2500} \qquad=\qquad {v^{-1}}\quad \pic{idor.eps} {.2500}\ . $} \end{enumerate} \end{center}
It is an immediate consequence that \begin{center} \pic{unknot} {.150} \ \pic{idor.eps} {.2500}\quad=\quad $\delta$ \ \pic{idor.eps} {.2500},\end{center} where $\delta =\displaystyle\frac{v^{-1}-v}{s-s^{-1}}\in\Lambda$. The coefficient ring $\Lambda$ is taken as $Z[v^{\pm1},s^{\pm1}]$, with denominators $ s^r-s^{-r}, r\ge1$.
The skein of the annulus is denoted by ${\cal C}$. It becomes a commutative algebra with a product induced by placing one annulus outside another.
The skein of the rectangle with $m$ inputs at the top and $m$ outputs at the bottom is denoted by $H_m$. We define a product in $H_m$ by stacking one rectangle above the other, obtaining the Hecke algebra $H_m(z)$, when $z=s-s^{-1}$ and the coefficients are extended to $\Lambda$. The Hecke algebra $H_m$ can also be regarded as the group algebra of Artin's braid group $B_m$ generated by the elementary braids $\sigma _i$, $i=1, \dots , m-1$, modulo the further quadratic relation $\sigma _i^2=z\sigma _i +1$.
The closure map from $H_m$ to ${\cal C}$ is the $\Lambda$-linear map induced by mapping a tangle $T$ to its closure $\widehat{T}$ in the annulus (see figure \ref{closuremap}). We refer to a diagram $Q=\widehat{T}$ as a \emph{directly oriented} pattern.
\begin{figure}
\caption{The closure map}
\label{closuremap}
\end{figure}
The image of this map is denoted by ${\cal C}_m$, which
has a useful interpretation as the space of symmetric polynomials of degree $m$ in variables $x_1, \ldots , x_N$ for large enough $N$. Moreover, the submodule ${\cal C}_+ \subset {\cal C}$ spanned by the union $\cup _{m \geq 0}\, {\cal C}_m$ is a subalgebra of ${\cal C}$ isomorphic to the algebra of the symmetric functions.
\subsection{Quantum invariants} A quantum group $\cal G$ is an algebra over a formal power series ring ${\bf Q}[[h]]$, typically a deformed version of a classical Lie algebra. We write $q=e^h, s=e^{h/2}$ when working in $sl(N)_q$. A finite dimensional module over $\cal G$ is a linear space on which $\cal G$ acts.
Crucially, $\cal G$ has a coproduct $\Delta$ which ensures that the tensor product $V\otimes W$ of two modules is also a module. It also has a {\em universal $R$-matrix} (in a completion of ${\cal G}\otimes{\cal G}$) which determines a well-behaved module isomorphism $$R_{VW}:V\otimes W \to W\otimes V.$$
This has a diagrammatic view indicating its use in converting coloured tangles to module homomorphisms. \begin{center} {\labellist \small \pinlabel{{$ W\ \otimes \ V$}} at 84 156 \pinlabel{{$V\ \otimes \ W$}} at 84 12 \pinlabel{$R_{VW}$} at -50 84 \endlabellist} \pic{Rmatrix} {.250} \end{center}
A braid $\beta$ on $m$ strings with permutation $\pi\in S_m$ and a colouring of the strings by modules $V_1,\ldots,V_m$ leads to a module homomorphism $$J_\beta:V_1\otimes\cdots\otimes V_m \to V_{\pi(1)}\otimes\cdots\otimes V_{\pi(m)}$$ using $R_{V_i,V_j}^{\pm1}$ at each elementary braid crossing. The homomorphism $J_\beta$ depends {\em only on the braid} $\beta$ itself, not its decomposition into crossings, by the Yang-Baxter relation for the universal $R$-matrix.
When $V_i=V$ for all $i$ we get a module homomorphism $J_\beta:W\to W$, where $W=V^{\otimes m}$. Equally, a directed $m$-tangle $T$ determines an endomorphism $J_T$ of $W=V^{\otimes m}$. Now any $sl(N)$ module $W$ decomposes as a direct sum $\bigoplus {(W_\mu\otimes V_\mu^{(N)})}$, where $W_\mu$ is the linear subspace consisting of the {\em highest weight vectors} of type $\mu$ associated to the module $V_\mu^{(N)}$. Highest weight subspaces of each type are preserved by module homomorphisms, and so $J_T$ determines (and is determined by) the restrictions $J_T(\mu):W_\mu \to W_\mu$ for each $\mu$.
If a knot $K$ is decorated by a pattern $Q$ which is the closure of an $m$-tangle $T$ then its quantum invariant $J(K*Q;V)$ can be found from the endomorphism $J_T$ of $W=V^{\otimes m}$ in terms of the quantum invariants of $K$ and the highest weight maps $J_T(\mu):W_\mu \to W_\mu$ by the formula \begin{equation} J(K*Q;V)=\sum c_\mu J(K;V_\mu^{(N)}) \label{weighttrace}\end{equation} with $c_\mu=\mbox{tr}\, J_T(\mu)$. This formula follows from lemma II.4.4 in Turaev's book \cite{Turaevbook}. Here $\mu $ runs over partitions with at most $N$ parts when we are working with $sl(N)$, and we set $c_\mu=0$ when $W$ has no highest weight vectors of type $\mu$.
\begin{proof}[Proof of theorem \ref{symmetry}] Take $V=V^{(N)}$ as the fundamental module of dimension $N$ for $sl(N)$. Then the only highest weight types $\mu$ which occur in equation (\ref{weighttrace}) are partitions of $m$ with at most $N$ rows. Because $J(K*Q;V^{(N)})=P(K*Q)$ when $v=s^{-N}$ we can show that $P(K*Q)=P(K'*Q)$ by showing that $J(K*Q;V^{(N)})=J(K'*Q;V^{(N)})$ for all $N$. By equation \ref{weighttrace} it is then enough to show that $J(K;V_\mu^{(N)})= J(K';V_\mu^{(N)})$ for all $N$ and all partitions $\mu\vdash m$.
Now each tangle $A$ and $B$ determines an endomorphism $J_A, J_B$ of $V_\mu\otimes V_\mu$. If $J_A$ and $J_B$ commute then $J(K;V_\mu)=J(K';V_\mu)$. The endomorphisms $J_A$ and $J_B$ are determined by their restriction $J_A(\nu), J_B(\nu)$ to the highest weight subspaces $W_\nu$ in the decomposition $V_\mu\otimes V_\mu=\sum W_\nu\otimes V_\nu$, so it is enough to show that $J_A(\nu)$ and $J_B(\nu)$ commute where $V_\nu$ is a summand of $V_\mu\otimes V_\mu$. This is certainly the case for all $\nu$ where $W_\nu$ is $1$-dimensional, which includes the case of single row or column partitions $\mu$, \cite{MortonCromwell}.
As a special case of the work of Rosso and Jones, \cite{Rosso,MortonManchon}, we know that the endomorphism of $V_\mu\otimes V_\mu$ for the full twist $\Delta^2$ on two strings operates as a scalar $e^{f(\nu)}$ on each highest weight space $W_\nu$, while the half twist $\Delta$, represented by the $R$-matrix $R_{V_\mu V_\mu}$, operates on $W_\nu$ with two eigenvalues $\pm e^{\frac{1}{2}f(\nu)}$.
The positive and negative eigenspaces correspond to the classical decomposition of the Schur function $(s_\mu)^2$ into symmetric and skew-symmetric parts, $h_2(s_\mu)$ and $e_2(s_\mu)$, and the dimension of each eigenspace of $W_\nu$ is the multiplicity of $s_\nu$ in $h_2(s_\mu)$ and $e_2(s_\mu)$ respectively.
Now $A=\tau_3(A)$, so that $A\Delta=\Delta A$. Hence the endomorphism $J_A$, and similarly $J_B$, preserves the positive and negative eigenspaces of each $W_\nu$. If these eigenspaces have dimension $1$ or $0$ then $J_A$ and $J_B$ will commute on $W_\nu$.
The theorem is then established by checking that no $s_\nu$ occurs in $h_2(s_\mu)$ or $e_2(s_\mu)$ with multiplicity $>1$ for any $\mu$ with $|\mu|\le 5$. The decomposition of all of these can be quickly confirmed using the Maple program SF of Stembridge \cite{Stembridge}. \end{proof} \begin{corollary}
Examples include $k$-pretzel knots {$K(a_1,\ldots,a_k)$} with odd $a_i$.
\begin{center} {\labellist \small \pinlabel{{$a_1$}} at 143 670 \pinlabel{{$a_2$}} at 194 672 \pinlabel{{$a_k$}} at 300 674 \endlabellist \pic{pretzel}{.60}} \end{center}
Here the numbers $a_i$ can be permuted without changing the Homfly polynomial of any satellite with $\le5$-strings.
\end{corollary}
\section{Satellites with different Homfly polynomials}
A further check with the program SF when $|\mu|=6$ shows that there are just three partitions, $\mu=4,2$, its conjugate $\mu = 2,2,1,1$ and $\mu=3,2,1$ whose symmetric square $h_2[s_\mu]$ contains summands with multiplicity $>1$, as does the exterior square of $\mu=3,2,1$. Explicitly $h_2[s_{4,\,2}] = {s_{8, \,4}} + {s_{8, \,2, \,2}} + {s_{7, \,4, \,1}} + {s_{7, \,3 , \,2}} + {s_{7, \,3, \,1, \,1}} + {s_{6, \,6}} + {s_{6, \,5, \,1 }} + 2\,{s_{6, \,4, \,2}} + {s_{6, \,3, \,2, \,1}} + {s_{6, \,2, \,2, \,2}} + {s_{5, \,5, \,1, \,1}}
+ {s_{5, \,4, \,3}} + {s_{5, \,4, \,2, \,1}} + {s_{5, \,3 , \,3, \,1}} + {s_{4, \,4, \,4}} + {s_{4, \,4, \,2, \,2}}$. This means that, although $m$-string satellites of $K$ and $K'$ must share the Homfly polynomial when $m\le 5$, it is possible for the Homfly polynomials of some $6$-string satellites to differ.
We give an example now where this does indeed happen.
\begin{theorem}
Let $K$ and $K'$ be the pretzel knots $K=K(1,3,3,-3,-3)$ and $K'=K(1,3,-3,3,-3)$.
\begin{center} \pic{pretzelreef}{.600}\qquad\pic{pretzelgranny}{.600} \end{center}
The $6$-fold parallels $K*Q$ and $K'*Q$, where $Q$ is the closure of the identity braid on $6$ strings, have different Homfly polynomials. \end{theorem}
\begin{proof}Write $K$ and $K'$ as the closure of the products $\Delta ABAB$ and $\Delta BAAB$ respectively, where \begin{center} $A\quad =$\quad \pic{A}{.300} \ ,\quad $B\quad =$\quad \pic{B}{.300}\ ,\end{center}
are the partially closed $3$-braids shown, and $\Delta$ is the positive half-twist. We show that $P(K*Q)\ne P(K'*Q)$ when $v=s^{-3}$. These values are given by the $sl(3)$ quantum invariants $J(K*Q;V^{(3)})$ and $J(K'*Q;V^{(3)})$, where $V^{(3)}$ is the fundamental $3$-dimensional module for $sl(3)$. Since $Q$ is the closure of the identity braid on $6$ strings it induces the identity endomorphism on the module $(V^{(3)})^{\otimes 6}$. This module decomposes as $\bigoplus W_\mu\otimes V_\mu^{(3)}$ where $\mu$ runs through partitions of $6 $ with at most $3$ rows. The trace of the identity on $W_\mu$ is just $d_\mu=\dim W_\mu$, giving $$J(K*Q;V^{(3)})=\sum d_\mu J(K;V_\mu^{(3)}).$$
The only partition $\mu$ in this range for which the exterior or symmetric square contains highest weight vectors of multiplicity $>1$ is the partition $\mu=4,2$, since the partition $\mu=2,2,1,1$ has $4$ rows and the repeated factors for $\mu=3,2,1$ occur for partitions with more than $3$ rows. Now $J_A(\mu)J_B(\mu)=J_B(\mu)J_A(\mu)$ for all other $\mu$ since $A$ and $B$ are symmetric up to altering the framing on both strings, while maintaining the writhe. Then $$P(K*Q)- P(K'*Q)=d_\mu(J(K;V_\mu^{(3)})-J(K';V_\mu^{(3)}))$$ when $v=s^{-3}$ and $\mu=4,2$. Since $d_\mu\ne0$ it is enough to show that $J(K;V_\mu^{(3)})\ne J(K';V_\mu^{(3)})$.
The module $V_\mu^{(3)}$ has dimension $27$.
We now work in the quantum group $sl(3)$ and drop the superscript $(3)$ from the irreducible modules.
Decompose the module $V_\mu\otimes V_\mu$ as $\sum W_\nu\otimes V_\nu$ and compare the endomorphisms given by the tangles $T=ABAB\Delta$ and $T'=BAAB\Delta$.
In this case just one of the invariant subspaces of highest weight vectors has dimension $> 1$. It can be shown that the corresponding $2\x2$ matrices $A_\mu$ and $B_\mu$ arising from the two mirror-image tangles $A$ and $B$ with $3$ crossings satisfy $\makebox{tr} (A_\mu B_\mu A_\mu B_\mu -A_\mu A_\mu B_\mu B_\mu)\ne 0$, which results in a difference in their $sl(3)$ invariants $J(K;V_\lambda)$.
None of the other $6$-cell invariants differ on the two knots. Consequently the $6$-parallels have different $sl(3)$ invariants. The $sl(3)$ invariant of the $6$-parallels of the two pretzel knots coloured with the fundamental module, and thus their Homfly polynomials, are then different. \end{proof}
\subsection{Use of the quantum group $sl(3)_q$}
The calculation of the $2\x2$ matrices $A_\nu$ and $B_\nu$ giving the effect of the two tangles on the highest weight vectors where there is a 2-dimensional highest weight subspace of the symmetric part of the module depends on finding the explicit action of the quantum group on the 27-dimensional module $V_{\mu}^{(3)}$ with $\mu=4,2$ and its tensor square, as well as the homomorphism representing its $R$-matrix. I used the linear algebra packages in Maple to handle the matrix working and subsequent polynomial factorisation, following fairly closely the techniques developed with H. Ryder in the paper \cite{MortonRyder}.
In the interests of reproducibility I give an account of the methods used, and some of the checks applied during the calculations, to test against known properties.
We start from a presentation of the quantum group $sl(3)_q$ as an algebra with six generators, $X_1^{\pm},\,X_2^{\pm},\,H_1,\,H_2$, and a description of the comultiplication and antipode.
Let $M$ be any finite-dimensional left module over $sl(3)_q$. The action of any one of these six generators $Y$ will determine a linear endomorphism $Y_M$ of $M$. We build up explicit matrices for these endomorphisms on a selection of low-dimensional modules, using the comultiplication to deal with the tensor product of two known modules, and the antipode to construct the action on the linear dual of a known module. We must eventually determine the matrices $Y_M$ for our module $M=V_{\pic{Fourtwo.eps}{.300}}$, and find the $729\times729$ $R$-matrix, $R_{MM}$ which represents the endomorphism of $M\otimes M$ needed for crossings.
We follow Kassel in the basic description of the quantum group from using generators $H_1$ and $H_2$ for the Cartan sub-algebra, but with generators $X_i^\pm$ in place of $X_i$ and $Y_i$. We use the notation $K_i=\exp(hH_i/4)$, and set $a=\exp(h/4), \, s=\exp(h/2)=a^2$ and $q=\exp(h)=s^2$, unlike Kassel. The generators satisfy the commutation relations $$[H_i,H_j]=0, \ [H_i,X_j^\pm]=\pm a_{ij}X_j^\pm, \ [X_i^+,X_i^-]=(K_i^2-K_i^{-2})/(s-s^{-1}),$$ where $(a_{ij})=\pmatrix{2&-1\cr-1&2\cr}$ is the Cartan matrix for $SU(3)$ (and also the Serre relations of degree 3 between $X_1^\pm$ and $X_2^\pm$).
Comultiplication is given by \[\begin{array}{rl}\Delta(H_i)&=H_i\otimes I+I\otimes H_i,\\ (\hbox{so }\Delta(K_i)&= K_i\otimes K_i,)\\ \Delta(X_i^\pm)&=X_i^\pm\otimes K_i+ K_i^{-1}\otimes X_i^\pm,\\ \end{array} \] and the antipode $S$ by $S(X_i^\pm) =-s^{\pm 1}X_i^\pm$, $S(H_i)=-H_i$, $ S(K_i)=K_i^{-1}$.
The fundamental $3$-dimensional module, which we denote by $E$, has a basis in which the quantum group generators are represented by the matrices $Y_E$ as listed here. \[X_1^+=\pmatrix{0&1&0\cr0&0&0\cr0&0&0\cr},\ X_2^+=\pmatrix{0&0&0\cr0&0&1\cr0&0&0\cr}\] \[ X_1^-=\pmatrix{0&0&0\cr1&0&0\cr0&0&0\cr},\ X_2^-=\pmatrix{0&0&0\cr0&0&0\cr0&1&0\cr}\] \[H_1=\pmatrix{1&0&0\cr0&-1&0\cr0&0&0\cr},\ H_2=\pmatrix{0&0&0\cr0&1&0\cr0&0&-1\cr}.\]
For calculations we keep track of the elements $K_i$ rather than $H_i$, represented by \[K_1=\pmatrix{a&0&0\cr0&a^{-1}&0\cr0&0&1\cr},\ K_2=\pmatrix{1&0&0\cr0&a&0\cr0&0&a^{-1}\cr}\] for the module $E$.
We can then write down the elements $Y_{EE}$ for the actions of the generators $Y$ on the module $E\otimes E$, from the comultiplication formulae. The $R$-matrix $R_{EE}$ can be given, up to a scalar, by the prescription \[\begin{array}{rl}R_{EE}(e_i\otimes e_j)&=e_j\otimes e_i, \hbox{ if }i>j,\\ &=s\,e_i\otimes e_i, \hbox{ if } i=j,\\ &=e_j\otimes e_i+(s-s^{-1})e_i\otimes e_j, \hbox{ if }i<j, \\ \end{array} \] for basis elements $\{e_i\}$ of $E$.
The linear dual $M^*$ of a module $M$ becomes a module when the action of a generator $Y$ on $f\in M^*$ is defined by $<Y_{M^*}f,v>=<f,S(Y_M)v>$, for $v\in M$. For the dual module $F=E^*$ we then have matrices for $Y_F$, relative to the dual basis, as follows.
\[X_1^+=\pmatrix{0&0&0\cr-s&0&0\cr0&0&0\cr},\ X_2^+=\pmatrix{0&0&0\cr0&0&0\cr0&-s&0\cr}\] \[ X_1^-=\pmatrix{0&-s^{-1}&0\cr0&0&0\cr0&0&0\cr},\ X_2^-=\pmatrix{0&0&0\cr0&0&-s^{-1}\cr0&0&0\cr}\] \[K_1=\pmatrix{a^{-1}&0&0\cr0&a&0\cr0&0&1\cr},\ K_2=\pmatrix{1&0&0\cr0&a^{-1}&0\cr0&0&a\cr}.\]
The most reliable way to work out the $R$-matrices $R_{EF}, R_{FE}$ and $R_{FF}$ is to combine $R_{EE}$ with module homomorphisms $\hbox{cup}_{EF}$, $ \hbox{cup}_{FE}$, $\hbox{cap}_{EF}$ and $\hbox{cap}_{FE}$ between the modules $E\otimes F$, $F\otimes E$ and the trivial 1-dimensional module, $I$, on which $X_i^\pm$ acts as zero and $K_i$ as the identity. The matrices are determined up to a scalar by such considerations; a choice for one dictates the rest.
Once these matrices have been found they can be combined with the matrix $R_{EE}^{-1}$ to construct the $R$-matrices $R_{EF},R_{FE},R_{FF}$, using the diagram shown below, for example, to determine $R_{EF}$. This gives \[ R_{EF}=(1_F\otimes 1_E\otimes \hbox{cap}_{EF})\circ (1_F\otimes R_{EE}^{-1}\otimes 1_F)\circ (\hbox{cup}_{FE}\otimes 1_E\otimes 1_F) .\] \begin{center} \pic{crossing.eps} {.400} \end{center}
The module structure of $M=V_{\pic{Fourtwo.eps}{.300}}$ can be found by identifying $M$ as a $27$-dimensional submodule of $V_{\pic{Twotwo.eps}{.300}}\otimes V_{\pic{Two.eps}{.300}}$, while the two $6$-dimensional modules $V_{\pic{Two.eps}{.300}}$ and $V_{\pic{Twotwo.eps}{.300}}$ are themselves submodules of $E\otimes E$ and $F\otimes F$ respectively.
We know, by the Pieri formula, that there is a direct sum decomposition of $V_{\pic{Twotwo.eps}{.300}}\otimes V_{\pic{Two.eps}{.300}}$ as $M\oplus N$, where $M=V_{\pic{Fourtwo.eps}{.300}}$ and $N$ is the sum of the $8$-dimensional module $V_{\pic{Twoone.eps} {.400}}$ and the $1$-dimensional trivial module.
We first identify the module $V_{\pic{Two.eps}{.300}}$ as a submodule of $E\otimes E$, knowing that $E\otimes E$ is isomorphic to $V_{\pic{Two.eps}{.300}} \otimes F$.
The full twist element on the two strings both coloured by $E$ is represented by $R^2_{EE}$ which acts on $E\otimes E$ as a scalar on each of the two irreducible submodules $V_{\pic{Two.eps}{.300}}$ and $F$.
Use Maple to find bases for the two eigenspaces of $R^2_{EE}$. Then we can identify $V_{\pic{Two.eps}{.300}}$ with the $6$-dimensional one, and write $P$ and $Q$ for the $9\x6$ and $9\x3$ matrices whose columns are these bases. The partitioned matrix $(P|Q)$ is invertible, and its inverse, found by Maple, can be written as $\displaystyle\left(\displaystyle{R\over S}\right)$, where $R$ is a $6\x9$ matrix with $RP=I_{6}$ and $RQ=0$.
Regard $P=\mbox{inj}M_1EE$ as the matrix representing the inclusion of the module $V_{\pic{Two.eps}{.300}}$ into $E\otimes E$. Then $R=\mbox{proj}EEM_1$ is the matrix, in the same basis, of the projection from $E\otimes E$ to $V_{\pic{Two.eps}{.300}}$. For $M_1 =V_{\pic{Two.eps}{.300}}$ the module generators $Y_{M_1}$ are given by $Y_{M_1}=R\,Y_{EE}\,P$, giving the explicit action of the quantum group on $V_{\pic{Two.eps}{.300}}$.
We perform a similar calculation on $F\otimes F$ to identify the module $M_2=V_{\pic{Twotwo.eps}{.300}}$ and the matrices $\mbox{inj}M_2FF$ and $\mbox{proj}FFM_2$, giving the action of the quantum group on $M_2=V_{\pic{Twotwo.eps}{.300}}$ in a similar way.
We use inclusion and projection further to find the four $6^2\times 6^2$ $R$-matrices $R_{M_iM_j}$. For example, to construct $R_{M_1M_2}:M_1\otimes M_2\to M_2\otimes M_1$, first map $M_1\otimes M_2$ to $E\otimes E\otimes F\otimes F$ by $\mbox{inj}M_1EE\otimes \mbox{inj}M_2FF$. Then construct the $R$-matrix crossing two strings with $E\otimes E$ and two with $F\otimes F$ as the composite of $1\otimes R_{EF}\otimes 1$ , $R_{EF}\otimes R_{FE}$ and $1\otimes R_{FF}\otimes 1$, and finally compose with the projections $\mbox{proj}FFM_2 \otimes \mbox{proj}EEM_1$.
A similar calculation on the module $M_1\otimes M_2$ yields the submodule $M=V_{\pic{Fourtwo.eps}{.300}}$. The full twist on two strings, one coloured by $M_1$ and one by $M_2$, is represented by the product $R_{M_2M_1}R_{M_1M_2}$ and will have one 27-dimensional eigenspace $M$ complemented by two other eigenspaces. Taking the bases of these eigenspaces in a partitioned $36\x36 $ matrix as above will determine a $36\times 27$ matrix $P=\mbox{inj}MM_1M_2$ and a $27\x36$ matrix $R=\mbox{proj}M_1M_2M$. The quantum group actions $Y_{M_1M_2}$ on the tensor product are determined by the coproduct formulae, and the actions $Y_M$ are then given from these using $P$ and $R$. These in turn give rise to the quantum group actions $Y_{MM}$ on $M\otimes M$.
We are also able to construct the $27^2\x27^2$ $R$-matrix $R_{MM}$ using the same inclusion and projection to map $M\otimes M$ into $M_1\otimes M_2\otimes M_1\otimes M_2$, followed by the matrix for crossing four strands, built up from the $R$-matrices $R_{M_iM_j}$ and then the projections back to $M\otimes M$.
\subsection{Completing the calculations}\label{calculation}
\begin{remark} We can reach this stage directly if we know the six module generators $Y_M$ and the $R$-matrix $R_{MM}$ for the module $M=V_{\pic{Fourtwo.eps}{.300}}$. We can then calculate the module generators $Y_{MM}$ using the coproduct, and the twisting element $T_M=(K_{1M})^4(K_{2M})^4$. \end{remark}
Knowing the module generators $Y_{MM}$ gives an immediate means of finding the highest weight vectors as common null-vectors of $X^+_{iMM}$, and their weights can be identified. All the submodules of $M\otimes M$ occur with multiplicity $1$ except $V_\nu$ with partition $\nu=6,4,2$ whose highest weights are $2,2$. The $3$-dimensional space $W_\nu$ of highest weight vectors for $\nu$ is found by solving the linear equations $X^+_{1MM}v=0$, $X^+_{2MM}v=0$, $K_{1MM}v=a^2v$ and $K_{2MM}v=a^2v$ for $v$. We then find the $2$-dimensional positive eigenspace for $R_{MM}$ on $W_\nu$. The endomorphisms $J_A$ and $J_B$ will preserve this eigenspace.
Represent the $3$-braid $\sigma_2 \sigma_1^{-1}\sigma_2$ in the $2$-tangle $A$ by an endomorphism $F_A$ of $M\otimes M\otimes M$, using $R_{MM}$ and its inverse. Then use $T_M$ and the partial trace to close off one string, hence giving the endomorphism $J_A$ of $M\otimes M$ determined by $A$. Explicitly, choose a basis $\{e_i\}$ of $M$ and write $$F_A(v\otimes T_M(e_i))=\sum_j f_{ij}(v)\otimes e_j$$ with $f_{ij}(v)\in M\otimes M$. Then $J_A(v)=\sum_i f_{ii}(v)$. Applied to each of the two vectors in the highest weight space this determines a $2\x2$ matrix $A_\nu$ representing the restriction of $J_A$ to this subspace. Similarly $B_\nu$ is found using the mirror image braid $\sigma_2^{-1} \sigma_1\sigma_2^{-1}$.
We know that $R_{MM}$ acts as a scalar on the $2$-dimensional space so $J(K;V_\mu)-J(K';V_\mu)$ is a non-zero scalar multiple of $\makebox{tr}(A_\nu B_\nu A_\nu B_\nu-B_\nu A_\nu A_\nu B_\nu)$.
This difference is $2(q^6+q^5+q^4+q^3+q^2+q+1)(q^4+1)(q^6+q^3+1)^2(q^4-q^2+1)^2(q^4 +q^3+q^2+q+1)^3(q^2+1)^4(q^2+q+1)^4(q^2-q+1)^4(q+1)^{10}(q-1)^{18}$, up to a power of $q=s^2$ and the quantum dimension of $V_\nu$.
\subsection{Further examples of difference}
Using the same matrices $A_\nu$ and $B_\nu$ it is possible to find further pretzel knot examples based on sequences of the tangles $A$ and $B$ where the $6$-parallels have different Homfly polynomial, such as the knots $K(3,3,3,-3,-3)$ and $K(3,3,-3,3,-3)$. The difference here is the same as for the first example multiplied by the factor $2q^{32}-q^{31}-3q^{30}+5q^{29}+3q^{28}-10q^{27}+q^{26}+14q^{25}-6q^{24}-19q^{23}+ 21q^{22}+20q^{21}-46q^{20}+2q^{19}+61q^{18}-48q^{17}-35q^{16}+83q^{15}-27q^{14} -66q^{13}+72q^{12}+3q^{11}-57q^{10}+40q^9+10q^8-33q^7+16q^6+7q^5-12q ^4+7q^3-4q+2$. The same calculations guarantee that satellites based on any closed $6$-tangle $Q=\widehat T$ will have different Homfly polynomial, provided that the trace $c_\mu$ of the endomorphism $J_{\widehat T} $ on the highest weight space $W_\mu$ of $V^{\otimes 6}$ is non-zero, where $\mu$ is the partition $4,2$. This will be the case for most, but not all, patterns $Q$, and certainly will be the case for many satellites which are knots rather than links.
The calculations in section \ref{calculation} also show that the $4$-parallels of the two pretzel knots $K(1,3,3,-3,-3)$ and $K(1,3,-3,3,-3)$ with two strings oriented in one direction and two in the opposite direction will have different Homfly polynomials, by using the decomposition of the corresponding $sl(3)_q$ module $W=V\otimes V\otimes V_{\pic{Oneone.eps} {.300}}\otimes V_{\pic{Oneone.eps} {.300}}$ into a sum of irreducible $sl(3)_q$ modules. The only module to figure in this decomposition with any multiplicity in its symmetric or exterior square is again $V_{\pic{Fourtwo.eps}{.300}}$. The calculations above use the fact that Homfly with $v=s^{-3}$ can be calculated by colouring strings with reverse orientation by the dual module $V^*$ to the fundamental module, and that this is $V_{\pic{Oneone.eps} {.300}}$ for $sl(3)_q$.
\section{Cable patterns} By way of contrast, if the pattern $Q$ is a cable on any number of strings then $K*Q$ and $K'*Q$ share the same Homfly polynomial, where $K$ and $K'$ have the same symmetry as in theorem \ref{symmetry}.
\begin{theorem} \label{cablesymmetry} Suppose that
$A$ and $B$ are both symmetric under the half-twist $\tau_3$, so that \begin{center} $ A$\quad = \quad {\labellist \pinlabel $A$ at 129 709
\endlabellist \pica{mutantone} {.6}}\ \ , \quad $ B$\quad = \quad {\labellist \pinlabel $B$ at 129 709
\endlabellist \pica{mutantone} {.6}} \end{center} Let $K$ and $K'$ be knots which are the closure of $ABC$ and $BAC$ respectively for any tangle $C$, as in figure \ref{threeparts}. Then $P(K*Q)=P(K'*Q)$ for every $(m,n)$ cable pattern $Q$ where $m$ and $n$ are coprime. \end{theorem} \begin{proof} As in the proof of theorem \ref{symmetry} we show that $J(K*Q;V^{(N)})=J(K'*Q;V^{(N)})$ for all $N$. By equation \ref{weighttrace} it is then enough to show that $J(K;V_\mu^{(N)})= J(K';V_\mu^{(N)})$ for all $N$ and all partitions $\mu\vdash m$ for which the coefficient $c_\mu\ne 0$. The coefficients $c_\mu$ depend on the pattern $Q$ and arise as the trace of the endomorphism $J_T$ when restricted to the highest weight space $W_\mu \subset V^{\otimes m}$, where $Q$ is the closure of the $m$-braid $T=(\sigma_1\sigma_2\cdots\sigma_{m-1})^n$.
It is shown in \cite{Rosso}, (see also \cite{MortonManchon}), that when $m$ and $n$ are coprime, for any such cable $Q$ the only non-zero coefficients $c_\mu$ occur when the partition $\mu$ is a {\em hook}. It is then enough to show that $J(K;V_\mu^{(N)})= J(K';V_\mu^{(N)})$ for all hook partitions $\mu$.
Using the same argument as in theorem \ref{symmetry} it remains to check that
no Schur function $s_\nu$ occurs with multiplicity $>1$ in the decomposition of either the symmetric or exterior squares, $h_2(s_\mu)$ or $e_2(s_\mu)$, for any hook partition $\mu$. This fact has been established by Carbonara, Remmel and Yang in theorem 3 of \cite{Carbonara}, and so the proof is complete. \end{proof} \begin{remark} Theorem \ref{cablesymmetry} highlights the importance of a precise terminology for different types of satellite. The term {\em cable} is sometimes used to mean any satellite, while there is a clear distinction here between the behaviour of cables and of parallels or other satellites, which is not primarily a matter of the number of components of the satellite. \end{remark}
\noindent May 2007
\end{document} | arXiv | {
"id": "0705.1321.tex",
"language_detection_score": 0.7715165019035339,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{On extensions of hook Weyl modules}
\author{Mihalis Maliakas\corref{cor1}} \ead{mmaliak@math.uoa.gr} \address{Department of Mathematics, University of Athens} \author{Dimitra-Dionysia Stergiopoulou\fnref{fn2}} \ead{dstergiop@math.uoa.gr} \address{Department of Mathematics, University of Athens}
\cortext[cor1]{Corresponding author} \fntext[fn2]{Partially supported by Onassis Foundation grant GZM 065-1.}
\begin{abstract} We determine the integral extension groups $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$ and $\mathrm{Ext}^k(\Delta(\mathrm{h}),$ $\Delta(\mathrm{h}(k)))$, where $\Delta(\mathrm{h}),\Delta(\mathrm{h}(k))$ are the Weyl modules of the general linear group $GL_n$ corresponding to hook partitions $\mathrm{h}=(a,1^b)$, $\mathrm{h}(k)=(a+k,1^{b-k})$. \end{abstract}
\begin{keyword}
Extensions \sep general linear group \sep Weyl module \sep hook
\MSC[2010] 20G05 \end{keyword} \maketitle
\section{Introduction}This paper concerns polynomial representations of the general linear group $GL_n$ over the integers. For a partition $\lambda$, let $\Delta(\lambda)$ denote the Weyl module of $GL_n$ of highest weight $\lambda$. The extension groups $\mathrm{Ext}^i(\Delta(\lambda),\Delta(\mu))$ play an important role in the theory. For example, the $p$-torsion of $\mathrm{Ext}^1(\Delta(\lambda),\Delta(\mu))$ yields the $\mathrm{Hom}$ space between the corresponding modular Weyl modules of $GL_n(K)$, where $K$ is an algebraically closed field of characteristic $p>0$, and the dimensions of the higher modular extensions may be obtained through torsion and restriction of integral extensions. Jantzen's sum formula can be viewed and proved via integral extension groups \cite{AK}.
There are not many cases where explicit computations of integral extension groups between Weyl modules have been carried out. In \cite{AB} the $GL_2$ case was treated and in \cite{BF} the $GL_3$ case when $\lambda$ and $\mu$ differ by a multiple of a single root, both for $i=1$. In \cite{Ak} the case $\lambda=(1^a), \mu=(a)$ was studied and in \cite{Ma} the situation where $\lambda, \mu$ are hooks differing by a single root was considered. In \cite{Ku2} the case where $\lambda, \mu$ are any partitions differing by a single root was settled. As the modular extension groups are intimately related to the integral ones, we mention the result on neighboring Weyl modules \cite{Ja} Part II Section 7, and the $SL_2$ result in \cite{Pa} for all $i$ generalizing \cite{Er} and \cite{CE}. More can be found in \cite{CP}.
Let $\mathrm{h}, \mathrm{h}(k)$ be hooks, $\mathrm{h}=(a,1^b), \mathrm{h}(k)=(a+k,1^{b-k})$, where $k$ is an integer such that $1 \le k \le b$. It follows that $\mathrm{Ext}^i(\Delta(\mathrm{h}), \Delta(\mathrm{h}(k)))=0$ if $ i>k $. In this paper we determine $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$ and $\mathrm{Ext}^k(\Delta(\mathrm{h}), \Delta(\mathrm{h}(k)))$ (Theorem 3.5 and Theorem 4.1). Our approach utilizes presentation matrices for various $\mathrm{Ext}$ groups that we determine from the description of generators and relations of Weyl modules of \cite{ABW} ($ i=1 $) and from the projective resolutions of \cite{Ma} ($ i=k $). Using these and the degree reduction theorem of Kulkarni \cite{Ku1}, we identify cyclic generators of extension groups of the form $\mathrm{Ext}^i(\Delta(\mathrm{h}),M)$, $ (i=1, k) $, where $ M $ is a tensor product of a divided power and an exterior power. Computing the image of these generators under canonical maps yields the results.
\section{Recollections}
\subsection{Notation} Let $F$ be a free abelian group of finite rank $n$. Fixing a basis of $F$ yields an identification of general linear groups $GL(F)=GL_n(\mathbb{Z})$. We will be working with homogeneous polynomial representations of $GL_n(\mathbb{Z})$ of degree $r$, or equivalently, with modules over the Schur algebra $S_\mathbb{Z}(n,r)$ \cite{Gr}, Section 2.4. We will write $S(n,r)$ in place of $S_\mathbb{Z}(n,r)$. By $DF=\sum_{i\geq 0}D_iF$ and $\wedge F=\sum_{i\geq 0}\wedge^{i}F$ we denote the divided power algebra of $F$ and the exterior algebra of $F$ respectively. We will usually omit $F$ and write $D_i$ and $\wedge^i$.
From \cite{Gr} or \cite{AB}, Proposition 2.1, we recall that for each sequence $a_1,...,a_n$ of nonnegative integers $a_i$ that sum to $r$, the $S(n,r)$-module $D_{a_1} \otimes ... \otimes D_{a_n}$ is projective. Throughout this paper all tensor products are over the integers.
For a partition $\lambda$ of $r$ with at most $n$ parts, we denote by $\Delta(\lambda)$ the corresponding Weyl module for $S(n,r)$. If $\lambda =(a)$ is a partition with one part, then $\Delta(\lambda) =D_a$, and if $\lambda =(1^b)$, then $\Delta(\lambda) =\wedge^{b}$. A hook $\mathrm{h}$ is a partition of the form $\mathrm{h}=(a,1^b)$. The following complex of $S(n,r)$-modules (which is the dual of the usual Koszul complex) is exact $$0 \rightarrow D_{a+b} \rightarrow ... \rightarrow D_{a+1} \otimes \wedge^{b-1} \xrightarrow{\theta_{a}} D_{a} \otimes \wedge^{b} \rightarrow ... \rightarrow \wedge^{a+b} \rightarrow 0,$$ where $\theta_{a}$ is the composition $\ D_{a+1}\otimes \wedge^{b-1} \xrightarrow{\triangle \otimes 1} D_{a}\otimes D_1 \otimes \wedge^{b-1}\xrightarrow{1 \otimes m} D_{a} \otimes \wedge^{b},$ where $\triangle$ (respectively, $m$) is the indicated component of the comultiplication (resp., multiplication) map of the Hopf algebra $DF$ (resp., $\wedge F $). It is well known that if $\mathrm{h}=(a,1^b)$ is a hook, $b \geq 1$, then $\Delta (\mathrm{h}) \simeq cok(\theta_{a}) \simeq ker(\theta_{a-1}),$ so that we have the following short exact sequence \begin{equation}0 \rightarrow \Delta (\mathrm{h}(1)) \xrightarrow{i} D_{a} \otimes \wedge^{b} \xrightarrow{\pi_0} \Delta(\mathrm{h}) \rightarrow 0, \end{equation} where $\mathrm{h}(1)=(a+1,1^{b-1})$, the map $i$ is induced by $\theta_{a}$ on generators and the map $\pi_0$ is induced by the identity map on generators.
\textit{Notation}: Throughout this paper we use the notation $\mathrm{h}=(a,1^b), \mathrm{h}(k)=(a+k,1^{b-k}), 1\le k\le b, r=a+b.$
\subsection{Straightening law} We recall the straightening law and the semi-standard basis theorem for $\Delta(\mathrm{h})$ (\cite{ABW}, Theorem II.3.16). Fix an ordered basis $e_1,..,e_n$ of $F$. For simplicity, we denote the element $e_i$ by $i$ and accordingly the element $e_{i_1}^{(a_1)} ... e_{i_t}^{(a_t)} \otimes e_{j_1} \wedge ... \wedge e_{j_b} \in D_a \otimes \wedge^b$ by ${i_1}^{(a_1)} ... {i_t}^{(a_t)} \otimes {j_1} ... {j_b}$. The image of this element under the identification $\Delta(\mathrm{h}) \simeq cok(\theta_{a})$ will be denoted by ${i_1}^{(a_1)} ... {i_t}^{(a_t)} | {j_1} ... {j_b}$. Now suppose $i_1<i_2<...<i_t$ and $j_1 \le i_1$. Then in $\Delta(\mathrm{h})$ we have
$${i_1}^{(a_1)}...{i_t}^{(a_t)} | {j_1} ... {j_b} = \begin{cases} -\sum\limits_{s\geq 2}{i_1}^{(a_1+1)} ...{i_s}^{(a_s-1)}...{i_t}^{(a_t)} | {i_s}{j_2} ... {j_b}, & \mbox{if}\;j_1=i_1 \\ -\sum\limits_{s\geq 1}j_1{i_1}^{(a_1)} ...{i_s}^{(a_s-1)}...{i_t}^{(a_t)} | {i_s}{j_2} ... {j_b}, & \mbox {if}\;j_1<i_1. \end{cases} $$
A $\mathbb{Z}$ - basis of $\Delta(\mathrm{h})$ is the set of all ${i_1}^{(a_1)} ... {i_t}^{(a_t)} | {j_1} ... {j_b} $, where $a_1+...+a_t=a,\; i_1<...<i_t$ and $i_1 <j_1< ... <j_b.$
\subsection{Resolutions of hooks} We will use the explicit finite projective resolution $P_{*}(a,b)$ of $ \Delta(\mathrm{h})$, \[0 \rightarrow ... \rightarrow P_2(a,b) \xrightarrow{\theta_{2}(a,b)} P_1(a,b) \xrightarrow{\theta_{1}(a,b)} P_0(a,b) \] of \cite{Ma}, Theorem 1, which we now recall. For short we denote the tensor product $D_{a_1} \otimes ... \otimes D_{a_m}$ of divided powers by $D(a_1,...,a_m)$. Let $P_i(a,b)=\sum D(a_1,...,a_{b+1-i})$ where the sum ranges over all sequences $(a_1,...,a_{b+1-i})$ of positive integers of length $b+1-i$ such that $a_1+...+a_{b+1-i}=a+b$ and $a \le a_1 \le a+i$. The differential $\theta_{i}(a,b)$ is defined by sending $x_1 \otimes ... \otimes x_{b+1-i} \in D(a_1,...,a_{b+1-i})$ to \[ \sum_{j=1}^{s} (-1)^{j+1}x_1 \otimes ...\otimes \triangle(x_j) \otimes ... \otimes x_{b+1-i} \in D(a_1,...,u,v,...,a_{s}), \] where $ s=b+1-i $ and $\triangle(x_j)$ is the image of $x_j$ under the two-fold diagonalization $D(a_j) \rightarrow \sum D(u,v)$, where the sum ranges over all positive integers $u,v$ such that $u+v=a_j$ and $D(a_1,...,u,v,...,a_{b+1-i})$ is a summand of $P_{i-1}(a,b)$ with $u$ located at position $j$. We denote by $ \triangle_{u,v} : D(a_j) \rightarrow D(u,v)$ the indicated component of the two-fold diagonalization $D(a_j) \rightarrow \sum D(u,v)$.
If $A,B$ are $S(n,r)$ - modules, we write $\mathrm{Hom}(A,B)$ and $\mathrm{Ext}^i(A,B)$ in place of $\mathrm{Hom}_{S(n,r)}(A,B)$ and $\mathrm{Ext}^i_{S(n,r)}(A,B)$ respectively.
We recall the recursions \begin{align*} &P_0(a,b)=D(a) \otimes P_0(1,b-1), \\ &P_i(a,b)=P_{i-1}(a+1,b-1) \oplus D(a) \otimes P_i(1,b-1), i>0 \end{align*} and that under these identifications we have the following.
\begin{rem} If $M$ is a $S(n,r)$-module, the differential $\mathrm{Hom}(\theta_i(a,b), M)$ of the complex $\mathrm{Hom}(P_*(a,b),M)$ looks like
\begin{center}
\begin{tikzcd}
\mathrm{Hom}(P_{i-2}(a+1,b-1),M) \arrow{r} \arrow[d, phantom, "\oplus"]
& \mathrm{Hom}(P_{i-1}(a+1,b-1),M)\arrow[d, phantom, "\oplus"]\\
\mathrm{Hom}(D(a) \otimes P_{i-1}(1,b-1),M)\arrow{r} \urar[shorten >= 25pt,shorten <= 25pt]{}
&\mathrm{Hom}(D(a) \otimes P_i(1,b-1),M)
\end{tikzcd}
\end{center}
where the top horizontal map is $\mathrm{Hom}(\theta_{i-1}(a+1,b-1),M)$, the bottom one is $-\mathrm{Hom}(1 \otimes \theta _i(1,b-1),M)$ and the restriction of the diagonal one on the summand $\mathrm{Hom}(D(a,j,a_2,...,a_m),M)$ is $\mathrm{Hom}(\triangle_{a,j} \otimes 1 \otimes ... \otimes 1, M).$ \end{rem} For any $S(n,r)$-module $M$ and any sequence $a_1,...,a_m$ of nonnegative integers such that $a_1+...+a_m=r$ and $m \le n$, we identify the $\mathbb{Z}$-module $$\mathrm{Hom}(D(a_1,...,a_m),M)$$ with the $(a_1,...,a_m)$ weight subspace of $M$ (with respect to the action of $\mathbb{Z}^n$) according to \cite{AB}, eqn. (11) on p. 178. We will use such identifications freely throughout this paper.
In particular, suppose $M$ is a skew Weyl module for $S(n,r)$ (denoted by $K_{\lambda / \mu}(F)$ in \cite{ABW}). Using the $\mathbb{Z}$-basis of $M$ given by the semi-standard tableaux \cite{ABW}, Theorem II.3.16, we see that the $\mathbb{Z}$-module $\mathrm{Hom}(D(a_1,...,a_m),M)$ may be identified with the $\mathbb{Z}$-submodule of $M$ that has basis the semi-standard tableaux of $M$ that contain the entry $i$ exactly $a_i$ times, $i=1,...,m$. We call this the semi-standard basis of $\mathrm{Hom}(D(a_1,...,a_m),M)$. (Perhaps we should remark that what we have called semi-standard tableaux are called 'co-standard' in \cite{ABW}, Definition II.3.2: the entries in each row are weakly increasing from left to right and the entries in each column are strictly increasing from top to bottom.)
We record here a handy computational remark. For $M$ a skew Weyl module and $T \in \mathrm{Hom}(D(a_1,...,a_m),M)$ a semi-standard basis element, let $\phi_t (T)$, $1 \le t < m$, be the element of $\mathrm{Hom}(D(a_1,...,a_t+a_{t+1},...,a_m),M)$ obtained from $T$ by replacing each occurrence of $j>t$ by $j-1$. If $t\ge m$, let $\phi_t (T)=0$. By extending linearly, we obtain for each degree $i$ a map of $\mathbb{Z}$-modules $$\phi_t: \mathrm{Hom}(P_i(a,b),M) \rightarrow \mathrm{Hom}(P_{i+1}(a,b),M).$$ It is clear that only a finite number of these maps are nonzero. From the definition of the differential of $P_*(a,b)$, we obtain the following description for the differential of $\mathrm{Hom}(P_*(a,b),M)$. \begin{rem}
With the previous notation, $\mathrm{Hom}(\theta_i(a,b),M) = \sum_{t \ge 1}(-1)^{t-1}\phi_t.$ \end{rem}
Let $n \ge b+1$. Then $P_*(a,b)$ is a projective resolution of $\Delta(\mathrm{h})$. We claim that the $\mathbb{Z}$-module $\mathrm{Ext}^{i}(\Delta(\mathrm{h}),M)$, where $M$ is any skew Weyl $S(n,r)$-module, is isomorphic to the torsion submodule of the cokernel $E^i(\Delta(\mathrm{h}),M)$ of the map $\mathrm{Hom}(\theta_i(a,b),M)$. Indeed, by the argument of \cite{C}, bottom of p. 634 to the top of p. 635, we have \[E^i(\Delta(\mathrm{h}), M) \simeq \mathrm{Ext}^i(\Delta(\mathrm{h}), M) \oplus N, \]where $N$ is the image of the map $\mathrm{Hom}(\theta_i(a,b),M)$. (The argument given in loc. cit. is stated for $i=1$ but is valid for any $i \ge 1$.) As a submodule of a free $\mathbb{Z}$-module, $N$ is a free $\mathbb{Z}$-module. On the other hand, the $\mathbb{Z}$-module $ \mathrm{Ext}^i(\Delta(\mathrm{h}), M)$ is torsion for all $i \ge1$ by \cite{AB}, last paragraph of Section 8. Hence from the above isomorphism we obtain that the torsion submodule of $E^i(\Delta(\mathrm{h}),M)$ is isomorphic to $\mathrm{Ext}^i(\Delta(\mathrm{h}), M)$.
\subsection{The extensions $\mathrm{Ext}^i(\Delta(\mathrm{h}), D_{a+k}\otimes \wedge^{b-k})$} We will use the following lemma several times. Let $$r_k = \gcd\left(\tbinom{k+1}{1}, \dots ,\tbinom{k+1}{k}\right)$$ and note that $r_k=1$ unless $k+1=p^e$, $ p $ prime, in which case $r_k=p$. \begin{lm}
Suppose $n \ge {b+1}$ and $1 \le k < b$. Then $$\mathrm{Ext}^i(\Delta(\mathrm{h}),D_{a+k} \otimes \wedge ^{b-k})=\mathrm{Ext}^i(\wedge ^{k+1}, D_{k+1}).$$ In particular, $\mathrm{Ext}^1(\Delta (\mathrm{h}),D_{a+k} \otimes \wedge ^{b-k})= \mathbb{Z}_2$ and
$\mathrm{Ext}^k(\Delta(\mathrm{h}),D_{a+k} \otimes \wedge^{b-k})= \mathbb{Z}_{r_k}.$
\end{lm} \begin{proof} A special case of the main result, Theorem 2, of \cite{Ku1} yields $$\mathrm{Ext}^i(\Delta (a,1^b),D_{a+k} \otimes \wedge ^{b-k})=\mathrm{Ext}^i(D_{a-1} \otimes \wedge ^{k+1}, D_{a+k}).$$ Applying contravariant duality \cite{Ja}, p. 209, and \cite{AB}, Theorem 7.7, we have $$\mathrm{Ext}^i(D_{a-1} \otimes \wedge ^{k+1}, D_{a+k})=\mathrm{Ext}^i(\wedge^{a+k}, D_{k+1} \otimes \wedge ^{a-1}).$$ Again by \cite{Ku1}, $\mathrm{Ext}^i(\wedge^{a+k}, D_{k+1} \otimes \wedge ^{a-1})=\mathrm{Ext}^i(\wedge^{k+1}, D_{k+1})$ and the first equality of the lemma follows.
We have $\mathrm{Ext}^1(\wedge ^{k+1}, D_{k+1}) = \mathbb{Z}_2,$ by \cite{Ak}, Section 4, and $\mathrm{Ext}^k(\wedge ^{k+1}, D_{k+1}) = \mathbb{Z}_{r_k},$ according to \cite{Ma}, eqn. (6) p. 2207. \end{proof} \subsection{Summary of notation} For the reader's convenience we gather here some of the notation introduced in the previous subsections that will be used often. \begin{itemize}[noitemsep]
\item$\mathrm{h}=(a,1^b)$ and $\mathrm{h}(k)=(a+k,1^{b-k})$: hooks, where $1\le k\le b$ and $r=a+b$, (subsection 2.1).
\item ${i_1}^{(a_1)} ... {i_t}^{(a_t)} | {j_1} ... {j_b}$: the image in $\Delta(\mathrm{h})$, where $a=a_1+...+a_t$, of the element ${i_1}^{(a_1)} ... {i_t}^{(a_t)} \otimes {j_1} ... {j_b} \in D_a \otimes \wedge^b$ under the isomorphism $cok(\theta_{a}) \simeq \Delta(\mathrm{h})$, (subsection 2.2).
\item $P_{*}(a,b)$ and $\theta_{*}(a,b)$: projective resolution of $ \Delta(\mathrm{h})$ and the differential of $P_{*}(a,b)$ respectively, (subsection 2.3).
\item $E^i(\Delta(\mathrm{h}),M)$: the cokernel of the map $\mathrm{Hom}(\theta_i(a,b),M)$, (subsection 2.3).
\item $\phi_t$: a summand of the differential $\mathrm{Hom}(\theta_i(a,b),M)$ (defined before Remark 2.2).
\end{itemize} \section{$\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$} In this section we determine $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$ for $k>1$. The case $k=1$ was computed in \cite{Ma}, Theorem 6. \subsection{Matrices $e^{(1)}(a,b,M)$ and a generator}
Let $M=D_{a+k} \otimes \wedge ^{b-k}$. For the semi-standard basis $B$ of the domain $\mathrm{Hom}(D(\mathrm{h}),M)$ $=\mathrm{Hom}(D(a,1,...,1),M)$ of the map $\mathrm{Hom}(\theta_1{(a,b)},M)$ we have $B=B_0 \cup B_1$, where $B_0$ (resp., $B_1$) consists of those elements of $B$ that have no (resp., exactly one) appearance of 1 in the $\wedge^{b-k}$ part. We consider the usual lexicographic ordering on $B$ and note that every element of $B_0$ is less than every element of $B_1$. We have $$|B|=\tbinom{b+1}{k+1}, |B_0|= \tbinom{b}{k}, |B_1|= \tbinom{b}{k+1}.$$
Likewise, for the semi-standard bases $B^1$, $B^2, ...,B^b $ of $\mathrm{Hom}(D(a+1,1,...,1),M)$, $\mathrm{Hom}(D(a,2,...,1),M)$, ... , $\mathrm{Hom}(D(a,1,...,2),M)$ respectively, we have $B^t=B^t_0 \cup B^t_1$, where $B_0^t$ (resp. $B_1^t$) consists of those elements of $B^t$ that have no (resp. exactly one) appearance of 1 in the $\wedge^{b-k}$ part, and thus \begin{equation*}B'=B^1_0 \cup B^1_1 \cup ... \cup B^b_0 \cup B^b_1\end{equation*} is a basis of the codomain of the map $\mathrm{Hom}(\theta_1{(a,b)},M)$. We order each set $B^i$ lexicographically and declare that every element of $B^i$ is less than every element of $B^{i+1}$. For each $i$ we have $$|B^i|=\tbinom{b}{k}, |B_0^i|= \tbinom{b-1}{k-1}, |B_1^i|= \tbinom{b-1}{k}.$$ Consider the matrix $e^{(1)}(a,b,M) \in M_{b\binom{b}{k} \times \binom{b+1}{k+1}}(\mathbb{Z})$ of the map $\mathrm{Hom}(\theta_1{(a,b)},M)$, with respect to the previous orderings, partitioned into $b$ row blocks according to $B'=B^1 \cup \cdots \cup B^b $.
In the next Lemma, the missing entries of any matrix are assumed to be equal to 0.
\begin{lm} Let $p=\tbinom{b-1}{k-1}, q=\tbinom{b-1}{k}.$ The matrix $e^{(1)}(a,b,M)$ has the following properties.\begin{enumerate} \item The first block is $\left( \begin{array}{c|c} A(a,b;k) &B(a,b;k) \end{array}\right)$, where
\begin{align*} &A(a,b;k)=diag(\underbrace{a+1,...,a+1}_{p},\underbrace{1,...,1}_{q}) \in M_{\binom{b}{k}}(\mathbb{Z}), \\&B(a,b;k)= \left(
\begin{array}{c|cc }
& &\\ \hline
aI_{q} & &
\end{array}
\right) \in M_{\binom{b}{k} \times \binom{b}{k+1}}(\mathbb{Z}).\end{align*}
\item The t-th block, $t>1$, is of the form $\left(
\begin{array}{c|cc}
C_t& \\ \hline
&D_t
\end{array}
\right)$, where $C_t \in M_{p \times \binom{b}{k}}(\mathbb{Z})$, $D_t \in M_{q \times \binom{b}{k+1}}(\mathbb{Z})$.
\item The sum of the elements in any row of the t-th block, where $t \ge 2$, is $(-1)^{t-1}2.$
\item The last row is of the form $(0 \; ... \; 0 \; \pm 2)$.
\end{enumerate} \end{lm} \begin{proof} (1) The set $B_0$ consists of all $T=1^{(a)}i_1...i_k\otimes j_1...j_{b-k} \in B$ such that \begin{gather*}i_1 <... < i_k, \; \; \; j_1 <... < j_{b-k} \\ \{i_1 ,..., i_k\} \cup \{j_1 ,... , j_{b-k}\}=\{2,...,b+1\} \\ \{i_1 ,..., i_k\} \cap \{j_1 ,... , j_{b-k}\}= \emptyset. \end{gather*} The definition of $\phi_1$ yields \begin{equation*} \phi_1 (T)=\begin{cases} (a+1)1^{(a+1)}i'_2...i'_k\otimes j'_1...j'_{b-k}, & \mbox{if}\;i_1=2 \\ \; \; \; \; \; \; \; \; \; \; \; 1^{(a)}i'_2...i'_k\otimes 1j'_2...j'_{b-k}, & \mbox {if}\; i_1 \ne 2 \end{cases}, \end{equation*} where $i'=i-1$. From this it easily follows that the matrix of the restriction of $\phi_1$ on the subgroup of $ \mathrm{Hom}(D(\mathrm{h}),M)$ spanned by $B_0$ is $A(a,b;k)$.
The set $B_1$ consists of all $S=1^{(a-1)}i_1...i_{k+1}\otimes j_1...j_{b-k} \in B$ such that \begin{gather*}i_1 <... < i_{k+1}, \; \; \; j_1 <... < j_{b-k}, \; \; \; j_1=1 \\ \{i_1 ,..., i_{k+1}\} \cup \{j_1 ,... , j_{b-k}\}=\{1,...,b+1\} \\ \{i_1 ,..., i_{k+1}\} \cap \{j_1 ,... , j_{b-k}\}= \emptyset. \end{gather*} Then \begin{equation*} \phi_1 (S)=\begin{cases} a1^{(a-1)}i'_2...i'_k\otimes 1j'_2...j'_{b-k}, & \mbox{if}\;j_2 \ne 2 \\ 0, & \mbox {if}\; j_2 = 2 \end{cases}, \end{equation*} From this it easily follows that the matrix of the restriction of $\phi_1$ on the subgroup of $ \mathrm{Hom}(D(\mathrm{h}),M)$ spanned by $B_1$ is $B(a,b;k)$.
(2) If $T=1^{(a)}i_1...i_k\otimes j_1...j_{b-k} \in B_0$, then $j_1 \ge 2$, and thus for $t \ge 2$, $\phi_t(T)$ is a multiple of an element of $B^t$ that does not contain a 1 in the $\wedge^{b-k}$ part. Hence $\phi_t(T) \in spanB^t_0$. If $S=1^{(a-1)}i_1...i_{k+1}\otimes j_1...j_{b-k} \in B_1$, then $j_1=1$ and thus $\phi_t(S) \in spanB^t_1$ for all $t$.
(3) Suppose $T' \in B^t$. Since $t>1$, $t$ appears exactly twice in $T'$.\\ \textit{Case 1.} Suppose $T'$ is of the form $T'=xt^{(2)}y\otimes z.$ Since each element of $ B $ has weight $ (a,1,...,1) $, from the definition of $ \phi_t $, it follows that there is a unique $T \in B$ such that $\phi_t(T)=cT', c \ne 0,$ namely $T=xt(t+1)y_1\otimes z_1$, where $y_1$ and $z_1$ are obtained from $y$ and $z$ respectively by replacing each $ i>t $ by $i+1$. We have $\phi_t(T)=2T'$.\\ \textit{Case 2.} Suppose $T'$ is of the form $T'=xty\otimes ztw.$ Since each element of $ B $ has weight $ (a,1,...,1) $, from the definition of $ \phi_t $, it follows that there are exactly two $T_1, T_2 \in B$ such that $\phi_t(T_i)=c_iT', c_i \ne 0,$ namely $$T_1=x(t+1)y_1\otimes ztw_1, \; T_2=xty_1\otimes z(t+1)w_1,$$ where $y_1$ and $w_1$ are obtained from $y$ and $w$ respectively by replacing each $ i>t $ by $i+1$. We have $\phi_t(T_1)=\phi_t(T_2)=T'$.
(4) This follows from case 1 of (3) since the greatest element in $B^k$ is $T'=1^{(a-1)}(b-k+1)...(b-1)b^{(2)}\otimes 12...(b-k)$. \end{proof}
Let $1\le k<b$ and $M=D_{a+k} \otimes \wedge ^{b-k}$. From Lemma 2.3 we have $\mathrm{Ext}^1(\Delta(\mathrm{h}),M)$ $= \mathbb{Z}_2$. We will determine a generator of this $\mathrm{Ext}$ group.
We have mentioned that the torsion subgroup of the abelian group $E^1(\Delta(\mathrm{h}),M)$ with presentation matrix $e^{(1)}(a,b,M)$ is isomorphic to $\mathrm{Ext}^1(\Delta(\mathrm{h}),M)$. We denote by $\pi$ the natural projection $\pi:\mathrm{Hom}(P_{1}(a,b),M)\to E^1(\Delta(\mathrm{h}),M)$. \begin{lm}
Let $\mathrm{h}=(a,1^b)$, $1 \le k <b$ and $M=D_{a+k} \otimes \wedge ^{b-k}$. A cyclic generator of the abelian group $ \mathrm{Ext}^1(\Delta (\mathrm{h}),M)$ is $\pi(g_k)$, where
\[g_k= \tbinom{a+1}{2} \sum\limits_{T \in B_0^1} T + a \sum\limits_{T \in B_1^1} T + \sum\limits_{i=2}^{b}(-1)^{i-1} \Big( a\sum\limits_{T \in B_0^i}T + \sum\limits_{T \in B_1^i} T \Big).\] \end{lm} \begin{proof} Let $E_i$ be the i-th column of $e^{(1)}(a,b,M)$ and let $p=\binom{b-1}{k-1}, q=\binom{b-1}{k}$. Consider the $e^{(1)}(a,b,M)$ partitioned into $b$ blocks each consisting of $p+q=\binom{b}{k}$ consecutive rows. From Lemma 3.1 it follows that \begin{align} \nonumber & a\big(E_1+...+E_{\binom{b}{k}}\big)+E_{\binom{b}{k}+1}+...+E_{\binom{b+1}{k+1}}= \\ \nonumber &\Big( \underbrace{a(a+1),...,a(a+1)}_{p}, \underbrace{2a,...,2a}_{q}, \underbrace{-2a,...,-2a}_{p}, \underbrace{-2,...,-2}_{q},..., \\ \label{lin_com} &\underbrace{(-1)^{b-1}2a,...,(-1)^{b-1}2a}_{p}, \underbrace{(-1)^{b-1}2,...,(-1)^{b-1}2}_{q} \Big)^t. \end{align} Hence in the cokernel $E^1(\Delta(\mathrm{h}),M)$ of the differential $\mathrm{Hom}(\theta_1(a,b),M)$ we have $2\pi(g_k)=0$. This shows that $\pi(g_k) \in \mathrm{Ext}^1(\Delta (\mathrm{h}),M)$. We have $\pi(g_k)\ne 0$, since otherwise the integer matrix-column $\frac{1}{2} X$, where $X$ is the right hand side of (\ref{lin_com}), would be a $\mathbb{Z}$ - linear combination of columns of $e^{(1)}(a,b,M)$. This is not possible because the last entry of $\frac{1}{2} X$ is $\pm1 $ while all entries of the last row of $e^{(1)}(a,b,M)$, $k>0$, are even according to Lemma 3.1 (4). Since $ \mathrm{Ext}^1(\Delta(\mathrm{h}),M)= \mathbb{Z}_2$, it follows that $\pi(g_k)$ generates $\mathrm{Ext}^1(\Delta(\mathrm{h}),M)$.\end{proof}
\subsection{Proof for $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta (\mathrm{h}(k)))$.} We determine $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k))), k \ge 2$, in this subsection. The main computation is done in the next two lemmas, the first of which takes care of the case $k=2$.
We will use the following notation. If $f:M \to N$ is a map of $S(n,r)$-modules, we have in the usual way various induced maps \begin{align*}\mathrm{Hom}(P_i(a,b),M) &\to \mathrm{Hom}(P_{i}(a,b),N), \\E^i(\Delta(\mathrm{h}),M) &\to E^i(\Delta(\mathrm{h}),N), \\ \mathrm{Ext}^i(\Delta(\mathrm{h}),M) &\to \mathrm{Ext}^i(\Delta(\mathrm{h}),N)\end{align*} of abelian groups which will all be denoted by $f^*$. For an integer $m$ let $\epsilon_m$ be the remainder of the division of $m$ by 2.
In the statement of the next Lemma, we recall that $\mathrm{\mathrm{Ext}}^1(\Delta(\mathrm{h}),D_{a+1} \otimes \wedge ^{b-1}) = \mathbb{Z}_2$ according to Lemma 2.3. Also from \cite{Ma}, Theorem 6, we have that $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(1)))$ is a cyclic group.
\begin{lm}
Let $\mathrm{h}=(a,1^b)$ and $\mathrm{h}(1)=(a+1,1^{b-1}), b \ge 2$. The map $$ \mathrm{\mathrm{Ext}}^1(\Delta(\mathrm{h}),D_{a+1} \otimes \wedge ^{b-1}) \xrightarrow{\pi_0^{*}} \mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(1)))$$
induced by $D_{a+1} \otimes \wedge ^{b-1} \xrightarrow{\pi_0} \Delta (\mathrm{h}(1))$ is multiplication by the integer $ \frac{(a+ \epsilon_b-1)(a+b)}{2} $. \end{lm}
\begin{proof} The matrix of the map $\mathrm{Hom}(P_0(a,b),\Delta(\mathrm{h}(1))) \rightarrow \mathrm{Hom}(P_1(a,b),\Delta(\mathrm{h}(1)))$ with respect to the lexicographic order of semi-standard bases is the following $b \times b$ matrix according to \cite{Ma}, p. 2211, \[ \left( \begin{array}{cccccc} a+1 &-1 &1 & \dots & &(-1)^{b-1} \\ -1 &-1 &0 & \dots & &0 \\ 0 &1 &1 & \dots & &0 \\ \vdots &\vdots &\vdots & & &\vdots \\ 0 &0 &0 &\dots &(-1)^{b-1} &(-1)^{b-1} \end{array} \right). \]
Let $$S_1=1^{(a+1)}|23...b, S_2=1^{(a)}2|23...b,...,S_b=1^{(a)}b|23...b$$ be the semi-standard basis of $\mathrm{Hom}(P_1(a,b),\Delta(\mathrm{h}(1)))$. From the above matrix it follows that for each $i$ there is an integer $m_i$ such that $\pi(S_i) = m_i\pi(S_1)$, and hence a cyclic generator of $E^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(1)))$ is $\pi(S_1)$. Moreover by adding the even numbered columns $2,4,...,2[\frac{b}{2}]$ we have the relation $$\sum\limits_{i=2}^{b}(-1)^{i-1}\pi(S_i) =[\frac{b}{2}]\pi(S_1)$$ where $[\frac{b}{2}]$ is the largest integer less than or equal to $\frac{b}{2}$.
Using the notation established at the beginning of subsection 3.1, we have
$B_0^1=\{T_1\}$ and $B_1^1=\{T_2,..,T_b\}$, where $T_1=1^{(a+1)}\otimes 23...b$ and $T_j=1^{(a)}j\otimes 12...\widehat{j}...b, j=2,...,b$, and it is understood that $\widehat{j}$ means that $j$ is omitted. Now $\pi_0^*(T_1)=S_1$. Also $$\pi_0^*(T_j)=1^{(a)}j|12...\widehat{j}...b = -1^{(a+1)}|j2...\hat{j}...b=(-1)^{j-1}S_1, $$ where in the second equality the straightening law was used. Thus \[ \pi_0^* \Big( \tbinom{a+1}{2} \overline{B_0^1} + a \overline{B_1^1} \Big) = \Big(\tbinom{a+1}{2} + a(\epsilon_b-1) \Big)S_1, \] where $ \overline{X} = \sum_{T \in X}T$, if $X$ is one of the sets $B_0^i, B_1^i$. A similar computation for $j=2,..,b$ yields \[ \pi_0^* \Big( \overline{B_0^j} + \overline{ B_1^j} \Big) = \big(a + \epsilon_b-1 \big)S_j, \] and therefore in $E^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(1)))$ we have \begin{align*} \nonumber \pi_0^*(\pi(g_1)) & = \Big(\tbinom{a+1}{2} + a(\epsilon_b-1) \Big)\pi(S_1) +\big( a+ \epsilon_b -1 \big) \sum\limits_{i=2}^{b} (-1)^{i-1} \pi(S_i) \\ & =\Big(\tbinom{a+1}{2} + a(\epsilon_b-1) + \big( a+ \epsilon_b -1 \big)[\frac{b}{2}] \Big) \pi(S_1). \end{align*} It is easy to verify that $\tbinom{a+1}{2} + a(\epsilon_b -1) + \big( a+ \epsilon_b -1 \big)[\frac{b}{2}] = \frac{(a+ \epsilon_b -1)(a+b)}{2} $. We have shown that the map $ \mathrm{Ext}^1(\Delta(\mathrm{h}),D_{a+1} \otimes \wedge ^{b-1}) \xrightarrow{\pi_0^{*}} \mathrm{Ext}^1(\Delta (\mathrm{h}),\Delta (\mathrm{h}(1)))$ is multiplication by this integer. 
Since $\pi(g_1)$ is a generator of $\mathrm{Ext}^1(\Delta(\mathrm{h}),D_{a+1} \otimes \wedge ^{b-1})$ (Lemma 3.2), which is the torsion submodule of $E^1(\Delta(\mathrm{h}),D_{a+1} \otimes \wedge ^{b-1})$, the result follows.\end{proof} Since multiplication in the divided power algebra is commutative, we will often denote a semi-standard basis element of the form $1^{(a)}i_1...i_st^{(2)}i_{s+1}...i_{k-2}\otimes j_1...j_{b-k}$ by $1^{(a)}t^{(2)}i_1...i_{k-2}\otimes j_1...j_{b-k}$ and likewise for $1^{(a)}i_1...i_sti_{s+1}...i_{k-1}\otimes j_1...t...j_{b-k-1}$.
The $\mathrm{Ext}$ groups appearing in the next Lemma are both equal to $\mathbb{Z}_2$ by Lemma 2.3. We want to identify a particular map between these.
\begin{lm}
Let $\mathrm{h}=(a,1^b)$ and $ 1<k<b$. The map $$ \mathrm{Ext}^1(\Delta (\mathrm{h}),D_{a+k} \otimes \wedge ^{b-k}) \xrightarrow{\theta^{*}} \mathrm{Ext}^1(\Delta(\mathrm{h}),D_{a+k-1} \otimes \wedge ^{b-k+1})$$
induced by $D_{a+k} \otimes \wedge ^{b-k} \xrightarrow{\theta} D_{a+k-1} \otimes \wedge ^{b-k+1}$ is multiplication by $a+ \epsilon_{b-k+1}-1 $. \end{lm}
\begin{proof} Consider the semi-standard basis $B'=B^1_0 \cup B^1_1 \cup ... \cup B^b_0 \cup B^b_1$, with the notation as in the beginning of subsection 3.1, of the codomain of the map $\mathrm{Hom}(\theta_1{(a,b)},D_{a+k} \otimes \wedge ^{b-k})$. Likewise we denote by $C'=C^1_0 \cup C^1_1 \cup ... \cup C^b_0 \cup C^b_1$ the semi-standard basis of the codomain of the map $\mathrm{Hom}(\theta_1{(a,b)},D_{a+k-1} \otimes \wedge ^{b-k+1})$. If $ X $ is any one of the sets $B_0^t, B_1^t, C_0^t, C_1^t$, we let $ \overline{X} = \sum_{T \in X}T .$
We claim that for each $t=1,...,b,$
\begin{align} &\theta^{*}(\overline{B^t_0}) =\epsilon_{b-k+1} \overline{C^t_0}+\overline{C^t_1} \label{claim1}, \\ &\theta^{*}(\overline{B^t_1}) =-\epsilon_{b-k} \overline{C^t_1}\label{claim2}. \end{align}
Indeed, let $t>1$. We note that $B_0^t$ consists of all $1^{(a)}t^{(2)}i_1...i_{k-2}\otimes j_1...j_{b-k}$ such that
\begin{gather*}i_1<...<i_{k-2}, \; \; j_1<...<j_{b-k},\\ \{ i_1,...,i_{k-2} \} \cap \{ j_1,...,j_{b-k} \} = \emptyset, \; \{ i_1,...,i_{k-2} \} \cup \{ j_1,...,j_{b-k} \} = \{2,...,\widehat{t},...,b\} \end{gather*} and of all $1^{(a)}ti_1...i_{k-1}\otimes j_1...t...j_{b-k-1}$ such that
\begin{gather*}i_1<...<i_{k-1}, \; \; j_1<...<t<...<j_{b-k-1},\\ \{ i_1,...,i_{k-1} \} \cap \{ j_1,...,j_{b-k-1} \} = \emptyset, \; \{ i_1,...,i_{k-1} \} \cup \{ j_1,...,j_{b-k-1} \} = \{2,...,\widehat{t},...,b\} \end{gather*}
The definition of $\theta^*$ on the above elements yields
\begin{align}
\nonumber\theta^{*}(1^{(a)}t^{(2)}i_1...i_{k-2}\otimes j_1...j_{b-k})
= &1^{(a-1)}t^{(2)}i_{1}...i_{k-2}\otimes1j_1...j_{b-k}\\ \nonumber
&+1^{(a)}ti_1...i_{k-2}\otimes tj_1...j_{b-k}\\
&+\sum_{u=1}^{k-2}1^{(a)}t^{(2)}i_1...\widehat{i_u}...i_{k-2} \otimes i_uj_1...j_{b-k}\label{diaf1},\end{align}
\begin{align}
\nonumber\theta^{*} (1^{(a)}ti_1...i_{k-1}\otimes j_1...t...j_{b-k-1})
= &1^{(a-1)}ti_1...i_{k-1}\otimes1j_1...t...j_{b-k-1}\\
&+\sum_{u=1}^{k-1}1^{(a)}ti_1...\widehat{i_u}...i_{k-1} \otimes i_uj_1...t...j_{b-k-1}\label{diaf2},\end{align} where $\widehat{i_u}$ means that $i_u$ is omitted. We note that each term in the right hand side of equations (3.4) and (3.5) is of the form $\pm S$, where $S \in C^t$. Moreover, the terms in the right hand side of (3.4) are distinct and those in the right hand side of (3.5) are distinct. Now let $ S \in C^t=C^t_0 \cup C^t_1. $ \\ (1) Let $S \in C_0^t$.
\begin{enumerate}
\item[(a)] Suppose $S=1^{(a)}tu_1...u_{k-2} \otimes v_1...t...v_{b-k}$. From (3.4) and (3.5), it follows that the elements $T_i \in B^t$ such that $S$ appears with nonzero coefficient in $\theta^*(T_i)$ are
\begin{align*}
&&T_0&=1^{(a)}t^{(2)}u_1...u_{k-2}\otimes v_1...v_{b-k}, \\
&&T_i&=1^{(a)}tu_1...u_{k-2}v_i\otimes v_1...\widehat{v_i}...t...v_{b-k}, \; i=1,...,b-k.
\end{align*}
Moreover by straightening the $\wedge^{b-k+1}$ part, the coefficient of $S$ in $\theta^*(T_0)$ is $ (-1)^s $, $s=\# \{i:v_i<t\}$, and the coefficient of $S$ in $\theta^*(T_i)$ is $$\begin{cases} (-1)^{i-1}, & \mbox{if}\; \mbox{$i\le s$} \\ (-1)^i, & \mbox{$i\ge s+1$}. \end{cases}$$
Therefore, by summing over $B_0^t$ we see that the coefficient of $S$ in $\theta^{*}(\overline{B^t_0})$ is $\sum_{i=1}^{b-k+1}(-1)^{i-1}=\epsilon_{b-k+1}.$
\item[(b)] Suppose $S=1^{(a)}t^{(2)}u_1...u_{k-3}\otimes v_1...v_{b-k+1}$. From (3.4) and (3.5) it follows that the elements $T_i \in B^t$ such that $S$ appears with nonzero coefficient in $\theta^*(T_i)$ are
\begin{align*}
&&T_i=1^{(a)}t^{(2)}u_1...u_{k-3}v_i\otimes v_1...\widehat{v_i}...t...v_{b-k}, \; i=1,...,b-k.
\end{align*}
Moreover by straightening the $\wedge^{b-k+1}$ part, the coefficient of $S$ in $\theta^*(T_i)$ is $(-1)^{i-1}$. Thus summing over $B_0^t$, the coefficient of $S$ in $\theta^*(\overline{B_0^t})$ is $\sum_{i=1}^{b-k+1}(-1)^{i-1}=\epsilon_{b-k+1}$.\end{enumerate}
(2) Let $S \in C_1^t$.
\begin{enumerate}
\item[(a)] Suppose $S=1^{(a-1)}t^{(2)}u_1...u_{k-2}\otimes 1v_1...v_{b-k}$. From (3.4) and (3.5), it follows that there is a unique $T \in B_0^t$ such that $S$ appears with nonzero coefficient in $\theta^*(T)$, namely $T=1^{(a)}t^{(2)}u_1...u_{k-2}\otimes v_1...v_{b-k}$, and the coefficient is 1.
\item[(b)] Suppose $S=1^{(a-1)}tu_1...u_{k-1}\otimes 1v_1...t...v_{b-k-1}$. From (3.4) and (3.5), it follows that there is a unique $T \in B_0^t$ such that $S$ appears with nonzero coefficient in $\theta^*(T)$, namely $T=1^{(a)}tu_1...u_{k-1}\otimes v_1...t...v_{b-k-1}$, and the coefficient is 1.
\end{enumerate}
From the cases (1) and (2), equation (3.2) follows for $ t>1. $ The proof of (3.3), $t>1$, is similar (and a bit shorter) and omitted. Finally, the proof of (3.2) and (3.3) for $ t=1 $ is similar (and a bit simpler) and omitted.
We now prove the statement of the Lemma.
\noindent$\it{Case \; 1.}$ Suppose $b-k+1$ is even. By substituting (3.2) and (3.3) we obtain \[ \theta^{*}(g_{k})= \tbinom{a}{2} \overline{C^1_1} + (a-1)\sum_{i=2}^{b}(-1)^{i-1}\overline{C_0^i} \] and thus \[ \theta^{*}(g_k) -(a-1)g_{k-1}= - \tbinom{a}{2} \Big( (a+1) \overline{C^1_0} +\overline{C^1_1} + 2\sum_{i=2}^{b} (-1)^{i-1}\overline{C^i_0} \Big). \] But $ \pi\big((a+1) \overline{C^1_0} +\overline{C^1_1} + 2\sum_{i=2}^{b} (-1)^{i-1}\overline{C^i_0}\big)=0$ in $E^1(\Delta(\mathrm{h}),D_{a+k-1} \otimes \wedge ^{b-k+1})$ because this is the relation coming from adding the first $\tbinom{b}{k-1}$ columns of the matrix $e^{(1)}(a,b,D_{a+k-1}\otimes\wedge^{b-k+1})$ according to Lemma 3.1 (1)-(3). Thus in this case the map $\theta^{*}$ is multiplication by $a-1$.
\noindent$\it{Case \; 2.}$ Suppose $b-k+1$ is odd. By substituting (3.2) and (3.3) we obtain \[ \theta^{*}(g_k)= \tbinom{a+1}{2} \big( \overline{C^1_0} + \overline{C^1_1} \big) + a \sum_{i=2}^{b} (-1)^{i-1}(\overline{C^i_0}+\overline{C^i_1}) \] and using this we have \[ \theta^{*}(g_k) - ag_{k-1}= -\tbinom{a}{2} \Big( (a+1) \overline{C^1_0} +\overline{C^1_1} + 2\sum_{i=2}^{b} (-1)^{i-1}\overline{C^i_0} \Big). \] Thus in this case the map $\theta^{*}$ is multiplication by $a$. \end{proof} \begin{theo}
Let $\mathrm{h}=(a,1^b)$ and $\mathrm{h}(k)=(a+k,1^{b-k})$, where $ 2 \le k \le b$. If $n \ge b+1$, then
$\mathrm{Ext}^1(\Delta(\mathrm{h}), \Delta(\mathrm{h}(k))) = 0$ unless $a+b+k$ is odd, in which case $\mathrm{Ext}^1(\Delta(\mathrm{h}), \Delta(\mathrm{h}(k))) = \mathbb{Z}_2$. \end{theo} \begin{proof} Applying $\mathrm{Hom}(\Delta(\mathrm{h}),-)$ to the short exact sequence (2.1) for $\mathrm{h}(k-1)$ in place of $\mathrm{h}$ yields the exact sequence \begin{align*} 0 &\rightarrow \mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k))) \xrightarrow{i^*} \mathrm{Ext}^1(\Delta(\mathrm{h}),D_{a+k-1} \otimes \wedge^{b-k+1}) \\ &\xrightarrow{\pi_0^*} \mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k-1))) \end{align*} because $\mathrm{Hom}(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k-1)))=0$ as $ \mathbb{Q} \otimes \Delta(\mathrm{h})$ and $ \mathbb{Q} \otimes \Delta(\mathrm{h}(k-1))$ are distinct irreducible representations of $GL_n( \mathbb{Q} )$.
First let $k=2$. From the above exact sequence, Lemma 2.3 and \cite{Ma}, Theorem 6, it follows that $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(2)))$ is the kernel of the map $\mathbb{Z}_2 \rightarrow \mathbb{Z}_{a+b}$ which is multiplication by the integer $ \frac{(a+ \epsilon_b-1)(a+b)}{2}$ according to Lemma 3.3. Thus the result follows.
Suppose $k \ge 3$. Then $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k-1)))$ injects in $\mathrm{Ext}^1(\Delta(\mathrm{h}),D_{a+k-2} \otimes \wedge^{b-k+2})$ and thus $\mathrm{Ext}^1(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$ is the kernel of the composite map \[ \psi : \mathrm{Ext}^1(\Delta(\mathrm{h}),D_{a+k-1} \otimes \wedge^{b-k+1}) \rightarrow \mathrm{Ext}^1(\Delta(\mathrm{h}),D_{a+k-2} \otimes \wedge^{b-k+2}). \] This map is induced by $D_{a+k-1} \otimes \wedge ^{b-k+1} \xrightarrow{\theta} D_{a+k-2} \otimes \wedge ^{b-k+2}$. According to Lemma 3.4, $\psi : \mathbb{Z}_2 \rightarrow \mathbb{Z}_2$ is multiplication by $a+ \epsilon_{b-k}-1 $. Hence the result follows. \end{proof}
\section{$\mathrm{Ext}^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$} Let $\mathrm{h}=(a,1^b)$ and $\mathrm{h}(k)=(a+k,1^{b-k})$, where we assume throughout this section that $1 \le k \le b$. We will prove the following result. \begin{theo}If $n \ge b+1$, then $\mathrm{Ext}^k(\Delta(\mathrm{h}), \Delta(\mathrm{h}(k)))= \mathbb{Z}_{d_k}$, where $ d_k=gcd (\tbinom{a+b}{1},\tbinom{a+b}{2}, \dots ,\tbinom{a+b}{k} )$. \end{theo}
This result is known in the special cases $a=1$, $b=k$ \cite{Ak}, Section 4, and
any $a$, $b=k$ \cite{Ma}, eqn. (6) p. 2207.
According to the following Remark, the above $\mathrm{Ext}$ group is the highest possible nonzero $\mathrm{Ext}$ group between the hooks $\mathrm{h}$ and $\mathrm{h}(k)$. We thank both H.~H. Andersen and the referee for pointing out an error in a previous version of this paper concerning the proof of the Remark and for suggesting the proof that follows.
\begin{remark}Let $n \ge b+1$. If $i>k$, then $\mathrm{Ext}^i(\Delta(\mathrm{h}), \Delta(\mathrm{h}(k)))=0.$\end{remark}
\begin{proof}
We use induction on $k$. For $k=0$ the result follows from the general fact that no Weyl module has a nontrivial self-extension, see [10], B.4 Remark. Applying $\mathrm{Hom}(\Delta(\mathrm{h}),-)$ to the short exact sequence
\[0\rightarrow\Delta(\mathrm{h}(k+1))\rightarrow \Delta(a+k)\otimes \Lambda^{b-k} \rightarrow \Delta(\mathrm{h}(k))\rightarrow 0\] we obtain the exact sequence
\begin{align*} \mathrm{Ext}^i(\Delta(\mathrm{h}), \Delta(\mathrm{h}(k)))&\rightarrow \mathrm{Ext}^{i+1}(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k+1))) \\&\rightarrow \mathrm{Ext}^{i+1}(\Delta(\mathrm{h}),\Delta(a+k)\otimes \Lambda^{b-k}). \end{align*}
The term on the left is zero by induction. By Lemma 2.3, the term on the right is
$$\mathrm{Ext}^{i+1}(\Delta(\mathrm{h}),\Delta(a+k)\otimes \Lambda^{b-k})=\mathrm{Ext}^{i+1}(\Lambda^{k+1},\Delta(k+1))$$
which is zero by induction since $i+1>k$. Hence the middle term is zero.
\end{proof}
Recall the following notation from Section 2.3. $E^i(\Delta(\mathrm{h}),M)$ is the cokernel of the differential $\mathrm{Hom}(\theta_i(a,b),M)$ of the complex $\mathrm{Hom}(P_*(a,b),M)$, where $M$ is a skew Weyl module. The torsion part of this abelian group is isomorphic to $\mathrm{Ext}^i(\Delta(\mathrm{h}),M)$. Let $\pi$ be the natural projection $\pi:\mathrm{Hom}(P_{i}(a,b),M)\to E^i(\Delta(\mathrm{h}),M)$ and let $e^{(i)}(a,b,M)$ be the matrix of the map $\mathrm{Hom}(\theta_i(a,b),M)$ with respect to orderings to be specified.
We order lexicographically the semi-standard basis of $\mathrm{Hom}(D(a_1,...,a_m),\Delta(\mathrm{h}(k))).$ Now if $(a_1,...,a_m)$ is greater than $(b_1,...,b_{m'})$ , where $m,m' \le n$, in the usual lexicographic ordering of sequences, we declare that each element of the semi-standard basis of $\mathrm{Hom}(D(a_1,...,a_m),\Delta(\mathrm{h}(k)))$ is less than each element of the semi-standard basis of $\mathrm{Hom}(D(b_1,...,b_{m'}),\Delta(\mathrm{h}(k)))$. With respect to the above orderings, Remark 2.1 yields the following, where the missing entries in the bottom left part of the matrix are equal to zero. \begin{lm}
For $i>1$ and $b>1,$ $
e^{(i)}(a,b,\Delta(\mathrm{h}(k)))=
\left(
\begin{array}{c|c}
A &*\\
\hline \;
& *
\end{array}
\right),$
where $A=e^{(i-1)}(a+1,b-1,\Delta(\mathrm{h}(k)))$. \end{lm}
\subsection{A generator of $\mathrm{Ext}^k(\Delta(\mathrm{h}),D_{a+k}\otimes\wedge^{b-k})$} In this subsection we will identify a generator of $\mathrm{Ext}^k(\Delta(\mathrm{h}),D_{a+k} \otimes \wedge^{b-k})$. We assume throughout that $1 \le k \le b.$
Let $\Gamma_k \in \mathrm{Hom}(P_k(a,b),D_{a+k}\otimes\wedge^{b-k})$,
\begin{equation}\Gamma_k = \tbinom{a+k}{k+1} \triangle_{1} - \triangle_{2} + \cdots + {(-1)}^{b-k} \triangle_{q},\end{equation} where $q=b-k+1$ and \begin{align*}\triangle_{1} &= 1^{(a+k)} \otimes 2 \dots q \in \mathrm{Hom}(D(a+k,1,...,1),D_{a+k}\otimes \wedge^{b-k})\\ \triangle_{i} &= 1^{(a-1)} i^{(k+1)} \otimes 1\dots\widehat{i} \dots q \\ &\,\,\,\,\,\,\in \mathrm{Hom}(D(a,1,\dots,k+1,\dots,1), D_{a+k} \otimes \wedge^{b-k}),\end{align*} $i = 2, \dots,q,$ where $k+1$ is located at the i-th position. Consider the natural projection $\pi: \mathrm{Hom}(P_k(a,b), D_{a+k}\otimes\wedge^{b-k}) \to E^k(\Delta(\mathrm{h}),D_{a+k}\otimes\wedge^{b-k}).$
\begin{lm} If $k+1=p^e$, $p$ prime, then $\pi(\Gamma_k) \ne 0.$ \end{lm} \begin{proof}Suppose $\Gamma_k$ is equal to a linear combination of the columns of $A$, where $A=e^{(k)}(a,b,D_{a+k}\otimes \wedge^{b-k})$. Then the coefficient ${(-1)}^{q-1}$ of $\triangle_{q}$, $q=b-k+1$, is a linear combination of the entries of the last row of $A$, as $\triangle_{q}$ is the last basis element in $\mathrm{Hom}(P_k(a,b),D_{a+k}\otimes \wedge^{b-k})$ with respect to the lexicographic order. We claim that the set of nonzero elements in the last row of $A$ is $\{(-1)^{q-1}\tbinom{k+1}{1},\dots ,(-1)^{q-1}\tbinom{k+1}{k}\}.$
Indeed, let $i\in \{1,\dots ,q\}$, $\lambda = (\lambda_1,\dots ,\lambda_{q+1})$, where $\lambda_1 \in \{0, \dots ,k-1\}$, $\lambda_j \geq 1$ for $j \in\{2,\dots, q+1\}$, $\sum_{j=1}^{q+1}\lambda_j =b$, and consider a semi-standard basis element $T \in \mathrm{Hom}(D(a+\lambda_1, \lambda_2, \dots ,\lambda_{q+1}),D_{a+k}\otimes \wedge^{b-k})$, such that $$\phi_i(T)=c\triangle_{q}, \; c\in \mathbb{Z}-\{0\}.$$ By Remark 2.2, the left hand side has weight $$\left(1^{a+\lambda_1},2^{\lambda_2},\dots,i^{\lambda_i + \lambda_{i+1}}, \dots ,q^{\lambda_{q+1}}\right),$$
which must be equal to the weight $\left(1^a,2,\dots, q-1, q^{k+1}\right)$ of the right hand side. So $\lambda_1 =0$ and if $1<i\leq q-1$ then $\lambda_i + \lambda_{i+1}=1$ which contradicts the hypothesis $\lambda_i \geq 1$ for $i \in \{2,\dots ,q+1\}$. This implies that $i=q$, $\lambda_1=0$, $\lambda_j=1$ for $j \in \{2,\dots, q-1\}$ and $\lambda_{q} + \lambda_{q+1}=k+1$. Hence $T=1^{(a-1)}{q}^{\lambda_{q}} {(q+1)}^{\lambda_{q+1}} \otimes1 \dots (q-1)$, where $\lambda_{q} + \lambda_{q+1} =k+1$. For such a $T$ we have $(-1)^{q-1}\phi_{q}(T)=(-1)^{q-1}\tbinom{k+1}{\lambda_q}\triangle_{q},$ which proves the claim.
It follows that $gcd\left(\tbinom{k+1}{1},\dots,\tbinom{k+1}{k}\right)=1$ contradicting the assumption $k+1=p^e$, $p$ prime. Hence $\pi(\Gamma_k)\neq 0$. \end{proof}
Let $q=b-k+1$. Define $T_{1,j} \in \mathrm{Hom}(D(a+k-1, 1,\dots ,1), D_{a+k}\otimes \wedge^{b-k})$, $j=2,\dots,q+1$, $$T_{1,j}=1^{(a+k-1)}j \otimes 2\dots \widehat{j} \dots (q+1), $$ and $T_{i,j} \in \mathrm{Hom}(D(a,1,\dots,k,\dots,1), D_{a+k}\otimes \wedge^{b-k})$, where $k$ is at the $i$-th position, $i=2,\dots,q$, $j=i+1,\dots,q+1,$ $$T_{i,j}=1^{(a-1)}i^{(k)}j \otimes1\dots\widehat{i}\dots\widehat{j} \dots (q+1). $$ Let $$A=\tbinom{a+k-1}{k} \sum_{j=2}^{q+1}(-1)^jT_{1,j} + \sum_{i=2, j>i}^{q, q+1}(-1)^{j-i-1}T_{i,j}$$ and consider $\phi(A)$, where $\phi$ is the differential $\phi=\mathrm{Hom}(\theta_k(a,b),D_{a+k}\otimes\wedge^{b-k})$.
\begin{lm} We have $\phi(A)=(k+1)\Gamma_k$. Moreover, if $k+1=p^e$, $p$ prime, then $\pi(\Gamma_k)$ is a generator of $\mathrm{Ext}^k(\Delta(\mathrm{h}), D_{a+k}\otimes\wedge^{b-k})$.
\end{lm} \begin{proof} For the first statement, it suffices to show that \begin{equation*}\phi_1(A)=\tbinom{a+k-1}{k}(a+k)\triangle_{1} \;\; \hbox{and} \; \; \phi_t(A)=(k+1)\triangle_{t}
\end{equation*}
for $t=2,\dots,q$, since $\binom{a+k-1}{k}(a+k)=(k+1)\binom{a+k}{k+1}.$ Using Remark 2.2 an immediate calculation in each case shows the following.\\
$ i=1 $: Then $\phi_1(T_{1,2})=(a+k)\triangle_1$.\\
$ i=2 $: Then $\phi_1(T_{2,j})=\binom{a+k-1}{k}\phi_1(T_{1,j})$ for all $j \ge 3.$\\
$ i>2$: Then $\phi_1(T_{i,j})=0$ for all $j>i$.
Upon substituting, \begin{align*}\phi_1(A)=&\tbinom{a+k-1}{k}(a+k)\triangle_1+\tbinom{a+k-1}{k}\sum_{j=3}^{q+1}(-1)^j\phi_1(T_{1,j}) +\sum_{j=3}^{q+1}(-1)^{j-3}\phi_1(T_{2,j})\\ =&\tbinom{a+k-1}{k}(a+k)\triangle_1. \end{align*} Similarly for $t>1$, an immediate calculation in each case yields the following.\\$ i<t $: Then $\phi_t(T_{i,t})=\phi_t(T_{i,t+1})$ and $\phi_t(T_{i,j})=0$ if $j \neq t,t+1$.\\$ i=t $: Then $\phi_t(T_{t,t+1})=(k+1)\triangle_t$ and $\phi_t(T_{t,j})=\phi_t(T_{t+1,j})$ if $j \ge t+2$.\\$ i\ge t+2 $: Then $\phi_t(T_{i,j})=0$ for all $j>i$.
Upon substituting, \begin{align*}\phi_t(A)=&\tbinom{a+k-1}{k}\left((-1)^t\phi_t(T_{1,t})+(-1)^{t+1}\phi_t(T_{1,t+1})\right)\\&+\sum_{i=2}^{t-1}\left((-1)^t\phi_t(T_{i,t})+(-1)^{t+1}\phi_t(T_{i,t+1})\right) \\&+(k+1)\triangle_t+\sum_{j=t+2}^{q+1}\left((-1)^{j-t-1}\phi_t(T_{t,j})+(-1)^{j-t}\phi_t(T_{t+1,j})\right)\\ =& (k+1)\triangle_t.\end{align*}
Let $k+1=p^e$, $p$ prime. By Lemma 4.3 and the first part of the present Lemma, $\pi(\Gamma_k)$ is a nonzero torsion element of the abelian group $E^k(\Delta(\mathrm{h}), D_{a+k}\otimes\wedge^{b-k})$. Thus it is a nonzero element of $\mathrm{Ext}^{k}(\Delta(\mathrm{h}), D_{a+k}\otimes\wedge^{b-k})$ which according to Lemma 2.3 is $\mathbb{Z}_p$. Hence it is a generator of $\mathrm{Ext}^{k}(\Delta(\mathrm{h}), D_{a+k}\otimes\wedge^{b-k})$.\end{proof}
\subsection{Relations and a generator of $E^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$} Let $q=b-k+1$ and define $\delta_1\in \mathrm{Hom}(D(a+k,1,\dots,1),\Delta(\mathrm{h}(k)))$ and $\delta_{i,j} \in \mathrm{Hom}(D(a+k-j,1,\dots,j+1,\dots ,1), \Delta(\mathrm{h}(k)))$, where $j+1$ is located at the i-th position, by
\begin{align*}&\delta_1=1^{(a+k)}|2\dots q, \\ &\delta_{i,j} = 1^{(a+k-j)}i^{(j)}| 2 \dots q, \; i=2,\dots,q, \; j=0, \dots k,\end{align*}
where it is understood that for $j=0$ we have $\delta_{i,0}=\delta_1.$ \begin{lm} In $E^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$ the following relations hold. \[\pi(\delta_{i,j}) = \tbinom{a+k+i-2}{j} \pi(\delta_1), \; i=2,\dots,q-1, \; j=0,\dots,k. \] \end{lm}
\begin{proof} Let $S_{i,j} \in \mathrm{Hom}(P_{k-1}(a,b),\Delta(\mathrm{h}(k)))$, $i=1,\dots,q$, $$S_{i,j}=1^{(a+k-j)}(i+1)^{(j)}|2\dots \widehat{(i+1)} \dots (q+1)$$ and consider the differential in degree $k$ \[\sum_{t \ge 1}(-1)^{t-1}\phi_t: \mathrm{Hom}(P_{k-1}(a,b),\Delta(\mathrm{h}(k))) \rightarrow \mathrm{Hom}(P_{k}(a,b),\Delta(\mathrm{h}(k)))\] of the complex $\mathrm{Hom}(P_*(a,b),\Delta(\mathrm{h}(k)))$. An immediate calculation in each case using Remark 2.2 (and the straightening law for the first equality in the case $i>1$) yields the following.
\begin{alignat*}{2}i=1: \;&\phi_1(S_{1,j})=\tbinom{a+k}{j} \delta_1, \; \phi_2(S_{1,j})=\delta_{2,j}, \; \phi_t(S_{1,j})=0, \; t>2.\\
i>1: \;&\phi_1(S_{i,j}) ={(-1)}^{i-1} \delta_{i,j-1}, \; \phi_t(S_{i,j}) = 0, \; t\in \{2,\dots,i-1\},\\
&\phi_i(S_{i,j})=\delta_{i,j},\; \phi_{i+1}(S_{i,j})=\delta_{i+1,j},\; \phi_t(S_{i,j})=0, \;t \ge i+2.\end{alignat*}
In $E^{k}(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$, we have the relations \[\sum_{t=1}^{q}(-1)^{t-1} \pi(\phi_t(S_{i,j}))=0, \; i=1,\dots,q.\] Substituting the above for $i=1$ yields \begin{equation} \pi(\delta_{2,j}) = \tbinom{a+k}{j} \pi(\delta_1), \; j=0,\dots,k\end{equation} and substituting the above for $i \ge 2$ yields \begin{equation} \pi(\delta_{i+1,j})=\pi(\delta_{i,j})+\pi(\delta_{i,j-1}), \; i=2,\dots,q-1, \; j=1,\dots,k. \end{equation} The equation of the Lemma follows by induction on $i$ using (4.2) and (4.3). \end{proof}
\begin{lm} $E^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$ is a cyclic group generated by $\pi(\delta_1)$. \end{lm} \begin{proof} Induction on $k$, the case $k=1$ owing to the first paragraph of the proof of Lemma 3.3.
Let $T \in \mathrm{Hom}(P_{k}(a,b),\Delta(\mathrm{h}(k)))$ be a semi-standard tableau of weight $\lambda=(\lambda_1,...,\lambda_q)$, $q=b-k+1$. If $\lambda_1 >a $ then by induction and Lemma 4.2 (since $\mathrm{h}(k)=(a+1+(k-1), 1^{b-1-(k-1)})$), $\pi(T)$ is a multiple of $\pi(\delta_1)$.
Suppose $\lambda_1=a$ in which case $T = 1^{(a)}2^{(\lambda_2 -1)}\dots q^{(\lambda_{q} -1)} | 2 \dots q.$ (In this notation it is understood that if $\lambda_j=1$, then the term $j^{(\lambda_j-1)}$ is omitted.) We will show that there are semi-standard tableaux $T_j \in \mathrm{Hom}(P_{k}(a,b),\Delta(\mathrm{h}(k)))$ and $a_j \in \mathbb{Z}$ such that $\pi(T) =\sum_{j}a_j\pi(T_j)$ and $T_j<T$ for every $j$ in the lexicographic ordering of semi-standard tableaux of $\mathrm{Hom}(P_{k}(a,b),\Delta(\mathrm{h}(k)))$. Since this set is finite and $\delta_1$ is the least element, we obtain by induction on the ordering that $\pi(T)=c\,\pi(\delta_1)$ for some $c \in \mathbb{Z}$.
There exists an $i \ge 2$ such that $\lambda_i \ge 2$ because $k>0.$ Let $m$ be the largest such $i$ and let
$$S=1^{(a)}2^{(\lambda_2 -1)}\dots m^{(\lambda_{m} -1)} | 2 \dots \widehat{m} \dots (q+1),$$
which is a semi-standard tableau in $\mathrm{Hom}(P_{k-1}(a,b),\Delta(\mathrm{h}(k))).$ Then for $t\in \{1,\dots,q\}$, straightforward calculations yield the following, where we assume that $m \ge 3$.\begin{enumerate}
\item $\phi_1(S)= (-1)^{m-2}\tbinom{a+\lambda_2-1}{\lambda_2-1}1^{(a+\lambda_2)}2^{(\lambda_3-1)}\dots (m-1)^{(\lambda_m-2)}|2\dots q.$
\item $\phi_t(S)=0$ if $m \ge 4$, $t=2,...,m-2$.
\item $\phi_{m-1}(S)= (-1)^m \tbinom{\lambda_{m-1}+\lambda_m-2}{\lambda_m-2} 1^{(a+\lambda_2)}2^{(\lambda_3-1)}\dots (m-1)^{(\lambda_{m-1}+\lambda_m-2)}|2\dots q.$
\item
$\phi_m(S)= {(-1)}^{m-1} T$.
\item $\phi_t(S)=0$, if $t \ge m+1$.\end{enumerate}
The tableaux in the right-hand sides of equations (1) and (3) are semi-standard and less than $T$ in our ordering since $\lambda_2>0$, and the coefficient of $T$ in the right hand side of (4) is $\pm1$. Hence the desired result for $m \ge 3$ follows from $\sum_{t=1}^q(-1)^{t-1}\pi\phi_t(S)=0$.
Let $ m=2 $. Then similarly, $0=\sum_{t=1}^{q}(-1)^{t-1}\pi\phi_t(S)=\tbinom{a+\lambda_2-1}{\lambda_2-1}\pi(\delta_1)-\pi(T)$ and the result follows.\end{proof}
\subsection{Proof of Theorem 4.1} We prove Theorem 4.1 by induction on $k$, the case $ k=1 $ owing to \cite{Ma}, Theorem 6. Applying $\mathrm{Hom}(\Delta(\mathrm{h}),-)$ to the short exact sequence $$0 \rightarrow \Delta(\mathrm{h}(k+1)) \xrightarrow {i_k} D_{a+k} \otimes \wedge^{b-k} \xrightarrow {\pi_k} \Delta(\mathrm{h}(k)) \rightarrow 0 $$ yields the exact sequence \begin{align}\cdots &\rightarrow \mathrm{Ext}^k(\Delta(\mathrm{h}),D_{a+k} \otimes \wedge^{b-k}) \xrightarrow {\pi^*_k} \mathrm{Ext}^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k))) \rightarrow \nonumber \\ &\rightarrow \mathrm{Ext}^{k+1}(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k+1))) \rightarrow 0 \end{align} because from Lemma 2.3, $ \mathrm{Ext}^{k+1}(\Delta(\mathrm{h}),D_{a+k} \otimes \wedge^{b-k})= \mathrm{Ext}^{k+1}(\wedge^{k+1}, D_{k+1})=0$ as the length of the projective resolution $P_*(1,k)$ of $\wedge^{k+1}$ is less than $k+1$. By Lemma 2.3 we have $\mathrm{Ext}^k(\Delta(\mathrm{h}),D_{a+k} \otimes \wedge^{b-k})= \mathbb{Z}_{r_k}.$
If $\mathrm{Ext}^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))=0$, then by induction $d_k=1$. Since $d_{k+1}|d_k$, we have $ d_{k+1}=1.$ Moreover, $\mathrm{Ext}^{k+1}(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k+1)))=0$ by (4.4) and hence the result holds in this case.
We may assume that $\mathrm{Ext}^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))\neq0$. Since $E^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$ is a cyclic $\mathbb{Z}$-module according to Lemma 4.6, and its torsion subgroup is nonzero, we have $E^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$ $=\mathrm{Ext}^k(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$.\\ \textit{Case 1}: Let $k+1=p^e$, $p$ prime. By Lemma 4.4, $\pi(\Gamma_k)$ is a generator of $\mathrm{Ext}^k(\Delta(\mathrm{h}),D_{a+k} \otimes \wedge^{b-k})$. We compute its image under the map $\pi_k^*$ of (4.4).
With the notation established at the beginning of subsections 4.1 and 4.2 and using the straightening law and Lemma 4.5 we have
\begin{align*}
\pi^*_k(\pi(\triangle_i)) &= \pi(1^{(a-1)} i^{(k+1)}|1 \dots \widehat{i} \dots q)= {(-1)}^{i+1}\pi(\delta_{i,k})\\&={(-1)}^{i+1}\tbinom{a+k+i-2}{k}\pi(\delta_{1}).
\end{align*}
By substituting in (4.1) and using the binomial coefficient identity $$\tbinom{a+k}{k+1} + \sum_{i=2}^{q}\tbinom{a+k+i-2}{k} = \tbinom{a+b}{k+1},$$ we obtain $\pi^*_k (\Gamma_k) =\tbinom{a+b}{k+1} \pi(\delta_{1}).$ Since, by Lemma 4.6, $\pi(\delta_1)$ is a generator of $\mathrm{Ext}^{k}(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k)))$, we obtain from (4.4) and the induction hypothesis that $\mathrm{Ext}^{k+1}(\Delta(\mathrm{h}),\Delta(\mathrm{h}(k+1)))=\mathbb{Z}_{d_{k+1}}$.\\
\textit{Case 2}: Suppose $k+1$ is divisible by two distinct primes, whence according to Lemma 2.3, $\mathrm{Ext}^k(\Delta(\mathrm{h}),D_{a+k} \otimes \wedge^{b-k})=0$. From (4.4) it suffices to show that $ d_k=d_{k+1}.$
By Theorem 1 of \cite{JOS}, $d_k=\frac{a+b}{l_k}$, where $l_k=lcm(1^{\eta_1},2^{\eta_2}, \dots ,k^{\eta_k})$ and $\eta_i=1$ if $i | a+b$, and $\eta_i=0$ otherwise.
If $k+1 \not| a+b$, then $\eta_{k+1} =0$ and hence $l_k=l_{k+1}$. If $k+1 | a+b$, then, since $ k+1 $ is divisible by two distinct primes, every prime power factor of $k+1$ is less than $k+1$. Hence $l_{k+1}=l_{k}$. \qed
Let $\mathbb{K}$ be an infinite field of characteristic $p>0$, $S_{\mathbb{K}}(n,r)$ the Schur algebra for $GL_n(\mathbb{K})$ and $\Delta_{\mathbb{K}}(\lambda)$ the Weyl module for $S_{\mathbb{K}}(n,r)$ corresponding to a partition $\lambda$ of $r$ with at most $ n $ parts. Then $S_{\mathbb{K}}(n,r)$ =$\mathbb{K}\otimes S(n,r)$ and $\Delta_{\mathbb{K}}(\lambda)$ =$\mathbb{K}\otimes \Delta(\lambda)$. From this and the universal coefficient theorem \cite{AB}, Theorem 5.3, our results yield the following. \begin{cor} Let $\mathbb{K}$ be an infinite field of characteristic $p>0$ and $n\ge b+1$.\begin{enumerate}
\item Let $2 \le k \le b$. Then $\mathrm{Hom}_{S_{\mathbb{K}}(n,r)}(\Delta_{\mathbb{K}}(\mathrm{h}), \Delta_{\mathbb{K}}(\mathrm{h}(k)))=0$, unless $p=2$ and $ a+b+k $ is odd, in which case $\mathrm{Hom}_{S_{\mathbb{K}}(n,r)}(\Delta_{\mathbb{K}}(\mathrm{h}), \Delta_{\mathbb{K}}(\mathrm{h}(k)))=\mathbb{K}$.
\item Let $1 \le k \le b$. Then $\mathrm{Ext}^{k}_{S_{\mathbb{K}}(n,r)}(\Delta_{\mathbb{K}}(\mathrm{h}), \Delta_{\mathbb{K}}(\mathrm{h}(k)))=0$, unless $p|\tbinom{a+b}{i}, i=1,...,k$, in which case $\mathrm{Ext}^{k}_{S_{\mathbb{K}}(n,r)}(\Delta_{\mathbb{K}}(\mathrm{h}), \Delta_{\mathbb{K}}(\mathrm{h}(k)))=\mathbb{K}$. \end{enumerate}\end{cor}
\section{Acknowledgments} We thank H. H. Andersen for various helpful discussions and for pointing out an error in the Remark after Theorem 4.1 and suggesting a proof. We thank the reviewer for detailed constructive comments and suggestions that helped improve the presentation and clarity of the paper and for pointing out an error in the Remark after Theorem 4.1 and suggesting a proof.
\end{document} | arXiv | {
"id": "2005.00578.tex",
"language_detection_score": 0.5977299809455872,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\sloppy
\title{Duality between Hyperbolic and de Sitter Geometry}
\maketitle
\begin{abstract} In this paper we describe trigonometry on the de Sitter surface. For that a characterization of geodesics is given, leading to various types of triangles. We define lengths and angles of these. Then, transferring the concept of polar triangles from spherical geometry into the Minkowski space, we relate hyperbolic with de Sitter triangles such that the proof of the hyperbolic law of cosines for angles becomes much clearer and easier than it is traditionally. Furthermore, polar triangles turn out to be a powerful tool for describing de Sitter trigonometry. \end{abstract}
\tableofcontents
\section*{Notation} \addcontentsline{toc}{section}{Notation}
Throughout this paper we use the following notations, mostly without further explanation in the main body:
\begin{tabular}{p{0.05\textwidth}p{0.85\textwidth}}
${\ensuremath{\langle\!\langle}}.,.{\ensuremath{\rangle\!\rangle}}$ & the Minkowski product on $\ensuremath{\mathbb{R}}^3$, i.e.\ the bilinear form given by the matrix ${\ensuremath{\mathbb{J}}}:=(-e_1,e_2,e_3)$, where $\{e_1,e_2,e_3\}$ is the standard basis of $\ensuremath{\mathbb{R}}^3$ \\
${\ensuremath{\lvert\!\lvert\!\lvert}}.{\ensuremath{\rvert\!\rvert\!\rvert}}$ & the Minkowski (pseudo-)norm: ${\ensuremath{\lvert\!\lvert\!\lvert}} x {\ensuremath{\rvert\!\rvert\!\rvert}}=\sqrt{\lvert{\ensuremath{\langle\!\langle}} x,x{\ensuremath{\rangle\!\rangle}}\rvert}$ \\
$\ensuremath{\mathcal{C}}$ & the light cone, i.e.\ the solution set of ${\ensuremath{\lvert\!\lvert\!\lvert}} x{\ensuremath{\rvert\!\rvert\!\rvert}}=0$ \\
${\ensuremath{\mathbb{HS}}}^2$ & the solution set of ${\ensuremath{\lvert\!\lvert\!\lvert}} x {\ensuremath{\rvert\!\rvert\!\rvert}}=1$, consisting of the de Sitter surface $S^{1,1}$ and two copies of the hyperbolic surface, which we denote by $H^2$ (the part containing $e_1$) and $(-H^2)$, respectively \\
$\hat{x}$ & $x$ divided by its Minkowski norm. Obviously, $x$ must not be in $\ensuremath{\mathcal{C}}$. \\
${\ensuremath{\mathop{\perp\:\!\!\!\!\!\perp}}}$ & Minkowski or Lorentz orthogonal: $x{\ensuremath{\mathop{\perp\:\!\!\!\!\!\perp}}} y:\Leftrightarrow{\ensuremath{\langle\!\langle}} x,y{\ensuremath{\rangle\!\rangle}}=0$ \\
$\ensuremath{\mathcal{L}}(3)$ & the Lorentz group. Its elements are the Lorentz transformations. \\
$d_H$ & {\raggedright the hyperbolic distance, a metric on $H^2$:\\$d_H(x,y):={\ensuremath{\mathrm{arcosh}}}(-{\ensuremath{\langle\!\langle}} x,y {\ensuremath{\rangle\!\rangle}})$} \\
$d_H'$ & {\raggedright the antipodal hyperbolic distance, a metric on $(-H^2)$:\\$d_H'(x,y):=d_H(-x,-y)$} \\
$\dd$ & the proper de Sitter distance, a pseudometric on $S^{1,1}$ (will be defined later) \\
$d_{\ensuremath{\mathbb{HS}}}$ & the generalized de Sitter distance, which equals $d_H$, $d_H'$ or $\dd$ depending on the surface that the points are located on \\
$\ooverline{ab}$ & the generalized de Sitter segment (defined later) \end{tabular}
\section{Introduction}
There exists a vast variety of books dealing with hyperbolic trigonometry. They give one or two laws of cosines and the law of sines, and almost each of these books provides another proof for those theorems. The literature given in the references section shows some typical ways to prove the hyperbolic trigonometric rules.
The most elementary proofs may be found in Wilson \cite{Wil} and Anderson \cite{And}. Wilson constructs a triangle with given side lengths in the hyperboloid model, where the first point equals $e_1$, the second one is located in the $e_1$-$e_2$-plane, and the third one has a positive third coordinate. A simple computation yields the law of cosines for sides and the law of sines. By congruence, these results can be generalized to arbitrary triangles. The law of cosines for angles is not mentioned. Anderson goes mainly the same way, except that he uses the Poincar\'e disc model, where the constructed triangle has one point in the origin, the second one on the positive real axis, and the third one has positive imaginary part. He directly derives the law of cosines for sides. For both of the other trigonometric laws he needs a purely algebraic but not obvious computation.
Iversen \cite{Ive} and Ungar \cite{Ung} prove the trigonometric rules in a more direct and simple way, but use rather abstract models. Iversen describes hyperbolic geometry in the $sl_2$ model, where each point of $H^2$ is given by a $2\times2$ matrix with vanishing trace, determinant equal to 1 and positive lower left component. Ungar uses the gyrovector space, where even the trigonometric rules themselves take an almost unrecognizable form.
Finally, Thurston \cite{Thu} works in the hyperboloid model. He uses the fact that a hyperbolic triangle $\{v_1,v_2,v_3\}$ forms a basis of $\ensuremath{\mathbb{R}}^3$, computes the dual basis $\{w_1,w_2,w_3\}$ and then shows that the matrix $\bigl({\ensuremath{\langle\!\langle}} w_i,w_j{\ensuremath{\rangle\!\rangle}}\bigr)_{i,j}$ is the inverse of $\bigl({\ensuremath{\langle\!\langle}} v_i,v_j{\ensuremath{\rangle\!\rangle}}\bigr)_{i,j}$. From this he obtains the law of cosines for sides. He repeats the same with the $v_i$ lying on the de Sitter surface. However, Thurston does not use these vectors to describe de Sitter geometry, but to describe hyperbolic geodesics (or hyperplanes, in higher dimensions). To then obtain the law of cosines for angles, he needs to take enhanced care of the occurring signs. The law of sines is derived by algebraic transformations from the law of cosines for sides, firstly only for right triangles. Dividing an arbitrary triangle into two right triangles by one of its altitudes and applying the known relation to these right triangles yields the general law of sines.
In this work we only use the hyperboloid model, in which we are able to provide simple and illustrative proofs for all of the trigonometric laws for hyperbolic geometry. Our proof for the hyperbolic law of cosines for sides is similar to the one given by Iversen. From the parametrization of hyperbolic geodesics, we can compute unit tangent vectors at one vertex of a triangle, pointing in the direction of another vertex: $$ X_{AB}=\frac{B+{\ensuremath{\langle\!\langle}} A,B{\ensuremath{\rangle\!\rangle}} A}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}}. $$ We find the size of an angle at a vertex, according to its definition, by applying the Minkowski product to the respective tangent vectors: $$ \cos(\alpha)={\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}. $$ This yields directly the hyperbolic law of cosines for sides $$ \cosh(a)=\cosh(b)\cosh(c)-\cos(\alpha)\sinh(b)\sinh(c). $$
The same can be done for a certain kind of triangles on the de Sitter surface, which we call non-contractible spatiolateral triangles. We obtain for these triangles the following law of cosines for sides: $$ \cos(a)=\cos(b)\cos(c)-\cosh(\alpha)\sin(b)\sin(c). $$
By the duality between the hyperbolic plane and the de Sitter surface we get the relation $\alpha=\pi-a'$ between the angles of a hyperbolic triangle $\{A,B,C\}$ and the sides of the associated polar triangle $\{A',B',C'\}$, which corresponds to the dual basis used in Thurston's work, save that they are normalized: $$ A':=\det(A,B,C)\frac{B\times C}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}}. $$ Conversely, we relate the sides of the hyperbolic triangle with the angles of the polar triangle and get $a=\alpha'$. Plugging in these relations into the law of cosines for sides in non-contractible spatiolateral triangles, we get the hyperbolic law of cosines for angles, $$ \cos(\alpha)=-\cos(\beta)\cos(\gamma)+\cosh(a)\sin(\beta)\sin(\gamma). $$
To obtain the hyperbolic law of sines, we again use the duality to see that $$ \sin(\alpha)=\sin(a')=\frac{\lvert\det(A,B,C)\rvert}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}\cdot{\ensuremath{\lvert\!\lvert\!\lvert}} A\times C{\ensuremath{\rvert\!\rvert\!\rvert}}}. $$ Dividing by $\sinh(a)={\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}$ gives a term that is symmetric in $A$, $B$, and $C$, which proves the theorem.
On the way to these results, we get all these trigonometric rules for three types of de Sitter triangles, called contractible spatiolateral, non-contractible spatiolateral, and tempolateral triangles. Furthermore, we are able to make statements about the sum of the side lengths in spatiolateral triangles, and we show that in every contractible spatiolateral triangle there exists one side longer than the sum of the other sides.
\section{Fundamentals of Minkowski Geometry}
When we talk about Minkowski space, we think of $\ensuremath{\mathbb{R}}^n$ with a non-degenerate, symmetric bilinear form ${\ensuremath{\langle\!\langle}}.,.{\ensuremath{\rangle\!\rangle}}$, where the matrix representing ${\ensuremath{\langle\!\langle}}.,.{\ensuremath{\rangle\!\rangle}}$ has the eigenvalues 1 with multiplicity $(n-1)$, and $-1$ with multiplicity 1. In this paper we deal with three-dimensional Minkowski space exclusively, because when observing triangles on a higher than two-dimensional hyperbolic or de Sitter surface, each such triangle lies completely in a two-dimensional submanifold that is either hyperbolic, de Sitter, or spherical.
In the two-dimensional case, thus, the bilinear form is represented by $(-e_1,e_2,e_3)$, as given in the ``notation'' section. Because of its indefiniteness, the Minkowski product gives a partition of $\ensuremath{\mathbb{R}}^3$, according to the sign of the associated quadratic form. A vector $x\in\ensuremath{\mathbb{R}}^3$ is called \begin{itemize}
\item \emph{timelike,} if ${\ensuremath{\langle\!\langle}} x,x{\ensuremath{\rangle\!\rangle}}<0$;
\item \emph{lightlike,} if ${\ensuremath{\langle\!\langle}} x,x{\ensuremath{\rangle\!\rangle}}=0$ and $x\neq0$; and
\item \emph{spacelike,} if ${\ensuremath{\langle\!\langle}} x,x{\ensuremath{\rangle\!\rangle}}>0$ or $x=0$, respectively. \end{itemize} Timelike and lightlike vectors are embraced by the term \emph{``causal''} vectors. The lightlike vectors together with 0 form the \emph{light cone} $\ensuremath{\mathcal{C}}$.
Obviously, lightlike vectors cannot be normalized. The set of normalized vectors, denoted by ${\ensuremath{\mathbb{HS}}}^2$, consists of three components, as figure~\ref{C,SS2} shows.
\begin{figure}\label{C,SS2}
\end{figure}
The outer surface is the de Sitter surface $S^{1,1}$, which is a Lorentz manifold, while the inner ones are two copies of the hyperbolic surface, which is a Riemannian manifold. According to the sign of the first component, the copies are denoted by $H^2$ and $(-H^2)$, respectively.
We call two vectors $x$ and $y$ \emph{Lorentz} or \emph{Minkowski orthogonal,} if ${\ensuremath{\langle\!\langle}} x,y {\ensuremath{\rangle\!\rangle}}=0$. Euclidean orthogonality is neither a necessary nor a sufficient condition for Lorentz orthogonality, but they may go together. For example, $(1,1,0)^t$ is Minkowski orthogonal to itself for it is lightlike, whereas $(1,1,0)^t$ and $(-1,1,0)^t$ are Euclidean, but not Lorentz orthogonal. Vectors $e_1$ and $e_2$ are orthogonal on both counts.
\Bem{\label{bem:lichtlicht} Two lightlike vectors are Minkowski orthogonal if and only if they are linearly dependent. To obtain this, we write
$$x=\spacevec{x_1}{x_1\cos\alpha}{x_1\sin\alpha} \quad \mbox{and} \quad
y=\spacevec{y_1}{y_1\cos\beta}{y_1\sin\beta}.$$ Computing
$$0={\ensuremath{\langle\!\langle}}{}x,y{\ensuremath{\rangle\!\rangle}}=x_1y_1(\cos\alpha\cos\beta+\sin\alpha\sin\beta-1)$$ leads to
$$\alpha=\beta+2k\pi,$$ and thus $\sin\alpha=\sin\beta$ and $\cos\alpha=\cos\beta$ hold, showing linear dependence.}
\Lemma{\label{satz:exist_MOB} Let $U\subset\ensuremath{\mathbb{R}}^3$ be a vector subspace.
Then, there exists a Lorentz orthogonal basis of $U$, i.e.~a basis $\{b_i\}$ which satisfies $b_i{\ensuremath{\mathop{\perp\:\!\!\!\!\!\perp}}} b_j$ whenever $i\neq j$.}
\Beweis{Since ${\ensuremath{\langle\!\langle}}.,.{\ensuremath{\rangle\!\rangle}}|_U$ is symmetric, the matrix representing this bilinear form can be diagonalized. This is equivalent to the statement of the lemma.}
We call a basis $\{b_i\}$ \emph{Lorentz} or \emph{Minkowski orthonormal,} if it is Lorentz orthogonal and
$$ {\ensuremath{\langle\!\langle}}{}b_i,b_i{\ensuremath{\rangle\!\rangle}}=\begin{cases}-1,&i=1\\1,&\mbox{otherwise}\end{cases}$$ holds. The Lorentz group
\[\ensuremath{\mathcal{L}}(3):=\left\{\Phi\in{\ensuremath{\mathrm{Hom}}}(\ensuremath{\mathbb{R}}^3,\ensuremath{\mathbb{R}}^3)\ensuremath{\,|\,}{\ensuremath{\langle\!\langle}}\Phi(x),\Phi(y){\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} x,y {\ensuremath{\rangle\!\rangle}}\;\forall x,y\in\ensuremath{\mathbb{R}}^3\right\}\] is therefore equivalently characterized by consisting of matrices $(b_1,b_2,b_3)$ with $\{b_1,b_2,b_3\}$ being a Minkowski orthonormal basis of $\ensuremath{\mathbb{R}}^3$.
\Lemma{\label{folg:Basis_zeit_2raum} Each Lorentz orthogonal basis of $\ensuremath{\mathbb{R}}^3$ consists of one timelike vector and two spacelike vectors.}
\Beweis{This again is pure linear algebra, namely Sylvester's law of inertia applied to ${\ensuremath{\langle\!\langle}}.,.{\ensuremath{\rangle\!\rangle}}$.}
It can be easily proven that any nonzero vector Lorentz orthogonal to a timelike one must itself be spacelike (cf.~Naber \cite{Nab}),
so together with note~\ref{bem:lichtlicht} one can conclude that every two-dimensional subspace of $\ensuremath{\mathbb{R}}^3$
contains a nonzero spacelike vector.
This finding allows us to classify those subspaces by the type of the second vector in a basis.
\dfn{Let $U$ be a two-dimensional vector subspace of $\ensuremath{\mathbb{R}}^3$.
Let further $\{b_1,b_2\}$ be a Minkowski orthogonal basis of $U$, with $b_1$ being spacelike.
We call $U$
\begin{enumerate}
\item \DEF{spacelike,} if $b_2$ is spacelike as well;
\item \DEF{lightlike,} if $b_2$ is lightlike; and
\item \DEF{timelike,} if $b_2$ is timelike.
\end{enumerate}}
This classification is independent of the choice of the basis, and the property of a plane to be spacelike, timelike, or lightlike, respectively, does not change under Lorentz transformations.
\begin{figure}
\caption{Examples of spacelike, lightlike, and timelike planes}
\end{figure}
Geodesics on ${\ensuremath{\mathbb{HS}}}^2$ (so-called \emph{generalized de Sitter geodesics}) can be obtained from the intersection of a two-dimensional vector subspace of $\ensuremath{\mathbb{R}}^3$ with ${\ensuremath{\mathbb{HS}}}^2$: The intersection of a spacelike or lightlike plane with $S^{1,1}$ will be a great ellipse or a pair of parallel lines, respectively. Such planes do not intersect with $(\pm H^2)$. Timelike planes, however, intersect with each component of ${\ensuremath{\mathbb{HS}}}^2$, giving two great hyperbolas (i.e.\ four great hyperbola branches).
Geodesics obtained in this way are the usual geodesics according to the Riemannian (for $H^2$) or semi-Riemannian metric (for $S^{1,1}$). Geodesics on $H^2$ we call \emph{hyperbolic,} whereas geodesics on $(-H^2)$ will be referred to as being \emph{antipodal hyperbolic.} Geodesics on $S^{1,1}$ are called \emph{(proper) de Sitter geodesics,} but we should keep in mind, that they can be of three different kinds: either ellipses, straight lines, or hyperbola branches.
For a metric on $H^2$, we choose the standard Riemannian metric \[d_H(x,y):={\ensuremath{\mathrm{arcosh}}}\bigl(-{\ensuremath{\langle\!\langle}} x,y{\ensuremath{\rangle\!\rangle}}\bigr) \quad \forall x,y\in H^2,\] which is called \emph{hyperbolic distance.} The same is done in the simplest way for $(-H^2)$, leading to the \emph{antipodal hyperbolic distance} \[d_H'(x,y):=d_H(-x,-y) \quad \forall x,y\in (-H^2).\] One can easily see that both distance functions are well-defined and are indeed metrics.
Things get more complicated where the de Sitter surface is concerned. On the hyperbolic plane, every two points are located on some great hyperbola branch, thus making it sensible to define their distance by using hyperbolic functions. However, on the de Sitter surface, two points may be located on a great hyperbola branch as before; but they can also be on a great ellipse, where it would be more reasonable to use trigonometric functions for defining a metric. We have found the following definition to be the most logical:
\dfn{For every two points $x,y\in S^{1,1}$ we call \[\dd(x,y)=\begin{cases}
{\ensuremath{\mathrm{arcosh}}}({\ensuremath{\langle\!\langle}} x,y{\ensuremath{\rangle\!\rangle}}),&\mbox{if $x-y$ is timelike,}\\
0,&\mbox{if $x-y$ is lightlike,}\\
\infty,&\mbox{if } {\ensuremath{\langle\!\langle}} x,y{\ensuremath{\rangle\!\rangle}}\leq-1 \mbox{ and } x\neq-y,\\
\arccos({\ensuremath{\langle\!\langle}} x,y{\ensuremath{\rangle\!\rangle}})&\mbox{otherwise} \end{cases}\] the \emph{(proper) de Sitter distance} of $x$ and $y$.}
This distance is well-defined, but obviously not a metric. Nevertheless, it is non-negative and symmetric, and its value equals zero if and only if $x-y$ is located on the light cone. Under certain circumstances, also the triangle inequality holds true; but these circumstances will be dealt with in section \ref{Triangles}.
The cases in the definition also correspond to geometrical circumstances: The first case ($x-y$ being timelike) is equivalent to $x$ and $y$ lying on a great hyperbola branch. Vector $x-y$ lying on the light cone means that the geodesic connecting $x$ and $y$ is a straight line. In these cases, the de Sitter distance equals the ``time separation'' $\tau(x,y)$ (cf. O'Neill \cite{One}), where $y$ has greater or equal $e_1$-coordinate. In the remaining cases, this time separation is zero. The third case describes algebraically that $x$ and $y$ cannot be connected by a geodesic -- the reason is that the two-dimensional subspace spanned by these vectors is timelike or lightlike and $x$ and $y$ are located on the different components of the intersection with $S^{1,1}$. And finally, what is left: The ``otherwise'' condition reflects the property of $x$ and $y$ to be points on a great ellipse. Since the restriction of the Minkowski product to a spacelike vector subspace is a Riemannian metric, the last case describes the distance function defined by this metric.
Finally, we subsume all distance functions under the term \emph{(generalized) de Sitter distance,} symbolized with $d_{\ensuremath{\mathbb{HS}}}$ and defined as follows: \[d_{\ensuremath{\mathbb{HS}}}(x,y)=\begin{cases}
d_H(x,y),&\mbox{if } x,y\in H^2,\\
d_H'(x,y),&\mbox{if } x,y\in (-H^2),\\
\dd(x,y),&\mbox{if } x,y\in S^{1,1},\\
\infty,&\mbox{otherwise.} \end{cases}\] This new distance function defines the distance for every pair of points in ${\ensuremath{\mathbb{HS}}}^2$. The context will make clear, if the term ``de Sitter distance'' means the generalized or the proper de Sitter distance.
For two different points $A,B\in{\ensuremath{\mathbb{HS}}}^2$ with $d_{\ensuremath{\mathbb{HS}}}(A,B)<\infty$ we find a tangent vector $X_{AB}$ at $A$ pointing in the direction of $B$ by computing
\[X_{AB}:=\begin{cases} B-A,&\mbox{if ${\mbox{span}}\{A,B\}$ is lightlike,}\\
\frac{B+{\ensuremath{\langle\!\langle}} A,B{\ensuremath{\rangle\!\rangle}} A}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}},&\mbox{if $A,B\in(\pm H^2)$,}\\
\frac{B-{\ensuremath{\langle\!\langle}} A,B{\ensuremath{\rangle\!\rangle}} A}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}},&\mbox{otherwise.}
\end{cases}\]
\Bem{Except for the first case, $X_{AB}$ is normalized.
If ${\mbox{span}}(A,B)$ is spacelike or timelike, the denominator in the definition fraction can be replaced by $\sin(\dd(A,B))$ or $\sinh(d_{\ensuremath{\mathbb{HS}}}(A,B))$, respectively.}
Once we have tangent vectors, we can easily describe segments of de Sitter geodesics between two points. Of course, if no such geodesic exists, the segment should be empty. Thus, we define the \emph{(generalized) de Sitter segment} for $A\neq\pm B$, $d_{\ensuremath{\mathbb{HS}}}(A,B)<\infty$ as follows: \[\ooverline{AB}:=\begin{cases} \{A+tX_{AB}\ensuremath{\,|\,} t\in[0,1]\},&\mbox{if ${\mbox{span}}\{A,B\}$ is lightlike,}\\
\{\cos(t)A+\sin(t)X_{AB}\ensuremath{\,|\,} t\in[0,\dd(A,B)]\},&\mbox{if ${\mbox{span}}\{A,B\}$ is spacelike,}\\
\{\cosh(t)A+\sinh(t)X_{AB}\ensuremath{\,|\,} t\in[0,d_{\ensuremath{\mathbb{HS}}}(A,B)]\},&\mbox{otherwise.}
\end{cases}\]
In analogy to the naming of de Sitter geodesics, we can distinguish hyperbolic, antipodal hyperbolic, and proper de Sitter segments. In addition, proper de Sitter segments can be either line, great ellipse, or great hyperbola segments. In order not to confuse great hyperbola segments on the de Sitter surface with hyperbolic segments, we name proper de Sitter segments after the type of the plane whose intersection with $S^{1,1}$ gave the geodesic, i.e.\ line segments are called lightlike, great ellipse segments spacelike, and great hyperbola segments timelike.
We have two remaining cases to consider: If $A=B$, we define the generalized de Sitter segment to be \[\ooverline{AB}:=\{A\};\] and if $A=-B$ or $d_{\ensuremath{\mathbb{HS}}}(A,B)=\infty$, the segment is empty.
Note that for any Lorentz transformation $\Phi$, \[\ooverline{\Phi(A)\Phi(B)}=\Phi\left(\ooverline{AB}\right)\] holds.
Finally, because we are going to obtain trigonometric laws, we still need to define angles. We do this as follows:
\dfn{Let $A,B,C\in{\ensuremath{\mathbb{HS}}}^2$ be distinct.
If $\ooverline{AB}$ and $\ooverline{AC}$ are of the same type (either hyperbolic, antipodal hyperbolic, proper de Sitter timelike, or spacelike segments) and neither lightlike nor empty,
the de Sitter angle between these segments computes to
\[\varangle(B,A,C):=\begin{cases}{\ensuremath{\mathrm{arcosh}}}(\lvert{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}\rvert),&\mbox{for proper de Sitter segments;}\\
\arccos({\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}),&\mbox{otherwise.}\end{cases}\]}
\Bem{Applying a Lorentz transformation does not change de Sitter angles.}
\Bem{We do not have to explicitly compute the relevant tangent vectors, because we find the relation
\[{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}=\pm\ensuremath{\left\langle\!\left\langle}\frac{A\times B}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}},\frac{A\times C}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times C{\ensuremath{\rvert\!\rvert\!\rvert}}}\ensuremath{\right\rangle\!\right\rangle},\]
where the minus sign applies for $A,B,C\in S^{1,1}$, while the plus sign applies for $A,B,C\in H^2\cup(-H^2)$.}
\Beweis{Consider the case, that the relevant segments are proper de Sitter segments.
The other case can be proved analogously.
We have the definition
\[X_{AB}=\frac{B-{\ensuremath{\langle\!\langle}} A,B{\ensuremath{\rangle\!\rangle}}\cdot A}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}}.\]
Solving the equation for $B$ and plugging the solution into $A\times B$ leads to
\[A\times B={\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}\cdot A\times X_{AB}.\]
Now, doing the same for $A\times C$ results in
\begin{eqnarray*}
\ensuremath{\left\langle\!\left\langle} \frac{A\times B}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}},\frac{A\times C}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times C{\ensuremath{\rvert\!\rvert\!\rvert}}}\ensuremath{\right\rangle\!\right\rangle}
&=&{\ensuremath{\langle\!\langle}} A\times X_{AB},A\times X_{AC}{\ensuremath{\rangle\!\rangle}}\\
&=&-({\ensuremath{\langle\!\langle}} A,A{\ensuremath{\rangle\!\rangle}}{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}-{\ensuremath{\langle\!\langle}} A,X_{AC}{\ensuremath{\rangle\!\rangle}}{\ensuremath{\langle\!\langle}} A,X_{AB}{\ensuremath{\rangle\!\rangle}})\\
&=&-{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}.
\end{eqnarray*}
}
\Bem{De Sitter angles are well-defined:
If the involved segments are timelike, tangent vectors are timelike and normalized.
WLOG, let $X_{AB}=e_1$. Denote the components of $X_{AC}$ by $x_1,x_2,x_3$. Then,
\[|{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}|=|x_1|\geq\sqrt{x_1^2-x_2^2-x_3^2}={\ensuremath{\lvert\!\lvert\!\lvert}} X_{AC}{\ensuremath{\rvert\!\rvert\!\rvert}}=1\]
holds.
Thus, we can identify $|{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}|$ with a hyperbolic cosine.
When dealing with (antipodal) hyperbolic segments, let WLOG be $A=\pm e_1$.
Since the tangent vectors are Lorentz orthogonal to $A$, their first component must vanish, leading to
\[{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}=\langle X_{AB},X_{AC}\rangle\leq\lVert X_{AB}\rVert \lVert X_{AC}\rVert=1,\]
which allows us to identify this product with a cosine.
Finally, concerning spacelike segments, we note that ${\ensuremath{\mathbb{J}}}(A\times B)$ is Minkowski orthogonal to the (spacelike) vectors $A$ and $B$.
Therefore it has to be timelike.
Since ${\ensuremath{\mathbb{J}}}$ describes a Lorentz transformation, $A\times B$ is timelike as well.
WLOG, we let $A\times B=e_1$ and continue in the same way as we did with timelike segments.}
\Bem{If $\ooverline{AB}$ and $\ooverline{AC}$ are of different types or both lightlike,
the magnitude of the vertex angle at $A$ cannot reasonably be defined,
for in this case the value of ${\ensuremath{\langle\!\langle}} X_B,X_C{\ensuremath{\rangle\!\rangle}}$ can be any real number.}
\Bem{If one wants to define a congruence between angles on the de Sitter surface in a reasonable way,
one must take into account the sign of ${\ensuremath{\langle\!\langle}} X_B,X_C{\ensuremath{\rangle\!\rangle}}$.
If one did not, there are angles obviously not being congruent, but having the same magnitude, e.g.
$\varangle(B,A,B)$ and $\varangle(B,A,C)$ with $A$ being in $\ooverline{BC}\subset S^{1,1}$.}
\section{Classification of de Sitter Triangles} \label{Triangles}
By the term \emph{generalized de Sitter triangle} we denote any subset $\Delta\subset{\ensuremath{\mathbb{HS}}}^2$ consisting of three and only three elements. This definition includes triangles that have empty sides. It also includes degenerate triangles which have one vertex on the (open) segment between the other two, but excludes the case of two vertices being identical.
\Konv{We name the vertices of a triangle using capital letters;
the de Sitter distance or the segment between two vertices is denoted
by the corresponding lower case variant of the labeling of the remaining point.}
We now want to classify generalized de Sitter triangles according to the types of their sides.
\dfn{A generalized de Sitter triangle $\Delta$ is said to be
\begin{itemize}
\item \DEF{hyperbolic,} if $\Delta\subset H^2$;
\item \DEF{antipodal hyperbolic,} if $\Delta\subset(-H^2)$;
\item \DEF{proper,} if $\Delta\subset S^{1,1}$; and
\item \DEF{strange} otherwise.
\end{itemize}
The sides of strange triangles whose vertices are located on different components of ${\ensuremath{\mathbb{HS}}}^2$ are called
\DEF{strange segments.}
Every de Sitter triangle that is not proper is \DEF{improper.}
Proper de Sitter triangles may still have sides of different type.
Therefore these can be further divided into the following:
\begin{itemize}
\item \DEF{spatiolateral triangles,} with all sides being spacelike;
\item \DEF{chorosceles triangles,} with two sides being spacelike and the third being timelike;
\item \DEF{tempolateral triangles,} with all sides being timelike;
\item \DEF{chronosceles triangles,} with two sides being timelike and the third being spacelike;
\item \DEF{lucilateral triangles,} with all sides being lightlike;
\item \DEF{bimetrical chorosceles triangles,} with two sides being spacelike and the third being lightlike;
\item \DEF{photosceles triangles with spacelike base,} with two sides being lightlike and the third being spacelike;
\item \DEF{bimetrical chronosceles triangles,} with two sides being timelike and the third being lightlike;
\item \DEF{photosceles triangles with timelike base,} with two sides being lightlike and the third being timelike; and
\item \DEF{multiple triangles,} with one side of each type.
\end{itemize}
If at least one side of the triangle is empty, then $\Delta$ is called an \DEF{impossible triangle.}
The empty sides are then called \DEF{impossible sides.}}
The names we gave to the different types of proper de Sitter triangles are meant to remind of equilateral and isosceles triangles, respectively. The term ``bimetrical'' refers to the fact that the length of the lightlike segment is zero, so only the two remaining sides of the triangle can be measured in a sensible way.
When we start looking for examples of all these different types, it turns out that lucilateral triangles are always degenerate.
To see this, we choose one vertex $A$ to equal $e_2$. For lightlike segments, we have tangent vectors of the form $X_{AB}=B-A$. These are Minkowski orthogonal to $A$ and lightlike, which leads to $$X_{AB}=C_1\cdot(e_1\pm e_3),\quad X_{AC}=C_2\cdot(e_1\pm e_3),$$ where $C_{1,2}$ are arbitrary constants. If $X_{AB}$ and $X_{AC}$ were not linearly dependent, we would easily obtain that the side $a$ could only be lightlike, if one of the constants $C_{1,2}$ were zero, which cannot hold.
$\Box$
For all of the other types, one can find non-degenerate examples (cf.~fig.~\ref{Abb:Typen}).
\begin{figure}
\caption{Different types of de Sitter triangles: hyperbolic, chorosceles (1st row),
chronosceles, bimetrical chorosceles, photosceles with spacelike base (2nd row),
bimetrical chronosceles, photosceles with timelike base, multiple (3rd row),
tempolateral, contractible and non-contractible spatiolateral (4th row) triangles}
\label{Abb:Typen}
\end{figure}
Note that the last two examples in fig.~\ref{Abb:Typen} that have $U=X:=e_2$, $V=Y:=e_3$, and $W=-Z:=({\frac{1}{7}},{\frac{5}{7}},{\frac{5}{7}})^t$ are both spatiolateral, but do not quite appear to be of the same type. It is useful here to make a distinction between these two types of spatiolateral triangles again: \emph{contractible} triangles on the one hand, and \emph{non-contractible} ones on the other hand.
One can distinguish them by projecting onto $e_2$-$e_3$-plane, where for contractible triangles it is possible to find a line through the origin such that all three points lie on one side of the line, whereas for non-contractible triangles this is impossible. Another way of distinction is to compute the lengths of the sides of the triangle. If they sum up to a number less than $2\pi$, the triangle is contractible, and otherwise it is not. This will be proved in theorem \ref{Satz:gross_klein}.
Let us finally investigate for what types of de Sitter triangles $\{A,B,C\}$ the triangle inequality \[d_{\ensuremath{\mathbb{HS}}}(A,B)+d_{\ensuremath{\mathbb{HS}}}(B,C)\geq d_{\ensuremath{\mathbb{HS}}}(A,C)\] holds. Obviously, this is trivially true for strange triangles, because all of them have at least two strange sides. The inequality also holds for (antipodal) hyperbolic triangles, since $d_H$ is the standard metric and $d_H'$ is derived from it. Photosceles triangles never satisfy the inequality (choose $\ooverline{AC}$ to be the base). Bimetrical and multiple triangles satisfy the inequality if and only if they are isosceles (which, in the case of multiple triangles, is mere chance). For lucilateral triangles, the inequality trivially holds. The same is true for non-contractible spatiolateral triangles, where the inequality follows from the fact that the lengths of the sides sum up to a number greater than $2\pi$, whereas each single side can only have a length less than $\pi$. Impossible triangles that have two sides of infinite length satisfy the triangle inequality, too. For impossible triangles that have only one such side, the inequality obviously does not hold.
There are even impossible triangles having no infinite side (namely those with two antipodal points). For those, we find the triangle inequality to be true. To see that, we start with noticing that they are just one case of degenerate triangles with all their vertices lying on a great ellipse. Parametrizing this great ellipse allows us to write the distance of two vertices simply as a difference of two values for the parameter. Having done this, we easily see the validity of the inequality.
The same method leads to the result, that every degenerate triangle satisfies the triangle inequality.
\satz{If the triangle $\Delta$ is chorosceles or chronosceles,
then the triangle inequality generally does not hold.}
\Beweisenum{We give examples both for triangles that do not satisfy and for those that satisfy the inequality:
\begin{enumerate}
\item $\{A=e_2,B=e_3,C=({\frac{30}{41}\sqrt{2},\frac{59}{82}\sqrt{2},\frac{59}{82}\sqrt{2}})^t\}$
is a chronosceles triangle. The lengths compute to
\[a=b={\ensuremath{\mathrm{arcosh}}}\left(\frac{59}{82}\sqrt{2}\right)\approx0.187;\,c=\frac{\pi}{2}\approx1.571,\]
which shows that the length of the spacelike side $c$ is greater than the sum of the other lengths.
\item For the chronosceles triangle $\{A,B,C\}$ where
\[A=e_2,\quad B=\left({0,\frac{1}{2}\sqrt{3},-\frac{1}{2}}\right)^t,\quad C=({41,29,29})^t,\]
we see that one of the timelike legs ($b$) is of greater length than the other two sides together.
\item There are, however, some chronosceles triangles that satisfy the inequality,
e.g.~$\{A=e_2,\, B=({0,\frac{1}{2}\sqrt{2},\frac{1}{2}\sqrt{2}})^t,\, C=({41,29,29})^t\}$.
\item Now consider the chorosceles triangle $\{A,B,C\}$ given by
\[A=({1,0,\sqrt{2}})^t,\quad B=({-1,0,\sqrt{2}})^t,\quad C=\left({0,\frac{1}{2}\sqrt{3},\frac{1}{2}}\right)^t.\]
Its lengths compute to $a=b=\frac{\pi}{4}\approx0.785$ and $c={\ensuremath{\mathrm{arcosh}}}(3)\approx1.763$,
which gives $a+b<c$.
It is the length of the timelike base that is too great.
\item Another chorosceles triangle, one of whose legs is too great, is the following:
\[\Delta=\left\{A=({1,0,\sqrt{2}})^t,\, B=\left({\frac{20}{21},0,\frac{29}{21}}\right)^t,\, C=\left({0,\cos\left(\frac{6\pi}{25}\right),\sin\left(\frac{6\pi}{25}\right)}\right)^t\right\}.\]
Here, $a>b+c$ holds.
\item Finally, we see that $\{A,B,C\}$ given by
\[A=({1,0,\sqrt{2}})^t,\quad B=e_3,\quad C=\left({0,\frac{1}{2}\sqrt{3},\frac{1}{2}}\right)^t\]
satisfies the triangle inequality.
$\Box$
\end{enumerate}}
\Lemma{\label{lemma:pos_neg_2}\label{bem:pos_neg_3}Every tempolateral triangle has one and only one vertex,
at which the tangent vectors pointing in the directions of the other two vertices
have different signs in their respective first component.}
\Beweis{Let $\Delta=\{A,B,C\}$ be tempolateral.
To show the existence of such a vertex, we assume that the first component of $X_{AB}$ is positive.
Then, by parametrizing the great hyperbola $A$ and $B$ are located on,
we obtain the result that the first component of $X_{BA}$ must be negative.
If $X_{BC}$ has a positive first component, the proof is done.
Thus, let us assume that the first component of $X_{BC}$ is negative.
Then, we find $X_{CB}$ having positive first component.
Again, a negative first component of $X_{CA}$ would finish the proof,
so we assume that $X_{CA}$ has a positive first component.
But now we find $X_{AC}$ having a negative first component,
giving the result that $A$ is a vertex with the desired property.
Analogously one can see that assuming two such vertices implies
that the tangent vectors at the third vertex must also have different signs in their respective first component.
We obtain, thus, that the first component of $X_{AB}$ has the same sign as those of $X_{BC}$ and $X_{CA}$.
Furthermore, we can assume that the first component of $A$ has the same sign, too.
Then we compute
\begin{eqnarray*}
A&=&\cosh(b)C+\sinh(b)X_{{CA}}\\
&=&\cosh(b)(\cosh(a)B+\sinh(a)X_{{BC}})+\sinh(b)X_{{CA}}\\
&=&\cosh(b)(\cosh(a)(\cosh(c)A+\sinh(c)X_{{AB}})+\sinh(a)X_{{BC}})+\sinh(b)X_{{CA}}\\
&=&\underbrace{\cosh(a)\cosh(b)\cosh(c)}_{>1}\cdot{}A+\underbrace{\cosh(a)\cosh(b)\sinh(c)}_{>0}\cdot{}X_{{AB}}\\
&&\qquad+\underbrace{\sinh(a)\cosh(b)}_{>0}\cdot{}X_{{BC}}+\underbrace{\sinh(b)}_{>0}\cdot{}X_{{CA}},
\end{eqnarray*}
noticing that the first component of $A$ has increased (or decreased, depending on its sign),
what cannot happen.
Thus, we have shown the uniqueness of the vertex with the desired property.}
\Satz{\label{satz:Dreiecksungl_zeit} Non-degenerate tempolateral triangles do not satisfy the triangle inequality.}
\Beweis{Let $\{A,B,C\}$ be such a triangle with the first component of $X_{AB}$ and $X_{AC}$ having different signs.
If we apply a Lorentz transformation that maps $X_{AB}$ to $e_1$, we easily see that
\[{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}\geq1,\]
where the equality holds for linearly dependent $X_{AB}$ and $X_{AC}$;
but this would result in a degenerate triangle,
so we obtain
\begin{eqnarray*}
{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}&>&1\\
\sinh(b)\sinh(c){\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}&>&\sinh(b)\sinh(c).
\end{eqnarray*}
Finally, we compute
\begin{eqnarray*}
\cosh(a)&=&{\ensuremath{\langle\!\langle}} B,C{\ensuremath{\rangle\!\rangle}}\\
&=&{\ensuremath{\langle\!\langle}}\cosh(c)A+\sinh(c)X_{AB},\cosh(b)A+\sinh(b)X_{AC}{\ensuremath{\rangle\!\rangle}}\\
&=&\cosh(b)\cosh(c)+\sinh(b)\sinh(c){\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}\\
&>&\cosh(b)\cosh(c)+\sinh(b)\sinh(c)\\
&=&\cosh(b+c),
\end{eqnarray*}
which shows the relation
\[a>b+c,\]
contradicting the triangle inequality.}
\rm What is still left to show is the validity or invalidity of the triangle inequality for contractible spatiolateral triangles.
We will show that the inequality does not hold in this case.
This result, however, can most easily be obtained by using the duality between hyperbolic and de Sitter geometry,
forcing us to postpone the proof to corollary \ref{Folg:gross_klein}.
Actually, the proof for the validity of the triangle inequality for non-contractible spatiolateral triangles
depends on the statement that the sum of lengths is greater than $2\pi$.
Although that is a rather intuitive statement, it is still waiting for its proof,
which will directly precede the corollary just mentioned.
\section{Polar Triangles}
The purpose of this section is to find a connection between hyperbolic triangles and proper de Sitter triangles. Since we primarily want to investigate hyperbolic trigonometry, we have the aim that triangles in $H^2\cup(-H^2)$ possess polar triangles in $S^{1,1}$, even if not all the types of proper de Sitter triangles may occur. The obvious way to provide such a mapping would be to consider the planes defining the sides of hyperbolic triangles, then taking their intersection with de Sitter surface to obtain proper de Sitter geodesics, and finally defining the intersection points of these geodesics as vertices of the polar triangle. Unfortunately, these geodesics won't intersect.
Thus, we simply use the definition of polar triangles in spherical geometry and apply it to our case.
\dfn{The \emph{polar triangle} $\Delta'=\{A',B',C'\}$ of a generalized de Sitter triangle $\Delta=\{A,B,C\}$
is defined by
\[A':=\varepsilon\cdot\frac{B\times C}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}},\quad B':=\varepsilon\cdot\frac{C\times A}{{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}},\quad C':=\varepsilon\cdot\frac{A\times B}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}},\]
where $\varepsilon={\mbox{sign}}(\det(A,B,C))=\pm1$.}
The polar triangle does not exist for every de Sitter triangle, because one of the cross products may happen to be lightlike or zero.
If, on the other hand, $\Delta'$ exists, then it is well-defined, and even the vertices of $\Delta'$ are each well-defined, regardless of the order the vertices of $\Delta$ are given to find the polar triangle. (Making this sure is the primary intent of $\varepsilon$.)
Since the cross product $B\times C$ is orthogonal to ${\mbox{span}}(B,C)$, one may think of vertex $A'$ as being determined (except for the sign) by the side $a$.
It might appear more reasonable to define the vertices of the polar triangle by using Lorentz orthogonality instead of simple Euclidean orthogonality. If we attempted to do so, we could define a new ``Minkowski'' polar triangle $\Delta_{\rm Mink}'$ by $A_{\mathrm{Mink}}'={\ensuremath{\mathbb{J}}}(A'),\,B_{\mathrm{Mink}}'={\ensuremath{\mathbb{J}}}(B'),\,C_{\mathrm{Mink}}'={\ensuremath{\mathbb{J}}}(C');$ but this would only affect the actual position of the polar triangle, whereas the lengths of its sides, its angles, and its type will be preserved. So we stick to our first definition, because it is easier to handle.
\Bem{This definition satisfies our claim that any non-degenerate generalized de Sitter triangle $\Delta\subset H^2\cup(-H^2)$ has a polar triangle $\Delta'\subset S^{1,1}$.}
\Beweis{We know that ${\ensuremath{\mathbb{J}}}(A'){\ensuremath{\mathop{\perp\:\!\!\!\!\!\perp}}} B$. A vector Minkowski orthogonal to a timelike one must be spacelike, so that is the case for ${\ensuremath{\mathbb{J}}}(A')$. Hence, also $A'$ is spacelike and, since it is normalized, lies on $S^{1,1}$.}
\Satz{\label{Satz:Polar_Polar}Let $\Delta=\{A,B,C\}$ be a non-degenerate generalized de Sitter triangle. If the polar triangle $\Delta'=\{A',B',C'\}$ exists, then the polar triangle of $\Delta'$ also exists, and \[(A')'=A,\,(B')'=B,\,(C')'=C\] hold.}
\Beweis{To see that $\Delta'$ is non-degenerate, we compute
\begin{eqnarray*}
{\mbox{sign}}(\det(A',B',C'))&=&{\mbox{sign}}(\det(\varepsilon B\times C,\varepsilon C\times A,\varepsilon A\times B))\\
&=&\varepsilon{\mbox{sign}}(\det(B\times C,C\times A,A\times B))\\
&=&\varepsilon{\mbox{sign}}(\langle(B\times C)\times(C\times A),A\times B\rangle)\\
&=&\varepsilon{\mbox{sign}}(\langle B\times C,A\rangle\langle C\times A,B\rangle-\langle B\times C,B\rangle\langle C\times A,A\rangle)\\
&=&\varepsilon{\mbox{sign}}(\det(A,B,C)^2)\\
&=&\varepsilon\\
&\neq&0.
\end{eqnarray*} From the construction of $A'$ and $B'$ follows $A',B'\perp C$ (Euclidean orthogonality). Furthermore, of course, $A'\times B'\perp A',B'$ holds. Therefore $A'\times B'$ and $C$ are linearly dependent. Particularly, $A'\times B'$ is of the same type as $C$ (either spacelike or timelike) and can thus be normalized. Doing this, we get $(C')'=\pm C=:\delta C$.
Now we have on the one hand the relation
\[{\ensuremath{\langle\!\langle}} A'\times B',(C')'{\ensuremath{\rangle\!\rangle}}=\ensuremath{\left\langle\!\left\langle} A'\times B',\varepsilon\frac{A'\times B'}{{\ensuremath{\lvert\!\lvert\!\lvert}} A'\times B'{\ensuremath{\rvert\!\rvert\!\rvert}}}\ensuremath{\right\rangle\!\right\rangle}=\varepsilon\cdot(\sigma{\ensuremath{\lvert\!\lvert\!\lvert}} A'\times B'{\ensuremath{\rvert\!\rvert\!\rvert}}),\]
where $\sigma=+1$, if $A'\times B'$ is spacelike (and hence $C$ is spacelike, too),
and $\sigma=-1$, if $A'\times B'$ (and hence $C$) is timelike.
On the other hand we have:
\begin{eqnarray*}
{\ensuremath{\langle\!\langle}} A'\times B',(C')'{\ensuremath{\rangle\!\rangle}}&=&\ensuremath{\left\langle\!\left\langle}\frac{B\times C}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}}\times\frac{C\times A}{{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}},\delta C\ensuremath{\right\rangle\!\right\rangle}\\
&=&\frac{\delta}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}}{\ensuremath{\langle\!\langle}} \langle A,B\times C\rangle\cdot C-\underbrace{\langle C,B\times C\rangle}_{=0}\cdot A,C{\ensuremath{\rangle\!\rangle}}\\
&=&\frac{\delta}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}}\det(A,B,C){\ensuremath{\langle\!\langle}} C,C{\ensuremath{\rangle\!\rangle}}\\
&=&\frac{\delta\varepsilon\cdot(\sigma{\ensuremath{\lvert\!\lvert\!\lvert}} C{\ensuremath{\rvert\!\rvert\!\rvert}})}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}}.
\end{eqnarray*} Comparing these equations gives \[\delta=\frac{1}{{\ensuremath{\lvert\!\lvert\!\lvert}} C{\ensuremath{\rvert\!\rvert\!\rvert}}}{\ensuremath{\lvert\!\lvert\!\lvert}} A'\times B'{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}>0,\] thus leading to $\delta=1$ and hence
\[(C')'=C,\] which was to be shown.}
The remaining part of this section deals with the connection between the type of a triangle and the type of its polar triangle.
\Satz{\label{Satz:kein_Polar} Bimetrical, photosceles, multiple, and lucilateral de Sitter triangles
do not have a polar triangle, nor do strange triangles that have two opposite points,
or impossible triangles one of whose impossible sides is contained in a lightlike plane.}
\Beweis{It is rather obvious that triangles with two opposing points have no polar triangles,
for the cross product of those points equals zero.
All the other cases describe a triangle that has at least one side located on a lightlike plane.
The intersection of $S^{1,1}$ with such a plane is a pair of parallel lines, which can be parametrized as
\[\{u\in\ensuremath{\mathbb{R}}^3\ensuremath{\,|\,} u=\pm A+t\cdot X,\,t\in\ensuremath{\mathbb{R}}\},\]
where $A$ is one of the vertices on the lightlike or impossible side, and $X$ is Lorentz orthogonal to $A$.
(In case of a lightlike side, this is a tangent vector).
For a certain $t_0\in\ensuremath{\mathbb{R}}$, we thus have for the other vertex
\[B=\pm A+t_0\cdot X,\]
and we compute
\[A\times B=A\times(\pm A+t_0\cdot X)=t_0\cdot A\times X.\]
If $t_0=0$ (that means $B=-A$) or $A\times X=0$ holds, then it is again obvious that the polar triangle cannot exist.
If, on the other hand, $t_0\cdot A\times X\neq0$ holds, we still know
that ${\ensuremath{\mathbb{J}}}(A\times X)$ is Lorentz orthogonal to $A$ and $X$.
If ${\ensuremath{\mathbb{J}}}(A\times X)\not\in{\mbox{span}}(A,X)$, we would have found a Minkowski orthogonal basis $\{A,X,{\ensuremath{\mathbb{J}}}(A\times X)\}$ of $\ensuremath{\mathbb{R}}^3$, which contains the lightlike vector $X$;
but this is forbidden by lemma \ref{folg:Basis_zeit_2raum}.
So we have ${\ensuremath{\mathbb{J}}}(A\times X)\in{\mbox{span}}(A,X)$.
Because ${\ensuremath{\mathbb{J}}}(A\times X)$ is Lorentz orthogonal to any vector in ${\mbox{span}}(A,X)$,
it is in particular Lorentz orthogonal to itself, meaning lightlike.
Thus, $A\times B$ is also lightlike and therefore cannot be normalized.}
\Lemma{\label{lemma:exist_Polar} The polar triangle of a generalized de Sitter triangle $\Delta$
which does not match one of the categories of theorem \ref{Satz:kein_Polar} always exists.}
\Beweis{The polar triangle exists if and only if the vectors $A\times B$,
$B\times C$, and $C\times A$ can be normalized, i.e.~do not belong to the light cone.
All triangles that contain two opposite points are dealt with in theorem \ref{Satz:kein_Polar} --
strange triangles of that kind were mentioned explicitly,
while two opposite points in proper de Sitter triangles would define an impossible side,
which belongs to a plane of any type, and by this also to a lightlike plane.
Therefore, the case that $A\times B=0$ is excluded.
Furthermore, none of the planes ${\mbox{span}}(A,B)$, ${\mbox{span}}(A,C)$, and ${\mbox{span}}(B,C)$ is lightlike,
so what we have to show is that no lightlike vector is Euclidean orthogonal to a spacelike or timelike plane.
Again, we use the property of ${\ensuremath{\mathbb{J}}}$ to be among the Lorentz transformations
to see that $A\times B$ is lightlike if and only if ${\ensuremath{\mathbb{J}}}(A\times B)$ is lightlike,
and the latter is Lorentz orthogonal to ${\mbox{span}}(A,B)$.
So we restate our aim to show as follows:
There is no lightlike vector Minkowski orthogonal to a spacelike or timelike plane.
Firstly, we notice that a lightlike vector contained in a timelike plane cannot be orthogonal to the same,
because this would mean that there was a basis of that plane consisting of a spacelike and a lightlike vector,
and thus the plane would be lightlike.
So a Minkowski orthogonal basis of the plane ${\mbox{span}}(A,B)$ can be extended by ${\ensuremath{\mathbb{J}}}(A\times B)$
to form a Minkowski orthogonal basis of $\ensuremath{\mathbb{R}}^3$.
By Lemma \ref{folg:Basis_zeit_2raum}, we know that ${\ensuremath{\mathbb{J}}}(A\times B)$ is timelike if ${\mbox{span}}(A,B)$ is spacelike
and vice versa, but never is it lightlike.}
\Satz{The polar triangle of a degenerate de Sitter triangle that does not match one of the categories of theorem~\ref{Satz:kein_Polar}
consists of only the zero vector.
(It has never been stated that the polar triangle, if existing, has to be a de Sitter triangle!)}
\Beweis{By lemma \ref{lemma:exist_Polar}, we know that the polar triangle exists.
We then compute
\[\varepsilon={\mbox{sign}}(\det(A,B,C))={\mbox{sign}}(0)=0\]
and get the ``vertices'' $A'=B'=C'=0$.}
\Satz{\label{Satz:Polar_uneigtl_3raum}Improper de Sitter triangles on $H^2\cup(-H^2)$ without a pair of opposite points
have spatiolateral polar triangles.}
\Beweis{Lemma \ref{lemma:exist_Polar} makes sure that $\Delta'=\{A',B',C'\}$ exists.
Theorem \ref{Satz:Polar_Polar} says that $\Delta$ then is the polar triangle of $\Delta'$.
In the proof of lemma \ref{lemma:exist_Polar} we saw that $A=(A')'$ is only timelike
if $B'$ and $C'$ span a spacelike plane.
Thus, since all of the points $A$, $B$, and $C$ are timelike, $\Delta'$ possesses only spacelike sides.}
\Satz{\label{folg:Polar_3raum_uneigtl}Polar triangles of non-degenerate spatiolateral de Sitter triangles
are subsets of $H^2\cup(-H^2)$.}
\Beweis{The proof is analogous to the previous one.
The only difference lies in the needed direction of an equivalence:
$A'$ is timelike whenever $B$ and $C$ span a spacelike plane.}
\Bem{\label{bem:Polar_gross_hyp}Polar triangles of non-degenerate non-contractible spatiolateral triangles are not strange.
To obtain this, name $A,B,C\in\Delta$ in such a way
that moving on side $c$ from vertex $A$ to vertex $B$,
then further along side $a$ to vertex $C$, and back on side $b$ to vertex $A$,
means surrounding the $e_1$ axis in positive rotational direction.
Since two sides alone will not add up to a full turn, each of the three sides must itself be passed in positive direction.
Therefore, we know that each of the cross products $A\times B$, $B\times C$, and $C\times A$
points in the direction of $e_1$, and thus, when normalized, lies on $H^2$.
When searching for the vertices of the polar triangle,
each of these normalized cross products is multiplied by the same $\varepsilon=\pm1$.
Hence, either all of them lie in $(-H^2)$, or they all stay in $H^2$.
Non-degenerate contractible spatiolateral triangles, on the other hand, all have strange polar triangles.
If we had the sides from $A$ to $B$, from $B$ to $C$ and from $C$ to $A$ all moving in the same rotational direction around the $e_1$ axis,
the triangle would surround this axis and could therefore not be contractible.
Following the argumentation given above about non-contractible triangles,
we see that the polar triangle must have vertices in both $H^2$ and $(-H^2)$.}
\Kor{\label{folg:Polar_hyp_gross}Polar triangles of (antipodal) hyperbolic triangles are non-contractible,
whereas those of strange triangles with vertices in $H^2\cup(-H^2)$ are contractible.}
\Satz{\label{folg:Polar_schenklig_seltsam}Chronosceles and chorosceles triangles both have strange polar triangles.
The same is true for impossible triangles that contain a spacelike side and do not match one of the categories of theorem \ref{Satz:kein_Polar}.}
\Beweis{We already saw in the proof of lemma \ref{lemma:exist_Polar} that spacelike sides in a triangle
correspond to timelike vertices in the polar triangle and vice versa.
The statement about impossible triangles results from the fact
that the vertices of the impossible sides span a timelike plane,
or otherwise the triangle would be mentioned by theorem \ref{Satz:kein_Polar}.}
\Satz{\label{Satz:Polar_3zeit_unmoegl_uu} Any non-degenerate tempolateral triangle
possesses an impossible polar triangle with one timelike side and two impossible sides,
and each of these impossible sides is defined by a timelike plane.
Concerning this kind of impossible triangles, the corresponding polar triangle is either of the same type
or tempolateral.}
\Beweis{The polar triangle of a tempolateral triangle consists of only spacelike vertices.
We compute
\begin{eqnarray*}
{\ensuremath{\langle\!\langle}} A',B'{\ensuremath{\rangle\!\rangle}}&=&\ensuremath{\left\langle\!\left\langle}\varepsilon\frac{B\times C}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}},\varepsilon\frac{C\times A}{{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}}\ensuremath{\right\rangle\!\right\rangle}\\
&=&\underbrace{\varepsilon^2}_{=1}{\ensuremath{\langle\!\langle}} X_{CA},X_{CB}{\ensuremath{\rangle\!\rangle}},
\end{eqnarray*}
and in analogy
\[{\ensuremath{\langle\!\langle}} B',C'{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}},\quad{\ensuremath{\langle\!\langle}} A',C'{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} X_{BA},X_{BC}{\ensuremath{\rangle\!\rangle}}.\]
Let $X_{AB},X_{AC}$ be the pair of tangent vectors at a vertex that have different signs in their first components.
We already noticed in theorem \ref{satz:Dreiecksungl_zeit} that under this condition,
${\ensuremath{\langle\!\langle}} B',C'{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}>1$ holds.
This means that $B'$ and $C'$ span a timelike plane.
(One can easily parametrize a great ellipse or a pair of straight lines to see
that two points on such a de Sitter geodesic have a Minkowski product of absolute value less than or equal to 1.)
Furthermore, $B'$ and $C'$ are on the same branch of the great hyperbola,
because otherwise the Minkowski product would be negative.
Thus, $a'$ is timelike.
By the same argumentation, we have for the other sides
\[{\ensuremath{\langle\!\langle}} A',B'{\ensuremath{\rangle\!\rangle}}<-1 \mbox{ and } {\ensuremath{\langle\!\langle}} A',C'{\ensuremath{\rangle\!\rangle}}<-1,\]
which describes two impossible sides.
Now let $\{A,B,C\}$ be an impossible triangle as described above.
Let $a$ and $b$ be the impossible sides.
That means, $B$ and $C$ span a timelike plane, but lie on different branches of the corresponding great hyperbola,
and the same holds true for $C$ and $A$.
Now, replace $C$ by $-C$.
We have a new triangle $\{\ensuremath{\mathfrak{A}},\ensuremath{\mathfrak{B}},\ensuremath{\mathfrak{C}}\}$ with $\ensuremath{\mathfrak{A}}=A,\,\ensuremath{\mathfrak{B}}=B,\,\ensuremath{\mathfrak{C}}=-C,$ which is timelike.
One can easily verify that $\ensuremath{\mathfrak{A}}'=A'$, $\ensuremath{\mathfrak{B}}'=B'$, and $\ensuremath{\mathfrak{C}}'=-C'$ hold.
If $\ensuremath{\mathfrak{A}}$ is the vertex with different signs in the first components of the tangent vectors,
we have $\ensuremath{\mathfrak{a}}'$ being timelike and $\ensuremath{\mathfrak{b}}',\,\ensuremath{\mathfrak{c}}'$ both being impossible.
Now re-substitute $C'$ for $\ensuremath{\mathfrak{C}}'=-C'$, whence $c'=\ensuremath{\mathfrak{c}}'$ is not affected at all,
$b'$ becomes timelike and $a'$ becomes impossible.
So we got a polar triangle of the same type as the original triangle.
No difference appears if we assume that $\ensuremath{\mathfrak{B}}$ is the vertex with opposing tangent vectors.
If $\ensuremath{\mathfrak{C}}$ is this vertex, $\ensuremath{\mathfrak{c}}'$ is timelike, whereas $\ensuremath{\mathfrak{b}}'$ and $\ensuremath{\mathfrak{a}}'$ are impossible.
Re-substituting $C$ for $\ensuremath{\mathfrak{C}}'$, we do not notice any influence on $c'=\ensuremath{\mathfrak{c}}'$,
but both impossible sides $\ensuremath{\mathfrak{a}}',\ensuremath{\mathfrak{b}}'$ change for timelike sides $a',b'$.
Thus, the polar triangle $\{A',B',C'\}$ in this case is timelike.}
\Satz{\label{Satz:Polar_unmoegl_unmoegl} Let $\Delta$ be an impossible triangle that does not match
one of the categories of theorem \ref{Satz:kein_Polar}.
If $\Delta$ has two timelike sides, the same holds for $\Delta'$.
If all the sides of $\Delta$ are impossible, the same is true for $\Delta'$.}
\Beweis{Let $b$ and $c$ be two timelike sides of $\Delta$, and $a$ be impossible.
If the tangent vectors $X_{AB}$ and $X_{AC}$ had different signs in their respective first component,
then we know from the proof of the triangle inequality for tempolateral triangles (theorem \ref{satz:Dreiecksungl_zeit}), that
\[{\ensuremath{\langle\!\langle}} B,C{\ensuremath{\rangle\!\rangle}}>\cosh(b+c)>1\]
holds.
However, this inequality describes the property of $a$ being timelike and not impossible.
Thus, we have the first components $X_{AB}$ and $X_{AC}$ bearing the same sign,
which gives us an impossible side $a'$, according to the previous proof.
Replacing $C$ with $-C$ gives a new triangle $\{\ensuremath{\mathfrak{A}}=A,\ensuremath{\mathfrak{B}}=B,\ensuremath{\mathfrak{C}}=-C\}$,
that has two timelike sides $\ensuremath{\mathfrak{a}}$ and $\ensuremath{\mathfrak{c}}$, and one impossible side $\ensuremath{\mathfrak{b}}$.
Following our recent thoughts, $\ensuremath{\mathfrak{b}}'$ has to be impossible.
Re-substituting $C'$ for $\ensuremath{\mathfrak{C}}'=-C'$ gives a timelike side $b'$.
By the same argumentation, we have $a'$ being timelike.
Now, if all the sides of $\Delta$ are impossible, we replace $A$ with $-A$ to get the triangle
$\{\ensuremath{\mathfrak{A}}=-A,\ensuremath{\mathfrak{B}}=B,\ensuremath{\mathfrak{C}}=C\}$,
which has two timelike sides $\ensuremath{\mathfrak{b}}$ and $\ensuremath{\mathfrak{c}}$, and still one impossible side $\ensuremath{\mathfrak{a}}$.
As we already know, under these conditions $\ensuremath{\mathfrak{a}}'$ is impossible,
whereas $\ensuremath{\mathfrak{b}}'$ and $\ensuremath{\mathfrak{c}}'$ are timelike.
Re-substituting $A'$ by $\ensuremath{\mathfrak{A}}'=-A'$ then gives three impossible sides $a',$ $b',$ and $c'$.}
\Satz{Finally, the polar triangle of a non-degenerate strange triangle that has a point in $S^{1,1}$
is either of the same type, chorosceles, chronosceles, or impossible with one spacelike side.
In the latter case, the polar triangle does not match one of the categories of theorem \ref{Satz:kein_Polar}.}
\Beweis{Firstly, consider the case that the triangle has a spacelike side $a$, and vertex $A\in H^2\cup(-H^2)$.
Then we have $A'\in H^2\cup(-H^2)$ and $B',C'\in S^{1,1}$,
which means we get a polar triangle that is strange, non-degenerate and no subset of $H^2\cup(-H^2)$.
The other cases result from the theorems \ref{folg:Polar_schenklig_seltsam} and \ref{Satz:Polar_Polar}.}
\section{Trigonometry of ${\ensuremath{\mathbb{HS}}}^2$}
This section gives an application of polar triangles. We investigate the relations between the angles and the lengths of sides in a triangle. To this end, we have to be able to measure these quantities, which is only possible if the sides are spacelike or timelike (to measure their lengths) and if adjacent sides are of same type (to measure the angle between them). For a degenerate triangle, angles do not contain much information, so we restrict our analysis to non-degenerate triangles that are either spatiolateral, tempolateral, or (antipodal) hyperbolic.
We abbreviate the term \emph{law of cosines for sides} by LCS and \emph{law of cosines for angles} by LCA.
All proofs in this section are rather elementary, i.e.\ they contain no ``tricks'' or unexpected transformations.
\subsection*{Laws of Cosines}
\satz[Hyperbolic and Antipodal Hyperbolic LCS]{
Let $\Delta=\{A,B,C\}$ be an (antipodal) hyperbolic triangle.
Denote the sides as usual by $a$, $b$, and $c$,
denote the angle at vertex $A$ by $\varangle(B,A,C)=:\alpha$, at vertex $B$ by $\beta$,
and at vertex $C$ by $\gamma$.
Then,
\[\cosh(a)=\cosh(b)\cosh(c)-\cos(\alpha)\sinh(b)\sinh(c),\]
\[\cosh(b)=\cosh(a)\cosh(c)-\cos(\beta)\sinh(a)\sinh(c), \makebox[0pt][l]{ and}\]
\[\cosh(c)=\cosh(a)\cosh(b)-\cos(\gamma)\sinh(a)\sinh(b)\]
hold.}
\rm This theorem can be found in most works about hyperbolic geometry (see the references section) with proofs of different degrees of difficulty. A proof similar to ours is given by Iversen \cite{Ive}, but he does the computation in a more sophisticated vector space (the $sl_2$ space).
\Beweis{We obtain this result by simply computing \begin{eqnarray*}
\cos(\alpha)&=&{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}\\
&=&\ensuremath{\left\langle\!\left\langle}\frac{B-\cosh(c)A}{\sinh(c)},\frac{C-\cosh(b)A}{\sinh(b)}\ensuremath{\right\rangle\!\right\rangle}, \end{eqnarray*} hence \begin{eqnarray*}
\cos(\alpha)\sinh(b)\sinh(c)&=&{\ensuremath{\langle\!\langle}} B,C{\ensuremath{\rangle\!\rangle}}-\cosh(c)\cdot{\ensuremath{\langle\!\langle}} A,C{\ensuremath{\rangle\!\rangle}}-\cosh(b)\cdot{\ensuremath{\langle\!\langle}} B,A{\ensuremath{\rangle\!\rangle}}\\
&&\qquad+\cosh(b)\cosh(c)\cdot{\ensuremath{\langle\!\langle}} A,A{\ensuremath{\rangle\!\rangle}}\\
&=&-\cosh(a)+\cosh(b)\cosh(c). \end{eqnarray*} Renaming the vertices yields the other equations.}
\satz[LCS in Non-Contractible Spatiolateral Triangles]{
For any non-contractible spatiolateral de Sitter triangle with sides and angles named as previously,
the following equalities hold.
\[\cos(a)=\cos(b)\cos(c)-\cosh(\alpha)\sin(b)\sin(c),\]
\[\cos(b)=\cos(a)\cos(c)-\cosh(\beta)\sin(a)\sin(c),\]
\[\cos(c)=\cos(a)\cos(b)-\cosh(\gamma)\sin(a)\sin(b).\]}
\Beweis{As we already saw in the proof of theorem \ref{Satz:Polar_3zeit_unmoegl_uu}, \[{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} B',C'{\ensuremath{\rangle\!\rangle}}\] holds. This expression is negative, because $B'$ and $C'$ are either both hyperbolic or both antipodal hyperbolic. Thus, we obtain
\[\cosh(\alpha)=\lvert{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}\rvert=-{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}},\] and hence
\[-\cosh(\alpha)\sin(b)\sin(c)=\cos(a)-\cos(b)\cos(c).\]
}
\satz[LCS in Contractible Spatiolateral Triangles]{
Let $\{A,B,C\}$ be a contractible spatiolateral de Sitter triangle.
Name the vertices in such a way that $a'$ is not strange.
Then, the following is true.
\[\cos(a)=\cos(b)\cos(c)-\cosh(\alpha)\sin(b)\sin(c),\]
\[\cos(b)=\cos(a)\cos(c)+\cosh(\beta)\sin(a)\sin(c),\]
\[\cos(c)=\cos(a)\cos(b)+\cosh(\gamma)\sin(a)\sin(b).\]}
The proof is completely analogous to the previous proofs.
\satz[LCS in Tempolateral Triangles]{
Let $\Delta$ be a tempolateral de Sitter triangle
with $X_{AB}$ and $X_{AC}$ having different signs in their respective first component.
Then,
\[\cosh(a)=\cosh(b)\cosh(c)+\cosh(\alpha)\sinh(b)\sinh(c),\]
\[\cosh(b)=\cosh(a)\cosh(c)-\cosh(\beta)\sinh(a)\sinh(c), \makebox[0pt][l]{ and}\]
\[\cosh(c)=\cosh(a)\cosh(b)-\cosh(\gamma)\sinh(a)\sinh(b)\]
hold.}
\Beweis{We have proven in theorem \ref{satz:Dreiecksungl_zeit} that \[{\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}>1>0\] holds. For the other vertices, we have \[{\ensuremath{\langle\!\langle}} X_{BC},X_{BA}{\ensuremath{\rangle\!\rangle}}<0 \quad\mbox{and}\quad {\ensuremath{\langle\!\langle}} X_{CA},X_{CB}{\ensuremath{\rangle\!\rangle}}<0\] instead. What remains is a computation that is again completely analogous to the previous ones.}
\Lemma{\label{Satz:H2_S11} Let $\{A,B,C\}$ be (antipodal) hyperbolic with sides and angles named as usual.
Let $\{A',B',C'\}$ be the corresponding polar triangle with accordingly named sides and angles.
Then we have the following correlation between the sides and angles of these triangles.
\[\alpha=\pi-a',\;\beta=\pi-b',\;\gamma=\pi-c';\]
\[a=\alpha',\;b=\beta',\;c=\gamma'.\]}
\Beweis{We already know that \[\cos(\alpha)={\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}=-{\ensuremath{\langle\!\langle}} B',C'{\ensuremath{\rangle\!\rangle}}=-\cos(a')\] holds, since the polar triangle is spatiolateral according to theorem \ref{Satz:Polar_uneigtl_3raum}. With $\alpha,a'\in[0,\pi]$ we obtain the desired result $a'=\pi-\alpha$, or $\alpha=\pi-a'$. Furthermore, for the polar triangle, which is non-contractible, we have \[-\cosh(\alpha')={\ensuremath{\langle\!\langle}} X',Y'{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} B,C{\ensuremath{\rangle\!\rangle}}=-\cosh(a),\] from which $\alpha'=a$ results.}
\satz[Hyperbolic and Antipodal Hyperbolic LCA]{\label{hyp_Winkelcos}
Let $\{A,B,C\}$ be an (antipodal) hyperbolic triangle with sides and angles named as usual. Then,
\[\cos(\alpha)=-\cos(\beta)\cos(\gamma)+\cosh(a)\sin(\beta)\sin(\gamma),\]
\[\cos(\beta)=-\cos(\alpha)\cos(\gamma)+\cosh(b)\sin(\alpha)\sin(\gamma), \makebox[0pt][l]{ and}\]
\[\cos(\gamma)=-\cos(\alpha)\cos(\beta)+\cosh(c)\sin(\alpha)\sin(\beta)\]
hold.}
This theorem is also mentioned by Anderson \cite{And}, Iversen \cite{Ive}, and Thurston \cite{Thu}, but they all give considerably longer proofs. Although Thurston uses a duality similar to ours, his proof stays rather complex since the hyperbolic laws of cosines are derived by means of Minkowski product matrices.
\Beweis{The polar triangle $\{A',B',C'\}$ is non-contractible spatiolateral.
By the LCS for triangles of this type, we have
\[\cos(a')=\cos(b')\cos(c')-\cosh(\alpha')\sin(b')\sin(c').\]
Lemma \ref{Satz:H2_S11} leads to the desired result.}
\satz[LCA in Non-Contractible Spatiolateral Triangles]{
For any non-contractible spatiolateral de Sitter triangle with the usual labels,
\[\cosh(\alpha)=\cosh(\beta)\cosh(\gamma)+\cos(a)\sinh(\beta)\sinh(\gamma),\]
\[\cosh(\beta)=\cosh(\alpha)\cosh(\gamma)+\cos(b)\sinh(\alpha)\sinh(\gamma), \makebox[0pt][l]{ and}\]
\[\cosh(\gamma)=\cosh(\alpha)\cosh(\beta)+\cos(c)\sinh(\alpha)\sinh(\beta)\]
hold.}
\Beweis{Now, the polar triangle is hyperbolic or antipodal hyperbolic.
From the (antipodal) hyperbolic LCS we know that
\[\cosh(a')=\cosh(b')\cosh(c')-\cos(\alpha')\sinh(b')\sinh(c')\]
holds.
Lemma \ref{Satz:H2_S11}, applied to $\Delta'$, leads to the equations above.}
\satz[LCA in Contractible Spatiolateral Triangles]{\label{klein_Winkelcos}
Let $\{A,B,C\}$ be a contractible spatiolateral de Sitter triangle with side $a'$ of the polar triangle not being strange.
Then the following equations hold.
\[\cosh(\alpha)=\cosh(\beta)\cosh(\gamma)+\cos(a)\sinh(\beta)\sinh(\gamma),\]
\[\cosh(\beta)=\cosh(\alpha)\cosh(\gamma)-\cos(b)\sinh(\alpha)\sinh(\gamma),\]
\[\cosh(\gamma)=\cosh(\alpha)\cosh(\beta)-\cos(c)\sinh(\alpha)\sinh(\beta).\]}
\Beweis{We change the orientation of vertex $A'$ in the polar triangle
to get the hyperbolic or antipodal hyperbolic triangle $\{\ensuremath{\mathfrak{A}}'=-A',\ensuremath{\mathfrak{B}}'=B',\ensuremath{\mathfrak{C}}'=C'\}$.
Now let us look for a relation between the angles of $\{A,B,C\}$ and the sides of $\{\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'\}$:
\[-\cosh(\alpha)={\ensuremath{\langle\!\langle}} X_{AB},X_{AC}{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} B',C'{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}}\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'{\ensuremath{\rangle\!\rangle}}=-\cosh(\ensuremath{\mathfrak{a}}'),\]
\[\cosh(\beta)={\ensuremath{\langle\!\langle}} X_{BA},X_{BC}{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} A',C'{\ensuremath{\rangle\!\rangle}}=-{\ensuremath{\langle\!\langle}}\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{C}}'{\ensuremath{\rangle\!\rangle}}=\cosh(\ensuremath{\mathfrak{b}}'),\]
and in analogy $\cosh(\gamma)=\cosh(\ensuremath{\mathfrak{c}}')$.
That resembles the relations we already know from non-contractible triangles.
But what about the sides of $\{A,B,C\}$ and the angles $\alpha',\beta',\gamma'$ of $\{\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'\}$?
Since we know that $\{\ensuremath{\mathfrak{A}}=-A,\ensuremath{\mathfrak{B}}=B,\ensuremath{\mathfrak{C}}=C\}$ is the polar triangle of $\{\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'\}$,
we can compute
\[\cos(a)={\ensuremath{\langle\!\langle}} B,C{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}}\ensuremath{\mathfrak{B}},\ensuremath{\mathfrak{C}}{\ensuremath{\rangle\!\rangle}}=-{\ensuremath{\langle\!\langle}} X_{\ensuremath{\mathfrak{A}}'\ensuremath{\mathfrak{B}}'},X_{\ensuremath{\mathfrak{A}}'\ensuremath{\mathfrak{C}}'}{\ensuremath{\rangle\!\rangle}}=-\cos(\alpha'),\]
\[\cos(b)={\ensuremath{\langle\!\langle}} A,C{\ensuremath{\rangle\!\rangle}}=-{\ensuremath{\langle\!\langle}}\ensuremath{\mathfrak{A}},\ensuremath{\mathfrak{C}}{\ensuremath{\rangle\!\rangle}}={\ensuremath{\langle\!\langle}} X_{\ensuremath{\mathfrak{B}}'\ensuremath{\mathfrak{A}}'},X_{\ensuremath{\mathfrak{B}}'\ensuremath{\mathfrak{C}}'}{\ensuremath{\rangle\!\rangle}}=\cos(\beta'),\]
and $\cos(c)=\cos(\gamma')$.
The LCS for $\{\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'\}$ yields the desired result.}
\satz[LCA in Tempolateral Triangles]{\label{zeit_Winkelcos}
Let $\Delta$ be tempolateral
with the tangent vectors at $A$ bearing different signs in their respective first component.
Then we have the following relations.
\[\cosh(\alpha)=\cosh(\beta)\cosh(\gamma)+\cosh(a)\sinh(\beta)\sinh(\gamma),\]
\[\cosh(\beta)=\cosh(\alpha)\cosh(\gamma)-\cosh(b)\sinh(\alpha)\sinh(\gamma),\]
\[\cosh(\gamma)=\cosh(\alpha)\cosh(\beta)-\cosh(c)\sinh(\alpha)\sinh(\beta).\]}
\Beweis{We replace the vertex $A'$ of the polar triangle by $-A'$ and get the tempolateral triangle
$\{\ensuremath{\mathfrak{A}}'=-A',\ensuremath{\mathfrak{B}}'=B',\ensuremath{\mathfrak{C}}'=C'\}$ with different signs in the first components of the tangent vectors at $\ensuremath{\mathfrak{A}}'$.
Following the previous proof and applying the $LCS$ to $\{\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'\}$,
we get the desired equations.}
\rm Let us close our investigation of the laws of cosines by considering the case of degenerate triangles. How do our achievements appear now? For the LCS's, we simply have rules of the type \[\cos(a)=\cos(b+c).\] Again, we have proven that the triangle inequality holds for degenerate triangles.
Regarding the LCA's, we notice that the sine terms vanish, whence in most cases we have vacant formulae like this: \[\cos(0)=\cos(0)\cdot\cos(0),\] optionally with hyperbolic cosine. The only more ``interesting'' result appears from the (antipodal) hyperbolic LCA, which gives \[\cos(\alpha)=-\cos(\beta)\cos(\gamma).\] The cosines can only take the values $-1$ or $1$, thus either all angles amount to $\pi$, or one angle amounts to $\pi$ and the other ones vanish. The first case can be eliminated in analogy to the second part of the proof of lemma \ref{lemma:pos_neg_2}.
To conclude, the only case with any mathematical content shows us the hardly astonishing fact that a degenerate (antipodal) hyperbolic triangle has one straight and two zero angles. By now, it should be obvious why we can constrict our investigation to non-degenerate triangles.
\subsection*{Laws of Sines} \enlargethispage{2\baselineskip}
With the concept of polar triangles, we quickly deduce the laws of sines.
\satz[Hyperbolic and Antipodal Hyperbolic Law of Sines]{
For any (antipodal) hyperbolic triangle with labels as usual,
\[\frac{\sin(\alpha)}{\sinh(a)}=\frac{\sin(\beta)}{\sinh(b)}=\frac{\sin(\gamma)}{\sinh(c)}\]
holds.}
\rm This theorem is the subject of most books about hyperbolic geometry (of the literature mentioned in the references section, only Ungar \cite{Ung} does not mention it). We found none of the proofs to be similar to ours.
\Beweis{We know that
\begin{eqnarray*}
\sin(\gamma)&=&\sin(\pi-c')=\sin(c')\\
&=&{\ensuremath{\lvert\!\lvert\!\lvert}} A'\times B'{\ensuremath{\rvert\!\rvert\!\rvert}}\\
&=&\ensuremath{\left\lvert\!\left\lvert\!\left\lvert}\varepsilon\frac{B\times C}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}}\times\left(\varepsilon\frac{C\times A}{{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}}\right)\ensuremath{\right\rvert\!\right\rvert\!\right\rvert}\\
&=&\frac{1}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}}{\ensuremath{\lvert\!\lvert\!\lvert}}\langle B,C\times A\rangle\cdot C-\langle C,C\times A\rangle\cdot B{\ensuremath{\rvert\!\rvert\!\rvert}}\\
&=&\frac{\lvert\det(A,B,C)\rvert}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}},
\end{eqnarray*}
and thus
\[\frac{\sin(\gamma)}{\sinh(c)}=\frac{\lvert\det(A,B,C)\rvert}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}}.\]
This expression is entirely symmetric in $A$, $B$, and $C$,
whence it remains the same for the other fractions
\[\frac{\sin(\alpha)}{\sinh(a)}\mbox{ and }\frac{\sin(\beta)}{\sinh(b)}.\]
}
\satz[Law of Sines in Non-Contractible Spatiolateral Triangles]{
For any non-contractible spatiolateral triangle labeled as usual,
\[\frac{\sinh(\alpha)}{\sin(a)}=\frac{\sinh(\beta)}{\sin(b)}=\frac{\sinh(\gamma)}{\sin(c)}\]
holds.}
\Beweis{This follows from $\{A',B',C'\}$ being (antipodal) hyperbolic and
\[\frac{\sinh(\alpha)}{\sin(a)}=\frac{\sinh(a')}{\sin(\alpha')}.\]
}
\satz[Law of Sines in Contractible Spatiolateral Triangles]{
Let $\{A,B,C\}$ be such a triangle with the side $a'$ of the polar triangle not being strange.
Then the following equation holds.
\[-\frac{\sinh(\alpha)}{\sin(a)}=\frac{\sinh(\beta)}{\sin(b)}=\frac{\sinh(\gamma)}{\sin(c)}.\]}
\Beweis{By changing the orientation of $A'$ we get the (antipodal) hyperbolic triangle $\{\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'\}$
(cf.\ theorem \ref{klein_Winkelcos}).
Now we compute
\begin{eqnarray*}
-\frac{\sinh(\alpha)}{\sin(a)}=-\frac{\sinh(\ensuremath{\mathfrak{a}}')}{-\sin(\alpha')}&=&\frac{\sinh(\ensuremath{\mathfrak{b}}')}{\sin(\beta')}=\frac{\sinh(\beta)}{\sin(b)}\\
&=&\frac{\sinh(\ensuremath{\mathfrak{c}}')}{\sin(\gamma')}=\frac{\sinh(\gamma)}{\sin(c)}.
\end{eqnarray*}
}
\satz[Law of Sines in Tempolateral Triangles]{
Let $\Delta$ be a tempolateral triangle labeled as usual.
Let the first components of the tangent vectors at $A$ have different signs.
Then,
\[-\frac{\sinh(\alpha)}{\sinh(a)}=\frac{\sinh(\beta)}{\sinh(b)}=\frac{\sinh(\gamma)}{\sinh(c)}\]
holds.}
\Beweis{In the proof of theorem \ref{zeit_Winkelcos} we constructed various auxiliary triangles,
to which we gave blackletter labels.
These triangles are still useful for this proof, when we compute
\begin{eqnarray*}
\sinh(\gamma)&=&\sinh(\ensuremath{\mathfrak{c}}')\\
&=&{\ensuremath{\lvert\!\lvert\!\lvert}}\ensuremath{\mathfrak{A}}'\times\ensuremath{\mathfrak{B}}'{\ensuremath{\rvert\!\rvert\!\rvert}}\\
&=&-{\ensuremath{\lvert\!\lvert\!\lvert}} A'\times B'{\ensuremath{\rvert\!\rvert\!\rvert}}\\
&=&-\frac{\lvert\det(A,B,C)\rvert}{{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}},
\end{eqnarray*}
and hence
\[\frac{\sinh(\gamma)}{\sinh(c)}=-\frac{\lvert\det(A,B,C)\rvert}{{\ensuremath{\lvert\!\lvert\!\lvert}} A\times B{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} B\times C{\ensuremath{\rvert\!\rvert\!\rvert}}{\ensuremath{\lvert\!\lvert\!\lvert}} C\times A{\ensuremath{\rvert\!\rvert\!\rvert}}}.\]
The same expression arises for $\frac{\sinh(\beta)}{\sinh(b)}$.
For the third angle, however, we get
\begin{eqnarray*}
\sinh(\alpha)&=&\sinh(\ensuremath{\mathfrak{a}}')\\
&=&{\ensuremath{\lvert\!\lvert\!\lvert}}\ensuremath{\mathfrak{B}}'\times\ensuremath{\mathfrak{C}}'{\ensuremath{\rvert\!\rvert\!\rvert}}\\
&=&{\ensuremath{\lvert\!\lvert\!\lvert}} B'\times C'{\ensuremath{\rvert\!\rvert\!\rvert}},
\end{eqnarray*}
which results in the minus sign appearing in the equation to prove.}
\subsection*{Sums of Angles and Lengths}
\rm Reasonable statements can only be obtained for the sum of angles in (antipodal) hyperbolic triangles and for the sum of lengths in spatiolateral triangles. All the other quantities that could be considered may be infinitely large or infinitely small. At the end of this section, we are finally able to prove the invalidity of the triangle inequality for contractible spatiolateral triangles.
\Satz{\label{Winkelsumme_hyp}The sum of angles in an (antipodal) hyperbolic triangle is less than $\pi$.}
\rm The proof can be found in Iversen \cite{Ive}.
\Beweis{WLOG assume that $\alpha\geq\beta$ holds.
The (antipodal) hyperbolic LCA yields
\begin{eqnarray*}
\cos(\alpha)&=&\cosh(\alpha)\sin(\beta)\sin(\gamma)-\cos(\beta)\cos(\gamma)\\
&>&-\cos(\beta+\gamma)\\
&=&\cos(\lvert\pi-(\beta+\gamma)\rvert).
\end{eqnarray*}
Since $0<\beta+\gamma<2\pi$ holds, we have $\lvert\pi-(\beta+\gamma)\rvert\in[0,\pi)$,
where the cosine function is strictly monotonically decreasing.
Thus we have
\[\alpha<\lvert\pi-(\beta+\gamma)\rvert.\]
The assumption $\alpha\geq\beta$ assures that $\pi-(\beta+\gamma)$ is positive,
whence we get the desired result.}
\Satz{\label{Satz:gross_klein} The lengths of the sides in a non-contractible spatiolateral triangle sum up
to a number greater than $2\pi$.
For contractible spatiolateral triangles, the sum of the lengths of its sides is less than $2\pi$.}
\Beweis{Let firstly the spatiolateral triangle $\{A,B,C\}$ be non-contractible.
Then the polar triangle $\{A',B',C'\}$ is hyperbolic or antipodal hyperbolic.
The relation between the angles of the polar triangle and the sides of the original triangle
which we got by lemma \ref{Satz:H2_S11} tells us
\[\alpha'=\pi-a,\quad\beta'=\pi-b,\quad\gamma'=\pi-c,\]
which together with the previous theorem proves our assertion.
Regarding a contractible spatiolateral triangle, we construct the triangle $\{\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'\}$
according to theorem \ref{klein_Winkelcos}.
This theorem tells us that
\[\cos(a)=-\cos(\alpha'),\]
\[\cos(b)=\cos(\beta'), \makebox[0pt][l]{ and}\]
\[\cos(c)=\cos(\gamma')\]
hold, where $\alpha',\beta',\gamma'$ are the angles of $\{\ensuremath{\mathfrak{A}}',\ensuremath{\mathfrak{B}}',\ensuremath{\mathfrak{C}}'\}$.
Since the possible values for the sides and angles are in the interval $[0,2\pi]$, we have
$$\alpha'=\pi-a,\quad\beta'=b,\mbox{ and } \gamma'=c.$$
Together with theorem \ref{Winkelsumme_hyp}, we get
\begin{eqnarray*}
\pi-a+b+c&<&\pi\\
b+c&<&a<\pi\\
a+(b+c)&<&2\pi.
\end{eqnarray*}
}
\Kor{\label{Folg:gross_klein}For non-degenerate contractible spatiolateral de Sitter triangles,
the triangle inequality does not hold.}
\Beweis{This can be read in the penultimate line of the previous proof.}
\end{document} | arXiv | {
"id": "0810.5303.tex",
"language_detection_score": 0.6925979852676392,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\preprint{AIP/123-QED}
\title{Constant-complexity Stochastic Simulation Algorithm\\with Optimal Binning}
\author{Kevin R. Sanft} \email{kevin@kevinsanft.com} \affiliation{ School of Mathematics, University of Minnesota, Minneapolis, MN 55455, USA }
\author{Hans G. Othmer} \email{othmer@math.umn.edu} \affiliation{ School of Mathematics, University of Minnesota, Minneapolis, MN 55455, USA } \affiliation{ Digital Technology Center, University of Minnesota, Minneapolis, MN 55455, USA }
\date{\today}
\begin{abstract}
At the cellular scale, biochemical processes are governed by random interactions between reactant molecules with small copy counts, leading to behavior that is inherently stochastic. Such systems are often modeled as continuous-time Markov jump processes that can be described by the Chemical Master Equation. Gillespie's Stochastic Simulation Algorithm (SSA) generates exact trajectories of these systems. The amount of computational work required for each step of the original SSA is proportional to the number of reaction channels, leading to computational complexity that scales linearly as the problem size increases. The original SSA is therefore inefficient for large problems, which has prompted the development of several alternative formulations with improved scaling properties. We describe an exact SSA that uses a table data structure with event time binning to achieve constant computational complexity. Optimal algorithm parameters and binning strategies are discussed. We compare the computational efficiency of the algorithm to existing methods and demonstrate excellent scaling for large problems. This method is well suited for generating exact trajectories of large models that can be described by the Reaction-Diffusion Master Equation arising from spatially discretized reaction-diffusion processes.
\end{abstract}
\maketitle
\section{\label{sec:intro}Introduction}
Traditional differential equation models of chemical systems work well when the interacting molecules are present in high concentrations. However, at cellular scales, biochemical reactions occur due to the random interactions of reactant molecules present in small numbers. These processes display behavior that cannot be captured by deterministic approaches. Under certain simplifying assumptions, including spatial homogeneity, these systems can be modeled as continuous-time Markov jump processes. The \textit{Chemical Master Equation} (CME) describes the evolution of the probability of the system being in any state at time $t$ \cite{mcquarrie1967, gillespie_cme}. The invariant distribution and the evolution can be found for certain classes of reaction networks \cite{jahnke2007,lee2012,gadgil2005}. However, the CME is too high dimensional to solve for many realistic models. Gillespie's Stochastic Simulation Algorithm (SSA) generates exact trajectories from the distribution described by the CME \cite{Gillespie76,Gillespie77}. Typically, an ensemble of trajectories is run to estimate the probability distribution.
Consider a well-mixed (spatially homogeneous) biochemical system of $S$ different chemical species with associated populations $N(t) = (n_1(t), n_2(t), ..., n_S(t))$. The population is a random variable that changes via $M$ elementary reaction channels $\left\{R_1, R_2, ...,R_M \right\}$. Each reaction channel $R_j$ has an associated \textit{stoichiometry vector} $\nu_j$ that describes how the population $N$ changes when reaction $R_j$ ``fires". The SSA (and CME) are derived by assuming that each reaction channel is a non-homogeneous Poisson process. The stochastic rate or intensity of reaction $R_j$ is determined by a \textit{propensity function}, denoted $a_j$, defined as \cite{Gillespie76,Gillespie77,gillespie2009}: \begin{align} \label{eq:propensity_fun} a_j(N)dt \equiv & \mbox{ probability that reaction } R_j \\
& \mbox{ will occur in } [t, t+dt).\nonumber \end{align} \noindent The definition in \eqref{eq:propensity_fun} describes a property of an exponential distribution. The time $\tau$ and index $j$ of the next reaction event, given the current state $N$ and time $t$, can therefore be characterized by the joint probability density function \begin{equation} \label{eq:joint_density}
p(\tau, j | N, t) = e^{ - \sum_{i=1}^{M} a_i(N) \tau}a_j(N). \end{equation} \noindent Equation \eqref{eq:joint_density} combined with the fact that the process is Markovian (a consequence of the well-mixed assumption) leads naturally to the SSA.
We will refer to the (exact) ``SSA" as any simulation algorithm that generates trajectories by producing exact samples of $\tau$ and $j$ from the density in \eqref{eq:joint_density}. Note that when other sources describe ``the SSA" or ``the Gillespie algorithm", they are often implicitly referring to the \textit{direct method} variant of the SSA (see Section \ref{sec:direct_method}) \cite{Gillespie76,Gillespie77}. However, many other SSA variants have been proposed that achieve different performance and algorithm scaling properties by utilizing alternate data structures to sample the density function \eqref{eq:joint_density} \cite{NextRxnMethod,composition_rejection, nested_ssa, mauchEfficient, flavor, ramaswamy2009new, yates2013recycling, anderson_2007, hu_2014}. Many of these methods apply random variate generation techniques described in Devroye \cite{devroye1986} to the SSA. Among the most popular of these alternate formulations of the SSA for large problems is the \textit{Next Reaction Method} (NRM) of Gibson and Bruck \cite{NextRxnMethod}. We discuss the NRM and other formulations in detail in later sections. Many authors have also described \textit{approximate} methods that sacrifice exactness in exchange for computational efficiency gains, including the class of \textit{tau-leaping} algorithms and various hybrid approaches \cite{tau, ssSSA, dynamic_partitioning,haseltine_rawlings2002, salis_kaznessis_hybrid_2005, hy3s, yang_stiffness_detection_2011, rem_tau, ferm_j_sci_comput_2008, implicit_tau, rao_arkin}. In this paper, we restrict our attention to exact methods.
An important class of problems in which large models arise comes from spatially-discretized reaction-diffusion processes. The assumption underlying the CME and SSA that the reactant molecules are spatially homogeneous is often not justified in biological settings. One way to relax this assumption is to discretize the system volume into $N_s$ subvolumes and assume that the molecules are well-mixed within each subvolume. Molecules are allowed to move from one subvolume to an adjacent one via diffusive transfer events. This setting can also be described as a Markov jump process and leads to the reaction-diffusion master equation (RDME) \cite{gardiner1976,gillespie2013}. In simulating the RDME, reaction events within each subvolume are simulated with the standard SSA and diffusive transfers are modeled as pseudo-first order ``reactions''. The resulting simulation method is algorithmically equivalent to the SSA (see Gillespie \textit{et al.} \cite{gillespie2013} for an overview). However, the state space and the number of transition channels grow quickly as the number of subvolumes increases. The population of every species must be recorded in every subvolume and the number of transition channels includes all diffusive transfer channels plus each reaction channel in the homogeneous system is effectively duplicated once for each subvolume. In a spatial model, $M$ is the total number of reaction and diffusion channels.
In this paper we present an exact SSA variant that is highly efficient for large problems. The method can be viewed as a variation of the NRM that uses a table data structure in which event times are stored in ``bins". By choosing the bin size relative to the average simulation step size, one can determine an upper bound on the average amount of computational work per simulation step, independent of the number of reaction channels. This constant-complexity NRM is best suited for models where the fastest timescale stays relatively constant throughout the simulation. Optimal performance of the algorithm is sensitive to the binning strategy, which we discuss in detail in Section \ref{sec:optimal_binning}.
The remainder of the paper is organized as follows. In the next section, we discuss the standard direct and NRM methods in more detail. In Section \ref{sec:alternate_formulations}, we demonstrate how alternate methods with various scaling properties can be derived by modifying the standard methods. Section \ref{sec:constant_nrm} presents the constant-complexity NRM algorithm and the optimal binning strategy. The performance and scaling of several methods are presented in Section \ref{sec:numerical_experiments}. Section \ref{sec:conclusions} concludes with a brief summary and discussion.
\section{Standard Methods for Spatially Homogeneous Models} \label{sec:standard_methods}
The majority of exact SSA methods are based on variations of the two standard implementations: the direct method and the NRM. Here we review these standard methods in more detail.
\subsection{Direct Method} \label{sec:direct_method}
The \textit{direct method} variant of the SSA is the most popular implementation of the Gillespie algorithm \cite{Gillespie76,Gillespie77}. In the direct method, the step size $\tau$ is calculated by generating an exponentially distributed random number with rate parameter (or intensity) equal to the sum of the propensities. That is, $\tau$ is chosen from the probability density function \begin{equation} f(\tau) = \left(\sum_{i=1}^{M} a_i(N)\right) e^{- \sum_{i=1}^{M} a_i(N)\tau},\;\; \tau \ge 0. \end{equation} \noindent The index of the next reaction, $j$, is an integer-valued random variable chosen from the probability mass function \begin{equation} \label{eq:j_pmf} f(j) = \frac{a_j}{\sum_{i=1}^{M} a_i(N)}, \;\; j \in \{1, 2, ..., M\}. \end{equation} \noindent The direct method is typically implemented by storing the propensities in a one dimensional data structure (e.g. an array in the C programming language). Generating the reaction index $j$ as described in \eqref{eq:j_pmf} can be implemented by choosing a continuous uniform random number $r$ between $0$ and the propensity sum and choosing $j$ such that \begin{equation} \label{eq:select_j} j = \mbox{ smallest integer satisfying } \sum_{i=1}^{j} a_i(N) > r. \end{equation} \noindent This ``search" to find the index of the next reaction proceeds by iterating over the elements in the array until condition \eqref{eq:select_j} is satisfied. After the time step size and reaction index are selected, the simulation time $t$ and system state $N$ are updated as $t = t + \tau$ and $N = N + \nu_j$. Changing the population $N$ will generally lead to changes in some propensities. These affected propensities are recalculated by evaluating the propensity functions for the new population $N$ and the new propensity values replace the old values in the propensities array data structure. The propensity sum is adjusted accordingly. The algorithm repeats until a termination condition is satisfied. 
Typically, an ensemble of many simulation trajectories is computed. To begin each simulation, the system time is reset to zero and the state vector is set to the initial condition and the algorithm is repeated.
An analysis of the scaling properties for a single step of the direct method, or any SSA variant, can be done by considering the computational work required for the two primary tasks of the algorithm: 1) searching for the next reaction index, and 2) updating the reaction generator data structure (see footnote \label{build_footnote} \cite{footnote1}). If the propensities are stored in no particular order in the linear array, the search will take approximately $M/2$ steps on average. If knowledge of the average magnitudes of the propensities is available, then the average search can be shortened from $M/2$ to some smaller fraction of $M$ by utilizing static or dynamic sorting, which can speed up the simulation \cite{odm,sdm}. In both the sorted and unsorted case, the search step has computational cost that scales as $O(M)$. In updating the propensity array data structure, the following operations occur for each propensity that must be updated: the propensity function is evaluated, the propensity value is updated in the propensity array, and the propensity sum is updated. In a typical implementation, these are all $O(1)$ operations as the amount of computational work does not depend on the problem size. The total cost of the update step therefore depends on how many propensities must be updated when a given reaction occurs. The average cost of the update step will be this number of dependencies for each reaction, weighted by the firing frequencies of the reactions. The total cost of each step of the direct method is therefore $O(M)$ for the search for the next reaction plus an update cost $D$, where $D$ is proportional to the average number of propensity updates required per simulation time step. We assume that $D$ is bounded above by a constant independent of $M$, making the direct method an $O(M)$ algorithm overall. 
The SSA direct method is easy to implement with simple data structures that have low overhead, making it a popular choice that performs well for small problem sizes.
\subsection{Next Reaction Method}
In his original 1976 paper~\cite{Gillespie76}, Gillespie also proposed the \textit{first reaction method}, which involves generating the (tentative) next reaction time for every reaction channel and then finding the smallest value to determine the next event that occurs. Like the direct method, this can be implemented using a simple 1D array data structure. The smallest event time is then found by iterating over all the elements. The direct method and first reaction method both generate exact simulation trajectories and both scale as $O(M)$. However, in practice the direct method tends to be more efficient for standard implementations, primarily because the direct method requires about $M/2$ operations per step whereas the first reaction method requires $M$ operations to find the smallest element.
The NRM is a variation of the first reaction method that uses a binary min-heap to store the next reaction times \cite{NextRxnMethod}. A binary min-heap is a complete binary tree in which each node contains a value that is less than (or equal to) its children. The search for the next reaction is therefore a constant-complexity, or $O(1)$, operation because the next reaction corresponds to the smallest reaction time, which is always located at the top of the heap. However, updating the binary min-heap data structure when a propensity changes is computationally expensive. For each affected propensity, the propensity function has to be evaluated, the reaction time has to be recomputed based on this new propensity, and the binary min-heap must be updated to maintain the heap property. Maintaining the heap structure generally requires $O(\log_2(M))$ operations for each affected event time, implying that the total computational cost of the update step is $D\cdot O(\log_2(M)) = O(\log_2(M))$, where $D$ is again the average number of propensity updates required per step \cite{NextRxnMethod}. In practice, the actual update cost will depend on many factors, including the extent to which the fastest reaction channels are coupled and whether or not the random numbers are ``recycled'' \cite{NextRxnMethod, anderson_2007}. The interested reader should consult Gibson and Bruck \cite{NextRxnMethod} for a detailed analysis. The $O(\log_2(M))$ scaling makes the NRM superior to the direct method for large problems.
\section{Alternative Formulations} \label{sec:alternate_formulations}
There are two main techniques for improving the scaling properties of an exact SSA implementation. The first is to use different data structures, as in the binary tree utilized for the NRM min-heap implementation. The other technique is to split the search problem into smaller subproblems by partitioning the reaction channels into subsets. The two techniques are complementary, and in some cases conceptually equivalent. By utilizing different combinations of data structures and partitioning schemes, it is possible to define an infinite number of alternate SSA formulations with varying performance and scaling properties.
To fix ideas, we consider a simple variation of the direct method. Instead of storing all $M$ propensities in a single array, one could partition the reaction channels into two subsets of size $M/2$ and store them using two arrays. If we label the propensity sums of the two subsets $a_{S_1}$ and $a_{S_2}$, respectively, then the probability that the next reaction will come from subset $i$ is $a_{S_i}/(a_{S_1}+a_{S_2})$. Conditioned on the next reaction coming from subset $i$, the probability that the next reaction is $R_j$ will be $a_j/a_{S_i}$, for all $j$ in subset $i$. These statistical facts follow from the properties of exponential random variables and lead naturally to a simulation algorithm. We first select the subset $i$ from which the next reaction will occur. To do this, we choose a continuous uniform random number $r_1$ between 0 and the propensity sum and then chose $i$ such that \begin{equation} \label{eq:select_subset} i = \mbox{ smallest integer satisfying } \sum_{k=1}^{i} a_{S_k}(N) > r_1. \end{equation} The similarity to \eqref{eq:select_j} is apparent as we are essentially performing the direct method's linear search but applied to subset propensity sums instead of propensities. We have written \eqref{eq:select_subset} in a generic way to accommodate more than two subsets, which we will consider in the next subsection. Once we have determined the subset $i$ from which the next reaction will occur, we can use the direct method's linear search from \eqref{eq:select_j} again to select the reaction $j$ from within subset $i$, but choosing the uniform random number between 0 and $a_{S_i}$ and iterating over only the elements in the array corresponding to subset $i$.
The resulting algorithm can be viewed as a ``2D" version of the direct method. The algorithm requires, on average, a search of depth 1.5 to choose the subset, assuming the reaction channels were partitioned in no particular order. The search within the subset will then require an average search depth of $M/4$ to select the particular reaction within the chosen subset. By partitioning the reaction channels into two subsets, we have derived an algorithm that is statistically equivalent to the direct method but with different scaling properties. The new method has slightly more overhead than the original direct method because we must update the subset propensity sums at each step. It will therefore be less efficient than the original direct method for small problems. However, the $M/4$ scaling will outperform the $M/2$ scaling of the direct method for larger problems. The algorithmic complexity of this variation is still $O(M)$, so the NRM will also outperform this method for sufficiently large problems. Below we show how more sophisticated partitioning and data structures can be used to achieve improved scaling properties.
\subsection{Alternate Direct Formulations} \label{subsec:alternate_direct}
One can expand on the idea above to create other direct method variants. If each of the two subsets is then split into two more subsets, a similar procedure can be applied to derive a method with $M/8$ scaling. If this process is repeated recursively, the resulting partitioning of the reaction channels can be viewed as a binary tree \cite{NextRxnMethod,mauchEfficient,hu_2014}. This \textit{logarithmic direct method} has $O(\log_2(M))$ cost for the search and update steps. If instead of partitioning into two subsets, the reaction channels were partitioned into $\sqrt{M}$ subsets each containing $\sqrt{M}$ channels, the search for the subset and the search within the subset are both $O(\sqrt{M})$ operations, leading to $O(\sqrt{M})$ algorithmic complexity. This is the theoretically optimal ``2D search'' formulation. Similarly, one can define a three-dimensional data structure, leading to a ``3D search'' which scales as $O(\sqrt[3]{M})$, and so on \cite{mauchEfficient,hu_2014}.
Slepoy \textit{et al.} \cite{composition_rejection} proposed a constant-complexity (O(1)) algorithm that uses a clever partitioning strategy combined with rejection sampling. First, the reaction channels are partitioned based on the magnitude of their propensities with partition boundaries corresponding to propensities equal to powers of two. That is, if we number the partitions using (positive and negative) integers, partition $i$ contains all of the reaction channels with propensities in the range $[2^i, 2^{i+1})$. The particular partition $g$ from which the next reaction will occur is chosen exactly via a linear search. The average search depth to select the subset will depend on the range between the largest and smallest nonzero propensity value. This search is assumed to have a small average search depth, independent of $M$. Once the subset $g$ from which the next reaction will occur is determined, a rejection sampling technique is employed to select the particular channel within the subset. An integer random number $r_1$ is chosen between $[1, M_g]$, where $M_g$ is the number of reaction channels in the chosen subset. This tentatively selects the reaction channel $j$ corresponding to the $r_{1}^{th}$ channel in the subset. Then a continuous random number $r_2$ is chosen between $[0, 2^{g+1})$. If $r_2 < a_j(x)$, reaction channel $j$ is accepted as the next event, otherwise it is rejected and new random numbers $r_1$ and $r_2$ are chosen. The process is repeated until a reaction is accepted. This procedure selects the reaction according to the exact probability density function. Since the subsets have been engineered such that all propensities in subset $g$ are within the range $[2^g, 2^{g+1})$, on average less than half of the selected reactions will be rejected. Therefore, it takes fewer than two samples on average to select the next reaction, independent of the number of reaction channels. 
Updating the data structure for each affected propensity is also an amortized constant-complexity operation that requires, in the worst case, removing an element from one partition and inserting it into a different partition, which occurs whenever the change in propensity crosses a power of two boundary. The search and update steps are both constant-complexity, leading to $O(1)$ algorithmic complexity independent of $M$.
\subsection{Next Subvolume Method}
The preceding methods are general formulations that can be applied to any model that can be described by the CME. In this subsection we describe the \textit{next subvolume method} (NSM, not to be confused with the NRM), which is formulated specifically for simulating processes described by the RDME. The NSM is a variation of a 3D search method \cite{elf2004spontaneous} that partitions the channels (reaction and diffusion) based on the spatial structure of the model. The NSM has several desirable properties that make it efficient, and hence popular, for simulating spatial models. The NSM first partitions on subvolumes and uses the NRM (implemented using a binary min-heap) to select the subvolume in which the next event occurs. Within each subvolume, the diffusion channels and reaction channels are stored using two arrays, similar to the two-partition direct method scheme from the beginning of this section. To choose the particular event within the subvolume, the NSM first does a linear search to determine whether the next reaction is diffusion or a reaction, then chooses the particular event channel via a linear search within that partition. Among the favorable properties of the NSM is that organizing the events by subvolume tends to keep the updates local in memory. Partitioning by subvolume ensures that an event affects at most two subvolumes (which occurs when the event is a diffusive transfer); therefore, at most two values in the binary min-heap need to be updated on each step of the algorithm. The NSM scales as $O(\log_2(N_s))$, where $N_s$ is the number of subvolumes. In practice, the NSM performs well on spatial models spanning a wide range of problem sizes.
Recently, Hu \textit{et al.} presented a method in which the NSM search is effectively reversed \cite{hu_2014}. First, the type of event is selected, then the subvolume is chosen using a binary, 2D, or 3D search, leading to algorithmic complexity that is $O(\log_2(N_s))$, $O(\sqrt{N_s})$, or $O(\sqrt[3]{N_s})$, respectively.
\section{Constant-complexity Next Reaction Method} \label{sec:constant_nrm}
The NRM was derived by taking the basic idea of the first reaction method and utilizing a different data structure to locate the smallest event time. The abstract data type for this situation is known as a \textit{priority queue} and in principle any correct priority queue implementation can be used. Here we present an implementation of the next reaction method that uses a table data structure composed of ``bins'' to store the event times for the priority queue. If the propensity sum, and therefore the expected simulation step size, can be bounded above and below, then the average number of operations to select the next reaction and update the priority queue will be bounded above, independent of $M$, resulting in a constant-complexity NRM.
To implement a constant-complexity NRM, we partition the total simulation time interval, from the initial time $t=0$ to final time $t=T_f$, into a table of $K$ bins of width $W$. For now, we will let $WK = T_f$. Bin $B_i$ will contain all event times in the range $[iW, (i+1)W)$. We generate putative event times for all reaction channels as in the original NRM, but insert them into the appropriate bin in the table instead of in a binary min-heap. Events that fall outside of the table range are not stored in the table. Values within a bin are stored in a 1D array (though alternative data structures could be used). To select the next reaction, we must find the smallest element. Therefore, we must find the first nonempty bin (i.e. the smallest $i$ such that $B_i$ is not empty). To locate the first non-empty bin, we begin by considering the bin $i$ from which the previous event was found. If that bin is empty, we repeatedly increment $i$ and check the $i^{th}$ bin until a nonempty bin is found. We then iterate over the elements within that bin to locate the smallest value. The update step of the algorithm requires computing the new propensity value and event time for each affected propensity. In the worst case, the new event time will cause the event to move from one bin to another, which is an $O(1)$ operation. Therefore, the update step will be an $O(D)$ operation, where again $D$ is the average number of propensity updates required per simulation step. We assume that $D$ is bounded above independent of $M$. Therefore, if the propensity sum is bounded above and below independent of $M$, the overall complexity of the algorithm is $O(1)$. In the next section we consider the optimal bin width to minimize the cost to select the next reaction.
\begin{figure}
\caption{Priority queue using binning. The next reaction in a simulation is the event with the smallest event time. Locating the smallest event time requires first locating the smallest non-empty bin, then finding the smallest element within that bin. Elements in the bins contain the event time and the event (reaction) index. Bin boundaries are integers here for simplicity. Elements within a bin are unsorted. Elements with zero propensity or with an event time beyond the last bin are not stored in the queue. In the top figure, the first bin, $B_0$, contains the smallest element as indicated by the arrow. After that event occurs, the search for the next nonempty bin begins at the current bin and, if empty, the bin pointer is incremented until the next nonempty bin is located, as indicated by the arrow under bin $B_2$ in the bottom figure. Here the next event is ``$R_7$". In a real simulation, the update step may cause elements to move from one bin to another.}
\label{fig:fig1}
\end{figure}
\subsection{Optimal Bin Width} \label{sec:optimal_binning}
Storing the elements within a bin using a 1D array is similar to the chaining approach (sometimes referred to as ``closed addressing") to collision resolution in a hash table. A hash table is a data structure in which values are mapped to bins using a \emph{hash function}. Since there are generally more possible values than total bins, collisions, where multiple values are mapped to the same bin, are possible. Chaining is a collision resolution strategy where multiple values that map to the same bin are stored in a linked list. A well-designed hash table typically has amortized constant time insertion and retrieval. (Interested readers should consult an introductory computer science data structures textbook.) An important difference between our data structure and a hash table implementation is that hash tables are typically designed to have a particular \textit{load factor}. The load factor is defined as the number of elements stored in the table divided by the number of bins. For a hash table with a good hash function, a low load factor ensures that each bin will contain a small number of elements, on average. However, we are effectively using the reaction event times as a hash function. Whereas a good hash function distributes elements uniformly amongst the bins, the reaction times are not uniformly distributed, they are \textit{exponentially} distributed. Targeting a particular load factor could lead to good or bad performance depending on the distribution of the propensities, because the key to efficiency is choosing the appropriate bin width relative to the mean simulation step size. If the bin width is too small, there will be many empty bins and if the bin width is too large there will be many elements within each bin.
In considering the search cost and optimal bin width, it is helpful to consider two extreme cases. For the first case, suppose one reaction channel is ``fast" and the rest are slow. For the second case, suppose all reaction channels have equal rates (propensities). For simplicity of analysis we can rescale time so that the propensity sum equals one and we assume $T_f \gg 1$. Then in the first case, if the propensity of the fast channel is much larger than the sum of the slow propensities (i.e. the fast propensity is approximately equal to one), we can choose the bin width to be large on the scale of the fast reaction but small on the scale of the slow reactions. For example, choosing $W \approx 6.64$ (above the $99^{th}$ percentile, $\ln 100 \approx 4.61$, of a unit rate exponential distribution) ensures that the fast reaction will initially be in the first bin with probability $1 - e^{-6.64} \approx 99.9\%$. By assumption, there is a small probability that any of the slow reactions will appear in the first bin. Upon executing the simulation, the fast reaction will repeatedly appear in the first bin and be selected during the next step, until it eventually appears in the second bin (with probability $< 1\%$ of landing beyond the second bin). If it takes on average roughly 6.6 steps before the fast reaction appears in the second bin, then the average search depth to locate the first nonempty bin is about $1 + \frac{1}{6.6} \approx 1.15$ and the average search depth within a bin is approximately one. The ``total search depth" will be approximately $2.15$. The slow reactions contribute a relatively small additional cost in this scenario. If, however, the slow reactions are not negligible, then the fast reaction plays a less important role in the search cost and the situation can be viewed as similar to the second case, which we consider next.
Here we suppose that all reaction channels have equal propensities and the propensity sum equals one. In this case, the number of elements that will initially be placed in the first bin will be approximately Poisson distributed with mean $W$. As the simulation progresses, elements will be removed from the first bin until it is emptied and the simulation will move on to the second bin where the process repeats. If the number of events per bin is Poisson distributed with mean $W$, the average search depth to locate the first nonempty bin is $1/W + 1$ and the average search depth within a bin is $W/2 + 1$. The total search depth is minimized when $W = \sqrt{2}$, leading to a total search depth of $2 + \sqrt{2} \approx 3.41$. That is, it takes an average of about $3.41$ operations to select the next reaction, independent of the size of the model. If the propensity sum does not equal one, this minimum total search depth will be achieved with a bin width of $W = \sqrt{2}\big/\sum_{i=1}^{M} a_i(x)$, i.e. $\sqrt{2}$ times the mean simulation step size.
The theoretical optimal relative bin width $W = \sqrt{2}$ does not minimize the search cost in an actual implementation. Figure \ref{fig:fig2} shows that the search cost is minimized at a bin width much larger than $\sqrt{2}$. One reason for this is that accessing consecutive items within a bin is generally faster than traversing between bins because items within a bin are stored in contiguous blocks of memory. In our experience, a bin width of approximately 16 times the mean simulation step size performs well across a wide range of problem sizes. Widths between 8 and 32 times the step size perform well, making the choice of 16 robust to modest changes in the propensity sum. However, it is possible that the optimal values may vary slightly depending on the system architecture. More importantly, if the propensity sum varies over many orders of magnitude during a simulation, a static bin width may be far from optimal during portions of the simulation.
\begin{figure}
\caption{Search cost for various bin widths. In this example, $M = 10^7$ and every propensity $= 10^{-7}$ (the propensity sum equals one). The theoretical optimal bin width for minimizing the total search depth corresponding to $W = \sqrt{2}$ is shown as a dashed vertical line. In practice, the true optimal bin width is larger than $W = \sqrt{2}$. A bin width of 16 times the mean simulation step size performs well over a wide range of problem sizes. Search cost is measured in seconds for $10^7$ simulation steps.}
\label{fig:fig2}
\end{figure}
\subsection{Optimal Number of Bins and Dynamic Rebuilding}
It is not necessary to choose the number of bins $K$ such that $WK \ge T_f$, where again $T_f$ is the simulation end time. Clearly, choosing $K$ such that $W(K-1) > T_f$ means that the table is larger than necessary, which is inefficient as larger memory use leads to slower simulations. However, it is less obvious that a choice of $K$ such that $WK < T_f$ can lead to improved performance. When $WK < T_f$, the table data structure must be rebuilt when the simulation time exceeds $WK$. A tradeoff exists between choosing $K$ large, which is less efficient because it uses more memory, and choosing $K$ small, which requires more frequent rebuilding of the table data structure. The propensity and reaction time ``update step" also benefits slightly from a smaller table because fewer reaction channels will be stored in the table leading to fewer operations required to move reactions from one bin to another. Figure \ref{fig:fig3} shows the elapsed time to execute simulations for varying bin widths and numbers of bins.
\begin{figure}
\caption{Simulation time for various bin widths and number of bins. In this example, $M = 10^7$ and every propensity $= 10^{-7}$ (the propensity sum equals one). The dashed line corresponds to the theoretical optimal bin width $W = \sqrt{2}$ that minimizes the total search depth. The solid line corresponds to $W = 16$, which is the target bin width used in practice. The algorithm performs well over a fairly wide range of bin widths and number of bins. As the problem size increases, the optimal number of bins increases roughly proportional to $\sqrt{M}$.}
\label{fig:fig3}
\end{figure}
As the problem size increases, the optimal number of bins gets larger due to the increased rebuild cost. We have found that the optimal number of bins scales roughly proportional to the square root of the number of reaction channels. In practice, choosing $K = 20\sqrt{M}$ leads to good performance across a wide range of problem sizes, though the optimal value may vary across different system architectures. In the case where many of the reaction channels have zero propensity, it is more efficient to use the average number of nonzero propensity channels instead of $M$ in computing the number of bins. To facilitate rebuilding the table, we record the number of steps since the last rebuild. This allows for the bin width to be chosen adaptively based on the simulation step sizes used most recently. This adaptive bin sizing strategy partially mitigates the problem of suboptimal bin widths that may arise due to changing propensity sums. Overall, the constant-complexity NRM algorithm with a fixed target relative bin width and dynamic rebuilding strategy exhibits excellent efficiency across a wide range of problem sizes as demonstrated in the next section.
\section{Numerical Experiments} \label{sec:numerical_experiments}
In this section we demonstrate the performance and scaling of the constant-complexity NRM (``NRM (constant)") relative to other popular methods. Among the other methods considered are the constant-complexity direct method (``Direct (constant)" composition-rejection algorithm) of Slepoy \emph{et al.} \cite{composition_rejection}, the original NRM (``NRM (binary)") of Gibson and Bruck \cite{NextRxnMethod}, and the NSM of Elf and Ehrenberg \cite{elf2004spontaneous}. The algorithms were implemented in C++, using code from StochKit2 \cite{StochKit2} and Cain \cite{cain} where possible. Pseudocode that outlines the constant-complexity NRM is given in Appendix A. All timing experiments were conducted on a Macbook Pro laptop with a 2.4 GHz Core i5 processor and 8 GB of memory.
\subsection{Reaction Generator Test}
Most exact SSA variants can be viewed as either a Direct Method or NRM implementation with varying data structures used to select the next reaction. The performance of the reaction generator data structure is the primary determinant of the overall algorithm performance. In this section we test the efficiency of several reaction generator data structures, independent of the rest of the solver code, by simulating the ``arrivals" of a network of $M$ Poisson processes.
As shown in Figure \ref{fig:fig4}, methods utilizing simple data structures with low overhead perform best on small to moderate sized problems. The example in Figure \ref{fig:fig4} used a random network model of unit-rate Poisson processes with a relatively high degree of connectivity (10 updates required for each step; note that the data structure updates were performed as if the propensities were changing, even though they were always set to unit rates.). The original NRM, implemented with a binary min-heap, would perform better relative to the others if fewer updates were required at each step. The constant-complexity NRM exhibits small timing fluctuations due to the method being tuned for much larger problems. In Figure \ref{fig:fig5}, we see that the constant-complexity direct method and constant-complexity NRM method outperform the others on large problems, with the constant NRM performing best. However, we see that the $O(1)$ scaling does not appear constant across large problem sizes. This is due to the effects of using progressively larger amounts of memory. Running the same experiments on a different system architecture could lead to differences in crossing points between methods, but the overall trends should be similar.
\begin{figure}
\caption{Scaling on small problems. Elapsed time in seconds to generate the reaction index and update the data structure $10^7$ times for various reaction generators. Each reaction channel has a unit rate propensity and a random network in which 10 updates are required was generated. For extremely small models, where $M < 100$ or so, the original direct method with linear search performs best. As the problem size increases, the direct method with a 3D search is optimal. Not shown is the direct method with 2D search, which slightly outperforms 3D search when $M < 5000$. The constant-complexity NRM performance exhibits some fluctuations because the implementation was not optimized for small problems.}
\label{fig:fig4}
\end{figure}
\begin{figure}
\caption{Scaling on large problems. Under the same conditions as Figure \ref{fig:fig4} with larger problem sizes, the constant-complexity methods outperform the others. Although the O(1) algorithms scale roughly constant across a wide range of moderate problem sizes, as the problem size becomes large, the increased memory demands lead to imperfect scaling.}
\label{fig:fig5}
\end{figure}
\subsection{3D Spatial Model}
The next subvolume method is a popular method for simulating spatial models. The NSM is different from the other methods considered here in that information about the spatial structure is built in to the algorithm. Therefore, it does not make sense to test the NSM reaction generator independent of a spatial model and full solver implementation.
Here we compare the NSM, the constant-complexity direct method, and the constant-complexity NRM on a model using a 3D geometry comprised of equal sized cubic subvolumes with periodic boundary conditions. The reactions and parameters are shown below \cite{elf2004spontaneous}.
\begin{subequations} \begin{align*} E_A & \xrightarrow{k_1} E_A + A\\ E_B & \xrightarrow{k_1} E_B + B\\ E_A + B & \xrightleftharpoons[k_d]{k_a} E_AB\\ E_AB + B & \xrightleftharpoons[k_d]{k_a} E_AB_2\\ E_B + A & \xrightleftharpoons[k_d]{k_a} E_BA\\ E_BA + A & \xrightleftharpoons[k_d]{k_a} E_BA_2\\ A & \xrightarrow{k_4} \emptyset\\ B & \xrightarrow{k_4} \emptyset. \end{align*} \end{subequations} \begin{subequations} \begin{align*} k_1=150\;s^{-1}, \;k_a & = 46.2\;(\mu M)^{-1}s^{-1}, \\ k_d=3.82\;s^{-1}&, \;k_4=6\;s^{-1}\\ D = 10^{-8} & cm^2s^{-1} \end{align*} \end{subequations} \begin{equation*} [E_A](0) = [E_B](0) = 12.3 \; nM. \end{equation*}
The diffusion constant $D$ is equal for all species. The rate constant for all diffusive transfer events is therefore $D/l^2$, where $l$ is the subvolume side length \cite{elf2004spontaneous}. We note that the first two reaction channels are a common motif used to model processes such as protein production for an activated gene or, in this case, enzyme-catalyzed product formation in the presence of excess substrate, where the rate constant $k_1$ implicitly accounts for the effectively constant substrate population.
It is possible to scale up the number of transition (reaction and diffusion) channels by changing the system volume or changing the subvolume side length. First, we consider a large volume, with domain side length $12 \mu m$ and subvolume side lengths ranging from $0.6 \mu m$ to $0.2 \mu m$, corresponding to a range of $8000$ to $216000$ subvolumes, respectively (see Fig. N1C in the Supplementary Material of Elf and Ehrenberg \cite{elf2004spontaneous}). As shown in Figure \ref{fig:fig6}, the constant-complexity NRM outperforms the NSM and constant-complexity direct method for problems larger than $M=480000$, which corresponds to meshes finer than $20^3=8000$ subvolumes. \begin{figure}
\caption{3D spatial model with system volume $V = 12^3 \mu m^3$. Plotted is the elapsed time in seconds to execute $10^8$ simulation steps. Below $M = 480000$ channels, the NSM is more efficient than the constant-complexity methods. Above $M = 480000$ channels, the constant-complexity NRM is about 20\% faster than the constant-complexity direct method.}
\label{fig:fig6}
\end{figure}
We next consider the same 3D model with system volume $V = 6^3 \mu m^3$. As shown in Figure \ref{fig:fig7}, the constant-complexity NRM still achieves a benefit over the NSM and constant-complexity direct method, albeit a smaller improvement. In this example, there are approximately 28000 molecules in the system after the initial transient. At the finest resolution in Figure \ref{fig:fig7} there are 216000 subvolumes, of which most contain no molecules. Therefore, the majority of the reaction channels are effectively switched off, or inactive, with propensity zero. In this example at the finest resolution, typically fewer than $2\times10^5$ of the nearly $1.3\times10^7$ channels have nonzero propensities. The constant-complexity direct method and the NSM exclude zero propensity reactions from their reaction selection data structures, effectively reducing the problem size to the number of nonzero channels. The constant-complexity NRM does not benefit much from having many zero propensity channels.
Changing the spatial discretization influences the rates of $0^{th}$ order and bimolecular reactions and diffusion events. For instance, using a finer discretization increases the frequency of diffusion events. This means that more simulation steps are required to reach a fixed simulation end time. Changing the relative frequencies of different reaction channels influences simulation performance, though the effect is typically small for all methods. In the NSM, for example, increasing the relative frequency of diffusion events will improve performance slightly if there are fewer diffusion directions (e.g four for a 2D Cartesian mesh) than reaction channels for each subvolume because the average ``search depth" will be weighted more heavily toward the smaller diffusion event search.
\begin{figure}
\caption{3D spatial model with system volume $V = 6^3 \mu m^3$. At this smaller system volume, when the mesh is highly refined, most subvolumes are empty. Hence many of the reaction channels have zero propensity. The constant-complexity direct method and the NSM exclude all events with propensity zero from their data structures. The constant-complexity NRM uses the number of active (nonzero propensity) reactions to determine the number of bins $K$ to use, but this algorithm does not benefit much from having many zero-propensity reaction channels.}
\label{fig:fig7}
\end{figure}
\section{Discussion and Conclusions} \label{sec:conclusions}
We have shown it is possible to formulate a constant-complexity version of the NRM that is efficient for exact simulation of large discrete stochastic models by using event time binning. Rather than targeting a load factor as one would with a hash table, the key to consistent efficiency of the algorithm is to choose the bin width based on the average propensity sum (and, therefore, step size) of the simulation. The examples in Section \ref{sec:numerical_experiments} demonstrate the advantages and some of the disadvantages of the constant-complexity NRM. The algorithm is not well suited for small models. However, for models with a large number of \textit{active} channels and timescales that do not vary too rapidly, the constant-complexity NRM is often more efficient than other popular methods.
For models with many inactive (zero propensity) channels, the performance of some SSA variants depends on the number of active channels rather than the total number of channels. The NSM scales proportional to the logarithm of the number of subvolumes containing active channels. The constant-complexity direct and NRM methods scale $O(1)$ in algorithmic complexity, but their performance does depend on the amount of memory used. Both constant-complexity methods have memory requirements for their reaction generator data structures that scale roughly proportional to the number of active channels. However, the table rebuild step in the constant-complexity NRM method scales as $O(M)$. This typically constitutes a small fraction of the total computational cost (e.g. $<3\%$ for the largest problem in Figure \ref{fig:fig5}). However, in the case of extremely large $M$ and an extremely small number of active channels, the relative cost of rebuilding the table in the constant-complexity NRM becomes more significant. In an extreme case, other methods such as the constant-complexity direct method, NSM, and original NRM may be more efficient.
It may be possible to modify the constant-complexity NRM to make it less sensitive to changes in the average simulation step size and number of active channels. The current dynamic table rebuilding strategy handles this well in many cases. However, in the case of extreme changes one could implement a ``trigger" that initiates a table rebuild if changes in the timescale or number of active channels exceeds a threshold. One could also envision utilizing step size data from previous realizations to guide the binning strategy, possibly even utilizing unequal bin sizes, to further improve performance.
Performance comparisons are inherently implementation, model, and system architecture dependent. While we have attempted to present a fair comparison and the general algorithm analysis is universal, the exact simulation elapsed times may vary in different applications. The constant-complexity NRM presented here is an efficient method in many situations but inappropriate in others. As modelers develop larger and more complex models and as spatial models become more common, this algorithm provides a valuable exact option among the large family of exact and approximate stochastic simulation algorithms.
\begin{acknowledgments} \noindent Research reported in this publication was supported by the National Institute Of General Medical Sciences of the National Institutes of Health under Award Number R01GM029123. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health. \end{acknowledgments}
\appendix \section{Algorithm Pseudocode}
The following pseudocode is representative of an implementation of this algorithm.
% Main loop of the constant-complexity NRM.
% Inputs: x0 (initial state), tFinal (simulation end time).
% Model data: propensities, stoichiometry \nu, dependencyGraph.
% Queue data: table (binned event times), lowerBound, binWidth, bins.
\noindent Model: propensities, $\nu$, dependencyGraph\\ \noindent DataStructure: table, lowerBound, binWidth, bins \begin{algorithmic}[ht] \Procedure{NRM}{x0, tFinal}
\State $t \gets 0$
\State $x \gets x0$
% build the binned event-time table from the initial propensities
\State $buildDataStructure()$
\While{$t < tFinal$}
% next event = channel with the smallest putative event time
\State $event \gets selectReaction()$
\State $t \gets event.time$
% apply the stoichiometric change of the fired channel
\State $x \gets x + \nu(event.index)$
% recompute propensities and event times of all dependent channels
\State $updateDataStructure(event.index)$
\State \% store output as desired
\EndWhile \EndProcedure
% Return the (time, index) pair of the next event: scan forward from the
% current bin to the first nonempty bin, then take its minimum element.
\vskip 1em \Procedure{selectReaction}{}
\State \% return min event time and index
\State \% first, locate bin index of smallest event
\While{$table(minBin).isEmpty()$}
\State $minBin \gets minBin+1$
\State \% bins are indexed 0..bins-1 (minBin is reset to 0 on rebuild),
\State \% so the table is exhausted once minBin reaches bins
\If{$minBin \ge bins$}
\State $buildDataStructure()$
\EndIf
\EndWhile
\State \% smallest event time is in table(minBin)
\State \% find and return smallest event time and index
\State \textbf{return} $min(table(minBin))$ \EndProcedure
% Rebuild the binned table starting at the current time t.
% Bin width targets 16x the mean step size; bin count ~ 20*sqrt(#channels).
\vskip 1em \Procedure{buildDataStructure}{}
\State $lowerBound \gets t$
\State \% default 20*sqrt(ACTIVE channels)
\State $bins \gets 20*sqrt(propensities.size)$
\State \% default 16*step size
\State \% in practice, an approximation to
\State \% sum(propensities) is used
% mean step size is 1/sum(propensities), so binWidth = 16 * mean step
\State $binWidth \gets 16/sum(propensities)$
\For{$i =$1:$propensities.size$}
\State $rate \gets propensities(i)$
% NOTE(review): rate = 0 presumably yields an infinite event time that is
% discarded as out of table range -- confirm against the implementation
\State $r \gets exponential(rate)$
\State $ eventTime(i) \gets t + r$
% events whose time falls beyond the last bin are not stored
\State $table.insert(i,eventTime(i))$
\EndFor
% restart the nonempty-bin search at the first bin
\State $minBin \gets 0$ \EndProcedure
% Recompute the event time of every channel affected by the fired channel
% `index' and move each one to its new bin if necessary.
\vskip 1em \Procedure{updateDataStructure}{index}
\For{$i$ in $dependencyGraph(index)$}
\State \% locate the bin currently holding channel i (its old time,
\State \% not the fired channel's time)
\State $oldTime \gets eventTime(i)$
\State $oldBin \gets ComputeBinIndex(oldTime)$
\State $rate \gets propensities(i)$
\State $r \gets exponential(rate)$
\State $ eventTime(i) \gets t + r$
\State $bin \gets ComputeBinIndex(eventTime(i))$
\If{$bin \ne oldBin$}
\State $table(oldBin).remove(i)$
\State $table.insert(i,eventTime(i))$
\Else
\State \% same bin: refresh the stored time so the entry stays consistent
\State $table(bin).update(i,eventTime(i))$
\EndIf
\EndFor \EndProcedure
% Insert event (i, time) into its bin; events beyond the table range
% (including infinite times from zero-propensity channels) are not stored.
\vskip 1em \Procedure{table.insert}{i, time}
\State $bin \gets ComputeBinIndex(time)$
\If{$bin < bins$}
\State \% insert into array
\State $table(bin).insert(i,time)$
\EndIf \EndProcedure
% Map an event time to its bin index; the table covers the interval
% [lowerBound, lowerBound + binWidth*bins) with bins of equal width.
\vskip 1em \Procedure{ComputeBinIndex}{time}
\State $offset \gets time - lowerBound$
\State $range \gets binWidth*bins$
\State \% equivalent to integer(offset/binWidth)
\State $bin \gets integer(offset/range*bins)$
\State \textbf{return} $bin$ \EndProcedure
\end{algorithmic}
\nocite{*}
\end{document} | arXiv | {
"id": "1503.05832.tex",
"language_detection_score": 0.8679229021072388,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\renewcommand{\abstractname}{} \renewcommand{\refname}{Bibliography} \renewcommand{\proofname}{Proof}
\begin{center} \bf{\textsc{EXAMPLE OF A 6-BY-6 MATRIX WITH DIFFERENT TROPICAL AND KAPRANOV RANKS}} \end{center}
\begin{center} \textsc{yaroslav shitov} \end{center}
\parshape=10 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm \textsc{Abstract.} We provide an example of a $6$-by-$6$ matrix $A$ such that $rk_t(A)=4$, $rk_K(A)=5$. This answers a question asked by M.~Chan, A.~Jensen, and E.~Rubei.
\parshape=10 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm \textsc{Keywords}: matrix theory, tropical semiring, tropical rank, Kapranov rank.
\parshape=10 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm 1.3cm 11.1cm \textit{Mathematics Subject Classification:} 15A03, 15A15.
\section{Introduction}
We work over the \textit{tropical semiring} $(\mathbb{R},\oplus,\otimes)$ whose operations are $$a\oplus b=\min\{a,b\},\mbox{ }a\otimes b=a+b.$$ We consider \textit{tropical matrices}, i.e. matrices over the tropical semiring. There exist many different ways to define the rank of a tropical matrix, see~\cite{AGG,DSS}. We deal with the notions of tropical rank and Kapranov rank, see also~\cite{CJR, KR}.
\begin{defn}\label{perm} We define the \textit{permanent} of a tropical matrix $S\in\mathbb{R}^{n\times n}$ as \begin{equation}\label{def1}perm(S)=\min\limits_{\sigma\in{\cal S}_n} \{s_{1,\sigma(1)}+\ldots+s_{n,\sigma(n)}\}.\end{equation} \end{defn}
\begin{defn}\label{deftropdeg} The matrix $S$ is called \textit{tropically singular} if the minimum in~(\ref{def1}) is attained at least twice. Otherwise, $S$ is called \textit{tropically non-singular}. \end{defn}
\begin{defn}\label{deftrop} The \textit{tropical rank} of a matrix $M\in\mathbb{R}^{p\times q}$ is the largest integer $r$ such that $M$ has a tropically non-singular $r$-by-$r$ submatrix. We denote the tropical rank of $M$ by $rk_t(M)$. \end{defn}
Let $\textbf{K}$ denote the field whose elements are formal sums $$a(t)=\sum_{i=1}^{\infty}a_it^{\alpha_i} \mbox{ such that } a_n\in\mathbb{C}, \alpha_n\in\mathbb{R}, \lim_{n\rightarrow\infty}\alpha_n=+\infty.$$
Let $deg: \textbf{K}^*\rightarrow\mathbb{R}$ be a natural valuation sending $a(t)$ to the least of the exponents $\alpha_i$, i.e. $deg(a)=\min_{n:a_n\neq0}\{\alpha_n\}.$ By definition, assume $deg(0)=\infty$. We say that $B\in\textbf{K}^{m\times n}$ is a \textit{lift} of $T\in\mathbb{R}^{m\times n}$ if $deg(b_{ij})=t_{ij}$ for any $i,j$. The notion of the Kapranov rank of a matrix can be defined in the following way, see~\cite[Corollary 3.4]{DSS}.
\begin{defn}\label{defKap} Let $M\in\mathbb{R}^{m\times n}$. We define the Kapranov rank of $M$ as $$rk_K(M)=\min_{\mathcal K_M}\{rank(\mathcal K_M)\},$$ where the minimum is taken over all lifts of $M$. The expression $rank(\mathcal K_M)$ means the usual rank of a matrix $\mathcal K_M$ over the field $\textbf{K}$. \end{defn}
The notion of Kapranov rank was deeply investigated in~\cite{CJR, DSS, KR}. Develin, Santos, and Sturmfels in~\cite{DSS} show that $rk_K(M)\geqslant rk_t(M)$ for every matrix $M$. The following theorem points out the connection with matroids.
\begin{thr}\cite[Corollary 7.4]{DSS}\label{DSSthr} Let ${\cal M}$ be a matroid which is not representable over $\mathbb{C}$. Then the Kapranov and tropical ranks of the cocircuit matrix ${\cal C}({\cal M})$ are different. \end{thr}
Theorem~\ref{DSSthr} makes it possible to construct examples of matrices with different tropical and Kapranov ranks. The example of a $7$-by-$7$ matrix with different ranks is provided in~\cite{DSS}.
Kim and Roush in~\cite{KR} mostly deal with algorithmical aspects of the Kapranov rank. They prove that determining Kapranov rank of tropical matrices is NP-hard. Also, in~\cite{KR} it was shown that there exist matrices of tropical rank $3$ and arbitrarily high Kapranov rank.
The following theorem was proven in~\cite{CJR}.
\begin{thr}\cite[Corollary 1.5]{CJR}\label{CJRthr} Let $M\in\mathbb{R}^{m\times n}$, $\min\{m,n\}\leqslant5$. Then $rk_K(M)=rk_t(M)$. \end{thr}
Chan, Jensen, and Rubei in~\cite{CJR} point out the connection with the notion of tropical basis. They ask the following question.
\begin{quest}\cite[Question 1.1]{CJR}\label{hy1} For which numbers $d$, $n$, and $r$ do the $(r+1)\times(r+1)$-minors of a d-by-n matrix form a tropical basis? Equivalently, for which $d$, $n$, $r$ does every $d$-by-$n$ matrix of tropical rank at most $r$ have Kapranov rank at most $r$? \end{quest}
In~\cite{CJR} the following conjecture was also made.
\begin{hyp}\cite[Conjecture 1.6]{CJR}\label{hy} The $(r + 1)\times(r + 1)$ minors of a d-by-n matrix are a tropical basis if and only if either $r\leqslant2$ or $r\geqslant\min\{d,n\}-2$. \end{hyp}
Also, in~\cite{CJR} it was asked whether there exists a $6$-by-$6$ matrix with different tropical and Kapranov ranks. We answer this question by providing an example of a $6$-by-$6$ matrix with tropical rank $4$ and Kapranov rank $5$.
Now let us take into account the equivalence given in Question~\ref{hy1}. Our example shows that the 5-by-5 minors of a 6-by-6 matrix are not a tropical basis. Thus we disprove Conjecture~\ref{hy}.
Additionally, we note that the difference between the tropical and Kapranov ranks of our matrix does not have a matroidal nature. Indeed, matroids with at most $6$ elements are all representable over $\mathbb{C}$, see~\cite{BCH}.
\section{The Example}
\begin{ex}\label{6-6} Let $$A=\left( \begin{array}{cccccc} 0 & 0 & 4 & 4 & 4 & 4 \\ 0 & 0 & 2 & 4 & 1 & 4 \\ 4 & 4 & 0 & 0 & 4 & 4 \\ 2 & 4 & 0 & 0 & 2 & 4 \\ 4 & 4 & 4 & 4 & 0 & 0 \\ 2 & 4 & 1 & 4 & 0 & 0 \\ \end{array} \right) .$$ Then $rk_t(A)=4$, $rk_K(A)=5$. \end{ex}
\begin{proof} 1. Note that every $5$-by-$5$ submatrix of $A$ can be written in some of the following forms (up to permutations of rows and columns): $$S'=\left( \begin{array}{cccccc} 0 & s'_{12} & s'_{13} & s'_{14} & s'_{15} \\ s'_{21} & 0 & 0 & s'_{24} & s'_{25} \\ s'_{31} & 0 & 0 & s'_{34} & s'_{35} \\ s'_{41} & s'_{42} & s'_{43} & 0 & 0 \\ s'_{51} & s'_{52} & s'_{53} & 0 & 0 \\ \end{array} \right), \mbox{ } S''=\left( \begin{array}{cccccc} 0 & 4 & 4 & 4 & 4 \\ 0 & x & 4 & y & 4 \\ s''_{31} & 0 & 0 & 4 & 4 \\ s''_{41} & 0 & 0 & z & 4 \\ s''_{51} & s''_{52} & s''_{53} & 0 & 0 \\ \end{array} \right),$$ where $x,y,z\in\{1,2\}$, $s'_{ij},s''_{ij}\in\{1,2,4\}$. By Definition~\ref{perm}, $perm(S')=0$. The minimum in~(\ref{def1}) for $S'$ is given by $id,(23)\in{\cal S}_5$. Analogously, $perm(S'')=y$, the minimum is given by $(24), (243)\in{\cal S}_5$. Thus by Definition~\ref{deftropdeg}, every $5\times 5$-submatrix of $A$ is tropically singular. From Definition~\ref{deftrop} it follows that $rk_t(A)\leqslant4$.
Now consider the $4$-by-$4$ submatrix which is formed by the $1$st, $2$nd, $4$th, and $6$th rows and the $1$st, $4$th, $5$th, and $6$th columns of $A$: $$ \left( \begin{array}{cccccc} 0 & 4 & 4 & 4 \\ 0 & 4 & 1 & 4 \\ 2 & 0 & 2 & 4 \\ 2 & 4 & 0 & 0 \\ \end{array} \right).$$ The minimum in the expression for its permanent is given by the only permutation $(23)\in{\cal S}_4$. Thus by Definition~\ref{deftrop}, $rk_t(A)=4$.
2. Let us consider the matrix $$M_0=\left( \begin{array}{cccccc} 1 & 1 & t^4 & t^4 & t^4 & t^4 \\ -1 & -1 & t^2 & t^4 & t & t^4 \\ t^4 & t^4 & 1-t^2 & 1 & -t^4 & -t^4 \\ t^2 & t^4 & -1-t & -1 & t^2 & -t^4 \\ -t^4 & -t^4 & -t^4 & -t^4 & -1-t^2 & 1 \\ -t^2 & -t^4 & t & -t^4 & 1-t & -1 \\ \end{array} \right)\in\textbf{K}^{6\times6},$$ which is a lift of $A$. The sum of the rows of $M_0$ is the zero row, so that the rank of $M_0$ is at most $5$. Thus by Definition~\ref{defKap}, $rk_K(A)\leqslant 5$.
Now let $H\in\textbf{K}^{6\times6}$ be an arbitrary lift of $A$. It follows directly from definitions that $deg(ab)=deg(a)+deg(b)$, $deg(a+b)\geqslant\min\{deg(a),deg(b)\}$ for any $a,b\in\textbf{K}$. Since $deg(h_{pq})=a_{pq}$ for any $p,q$, we obtain the following expression for the minor $H_{25}$: $$H_{25}=h_{12}h_{34}h_{41}h_{56}h_{63}+h_{12}h_{33}h_{44}h_{56}h_{61}-h_{12}h_{34}h_{43}h_{56}h_{61}+g_1,$$ where $deg(g_1)\geqslant4$. Analogously, the minor $H_{61}$ can be expressed as $$H_{61}=h_{12}h_{25}h_{33}h_{44}h_{56}-h_{12}h_{25}h_{34}h_{43}h_{56}+g_2,\mbox{ }deg(g_2)\geqslant4.$$
We denote $\Delta=h_{33}h_{44}-h_{34}h_{43}$, $\delta=deg(\Delta)$. We obtain $$H_{25}=h_{12}h_{34}h_{41}h_{56}h_{63}+h_{12}\Delta h_{56}h_{61}+g_1,\mbox{ }deg(h_{12}h_{34}h_{41}h_{56}h_{63})=3,$$ \begin{equation}\label{eqex1}deg(h_{12}\Delta h_{56}h_{61})=2+\delta;\end{equation} \begin{equation}\label{eqex2}H_{61}=h_{12}h_{25}\Delta h_{56}+g_2, \mbox{ }deg(h_{12}h_{25}\Delta h_{56})=1+\delta.\end{equation}
It follows from definitions that $deg(v_1+v_2)=\min\{deg(v_1),deg(v_2)\}$ for any $v_1,v_2\in\textbf{K}$ such that $deg(v_1)\neq deg(v_2)$. Thus if $\delta>1$, then from~(\ref{eqex1}) it follows that $deg(H_{25})=3$, i.e. $H_{25}\neq0$. Analogously, if $\delta<1$, then $deg(H_{25})=2+\delta$, i.e. $H_{25}\neq0$. Finally, if $\delta=1$, then from~(\ref{eqex2}) it follows that $deg(H_{61})=2$, i.e. $H_{61}\neq0$. We see that at least one of the minors $H_{25}$ and $H_{61}$ differs from $0$. This shows that the rank of $H$ is at least $5$. By Definition~\ref{defKap}, $rk_K(A)\geqslant5$. The proof is complete. \end{proof}
\begin{thr}\label{genc} The matrix $A$ from Example~\ref{6-6} contains the least number of rows and the least number of columns among tropical matrices $M$ such that $rk_K(M)\neq rk_t(M)$. \end{thr}
\begin{proof} Follows from Theorem~\ref{CJRthr} and Example~\ref{6-6}. \end{proof}
\textsc{Faculty of Algebra, Department of Mathematics and Me\-chanics, Moscow State University, GSP-1, 119991 Moscow, Rus\-sia.}
\textit{E-mail:} \verb"yaroslav-shitov@yandex.ru"
\end{document} | arXiv | {
"id": "1012.5507.tex",
"language_detection_score": 0.5463294386863708,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Deep Fusion: Efficient Network Training via Pre-trained Initializations} \renewcommand{Deep Fusion}{Deep Fusion} \author{Hanna Mazzawi \\ Google Research, NY \\ mazzawi@google.com \And Xavi Gonzalvo \\ Google Research, NY \\ xavigonzalvo@google.com \And Michael Wunder \\ Google Research, NY \\ mwunder@google.com}
\twocolumn[\maketitle]
\begin{abstract} In recent years, deep learning has made remarkable progress in a wide range of domains, with a particularly notable impact on natural language processing tasks. One of the challenges associated with training deep neural networks is the need for large amounts of computational resources and time. In this paper, we present Deep Fusion, an efficient approach to network training that leverages pre-trained initializations of smaller networks.
We show that Deep Fusion accelerates the training process, reduces computational requirements, and leads to improved generalization performance on a variety of NLP tasks and T5 model sizes.
Our experiments demonstrate that Deep Fusion is a practical and effective approach to reduce the training time and resource consumption while maintaining, or even surpassing, the performance of traditional training methods. \end{abstract}
\section{Introduction}
Large language models (LLMs) have significantly advanced the state of the art in various natural language processing (NLP) tasks, including text generation, translation, summarization, and question answering. However, training these models demands substantial amounts of data and computational resources. As a result, there has been a growing interest in developing efficient training methods to address the challenges associated with the high computational costs and energy consumption during the training process \cite{Narayanan21large_scale_llm}.
While some studies \cite{kaplan2020scaling, touvron2023llama, zhou2023lima} discuss that a balance of data and model size is important, it's undeniable that larger models often yield better performance \cite{chowdhery2022palm}. Several experiments and publications have demonstrated that as model size increases, the performance on various natural language processing tasks continues to improve \cite{devlin2018bert, radford2019language, brown2020language}. This trend is evident in the progression of LLMs, such as BERT, \mbox{GPT-2}, \mbox{GPT-3}, and PaLM, where each successive generation is larger and achieves better results across a wide range of benchmarks~\cite{chowdhery2022palm}.
Advancements in large language model (LLM) efficiency have been driven by a variety of innovative techniques that enable faster training or inference without sacrificing performance. One such approach is model compression, which has been shown to reduce LLM size without significant loss in accuracy \cite{Ganesh21compress,zhang2022platon,kwon2022a}. Similarly, adaptive computation time methods have been proposed to dynamically allocate computational resources during LLM training, leading to improved efficiency \cite{graves2016adaptive}. Techniques such as layer-wise adaptive rate scaling (LARS) and layer-wise adaptive rate control (LARC) have demonstrated accelerated convergence in LLMs by adapting learning rates on a per-layer basis \cite{you2017scaling, you2017large}. Moreover, recent studies have explored the potential of mixed-precision training, where lower-precision computation is employed during the training process to speed up training and reduce memory requirements \cite{micikevicius2017mixed}.
On top of that, efficient training distribution is a combination of data and model parallelization. Data parallelization splits the training batch across accelerators (e.g., GPUs), while model parallelization splits the model operations across accelerators so that each accelerator computes part of the model.
While data parallelism alone is typically the easiest to implement, it is not well suited for very large models as it needs the whole model to fit in a single accelerator. Model parallelism can be efficient, but it can be more difficult to implement as the dependency between accelerators' input and outputs can lead to degraded performance.
In our research, we emphasize training efficiency as a primary goal. Unlike the traditional approach concentrating on discovering pruned networks \cite{song15learningboth,frankle2018lottery}, our approach aims to minimize training time by initializing large networks from training smaller ones. We employ fusion operators to combine these smaller networks, promoting wide over-parameterization.
\subsection{Contribution}
As part of the deep fusion method, this paper proposes:
\begin{itemize}
\item A method that focuses on initializing large networks from training smaller networks, and employing fusion operators to combine them. This method promotes wide over-parameterization, which leads to improved efficiency in network training.
\item An effective framework for the utilization of data and model parallelization techniques, as well as the strategic use of accelerator devices to train models of smaller size. This allows our approach to significantly reduce training time while increasing the performance of the resulting networks.
\item A downstream task evaluation with LLMs, demonstrating its effectiveness and efficiency in various scenarios. \end{itemize}
\section{Related Work}\label{related}
In line with the lottery ticket hypothesis \cite{frankle2018lottery, frankle2019lottery}, our work shares the following belief: The most commonly used initialization schemes, primarily discovered heuristically \cite{glorot2010understanding, he2015delving}, are sub-optimal.
While there's some evidence that over-parameterization may not be necessary during training \cite{nakkiran2020deep, belkin2019reconciling}, we still believe over-parameterization and a ``good'' initialization can yield better performance. Thus, we aim to actualize some of the potential that comes from finding a more principled initialization scheme.
From a transfer learning perspective, progressive networks \cite{rusu2016progressive} grow networks to address the problem of forgetting previous tasks. Another approach, deep model consolidation \cite{zhang2020classincremental}, uses a smaller pre-trained model to provide a better initialization for a larger model, which is then fine-tuned on a new task. Network morphism \cite{wei2016network} is another approach that aims to find a larger network by transforming a smaller, pretrained network while preserving the network function during the transformation. This is achieved by expanding the original network with layer-wise operations that preserve input-output behavior.
Similar to our method, staged training \cite{shen2022staged} also focuses on network efficiency. This approach involves defining a growth operator while preserving constraints associated with loss and training dynamics. By gradually expanding the model capacity,
staged training allows for more efficient training. We argue that preserving training dynamics might not be the most effective approach when it comes to fusion. In fact, it could be counterproductive, and exploring high learning rate cycles could offer a preferable alternative. Furthermore, we enhance the fuse operator by developing a more efficient initialization for cross-connections.
\section{Fusion}\label{fusion}
We start by demonstrating our \textsc{Fusion}\ operator on two fully connected layers before expanding to T5 transformers.
A generic neural network is a function $f \colon \mathbb R^d \to \mathbb R^k$ defined with $L$ layers with weights in layer $k \in [L]$ being $w_k$ and biases being $b_k$. That is, for each layer $k$ we calculate \begin{equation} \label{eq:layer_architecture}
a_k = h_k(a_{k-1}) = g_k( a_{k-1}w_k + b_k), \end{equation} where $a_0 = x$ is the input vector, and $g_k$ is the $k$th activation function. In what follows, we will omit $a_k$ when it is clear from context.
The output of the neural network is defined as the composition of the $L$ layers,
\begin{equation}
\label{eq:composition}
f(x) = h_L \circ \ldots \circ h_2 \circ h_1(x). \end{equation}
Our \textsc{Fusion}\ operator $F$ takes two layers from two different models and generates a new layer by composing their weights and biases. The fused layer has two characteristics: \begin{itemize}
\item \textbf{Fusion rule}: the fused layer maintains the same composition or architecture defined in Eq.\ref{eq:layer_architecture}. That is, we do not allow a change in the architecture, but rather a change in the dimensionality of the operations.
\item \textbf{Fusion property}: the fused layer calculates the concatenation of the two original layers that are fused.
\end{itemize} The \textsc{Fusion}\ operator is defined as follows. Given two layers with $d$, $d'$ inputs and $k$, $k'$ outputs,
\begin{align}
F_w & \colon \mathbb R^{d\times k} \times \mathbb R^{d'\times k'} \to \mathbb R^{(d+d')\times(k+k')}, \\
F_b & \colon \mathbb R^{k} \times \mathbb R^{k'} \to \mathbb R^{(k+k')}. \end{align}
The \textsc{Fusion}\ of the weights performed by $F_w$ results in a new matrix where the weights of the layers of the two models are located in the diagonal and the rest is set to zero. Similarly, the new bias is simply the concatenation of the bias of the two layers being fused. So the new fused weight $w^{(f)}$ and new bias $b^{(f)}$ taking the weights of two layers,~$w$,~$w'$, and bias $b$, $b'$, respectively is defined as,
\begin{equation}
w^{(f)} = \begin{pmatrix}
w & \vec{0} \\
\vec{0} & w'
\end{pmatrix}, \quad\quad b^{(f)} = [b, b'],
\label{eq:diagonal} \end{equation}
where $\vec{0}$ is the zero matrix.
The output of the fused layer $k$ is defined as,
\begin{align*}
h_k^{(f)} = & F(h_k,h_k') = g_k(a_{k-1}^{(f)} F_w(w_k,w_k') + F_b(b_k,b_k')) \\\nonumber
= & g_k\left([a_{k-1},a_{k-1}']\begin{pmatrix}
w_k & \vec{0} \\
\vec{0} & w_k'
\end{pmatrix} + [b_k,b_k']\right) \\\nonumber
= & g_k([a_{k-1}w_k+b_k,a_{k-1}'w_k'+b_k'])= [h_k,h_k'].
\end{align*}
This means that the result of the \textsc{Fusion}\ operator on two layers is the concatenation of the outputs, that is $[h_k,h_k']$.
\subsection{Deep Fusion and Self Deep Fusion} For two neural networks $f$ and $f'$ defined as in Eq.~\ref{eq:composition}, the deep fusion of the two models is defined as follows. Denote by,
$ L( f,f') = F(h_{L}, h_{L}') \circ \ldots \circ F(h_1, h_1')([x,x])$,
and by $ \textsc{Avg}(x,y) = (x+y)/2 $ the function that averages two vectors of the same dimension. Then the deep fusion is defined as
\begin{equation*}
DF(f,f') = \textsc{Avg}(L(f,f')). \end{equation*}
Intuitively, the deep fused model is maintaining a concatenation of the hidden representations from models $f$ and $f'$ (fusion property) throughout the network, and taking the average of their logits.
This means that after the deep fusion operation, the function calculated by the model is equivalent to the function of average ensemble of the two models. However, if we continue training the fused model, the extra parameters added by the zero blocks in the example can start leveraging the hidden representation from the cross model, and potentially lead to better performance.
Deep fusion allows the models to be distributed across multiple GPUs while still taking advantage of the strengths of both data parallelism and model parallelism.
Self deep fusion of a model $f$ is defined as deep fusing the model with itself (that is, $DF(f, f)$). It can be thought of as a growth operation that does not change the network's predictions to any given input.
\subsection{Deep Fusing T5 Transformers}
This section describes how to deep fuse two (or more) T5 models \cite{t5paper}, $f$ and $f'$, discussing the particularities of each layer type. Once the fusion is completed, the hidden representation of the newly fused model should be a combination of the two hidden representations from the original models, aligned along the feature dimension axis.
Starting from the bottom layer, the fusion of the embedding layer is trivial. Next, for the multi-head attention, if $f$ has $y$ heads, and $f'$ has $y'$ heads, then, the fused model will have $y~+~y'$ heads.
All projections (query, key, value, attention output) as well as the MLP blocks are treated to prevent leaking information from the wrong hidden representation at initialization.
Note that skip connections and activations are parameter free and do not need further handling. Similarly, the element-wise scaling operation holds a scaling parameter per element in the hidden representation, and thus is trivial to fuse.
Lastly, the fusion of the normalization of the hidden representation between attention and MLP layers proves to be unfeasible. This is due to the fact that it's not possible to uphold the fusion rule and the fusion property simultaneously.
For the normalization layer we either: 1)~Preserve the fusion property but break the fusion rule by normalizing the hidden representations of the sub-models individually and then concatenating them; or 2) keep the fusion rule but violate the fusion property by treating the concatenated hidden representation as a single vector for normalization. It's important to note that the first option requires additional coding beyond parameter initialization, unlike the second option. This dilemma doesn't occur in self deep fusion.
\section{Experiments}\label{experiments}
We begin by training T5 language models on the C4 dataset. The term `deep' will be dropped when context allows.
\subsection{Language Models}
The aim of this experiment is to establish a better initial checkpoint for a large T5 \cite{t5paper} transformer network, referred to as \textsc{T5-Medium}, by using two smaller T5 models, denoted as \textsc{T5-Small}. We present two types of results: fusing two unique small models and fusing one model with itself (self fusion).
We trained the following 4 experiments (see dimensionalities in Table \ref{tab:models_dim1} in Appendix~A): \begin{enumerate}
\item \texttt{baseline}: \textsc{T5-Medium} from random initialization.
\item \texttt{fusion-rule}: \textsc{T5-Medium} trained from fusing the two \textsc{T5-Small} models while maintaining the fusion rule.
\item \texttt{fusion-prop}: \textsc{T5-Medium} trained from fusing the two \textsc{T5-Small} models while maintaining the fusion property.
\item \texttt{self-fusion}: \textsc{T5-Medium} trained from self fusing a \textsc{T5-Small} model. \end{enumerate}
Zero matrices in Eq.~\ref{eq:diagonal} were substituted with blocks initialized randomly with a low variance. Final results are displayed in Table~\ref{performance1} and Figure \ref{accuracy_and_loss} shows the evaluation metric curves throughout the training.
\begin{table}[h] \centering \begin{footnotesize}
\begin{tabular}{c|c|c}
\textbf{Model} & \textbf{Loss @1M} & \textbf{Accuracy @1M} \\
\hline
\texttt{baseline} & 4.66e+4 & 66.65 $\pm$ 0.01 \\
\texttt{fusion-rule} & 4.61e+4 & 66.88 \\
\texttt{fusion-prop} & \textbf{4.53e+4} & \textbf{67.25} $\pm$ 0.03 \\
\texttt{self-fusion} & 4.55e+4 & 67.20 $\pm$ 0.05 \\ \end{tabular} \end{footnotesize} \caption{Performance of different T5-Medium fusion methods at 1 million steps, replicated three times for standard deviation.} \label{performance1} \end{table}
\begin{figure}
\caption{Accuracy and loss on validation data.}
\label{accuracy_and_loss}
\end{figure}
The outcomes of our experiments indicate that while it requires extra code changes to the T5 transformer, upholding the fusion property results in superior performance compared to adhering to the fusion rule. Furthermore, we discovered that self fusion yields comparable performance to standard fusion. Significantly, the \texttt{baseline} required an additional 860K steps to achieve the performance level of self fusion. When employing self fusion, training a \textsc{T5-Medium} {\it resulted in an 18\% reduction in computation time} compared to the \texttt{baseline}. \footnote{\textsc{T5-Small} model training time included.}
\subsection{Fusion in Stages}
We explored staged fusion using T5-S, T5-M, and T5-L architectures (Table \ref{models_dim2}, Appendix B) and tested various fusion settings depicted in Figure~\ref{topology}.
\begin{figure}
\caption{Settings for final T5-L fusion: yellow signifies fused models, white indicates regular training, and links represent fusion (double link signifies self fusion).}
\label{topology}
\end{figure}
Every model (T5-S, T5-M, T5-L) is trained for 1M steps. Table~\ref{topology_table} below presents the performance of the various models.
\begin{table}[h] \centering \begin{footnotesize}
\begin{tabular}{c|c|c} \textbf{Model} & \textbf{Loss @1M steps} & \textbf{Accuracy @1M steps} \\ \hline (1) & 4.04e+4 & 69.89 \\ (2) & 3.93e+4 & 70.45 \\ (3) & 3.9e+4 & 70.56 \\ (4) & \textbf{3.87e+4} & \textbf{70.74} \\ (5) & 3.91e+4 & 70.57 \\ (6) & 3.91e+4 & 70.47 \\ \end{tabular} \end{footnotesize} \caption{Performance of the various ways of fusing T5-L.} \label{topology_table} \end{table}
The results show similar performance between fusion and self fusion (settings (3) and (5)). However, repeated self fusion reduces performance, while multiple regular fusions enhance T5-L performance.
Training a model using a single application of self fusion, setting (5), results in a {\it 20\% reduction in computation time} compared to the standard setting (1).
\subsection{Fine Tuning for Down Stream Tasks} \label{downstream}
We fine-tuned high performing settings from the first experiment together with a baseline on NLP tasks using the GLUE benchmark.
We trained two \textsc{T5-Small} models for 500K steps before fusing and self fusing them to create a \textsc{T5-Medium}. We also trained a standalone \textsc{T5-Medium}. These models were fine-tuned at~0 (\texttt{baseline} without pretraining vs fusion without extra training), 250K, 500K, and 1M steps (\texttt{baseline} only). The GLUE average results are shown in Table \ref{finetune_table} and Figure \ref{glue_fig}. The complete results for each task are presented in Appendix C.
\setlength{\tabcolsep}{5pt} \begin{table}[h] \centering \begin{footnotesize}
\begin{tabular}{c|c|c|c|c}
\textbf{Model / step} & 0 & 250K & 500K & 1M\\
\hline
\texttt{baseline} & 64.07 & 83.33 & 84.35 & 84.74$\pm$0.13 \\
\texttt{fusion-prop} & \textbf{81.40} & \textbf{84.10} & 84.86$\pm$0.13 & - \\
\texttt{self-fusion} & 81.01 & 83.71 & \textbf{84.94}$\pm$0.2 & - \\\hline
\textsc{T5-Small} & - & - & - & 80.28 \end{tabular} \end{footnotesize} \caption{Performance (GLUE average) of the various models on downstream tasks, replicated three times for standard deviation.} \label{finetune_table} \end{table}
\begin{figure}
\caption{Performance (Glue average - an average over many NLP tasks that score between 0 and 100) of the various models.}
\label{glue_fig}
\end{figure}
Our results indicate that enhancing a pretrained model's performance may simply require self-fusion before fine-tuning, without further pretraining. For instance, a \textsc{T5-Small} model, trained for 500K steps, when self-fused and fine-tuned, outperforms the same model trained to 1M steps before fine-tuning (81.01 vs 80.28). It's evident that the extra parameters from self-fusion benefit NLP tasks more than extended pretraining.
Next, the results above also suggest that deep fusion can lead to faster training and better performance, when fine-tuning on downstream NLP tasks. However, while in pretraining, the training curves of fusion and self fusion look similar, we can see that for downstream tasks, fusion maintains higher performance throughout until convergence (here, both models converge to similar performance). \begin{table}[h] \centering \begin{footnotesize}
\begin{tabular}{c|c|c|c|c} \textbf{Model / time} & \textbf{Fusion} & \textbf{Post fusion} & \textbf{Time} & \textbf{GLUE} \\
\hline
\texttt{baseline} & 0 steps & 1M steps &39.2h & 84.74\\
& 0h & 39.2h & & \\ \hline
\texttt{fusion-prop} & 500k steps & 500k steps & 37.9h &84.86 \\
& 2$\times$8h & 21.9h & & \\ \hline
\texttt{self-fusion} & 500k steps & 500k steps & \textbf{29.9h} & \textbf{84.94}\\
& 8h & 21.9h & & \end{tabular} \end{footnotesize} \caption{Compute time in hours (TPU V3 4x4 topology).} \label{hour_performance} \end{table}
The total compute saving is about 24\% TPU time for this configuration as presented in Table \ref{hour_performance}. Even though we trained for less time, the final performance was slightly better than the baseline. \section{Discussion and Conclusion}\label{discussion} In this paper, we present a new technique for improving the training process of large models. Our technique, called deep fusion, combines multiple models into a single model that can be trained more efficiently. We demonstrate how model fusion can be used to reduce the restrictions of distributed training, save on overall compute costs, and improve model performance.
In our experiments we fused models that are trained on the same data and have identical architectures. While fusion has immediate training advantages, further research is needed to understand the implications and possible applications of fusing models trained on different sources and distinct architectures.
For example, it would be interesting to explore if transfer learning occurs when fusing models trained in different domains. Additionally, it would be interesting to understand the characteristics of models that are the fusion of models that differ in dimensionality. For example, one model could be attention-heavy, while another could be MLP-heavy. Finally, it would be interesting to explore model fusion when the models are trained on different sequence lengths. This could also lead to efficiency improvements, as lower-length models train faster.
We believe that model fusion is a promising technique for improving the training process of large models. We hope that our work will inspire further research in this area.
\begin{thebibliography}{10}
\bibitem{belkin2019reconciling} Mikhail Belkin, Daniel Hsu, Siyuan Ma, and Soumik Mandal. \newblock Reconciling modern machine-learning practice and the classical
bias-variance trade-off. \newblock {\em Proceedings of the National Academy of Sciences},
116(32):15849--15854, 2019.
\bibitem{brown2020language} Tom~B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla
Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell,
et~al. \newblock Language models are few-shot learners. \newblock In {\em Advances in Neural Information Processing Systems}, pages
14182--14193, 2020.
\bibitem{chowdhery2022palm} Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra,
Adam Roberts, Paul Barham, Hyung~Won Chung, Charles Sutton, Sebastian
Gehrmann, Parker Schuh, Kensen Shi, Sasha Tsvyashchenko, Joshua Maynez,
Abhishek Rao, Parker Barnes, Yi~Tay, Noam Shazeer, Vinodkumar Prabhakaran,
Emily Reif, Nan Du, Ben Hutchinson, Reiner Pope, James Bradbury, Jacob
Austin, Michael Isard, Guy Gur-Ari, Pengcheng Yin, Toju Duke, Anselm
Levskaya, Sanjay Ghemawat, Sunipa Dev, Henryk Michalewski, Xavier Garcia,
Vedant Misra, Kevin Robinson, Liam Fedus, Denny Zhou, Daphne Ippolito, David
Luan, Hyeontaek Lim, Barret Zoph, Alexander Spiridonov, Ryan Sepassi, David
Dohan, Shivani Agrawal, Mark Omernick, Andrew~M. Dai,
Thanumalayan~Sankaranarayana Pillai, Marie Pellat, Aitor Lewkowycz, Erica
Moreira, Rewon Child, Oleksandr Polozov, Katherine Lee, Zongwei Zhou, Xuezhi
Wang, Brennan Saeta, Mark Diaz, Orhan Firat, Michele Catasta, Jason Wei,
Kathy Meier-Hellstern, Douglas Eck, Jeff Dean, Slav Petrov, and Noah Fiedel. \newblock Palm: Scaling language modeling with pathways, 2022.
\bibitem{devlin2018bert} Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. \newblock Bert: Pre-training of deep bidirectional transformers for language
understanding. \newblock In {\em Proceedings of the 2019 Conference of the North American
Chapter of the Association for Computational Linguistics: Human Language
Technologies}, pages 4171--4186, 2018.
\bibitem{frankle2018lottery} Jonathan Frankle and Michael Carbin. \newblock The lottery ticket hypothesis: Finding sparse, trainable neural
networks. \newblock In {\em Proceedings of the International Conference on Learning
Representations}, 2018.
\bibitem{frankle2019lottery} Jonathan Frankle and Michael Carbin. \newblock The lottery ticket hypothesis at scale. \newblock In {\em arXiv preprint arXiv:1903.01611}, 2019.
\bibitem{Ganesh21compress} Prakhar Ganesh, Yao Chen, Xin Lou, Mohammad~Ali Khan, Yin Yang, Hassan Sajjad,
Preslav Nakov, Deming Chen, and Marianne Winslett. \newblock {Compressing Large-Scale Transformer-Based Models: A Case Study on
BERT}. \newblock {\em Transactions of the Association for Computational Linguistics},
9:1061--1080, 09 2021.
\bibitem{glorot2010understanding} Xavier Glorot and Yoshua Bengio. \newblock Understanding the difficulty of training deep feedforward neural
networks. \newblock In {\em Proceedings of the Thirteenth International Conference on
Artificial Intelligence and Statistics}, pages 249--256, 2010.
\bibitem{graves2016adaptive} Alex Graves. \newblock Adaptive computation time for recurrent neural networks. \newblock In {\em Proceedings of the International Conference on Learning
Representations}, 2016.
\bibitem{gu2021palm} Jiatao Gu, Jianqiang Hu, Tong Zhao, Ying Lin, Xiuying Cheng, Lijun Wang, and
Xiang Wan. \newblock Palm: Pre-training an autoencoding and autoregressive language model
for context-conditioned generation. \newblock In {\em Proceedings of the 2021 Conference on Empirical Methods in
Natural Language Processing}, pages 2643--2662, 2021.
\bibitem{song15learningboth} Song Han, Jeff Pool, John Tran, and William~J. Dally. \newblock Learning both weights and connections for efficient neural networks. \newblock In {\em Proceedings of the 28th International Conference on Neural
Information Processing Systems - Volume 1}, NIPS'15, page 1135–1143,
Cambridge, MA, USA, 2015. MIT Press.
\bibitem{he2015delving} Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. \newblock Delving deep into rectifiers: Surpassing human-level performance on
imagenet classification. \newblock In {\em Proceedings of the IEEE International Conference on Computer
Vision}, pages 1026--1034, 2015.
\bibitem{kaplan2020scaling} Jared Kaplan, Sam McCandlish, Tom Henighan, Tom~B Brown, Benjamin Chess, Rewon
Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. \newblock Scaling laws for neural language models. \newblock {\em arXiv preprint arXiv:2001.08361}, 2020.
\bibitem{kwon2022a} Woosuk Kwon, Sehoon Kim, Michael~W. Mahoney, Joseph Hassoun, Kurt Keutzer, and
Amir Gholami. \newblock A fast post-training pruning framework for transformers. \newblock In Alice~H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho,
editors, {\em Advances in Neural Information Processing Systems}, 2022.
\bibitem{micikevicius2017mixed} Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory Diamos, Erich Elsen,
David Garcia, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh
Venkatesh, et~al. \newblock Mixed precision training. \newblock In {\em Proceedings of the International Conference on Learning
Representations}, 2017.
\bibitem{nakkiran2020deep} Preetum Nakkiran, Gal Kaplun, Yamini Bansal, Tristan Yang, Boaz Barak, and Ilya
Sutskever. \newblock Deep double descent: Where bigger models and more data hurt. \newblock In {\em Proceedings of the International Conference on Learning
Representations}, 2020.
\bibitem{Narayanan21large_scale_llm} Deepak Narayanan, Mohammad Shoeybi, Jared Casper, Patrick LeGresley, Mostofa
Patwary, Vijay Korthikanti, Dmitri Vainbrand, Prethvi Kashinkunti, Julie
Bernauer, Bryan Catanzaro, Amar Phanishayee, and Matei Zaharia. \newblock Efficient large-scale language model training on gpu clusters using
megatron-lm. \newblock In {\em Proceedings of the International Conference for High
Performance Computing, Networking, Storage and Analysis}, SC '21, New York,
NY, USA, 2021. Association for Computing Machinery.
\bibitem{radford2019language} Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. \newblock Language models are unsupervised multitask learners. \newblock In {\em OpenAI Blog}, 2019.
\bibitem{t5paper} Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael
Matena, Yanqi Zhou, Wei Li, and Peter~J. Liu. \newblock Exploring the limits of transfer learning with a unified text-to-text
transformer. \newblock {\em CoRR}, 2019.
\bibitem{rusu2016progressive} Andrei~A Rusu, Neil~C Rabinowitz, Guillaume Desjardins, Hubert Soyer, James
Kirkpatrick, Koray Kavukcuoglu, Razvan Pascanu, and Raia Hadsell. \newblock Progressive neural networks. \newblock In {\em Proceedings of the 30th International Conference on Neural
Information Processing Systems}, pages 2212--2220, 2016.
\bibitem{shen2022staged} Sheng Shen, Pete Walsh, Kurt Keutzer, Jesse Dodge, Matthew Peters, and
Iz~Beltagy. \newblock Staged training for transformer language models. \newblock In Kamalika Chaudhuri, Stefanie Jegelka, Le~Song, Csaba Szepesvari,
Gang Niu, and Sivan Sabato, editors, {\em Proceedings of the 39th
International Conference on Machine Learning}, volume 162 of {\em Proceedings
of Machine Learning Research}, pages 19893--19908. PMLR, 17--23 Jul 2022.
\bibitem{touvron2023llama} Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne
Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro,
Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, and Guillaume
Lample. \newblock Llama: Open and efficient foundation language models, 2023.
\bibitem{wei2016network} Tao Wei, Changhu Wang, Yong Rui, and Chang~Wen Chen. \newblock Network morphism. \newblock In {\em Proceedings of the 33rd International Conference on
International Conference on Machine Learning}, pages 2662--2671, 2016.
\bibitem{you2017large} Yang You, Igor Gitman, and Boris Ginsburg. \newblock Large batch training of convolutional networks. \newblock {\em arXiv preprint arXiv:1708.03888}, 2017.
\bibitem{you2017scaling} Yang You, Igor Gitman, and Boris Ginsburg. \newblock Scaling sgd batch size to 32k for imagenet training. \newblock In {\em Deep Learning Scaling is Predictable, Empirically}, page~13,
2017.
\bibitem{zhang2020classincremental} Junting Zhang, Jie Zhang, Shalini Ghosh, Dawei Li, Serafettin Tasci, Larry
Heck, Heming Zhang, and C.~C.~Jay Kuo. \newblock Class-incremental learning via deep model consolidation, 2020.
\bibitem{zhang2022platon} Qingru Zhang, Simiao Zuo, Chen Liang, Alexander Bukharin, Pengcheng He, Weizhu
Chen, and Tuo Zhao. \newblock Platon: Pruning large transformer models with upper confidence bound
of weight importance, 2022.
\bibitem{zhou2023lima} Chunting Zhou, Pengfei Liu, Puxin Xu, Srini Iyer, Jiao Sun, Yuning Mao, Xuezhe
Ma, Avia Efrat, Ping Yu, Lili Yu, Susan Zhang, Gargi Ghosh, Mike Lewis, Luke
Zettlemoyer, and Omer Levy. \newblock Lima: Less is more for alignment, 2023.
\end{thebibliography}
\begin{table*}[!t] \centering \begin{tiny}
\begin{tabularx}{\textwidth}{c|X|X|X|X|X|X|X|X|X|X|X|X|X} Model & Glue avg & COLA Matthew's & SST acc & MRPC f1 & MRPC acc & STS-b pearson & STS-b \mbox{spearman} & qqp acc & qqp f1 & MNLI-m & MNLI-mm & QNLI & RTE\\ \hline baseline 1m & 84.74 & 54.18 & 94.38 & 93.17 & 90.69 & 89.83 & 89.75 & 91.94 & 89.13 & 86.77 & 86.6 & 92.13 & 78.34 \\ baseline 1m & 84.45 & 52.95 & 93.92 & 93.12 & 90.44 & 89.49 & 89.35 & 91.94 & 89.14 & 86.94 & 86.58 & 92.26 & 77.98 \\ baseline 1m & 84.69 & 53.15 & 93.81 & 92.15 & 88.97 & 89.06 & 88.93 & 92.04 & 89.24 & 86.55 & 86.21 & 91.69 & 82.31\\ \hline Fusion 500k & 84.98 & 53.97 & 94.27 & 92.91 & 90.2 & 89.68 & 89.49 & 92.04 & 89.36 & 86.6 & 86.43 & 92.39 & 80.87 \\ Fusion 500k & 84.68 & 54.46 & 93.81 & 91.8 & 88.97 & 89.44 & 89.28 & 91.95 & 89.13 & 86.67 & 86.65 & 92.11 & 80.14 \\ Fusion 500k & 84.92 & 53.94 & 94.5 & 92.73 & 89.71 & 90.62 & 90.44 & 92.01 & 89.23 & 86.64 & 86.45 & 91.56 & 80.51 \\ \hline Self fusion 500K & 84.69 & 54.58 & 93.12 & 92.03 & 89.22 & 89.23 & 89.16 & 91.81 & 89.01 & 86.64 & 86.68 & 92.11 & 80.87 \\ Self fusion 500K & 85.19 & 55.82 & 93.69 & 93.1 & 90.44 & 89.9 & 89.73 & 92.04 & 89.34 & 86.8 & 86.31 & 92.31 & 80.87 \\ Self fusion 500K & 84.95 & 58 & 94.27 & 92.36 & 89.71 & 89.93 & 89.7 & 91.96 & 89.18 & 86.47 & 86.4 & 91.93 & 77.62 \\ \hline \end{tabularx} \end{tiny} \caption{Performance (Glue tasks) of the various models on downstream tasks.} \label{tbl:full_glue} \end{table*}
\section*{Appendix A} In this appendix, we list the dimensions of the T5 transformers used in the first experiment.
\begin{table}[h] \centering \begin{footnotesize}
\begin{tabular}{c|c|c} Model Name & T5-Small & T5-Medium \\ \hline embedding dim & 512 & 1024 \\ number of heads & 6 & 12 \\ enc./dec. layers & 8 & 8 \\ head dim & 64 & 64 \\ mlp dimension & 1024 & 2048 \\ \hline number of parameters & 77M & 242M \end{tabular} \end{footnotesize} \caption{Dimensions of T5 Small and Medium.} \label{tab:models_dim1} \end{table}
\section*{Appendix B} In this appendix, we list the dimensions of the T5 transformers used in the second experiment.
\begin{table}[!h] \centering \begin{footnotesize}
\begin{tabular}{c|c|c|c} Model Name & T5-S & T5-M & T5-L \\ \hline embedding dim & 512 & 1024 & 2048 \\ number of heads & 6 & 12 & 24 \\ enc./dec. layers & 8 & 8 & 8\\ head dim & 128 & 128 & 128 \\ mlp dimension & 1024 & 2048 & 4096 \\ \hline number of parameters & 95M & 317M & 1.1B \end{tabular} \end{footnotesize} \caption{Dimensions of T5-S, T5-M and T5-L.} \label{models_dim2} \end{table}
\section*{Appendix C}
In this appendix, we list the full results (see Table~\ref{tbl:full_glue}) of the downstream evaluation on various GLUE tasks. The average is calculated over all tasks. For tasks with more than one metric, we average the metrics and then average over the tasks.
\end{document} | arXiv | {
"id": "2306.11903.tex",
"language_detection_score": 0.7638468146324158,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Entanglement generation resonances in XY chains} \author{R. Rossignoli, C.T. Schmiegelow} \affiliation{Departamento de F\'{\i}sica-IFLP, Universidad Nacional de La Plata, C.C.67, La Plata (1900), Argentina} \begin{abstract} We examine the maximum entanglement reached by an initially fully aligned state evolving in an $XY$ Heisenberg spin chain placed in a uniform transverse magnetic field. Both the global entanglement between one qubit and the rest of the chain and the pairwise entanglement between adjacent qubits are analyzed. It is shown that in both cases the maximum is not a monotonically decreasing function of the aligning field, exhibiting instead a resonant behavior for low anisotropies, with pronounced peaks (a total of $[n/2]$ peaks in the global entanglement for an $n$-spin chain), whose width is proportional to the anisotropy and whose height remains finite in the limit of small anisotropy. It is also seen that the maximum pairwise entanglement is not a smooth function of the field even in small finite chains, where it may exhibit narrow peaks above strict plateaus. Explicit analytical results for small chains, as well as general exact results for finite $n$-spin chains obtained through the Jordan-Wigner mapping, are discussed.
\pacs{03.67.Mn, 03.65.Ud, 75.10.Jm} \end{abstract} \maketitle
\section{Introduction} Quantum entanglement has been long recognized as one of the most fundamental and intriguing features of quantum mechanics \cite{S.35}. It denotes the ability of composite quantum systems to develop correlations which have no classical counterpart. Interest on all aspects of entanglement has grown enormously since its potential for permitting radically new forms of information transmission and processing was unveiled \cite{Be.93,Ek.91,Di.95,Be.00}, being now considered an essential {\it resource} in the field of quantum information science \cite{NC.00}, where rigorous entanglement measures have been introduced \cite{Be.96,W.98}. The interest has also extended to other areas like condensed matter physics, where it has provided a novel perspective for the analysis of correlations and quantum phase transitions \cite{ON.02,OS.02,V.03,VW.04,T.04}.
Spin systems with Heisenberg interactions \cite{LSM.61,S.99} constitute a particularly attractive scenario for studying quantum entanglement. They provide a scalable qubit representation suitable for quantum processing tasks \cite{LDV.98,I.99,Bos.04,L.02} and can be realized by diverse physical systems such as cold atoms in optical lattices \cite{D.03}, quantum dots \cite{LDV.98,I.99} and Josephson junctions arrays \cite{MSS.01}. Accordingly, several investigations of entanglement in ground and thermal equilibrium states of Heisenberg spin chains subject to an external magnetic field have been made (see for instance \cite{ON.02,OS.02,V.03,VW.04,Ar.01,Wi.02,RC.05}). There have also been relevant studies of entanglement dynamics in spin chains (for instance \cite{Bos.04,AOF.04,SSL.04,HK.05,HgK.06,KRB.05}), which discuss in particular the evolution of initial Bell states and the ensuing ``entanglement waves'' \cite{AOF.04}, non-ergodicity and dynamical phase transitions starting with equilibrium states \cite{SSL.04}, decoherence waves \cite{HK.05}, evolution in varying magnetic fields \cite{HgK.06}, generation of cluster states \cite{KRB.05} as well as other issues.
In the present work we want to focus on a particular aspect, namely the generation of entanglement in an interacting spin chain with fixed parameters starting from an initially fully separable aligned state, and examine the maximum entanglement that can be reached as a function of the anisotropy and the uniform transverse magnetic field (control parameter). We will concentrate here on the {\it global} entanglement between one qubit and the rest of the chain and on the {\it pairwise} entanglement between neighboring qubits, within the context of a cyclic $XY$ chain with nearest neighbor interactions \cite{LSM.61}. Questions which immediately arise include the possible existence of a threshold anisotropy for reaching maximum global entanglement (saturation), the maximum pairwise entanglement that can be reached and, most important, the behavior with the applied magnetic field. It will be shown that contrary to what can be naively expected, the maximum global entanglement reached is not a monotonous function of the aligning field, but exhibits instead a typical {\it resonant behavior} for low anisotropies, with narrow peaks located at characteristic field values, entailing a high sensitivity suitable for entanglement control. The pairwise entanglement exhibits a more complex resonant response, since it is affected by a competition between two incompatible types (essentially of positive or negative spin parity). These resonances {\it remain finite} in the limit of vanishing (but non-zero) anisotropy in finite chains, considering sufficiently long time evolutions. On the other hand, for large anisotropies they merge into a single broad maximum centered at zero field, with global saturation reached within a field window.
Our results are based on a fully exact treatment of the finite $n$-spin chain by means of the Jordan-Wigner transformation \cite{LSM.61}, explicitly verified for the case of two and three-qubit chains. The Hamiltonian and the entanglement measures employed are discussed in Section II. Section III contains the results, discussing first the two and three-qubit cases and then the exact results for general $n$-qubit chains. Finally, conclusions are drawn in Section IV.
\section{Formalism} We consider $n$ qubits or spins in a cyclic chain interacting through an XY nearest neighbor coupling, embedded in a uniform transverse magnetic field \cite{LSM.61,S.99}. The Hamiltonian reads \begin{subequations}\label{H1} \begin{eqnarray}H&=&bS^z-\sum_{j=1}^n(v_xs^x_js^x_{j+1}+v_ys^y_js^y_{j+1})\\ &=&bS^z-\frac{1}{2}\sum_{j=1}^n (vs^+_js^-_{j+1}+gs^+_js^+_{j+1}+h.c.)\,, \end{eqnarray} \end{subequations} where $S^z=\sum_{j=1}^n\!s^z_j$ is the total spin along the direction of the magnetic field $b$, $v,g=(v_x\pm v_y)/2$ and $n+1\equiv 1$. We will consider the evolution of the state which is initially fully aligned antiparallel to the magnetic field, \begin{equation}
|\Psi(t)\rangle=\exp[-iHt]|\!\!\downarrow\ldots\downarrow\rangle\,,\label{Psi} \end{equation} where $t$ denotes time over $\hbar$, and examine the emerging {\it global} entanglement between one qubit and the rest of the chain, as well as the {\it pairwise} entanglement between contiguous qubits, arising for non-zero anisotropy $\gamma=g/v$ (for $g=0$ the initial state is an eigenstate of $H$ and hence no entanglement is generated).
Since we are dealing with a pure state, the first one is determined by the entropy \cite{Be.96} \begin{equation} E_1=-{\rm Tr}\,\rho_1\log_2\rho_1\,, \end{equation} of the reduced {\it one-qubit} density $\rho_1={\rm Tr}_{n-1}\,\rho$, where
$\rho=|\Psi(t)\rangle\langle\Psi(t)|$ is the full density matrix, with $E_1=0$ for $\rho_1$ pure ($\rho_1^2=\rho_1$) and $E_1=1$ (maximum) for $\rho_1$ fully mixed. The second one is the entanglement of formation \cite{Be.96} of the adjacent {\it pair} density $\rho_{2}={\rm Tr}_{n-2}\,\rho$, which can be calculated as \cite{W.98} \begin{equation} E_2=-\!\sum_{\nu=\pm} q_\nu\log_2 q_\nu\,, \end{equation} where $q_{\pm}=(1\pm\sqrt{1-C_2^2})/2$ and \begin{equation} C_2={\rm Max}[2\lambda_{m}-{\rm Tr}\,R,0],\;\;
R=\sqrt{\rho_2\tilde{\rho}_2}\,,\label{C2} \end{equation} is the {\it concurrence} \cite{W.98}, with $\lambda_m$ the greatest eigenvalue of $R$ and $\tilde{\rho}_2=4s^y_js^y_{j+1}\rho_2^*s^y_{j+1}s^y_j$ the spin-flipped density. It satisfies $0\leq C_2\leq 1$. Since tracing out qubits of a subsystem can be considered a LOCC (local operations and classical communication) transformation, it cannot increase entanglement \cite{Be.96} and hence $E_{2}\leq E_1$, with $E_2=E_1$ for a pure two qubit state (in which case $q_\pm$ become the eigenvalues of $\rho_1$).
As $E_2$ is just an increasing function of $C_2$, pairwise entanglement is usually directly measured through the latter, which is more suitable for analytic description. The corresponding measure of the global $E_1$ entanglement is the square root of the {\it one-tangle} \cite{W.98},
\begin{equation} C_1=2\sqrt{{\rm Det}\,\rho_1}=\sqrt{2(1-{\rm Tr}\,\rho^2)}\,,\label{C1}
\end{equation} which coincides with $C_2$ for a pure two qubit state and satisfies $C_1\geq C_2$ in the general case (actually the more general inequality $C_i\geq \sqrt{\sum_{j\neq i}C_{ij}^2}$, with $C_{ij}$ the concurrence of the $(i,j)$ pair and $C^2_i$ the one-tangle of qubit $i$, conjectured in \cite{CKW.00}, was recently proven \cite{OV.06}). Both $E_1$ and $C_1$ are measures of the disorder associated with $\rho_1$ and are hence increasing functions of one another.
Due to the symmetries of $H$ and the present initial state, $|\Psi(t)\rangle$ will be invariant under translation $(j\rightarrow j+1)$ and inversion ($j\rightarrow n+1-j$), and will have {\it positive spin parity} $P=\exp[i\pi(S^z+n/2)]$, as this quantity is preserved by $H$ ($[H,P]=0$). The reduced density $\rho_{S}={\rm Tr}_{n-S}\rho$ of {\it any} subsystem $S$ will then depend just on the distance between its components and will {\it commute}
with the subsystem parity $P_S=\prod_{j\in S}\exp[i\pi(s_z^j+1/2)]$, as the reduction involves just diagonal elements in the rest of the chain. In the case of $\rho_1$, this implies that it will be the same for all qubits and {\it diagonal} in the standard basis $|\!\!\uparrow\rangle,|\!\!\downarrow\rangle$ of $s^z$ eigenstates: \begin{equation} \rho_1=\left(\begin{array}{cc}p(t)&0\\0&1-p(t)\end{array}\right)\,, \end{equation} where $p(t)$ represents the one-qubit spin flip probability \begin{equation} p(t)=\langle s^z_j\rangle_t+1/2=\langle S^z\rangle_t/n+1/2\,, \end{equation}
(here $\langle O\rangle_t\equiv \langle\Psi(t)|O|\Psi(t)\rangle$ and spin operators are considered dimensionless). Hence, \begin{equation} C_1(t)=2\sqrt{p(t)[1-p(t)]}\,,\label{C1t} \end{equation} with $C_1(t)=1$ when $p(t)=1/2$.
The same symmetries lead to a pair density of the form \begin{equation} \rho_{2}=\left(\begin{array}{cccc}p_{1}(t)&0&0&\alpha^*(t) \\0&p_{2}(t)&\beta(t)&0\\0&\beta(t)&p_2(t)&0\\ \alpha(t)&0&0&p_3(t)\end{array}\right)\,, \end{equation}
in the standard basis $|\!\!\uparrow\uparrow\rangle,
|\!\!\uparrow\downarrow\rangle,|\!\!\downarrow\uparrow\rangle,
|\!\!\downarrow\downarrow\rangle$, where $p_{1}(t) +2p_{2}(t)+p_{3}(t)=1$, $p_{1}(t)+p_{2}(t)=p(t)$ and \begin{subequations} \label{alp} \begin{eqnarray} \alpha(t)&=&\langle s^+_js^+_{j+1}\rangle_t\,, \;\;\beta(t)=\langle s^+_js^-_{j+1}\rangle_t\,,\\ p_1(t)&=&\langle (s^z_j+1/2)(s^z_{j+1}+1/2)\rangle_t\,, \end{eqnarray} \end{subequations} for adjacent qubits. Eq.\ (\ref{C2}) becomes then \begin{equation} C_{2}(t)=2\,{\rm Max}\,
[|\alpha(t)|-p_{2}(t),|\beta(t)|-\sqrt{p_{1}(t)p_{3}(t)},0]\,, \label{C2t} \end{equation}
where only one of the entries can be positive (this follows from the positivity of $\rho_2$, which requires $|\alpha(t)|\leq \sqrt{p_1(t)p_3(t)}$,
$|\beta(t)|\leq p_2(t)$). Two kinds of pairwise entanglement can therefore arise: type I ($|\alpha(t)|>p_{2}(t)$) and type II
($|\beta(t)|>\sqrt{p_{1}(t)p_{3}(t)}$), which cannot coexist and can then be present just at {\it different} times, and which stem from the positive (I) and negative (II) parity sectors of $\rho_2$.
The eigenvalues of $H$ and the entanglement of its eigenstates are obviously independent of the sign of $g$, and for even chains also of the sign of $v$, as for $n$ even it can be changed by a transformation $s_j^{x,y}\rightarrow (-1)^js_j^{x,y}$. Due to time reversal symmetry, the emerging entanglement in even chains will then be also independent of the sign of $b$, while in odd chains that for $(-b,v)$ will coincide with that for $(b,-v)$. We will then set in what follows $v\geq 0$, $g\geq 0$, and consider both signs of $b$.
\section{Results} \subsection{Two qubit case}
Let us first analyze this simple situation, which nonetheless provides already some insight on the behavior for general $n$. Here $C_1=C_2$ $\forall\, t$. The evolution subspace is spanned by the states $|\!\!\downarrow\downarrow\rangle$,
$|\!\!\uparrow\uparrow\rangle$, and the pertinent eigenstates of $H$ are
$|\pm\rangle=u_{\mp}|\!\!\downarrow\downarrow\rangle\mp u_{\pm}
|\!\!\uparrow\uparrow\rangle$, with energies $E_{\pm}=\pm \lambda$, where $u_{\pm}=\sqrt{(\lambda\pm b)/(2\lambda)}$ and $\lambda=\sqrt{b^2+g^2}$. The state (\ref{Psi}) will then be independent of $v$ and given by \begin{eqnarray}
|\Psi(t)\rangle&=&\sum_{\nu=\pm}e^{-iE_\nu t}
\langle\nu|\!\!\downarrow\downarrow\rangle|\nu\rangle \nonumber\\ &=&(\cos\lambda t+i{\textstyle\frac{b}{\lambda}}\sin\lambda t)
|\!\!\downarrow\downarrow\rangle +i{\textstyle\frac{g}{\lambda}}\sin\lambda t
|\!\!\uparrow\uparrow\rangle\,,\label{Psi2} \end{eqnarray} so that the spin-flip probability $p(t)$ is \begin{equation} p(t)={\frac{g^2}{b^2+g^2}}\sin^2\lambda t\,.\label{p2t} \end{equation} Its maximum $p_m=g^2/(b^2+g^2)$ is thus {\it a Lorentzian of width} $g$
centered at $b=0$, satisfying $p_m\geq 1/2$ if $|b|\leq g$. Hence, for {\it any} $g>0$ the system will always reach {\it maximum entanglement} $C_1=1$
within the field window $|b|\leq g$, at times $t_m$ such that $p(t_m)=1/2$, where Eq.\ (\ref{Psi2}) becomes a type I Bell state:
\[|\Psi(t_m)\rangle=\pm i(|\!\!\uparrow\uparrow\rangle+
e^{\pm i\phi}|\!\!\downarrow\downarrow\rangle)/\sqrt{2}\,,\;\;\cos\phi=b/g\,.\] The maximum concurrence reached (Fig.\ \ref{f1}) is then \begin{equation}
C_1^m=C_2^m=\left\{\begin{array}{lr}1\,,&|s|\leq 1\\
\frac{2|s|}{s^2+1}\,,&|s|\geq 1\end{array}\right.\,,\;\;s=b/g\,, \label{C12} \end{equation}
which is {\it higher} than the concurrence $C^{\pm}=g/\lambda$ of the Hamiltonian eigenstates $\forall$ $b\neq 0$, becoming $\approx 2g/|b|$ for
$|b|\gg g$. $C_1(t)$ will follow the evolution of $p(t)$ if $p_m\leq 1/2$
($|s|\leq 1$), but will develop saturated maxima plus an intermediate minima when $p_m>1/2$.
We also note that for $b=0$, i.e., where the gap $E_+-E_-=2\lambda$ is minimum and vanishes for $g\rightarrow 0$, maximum entanglement can in principle be attained for {\it any} $g>0$. In this case the eigenstates $|\pm\rangle$ become {\it independent} of $g$ and maximally entangled, and none of them approaches the aligned initial state for $g\rightarrow 0$ (in contrast with the behavior for $b\neq 0$). The initial state becomes then equally distributed over both eigenstates ($u_{\pm}=1/\sqrt{2}$) $\forall$ $g>0$, implying
$|\Psi(t)\rangle=\cos gt|\!\!\downarrow\downarrow\rangle+i\sin gt
|\!\!\uparrow\uparrow\rangle$. Hence, in this case the only limit for reaching maximum entanglement ($\sin^2 gt=1/2$) for arbitrarily small but non-zero $g$ is the long waiting time ($t_m=\pi/(4g)$). We will see that an analogous situation will occur for any $n$ at particular field values.
\begin{figure}
\caption{(Color online). Top: Left: Maximum entanglement (measured by the concurrence) reached by the two qubit chain as a function of the (scaled) magnetic field for an initially aligned state. The dotted line depicts the concurrence of the Hamiltonian eigenstates. Right: Maximum global concurrence $C_1^m$ between one-qubit and the rest (upper curve, in blue) and maximum pairwise concurrence $C_2^m$ (lower curve, in red+dashed pink) in the three qubit system, in terms of the (shifted+scaled) magnetic field. $C_2^m$ exhibits a sharp type II resonance at $b=v/2$. Bottom: Left: Plot of $C_1$ and $C_2$ in the three qubit chain in terms of the spin flip probability $p$ ($0\leq p\leq 2/3$). Right: The temporal evolution of $C_1$ and $C_2$ in the three qubit chain at the $C_2^m$ plateau ($b=v/2\pm 0.6 g$, left) and at resonance ($b=v/2$, right). $T=2\pi/\lambda$ is the period. Type I (II) sectors in $C_2$ are depicted in solid red (dashed pink) lines.}
\label{f1}
\end{figure}
\subsection{Three qubit case} For $n=3$, the evolution subspace is still two-dimensional and spanned by
$|\!\!\downarrow\downarrow\downarrow\rangle$ and the $W$-state \cite{DC.00}
$|W\rangle\equiv(|\!\!\downarrow\uparrow\uparrow\rangle+
|\!\!\uparrow\downarrow\uparrow\rangle+
|\!\!\uparrow\uparrow\downarrow\rangle)/\sqrt{3}$, which for $g=0$ have energies $-3b/2$ and $b/2-v$. The coupling induced by $g$ leads to eigenstates
$|\pm\rangle=u_{\mp}|\!\!\downarrow\downarrow\downarrow\rangle\mp u_{\pm}|W\rangle$ with energies $E_{\pm}=\varepsilon\pm\lambda$, where $u_{\pm}=\sqrt{[\lambda\pm (b-v/2)]/(2\lambda)}$, $\varepsilon=-(b+v)/2$ and $\lambda=\sqrt{(b-v/2)^2+3g^2/4}$. We then obtain
\[|\Psi(t)\rangle=e^{-i\varepsilon t}[(\cos\lambda t+i {\textstyle\frac{b-v/2}{\lambda}}\sin\lambda t)
|\!\!\downarrow\downarrow\downarrow\rangle
+i{\textstyle\frac{\sqrt{3}g}{2\lambda}}\sin\lambda t|W\rangle]\]
which leads to
\begin{equation} p(t)=\frac{g^2}{2[(b-v/2)^2+3g^2/4]}\sin^2\lambda t\label{p3t}\,. \end{equation}
Its maximum $p_m=g^2/(2\lambda^2)$ is again a Lorentzian of width proportional to $g$ but centered at $b=v/2$ due to the hopping term, where $p_m=2/3$ (the value at the $W$-state), with $p_m\geq 1/2$ for $|b-v/2|\geq g/2$. Hence, for {\it any} $g\neq 0$ there is again a field interval where {\it maximum} $E_1$ entanglement is attained. The maximum of $C_1(t)$ (Fig.\ \ref{f1}, top right) is then \begin{equation}
C_1^m=\left\{\begin{array}{lr}1\,,&|s|\leq 1/2\\
\frac{\sqrt{2s^2+1/2}}{s^2+3/4}\,,&|s|\geq 1/2\end{array}\right.\,,\; s=(b-v/2)/g\,.\label{C13} \end{equation}
For $|b|\gg v,g$, $C_1^m\approx \sqrt{2}g/|b|$, an asymptotic result which turns out to be {\it valid $\forall$ $n\geq 3$}. The evolution of $C_1(t)$ remains qualitatively similar to that for $n=2$. Note also that for $b=v/2$, i.e., where the gap $2\lambda$ is minimum and vanishes for $g=0$, maximum $C_1$ is again reached for any $g>0$, the situation being similar to that for $n=2$ at $b=0$.
The behavior of the pairwise entanglement is, however, more complex. The
$W$-state contains type II pairwise entanglement, but $|\Psi(t)\rangle$ will first develop that of type I, so that transitions between both types can be expected to occur in the evolution for large $g$. From the expression of
$|\Psi(t)\rangle$ we obtain $|\alpha(t)|=\sqrt{p(t)(2-3p(t))}/2$, $p_{2}(t)=p_{1}(t)=\beta(t)=p(t)/2$, so that Eq.\ (\ref{C2t}) becomes \begin{equation}
C_{2}(t)=|\sqrt{p(t)[2-3p(t)]}-p(t)|\,,\label{C23} \end{equation} which corresponds to type I (II) for $p(t)<1/2$ ($>1/2$). It is thus a non-monotonous function of $p\equiv p(t)$ (left bottom panel in Fig.\ \ref{f1}), having a maximum at $p=1/6$ (where $C_2=1/3$), vanishing at the ``critical'' value $p=1/2$ (where $C_1$ is maximum) and increasing again for $p>1/2$ up to its absolute maximum at the endpoint $p=2/3$, where $C_2=2/3$ (i.e., the value at the $W$-state). Hence, saturation ($C_2=1$) cannot be reached. Moreover, it is verified that $C_{2}(t)/C_1(t)\leq 1/\sqrt{2}$ (the maximum ratio allowed by the generalized inequality \cite{CKW.00} for $C_{12}=C_{13}$), the maximum reached for $p\rightarrow 0$ or $p\rightarrow 2/3$. The evolution of $C_2(t)$ will then not follow that of $p(t)$ or $C_1(t)$ if $p_m>1/6$, developing for $p_m<1/2$ a minimum when $p(t)$ is maximum, which will evolve into two vanishing points plus a type II maximum if $p_m>1/2$ (see right bottom panel in Fig.\ \ref{f1}). The maximum of $C_{2}(t)$ is then \begin{equation} C_{2}^m=\left\{\begin{array}{lc}
\frac{1/2-|s|}{s^2+3/4}\,, &|s|\leq s_c\\
1/3\,,&s_c\leq |s|\leq 3/2\\
\frac{|s|-1/2}{s^2+3/4}\,, &|s|\geq 3/2\\ \end{array}\right. \,,\;s=\frac{b-v/2}{g} \label{C23m} \end{equation} where $s_c=\sqrt{3}-3/2\approx 0.23$ determines the second point where $C_2=1/3$ and encloses the region of dominant type II entanglement. It then exhibits {\it a sharp type II peak at $b=v/2$}, above a strict {\it type I plateau} (see Fig.\ \ref{f2}). Note that at $b=v/2$, $C_2^m=2/3$ for {\it any}
$g>0$, as in this case the system will always reach the $W$-state if the waiting time is sufficiently long ($t_m=\pi/(\sqrt{3}g)$). For $|b|\gg v,g$,
$C_{2}^m\approx g/|b|\approx C_1^m/\sqrt{2}$, an asymptotic result {\it which is again valid $\forall$ $n\geq 3$}.
\subsection{General $n$} By means of the Jordan-Wigner transformation \cite{LSM.61}, we may exactly convert the Hamiltonian (\ref{H1}) within a fixed spin parity subspace ($P=\pm 1$) to a quadratic form in fermion operators $c_j^\dagger,c_j$, defined by $c^\dagger_{j}=s^+_j\exp[-i\pi\sum_{l=1}^{j-1}s^+_{l}s^{-}_{l}]$.
For a finite cyclic chain with positive parity $P=1$, the result for $H'=H+bn/2$ is \begin{subequations} \begin{eqnarray} H'&=&\sum_{j=1}^n bc^\dagger_jc_j-({\textstyle\frac{1}{2}}-\delta_{jn}) (vc^\dagger_jc_{j+1}+gc^\dagger_{j}c^\dagger_{j+1}+h.c.)\label{Hf1}\\ &=&\sum_{k}(b-v\cos\omega_k)c'^\dagger_kc'_k-{\textstyle\frac{1}{2}} g\sin\omega_k(c'^\dagger_kc'^\dagger_{-k}+c'_{-k}c'_k)\,,
\label{Hf2}\end{eqnarray}
\end{subequations} where the fermion operators $c'_k,c'^\dagger_k$ are related to $c_j,c^\dagger_j$ by a finite Fourier transform \[c^\dagger_j={\textstyle\frac{e^{i\pi/4}}{\sqrt{n}}}
\sum_{k}e^{i\omega_k j}c'^\dagger_k,\;\;\omega_k=2\pi k/n\,,\] with $k$ {\it half-integer} for the present cyclic conditions: $k=-\frac{n-1}{2},\ldots,\frac{n-1}{2}$ for $n$ even and $k=-\frac{n}{2}+1,\ldots,\frac{n}{2}$ for $n$ odd. We then obtain the diagonal form
\begin{eqnarray}H'&=& \sum_{k}\lambda_k a^\dagger_k a_k-{\textstyle\frac{1}{2}} [\lambda_k-(b-v\cos\omega_k)]\,,\nonumber\\ \lambda_k&=&\sqrt{(b-v\cos\omega_k)^2+g^2\sin^2\omega_k}\,,\label{lj}
\end{eqnarray} by means of a BCS-like transformation $c'^\dagger_k= u_ka^\dagger_k+v_ka_{-k}$, $c'_{-k}=u_ka_{-k}-v_ka^\dagger_k$ to quasiparticle fermion operators $a^\dagger_k,a_k$, with $u_k^2,v_k^2=[\lambda_k\pm (b-v\cos\omega_k)]/(2\lambda_k)$. The quasiparticle energies (\ref{lj}) are two-fold degenerate ($\lambda_k=\lambda_{-k}$) except for $k=n/2$ for $n$ odd.
We can now determine the exact evolution for any $n$. In the Heisenberg representation ($dO/dt=i[H,O]$), we have $a^\dagger_k(t)=e^{i\lambda_kt}a^\dagger_k(0)$,
$a_k(t)=e^{-i\lambda_k t}a_k(0)$, and the ensuing contractions \[\langle a^\dagger_k(t)a_k(t)\rangle_0=v_k^2\,,\;\;\; \langle a^\dagger_k(t)a^\dagger_{-k}(t)\rangle_0=-u_kv_k e^{2i\lambda_k t}\,,\] with respect to the present initial state (vacuum of the operators $c,c'$). The average of any operator can now be evaluated by substitution and use of Wick's theorem \cite{RS.80}.
\subsubsection{Evaluation of $p(t)$ and $C_1(t)$} The one-qubit spin flip probability becomes \begin{equation} p(t)=\langle c^\dagger_j(t)c_j(t)\rangle_0 =\frac{2}{n}{\sum_k}'\frac{g^2\sin^2\omega_k}{\lambda_k^2}\sin^2\lambda_kt
\label{pnt}\,,\end{equation} where $\sum'_k\equiv\sum_{k=1/2}^{[n/2]-1/2}$ ($[n/2]$ denotes integer part). For $n=2,3$ the sum in (\ref{pnt}) reduces to a single term ($k=1/2$, with $\omega_k=\pi/2$ and $\pi/3$ respectively) and we recover {\it exactly} Eqs.\ (\ref{p2t}) and (\ref{p3t}).
For $n\geq 4$, the evolution of $p(t)$ will be in general quasiperiodic. Its upper envelope can nevertheless be obtained setting $\sin^2\lambda_k t=1$ $\forall$ $k$ in (\ref{pnt}): \begin{equation} p(t)\leq p_m=\frac{2}{n} {\sum_k}' \frac{g^2\sin^2\omega_k}{(b-v\cos\omega_k)^2+g^2\sin^2\omega_k} \label{pmast}\,, \end{equation} the maximum of $p(t)$ lying arbitrarily close to $p_m$ for sufficiently long time intervals (except for rational ratios $\lambda_k/\lambda_{k'}$). For low $g\ll v$, $p_m$ {\it will then exhibit $[n/2]$ peaks, located at} \begin{equation} b=b_k\equiv v\cos\omega_k,\;\;\;k={\textstyle\frac{1}{2},\ldots, [\frac{n}{2}]-\frac{1}{2}}\,, \end{equation} (i.e. $\omega_k=\pi/n,3\pi/n,\ldots,(2[n/2]-1)\pi/n)$, which are the fields where the quasiparticle energies $\lambda_{\pm k}$ are minimum and vanish for $g\rightarrow 0$. Hence, they are located symmetrically around $b=0$ for even $n$ ($b_{[n/2]-k}=-b_k$), with a peak at $b=0$ ($k=n/4$) for $n/2$ odd, but asymmetrically for odd $n$. Moreover, while for $b\neq b_k$, $p_m\propto g^2$, vanishing for $g\rightarrow 0$, {\it at $b=b_k$ $p_m$ remains finite $\forall$ $g\neq 0$, with $p_m\rightarrow 2/n$ for $g\rightarrow 0$} (Eq.\ \ref{pmast}). This implies \begin{equation} C_1^m\rightarrow {\textstyle 2\sqrt{\frac{2}{n}(1-\frac{2}{n})}}\,, \label{C1m} \end{equation} at $b=b_k$ for $g\rightarrow 0$ and $n\geq 4$ (and $C_1^m\rightarrow 1$ for $n=2,3,4$ as in these cases $2/n\geq 1/2$). Thus, by adjusting the field it is always possible to achieve, in principle, {\it finite} $E_1$ entanglement even for arbitrarily low (but non-zero) values of $g$. The effect of low anisotropies is just to determine the {\it width} of these peaks, given by
$\approx g|\sin\omega_k|$ in $p_m$, which increases as $g$ increases or as
$|b_k|$ decreases.
The evolution at $b=b_k$ becomes purely harmonic for $g\rightarrow 0$, with \begin{equation} p(t)\rightarrow{\textstyle\frac{2}{n}}\sin^2\lambda_kt\,,\;\lambda_k= g\sin\omega_k\,.\label{ptg} \end{equation} The maximum of $p(t)$ is first reached at $t_k=\pi/(2g\sin\omega_k)$, so that the smaller the value of $g$ (or $\omega_k$), the longer it will take to reach the maximum. In this sense, while the maximum entanglement reached in an unbounded time interval is not a continuous function of $g$ for $g\rightarrow 0$ at $b=b_k$, that reached in a {\it finite} interval $[0,t_f]$ will actually vanish for $g\rightarrow 0$ also at $b=b_k$, in agreement with the result for $g=0$, becoming lower than (\ref{C1m}) if $t_f<t_k$.
The situation at the resonances $b=b_k$ is thus similar to that encountered for $n=2$ at $b=0$ or for $n=3$ at $b=v/2$. At $b=b_k$ the energy gap $2\lambda_k$
between positive parity states with the pair $(k,-k)$ occupied and empty (in particular that between the quasiparticle vacuum $|0_q\rangle$ and the state
$a^\dagger_ka^\dagger_{-k}|0_q\rangle$) is minimum, {\it vanishing} for $g\rightarrow 0$ (level crossings). Due to these degeneracies, at $b=b_k$ the aligned state is not approached by any of the Hamiltonian eigenstates for $g\rightarrow 0$, remaining distributed over essentially two eigenstates. The previous limits (\ref{C1m})-(\ref{ptg}) can then be directly derived from Eq.\ (\ref{Hf2}), where for $g\rightarrow 0$ and $b=b_k$, we may conserve just the
$\pm k$ terms in the $g$-interaction. The evolution subspace in this limit is then spanned by the original fermionic vacuum $|0\rangle$ (the present initial state) and the two particle state
$|k,-k\rangle=c'^\dagger_kc'^\dagger_{-k}|0\rangle$, with $g$-independent eigenstates $|\pm\rangle=(|0\rangle\mp|k,-k\rangle)/\sqrt{2}$ of perturbed energies $\pm g\sin\omega_k$ (i.e., $\pm \lambda_k$). We then obtain (omitting a global phase) \begin{equation}
|\Psi(t)\rangle\rightarrow\cos \lambda_k t|0\rangle+i\sin\lambda_k t|k,-k\rangle\,, \label{apro} \end{equation}
for the fermionic $|\Psi(t)\rangle$, which leads immediately to Eq.\
(\ref{ptg}). The factor $2/n$ is just the average occupation $\langle c^\dagger_j c_j\rangle=\sum_{k'}\langle c'^\dagger_{k'}c'_{k'}\rangle/n$ in the state $|k,-k\rangle$.
As $g$ increases, the resolutions of the individual peaks diminish, merging eventually into a single broad peak centered at $b\approx 0$. Since the separation between maxima is $\delta b\approx (2\pi v/n)|\sin\omega_k|$, we have the approximate bound $g\alt\pi v/n$ for visible individual peaks. On the other hand, it is to be noticed that for $n\geq 5$ maximum $E_1$ entanglement can be reached only above a certain {\it threshold} value $g_c$ of $g$ (and then within a certain field window), with $g_c\leq v$ $\forall$ $n$ since at $b=0$ and $g=v$ we have exactly $p_m=(2/n)\sum'_k\sin^2\omega_k=1/2$ for {\it any} $n$. In fact, $g_c\approx v$ for large $n$. For $g\gg (v,b)$,
$p_m\rightarrow 1$ $(1-1/n)$ for $n$ even (odd), so that saturation in $C_1$ is always reached. Finally, for large fields $|b|\gg v,g$, \begin{equation} p_m\approx \frac{2g^2}{n b^2}{\sum_k}'\sin^2\omega_k =\frac{g^2}{2b^2},\;\;n\geq 3\,, \end{equation}
implying $C_1^m\approx \sqrt{2}g/|b|$. This asymptotic result is {\it independent} of $n$ (for $n\geq 3$) and coincident with the result previously obtained for $n=3$.
\begin{figure}
\caption{(Color online). Maximum concurrence between one qubit and the rest (upper blue curves) and between adjacent qubits (lower red+dashed pink curves), reached in the four (left) and five (right) qubit chains for two different anisotropies $\gamma=g/v$ (type II sectors in $C_2$ depicted again with dashed pink lines). For $n=4$ the peaks in the global concurrence at $b/v=\pm 1/\sqrt{2}$ are no longer resolved for $\gamma\geq 1$, but remain in the pairwise concurrence. For $n=5$, the resonances are located at $b/v=(1\pm\sqrt{5})/4$ and merge again in a saturated maximum for $\gamma\geq 1$, while the pairwise concurrence presents a type II resonance just at the second peak, which again remains visible for large $\gamma$. Dotted lines in the upper curves depict results obtained with the upper envelope (\ref{pmast}), and are almost coincident with the numerically obtained maximum in the interval $0\leq vt\leq 40$. See text for more details.}
\label{f2}
\end{figure}
Results for $n=4,5$ and $14,15$ are shown in Figs.\ \ref{f2} and \ref{f3}. For $n=4$, the resonances are located at $b_k=\pm v/\sqrt{2}$, with $p_m\geq 1/2$
(and hence $C_1^m=1$) for $|b^2-v^2/2|\leq g^2/2$. This determines two saturated plateaus in $C_1^m$ centered at $b=b_k$ for $g<v$, which merge into a {\it single} plateau centered at $b=0$ for $g>v$. For $n=5$ the peaks are located at $b_k=v(1\pm\sqrt{5})/4\approx 0.81,-0.31$, where $C_1^m\rightarrow 2\sqrt{6}/5\approx 0.98$ for $g\rightarrow 0$ (Eq.\ \ref{C1m}). Saturation is reached only for $g/v\agt 0.67$, initially just at the right peak, although for $g>v$, $C_1^m$ exhibits again a saturated plateau covering $b=0$. For $n=14$ (15), $C_1^m\rightarrow 0.7$ (0.68) at the seven peaks for $g\rightarrow 0$, and saturation is reached for $g\agt 0.92$.
\begin{figure}
\caption{(Color online) Maximum concurrence between one qubit and the rest of the chain (upper blue lines) and between adjacent qubits (lower red+dashed pink lines) in a $n=14$ (left) and $n=15$ (right) qubit chain for different anisotropies, reached in an interval $0\leq vt\leq 180$. The dashed pink lines depict the maximum of the type II pairwise concurrence, which becomes now lower than the type I plateau for $\gamma\agt 0.25$. Results for $C_1$ obtained with the upper bound (\ref{pmast}) are also depicted (dotted lines, almost overlapping with the blue solid lines). The peaks in $C_1$ are visible for $\gamma\alt 0.4$, and saturation ($C_1=1$) is reached for $\gamma\agt 0.92$.}
\label{f3}
\end{figure}
The small or tiny dips in the numerical result for $C_1^m$ that can be seen in Figs.\ \ref{f2} and \ref{f3} arise due to the occurrence of rational ratios between the quasiparticle energies $\lambda_k$ at particular values of $b/v$, in which case the maximum of $p(t)$ can be lower than the smooth upper envelope (\ref{pmast}). For instance, for $n=4$ the ratio of the two distinct energies
$\lambda_{1/2}$, $\lambda_{3/2}$ becomes $2$ at $|b|/v=\sqrt{2}(5\pm \sqrt{16-9\gamma^2})/6$ (provided $\gamma<4/3$), where the maximum reached by $p(t)$ is just $(4/5)p_m$ (20\% reduction). A reduction in the maximum of
$p(t)$ will also occur in the vicinity of these values of $|b|/v$ for finite time intervals. This effect gives rise to the noticeable dip in $C_1^m$ at
$|b|/v\approx 1.8$ for $\gamma=1$ (the other value $|b|/v\approx 0.55$ lies within the plateau region and its effect on $C_1^m$ is unobservable) and to those at $|b|/v\approx 0.24$ and $\approx 2.12$ for $\gamma=0.1$.
It should be also mentioned that for short times $\lambda_k t\ll 1$ $\forall k$, $p(t)$ becomes independent of $n$, its series expansion of order $m$ remaining stable for $n>m$. For instance, up to $O((\lambda_kt)^4)$ in $p(t)$, we obtain, for $n\geq 5$,
\begin{eqnarray} p(t)&\approx& {\textstyle\frac{1}{2}g^2t^2[1-\frac{1}{12}t^2(v^2+4b^2+3g^2)]} \,,\nonumber\\ C_1(t)&\approx&\sqrt{2}gt[1-{\textstyle\frac{1}{24}}t^2(v^2+4b^2+9g^2)]\,.
\nonumber\end{eqnarray} It is thus seen that for $g\gg (b,v)$ and $n\agt 8$, $p(t)$ exhibits an initial peak at $t\approx 1.92/g$, where $p(t)\approx 0.7$, with $p(t)\geq 1/2$ for $1.2\alt g t\alt 2.75$, so that in this limit saturation in $C_1$ is rapidly reached (see Fig.\ \ref{f5}). The initial peak in $C_1$ can be correctly predicted by its $7^{\rm th}$ order expansion.
\subsubsection{Evaluation of $C_2(t)$} Let us now examine the pairwise concurrence. The relevant elements (\ref{alp}) of the adjacent pair density are \begin{eqnarray} \beta(t)&=&\langle c^\dagger_j(t)c_{j+1}(t)\rangle_0= \frac{2}{n}{\sum_{k}}'\frac{g^2\cos\omega_k\sin^2\omega_k}{\lambda_k^2} \sin^2\lambda_kt\,,\nonumber\\ \alpha(t)&=&\langle c^\dagger_j(t)c^\dagger_{j+1}(t)\rangle_0 =\frac{2}{n}{\sum_k}'\frac{g\sin^2\omega_k}{\lambda_k}\label{alta}\\ &&\times\sin\lambda_kt[{\textstyle\frac{b-v\cos\omega_k}{\lambda_k}} \sin\lambda_kt-i\cos\lambda_kt]\,,\nonumber\\ p_1(t)&=&\langle c^\dagger_j(t)c_j(t)c^\dagger_{j+1}(t)c_{j+1}(t)\rangle_0 \nonumber\\
&=&p^2(t)-\beta^2(t)+|\alpha^2(t)|\,,\label{p0} \end{eqnarray} where $j<n$ and in (\ref{p0}) we have applied Wick's theorem for vacuum expectation values.
The corresponding results for $n=4,5$ and $14,15$ are also depicted in Figs.\
\ref{f2}-\ref{f3}. It is seen that for low $g$, $C_2(t)$ presents sharp type II resonances only below the outer peaks of $C_1$, and actually just below the rightmost peak for small odd $n$. In order to understand this behavior, we note that for $g\rightarrow 0$ and $b=b_k$, \begin{equation} \beta(t)\rightarrow{\textstyle\frac{2}{n}}\cos\omega_k\sin^2\lambda_kt\,,\;
|\alpha(t)|\rightarrow{\textstyle\frac{1}{n}}|\sin\omega_k\sin 2\lambda_kt|
\,.\label{bag}\end{equation} These limits can also be directly read from Eq.\ (\ref{apro}), as
$(2/n)\cos\omega_k$ is the average $\langle c^\dagger_jc_{j+1}\rangle=\sum_{k'}\cos\omega_{k'}\langle c'^\dagger_{k'}c_{k'}\rangle/n$ in the state $|k,-k\rangle$ whereas $\alpha(t)$ is the average $\sum_{k'}\sin\omega_{k'}\langle c'^\dagger_{k'}c'^\dagger_{-k'}\rangle/n$ in the full state (\ref{apro}). The type II maxima of $C_{2}$ are then obtained for $\sin^2\lambda_kt=1$, leading to \begin{equation}
C^m_{2}\rightarrow {\textstyle\frac{4}{n}[|\cos\omega_k|-\sin\omega_k \sqrt{1-\frac{4}{n}+\frac{4}{n^2}\sin^2\omega_k}}]\,,
\label{cmn2}\end{equation} in this limit at $b=b_k$. Eq.\ (\ref{cmn2}) is actually positive for \[\sin^2\omega_k\leq {\textstyle[1-\frac{2}{n}+\sqrt{(1-\frac{2}{n})^2 +\frac{4}{n^2}}]^{-1}}\approx
{\textstyle\frac{1}{2}+\frac{1}{n}+O(\frac{1}{n^2})}\,,\]
i.e., $\omega_k\alt\pi/4$ or $\omega_k\agt 3\pi/4$ ($|b_k|/v\agt 1/\sqrt{2}$) for large $n$, so that they arise just beneath the outer peaks of $C_1$, the strongest located at the rightmost peak for $n$ odd ($k=1/2$) and outermost peaks for $n$ even ($k=1/2$ or $[n/2]-1/2$). Thus, type II resonances in $C_{2}$ {\it remain also finite for $g\rightarrow 0$} but are of order $n^{-1}$, becoming smaller than those of $C_1$ for large $n$ ($C_2^m/C_1^m\propto \sqrt{2/n}$). The scaled concurrence $nC_2^m$ remains nevertheless finite for large $n$.
For $n=3$ we {\it exactly} recover from (\ref{cmn2}) the previous result $C^m_{2}=2/3$ for the type II peak. For $n=4$, Eq.\ (\ref{cmn2}) yields $C^m_2=(2\sqrt{2}-1)/4\approx 0.46$, whereas for $n=5$ it leads to a single peak at $\omega_k=\pi/5$, of height $\approx 0.41$. For $n=14$, there are sharp type II peaks at the outer resonances, of height $\approx 0.22$, plus smaller peaks at the next resonance, of height $\approx 0.08$, which rapidly fall below the type I plateau. For $n=15$ the visible type II peaks are asymmetric and appear at $b_k/v\approx 0.98,0.81$ and $-0.91$, with heights $\approx 0.21,0.08$ and $0.15$.
For $g\rightarrow 0$ there are also type I maxima of $C_{2}$ at $b=b_k$, visible in the central region (Fig.\ \ref{f3}). These maxima are broader and occur at times determined by \[\cos (2\lambda_kt)= \frac{1-{\textstyle\frac{2}{n}}\sin^2\omega_k}{\sqrt{\sin^2\omega_k+ (1-{\textstyle\frac{2}{n}}\sin^2\omega_k)^2}}\,,\] (the first peak at $t_1\approx\pi/(8\lambda_k)$ for $\omega_k\approx \pi/2$), where the concurrence approaches for $g\rightarrow 0$ the value
\begin{equation} C^m_{2}\rightarrow {\textstyle\frac{2}{n}} [\sqrt{(1-{\textstyle\frac{2}{n}}\sin^2\omega_k)^2+\sin^2\omega_k}- (1-{\textstyle\frac{2}{n}}\sin^2\omega_k)]\,.
\label{cmn1}\end{equation}
Since this is an increasing function of $|\sin\omega_k|$, i.e., a decreasing function of $|b_k|$, the type I maxima fall below those of type II for low
$|\sin\omega_k|$ ($|\sin\omega_k|\alt 0.66$ or $|b_k|/v\agt 0.75$ for large $n$). Moreover, at the highest type I peak ($\omega_k\approx\pi/2$), $C^m_{2}\approx 2(\sqrt{2}-1)/n$ for large $n$, which is just 21\% of the highest type II peak ($C^m_{2}\approx 4/n$). For $n=3$ we also recover from (\ref{cmn1}) the previous exact result $C^m_{2}=1/3$ in the type I plateau, while for $n=2$ it yields the correct maximum value $C_2^m=1$. For $n=4$ and $5$ we obtain $C^m_2\approx 0.14$ and $C^m_2\approx 0.07,0.2$ at the type I peaks, while for $n=14,15$, $C^m_2\approx 0.07,0.06$ at the centermost type I peak for $g\rightarrow 0$.
As $g$ increases, the lower type I resonances in $C_2^m$ become rapidly smoothed out, merging into a broad plateau (Figs.\ \ref{f2},\ref{f3}). Moreover, while for low $n$ the type II peaks remain visible even for large $g$
(Fig.\ \ref{f2}), as $n$ increases these peaks become as well superseded by the type I plateau (Fig.\ \ref{f3}), which is discussed below. On the other hand, for $|b|\gg v,g$, we obtain, up to first order in $g/|b|$, $v/|b|$,
$C_2(t)\approx 2|\alpha(t)|\leq C_2^m$, with
\[C_2^m\approx \frac{4g}{n|b|}{\sum_k}'\sin^2\omega_k=\frac{g}{|b|}\,,\;\;\;
n\geq 3\,,\] in agreement with the previous result for $n=3$. In this limit, $C_2^m\approx C_1^m/\sqrt{2}$.
\subsubsection{Temporal Evolution} Fig.\ \ref{f4} depicts $C_1(t)$ and $C_2(t)$ for $n=15$ at two different anisotropies, {\it at} and {\it away} from resonances. For low $\gamma$ (left panels), we observe a low frequency periodic-like evolution of $C_1(t)$ and $C_2(t)$ at the outer resonance ($b/v\approx 0.98$), in agreement with (\ref{ptg}) and (\ref{bag}), with $C_2(t)$ exhibiting regions of both type I and type II entanglement, whereas for large fields $b=2v$ both $C_1(t)$ and $C_2(t)$ become very small, with $C_2(t)$ of type I. Both $C_1(t)$ and $C_2(t)$ are also smaller for $b=0$ (with $C_2(t)$ again of type I), which here corresponds approximately to a minimum of $C_1^m$ and $C_2^m$.
\begin{figure}
\caption{(Color online). The evolution of $C_1(t)$ (upper curves in blue) and $C_2(t)$ (lower curves, in red and pink) for $n=15$ at two different anisotropies and different fields. The central panels depict the evolution at the outer resonance $b_c/v=\cos(\pi/n)\approx 0.98$. Both the type I (red) and type II (pink, dashed lines) sectors of $C_2(t)$ are indicated. }
\label{f4}
\end{figure}
On the other hand, for $\gamma=1$ the emerging global entanglement is non-negligible for all moderate fields, with saturation in $C_1$ reached for $b\alt v$. In this case $C_2(t)$ does not follow the behavior of $C_1(t)$ for low fields, where it strictly vanishes at finite time intervals, although for large $b>v$ the evolution of $C_2(t)$ becomes again similar to that of $C_1(t)$ (with $C_2^m\approx C_1^m/\sqrt{2}$), and intervals of vanishing value are {\it removed}. Thus, the {\it average} pairwise entanglement is in this case {\it enhanced} by a large field $b\approx 2v$, in comparison with that for $b\approx v$, as a consequence of the lower global entanglement. In other words, the decoherence of the pair for large $\gamma$ due to the interaction with the spin chain (representing here the environment for the pair) is prevented by large fields.
It is also seen that the evolution for $\gamma=1$ ($g=v$) and $b=0$ is strictly periodic. In this case $\lambda_k=v$ $\forall$ $k$ and Eqs.\ (\ref{alta}) become {\it independent} of $n$ for $n\geq 4$ and of the form \begin{eqnarray} p(t)&=&{\textstyle\frac{1}{2}}\sin^2 vt\,,\;\;\beta(t)=0,\;\; \alpha(t)=-i{\textstyle\frac{1}{4}}\sin 2vt\,,\nonumber\\
C_1(t)&=&|\sin vt|\sqrt{2-\sin^2 vt}\,,\label{c1pe}\\
C_{2}(t)&=&|\sin vt|\,{\rm Max}\,[|\cos vt|-|\sin vt|/2,0]\,.\label{c2pe} \end{eqnarray}
Hence, $C_1(t)$ reaches saturation when $|\sin vt|=1$, whereas $C_{2}(t)$ has maxima when $\cos 2vt=1/\sqrt{5}$, where $C_{2}(t)=(\sqrt{5}-1)/4\approx 0.31$, and vanishes in the interval where $|\cos vt|<1/\sqrt{5}$ or when $\sin vt=0$. The previous maximum of $C_2$ is already close to the maximum obtained for large $\gamma$ (see below) and is higher than the resonant values for $n>9$.
\begin{figure}
\caption{(Color online). Evolution for large anisotropy and short times of $C_1(t)$ (upper curves in blue) and $C_2(t)$ (lower curves, in red), for neighboring odd-even systems.}
\label{f5}
\end{figure}
Fig.\ \ref{f5} depicts the typical evolution for short times and large anisotropy. As seen here, the plateau in the maximum concurrence $C_2^m$ arising for $g>(v,b)$ originates from the first maximum in the evolution of $C_{2}(t)$, which exhibits in this region a prominent initial ``burst'' followed by intervals of vanishing value (i.e., decoherence of the pair) and lower revivals (near the most prominent minima of $C_1(t)$). For $g\gg (b,v)$ and $n\agt 5$, the initial peak of $C_{2}$ occurs at $gt\approx 0.66$, with height $C_{2}^m\approx 0.35$, and is practically {\it independent} of $n$. The resonances in $C_2^m$, of order $n^{-1}$, become then rapidly covered by the plateau as $n$ or $g$ increases. This initial peak can be approximately reproduced by a fourth order expansion of $C_2(t)$, given for $n\geq 5$ by \begin{eqnarray} C_2(t)&\approx&{\textstyle gt[1-\frac{1}{2}gt-\frac{1}{6}t^2(v^2+b^2+3g^2)} \nonumber\\&&+{\textstyle\frac{1}{12}}gt^3(2b^2+3g^2-v^2)]\,.
\nonumber\end{eqnarray} Nonetheless, odd-even differences and $n$-dependence do arise for longer times ($gt\agt 10$ in the case of Fig.\ \ref{f5}) and affect the revivals of $C_2$.
Let us finally mention that as the resonances arising for low $\gamma$ develop their first maximum at $t_k=\pi/(2g\sin\omega_k)$, the relevant timescale for their observation is $\tau\approx\hbar/(\gamma v)\approx\tau_v/\gamma$, where $\tau_v\approx\hbar/v$ is the operation time associated with the hopping strength $v$, and should be smaller than the characteristic decoherence time $\tau_d$ of the chain determined by its interaction with the environment. This limits the smallness of the anisotropy (i.e., $\gamma\agt \tau_v/\tau_d$) and hence the sharpness of the peaks. For instance, if $\gamma=0.1$ and $v\approx 0.02$ meV, which is a typical strength for realizations based on quantum dots electron spins coupled through a cavity mode \cite{I.99}, $\tau\approx 3\times 10^{-10}s$, which is smaller than the typical decoherence time \cite{I.99}. On the other hand, the results for $C_2$ represent the evolution of the entanglement of an adjacent pair in the present spin chain environment, and indicate that resonances remain finite at the pairwise level in such scenario.
\section{Conclusions}
We have examined the entangling capabilities of a finite anisotropic $XY$ chain with constant parameters for an initially completely aligned state in the transverse direction. The exact analytical results obtained (valid for all $n$) show that the maximum attainable entanglement exhibits for low anisotropy $\gamma$ a clear resonant behavior as a function of the transverse magnetic field, with peaks at those fields where the effective quasiparticle energies $\lambda_k$ are minimum and vanish for $\gamma=0$. At these fields, the energy levels become then degenerate for $\gamma\rightarrow 0$ and the aligned state remains mixed with its degenerate partner for arbitrarily small but non-zero $\gamma$. The height of these resonances remains thus finite for $\gamma\rightarrow 0$ and their width is proportional to the anisotropy, implying a fine field sensitivity apt for efficient control, although the time required to reach the peak is proportional to $\gamma^{-1}$ and the height decreases as the number of qubits increases. The resonances are prominent in the maximum global entanglement between one qubit and the rest of the chain, and are present as well in the entanglement of other global partitions.
They also arise in the maximum pairwise concurrence, and can be of both spin parities, although they are of lower height and decrease more rapidly with $n$, being hence more easily smoothed out for increasing $\gamma$. Here we have shown that type II (I) resonances become dominant at large (low) critical fields for adjacent pairs, those of type II being extremely narrow. Another feature is that odd-even differences in the resonant behavior remain appreciable for moderate $n$, odd chains exhibiting field sign sensitivity both in the global and pairwise peaks. On the other hand, saturation can be reached in the global $E_1$ entanglement within a certain field window above a threshold anisotropy ($\gamma\approx 1$ for large $n$), but not in the pairwise entanglement, whose maximum exhibits instead a broad low plateau for large $\gamma$ and hence low field sensitivity. Let us finally remark that resonances of the present type will also occur for non-adjacent pairs as well as for other geometries or interaction ranges, although details (i.e., relative widths and strengths) may certainly differ from the present ones and are currently under investigation.
RR acknowledges support of CIC of Argentina.
\end{document} | arXiv | {
"id": "0707.0473.tex",
"language_detection_score": 0.767426609992981,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Rayleigh quotient and left eigenvalues]{Rayleigh quotient and left eigenvalues of quaternionic matrices}
\author[E.~Mac\'ias-Virg\'os, M.J.~Pereira-S\'aez, Ana D.~Tarr\'io-Tobar]{
E.~Mac\'ias-Virg\'os,
\address{Instituto de Matem\'aticas, Universidade de Santiago de Compostela, 15782-Spain}
M.J.~Pereira-S\'aez
\address{Facultade de Econom\'{\i}a e Empresa, Universidade da Coru\~na, 15071-Spain}
\and
Ana D.~Tarr\'io-Tobar
\address{E.U. Arquitectura T\'ecnica, Universidade de A Coru\~na, 15008- Spain}
}
\thanks{$^\ast$
{\tt quique.macias@usc.es, maria.jose.pereira@udc.es, {ana.dorotea.tarrio.tobar@udc.es}}
}
\begin{abstract}
We study the Rayleigh quotient of a Hermitian matrix with quaternionic coefficients and prove its main properties. As an application, we give some relationships between left and right eigenvalues of Hermitian and symplectic matrices. \end{abstract}
\maketitle
\section{Introduction} The Rayleigh quotient of a matrix, introduced by the British physicist Lord Rayleigh in 1904 in his book ``The theory of sound'', is a well known tool which is widely used to obtain estimates of the eigenvalues of real and complex matrices (\cite{HORN,BHATIA}).
For quaternionic matrices, however, only a few references about the Rayleigh quotient can be found in the literature, and there is a lack of a general exposition of its properties and main results. Since quaternions have many applications, among them in quantum mechanics, solid body rotations, and signal theory (\cite{BADENSKA}), it seems useful to fill that gap. Notice that most of the results and proofs are analogous to the complex case, but they refer to {\em right} eigenvalues.
On the other hand, very little is known about {\em left} eigenvalues of quaternionic matrices. Wood (\cite{WOOD}) proved that every quaternionic matrix has at least one left eigenvalue. Huang and So {(\cite{HUANG-SO})} completely solved the case of $2\times 2$ matrices. The authors (\cite{MP2009,MP2010}) applied Huang and So's results to $2\times 2$ symplectic matrices. {The case $n=3$ was studied by So (\cite {SO}) and the authors (\cite{MP2014})}. Finally, Zhang (\cite{ZHANG2007}) and Farid, Wang and Zhang (\cite{FARID}) gave several Ger\v{s}gorin type theorems for quaternionic matrices.
Consequently, many problems still remain open, in particular those about the relationship between left and right eigenvalues. In this paper we give some partial answers to this question, for Hermitian and symplectic matrices, as an application of the previously proved properties of the Rayleigh {quotient.}
The contents of the paper are as follows. In Section \ref{SECTPRELIM} we {present} some preliminaries about the right eigenvalues of a quaternionic matrix.
In Section \ref{SECTquotient}, we consider a Hermitian quaternionic $n\times n$ matrix $S$, and we define its Rayleigh quotient $h_S$ as a real function defined on the sphere $S^{4n-1}$. We compute the gradient, the Hessian and the mean value of $h_S$, and we prove its main properties, among them the min-max principle for eigenvalues (Section \ref{MINMAXSECT}).
In Section \ref{SECTLEFT}, we introduce left eigenvalues and we study the case $n=2$ in some detail, as a testing bench for later results. {For an arbitrary Hermitian matrix $S$, our main result (Theorem \ref{HERMITMAIN}) is that the real part of any left eigenvalue $\lambda$ is bounded by the right eigenvalues, in a way that depends on the dimension of the $\lambda$-eigenspace.} {As we shall see, this implies that the existence of left eigenvalues with a {high-dimensional} space of eigenvectors depends on the multiplicity of the right eigenvalues.}
Finally, in Section \ref{SECTSYMPL} we state similar results for symplectic matrices.
{Our results suggest that there are still many more hidden relations between left and right eigenvalues.}
\section{Preliminaries}\label{SECTPRELIM}
As a general reference for quaternionic linear algebra we take Rodman's book \cite{RODMAN}. For a brief survey on {quaternions} and matrices of quaternions, see Zhang's paper \cite{ZHANG1997}.
\subsection{Basic notions} We denote by $\mathbb{H}$ the non-commutative algebra of quaternions. For the quaternion $ q\in\mathbb{H}$ we {denote} its conjugate by $\overline q$, its norm by {$\vert q \vert$} and its real part by $\Re( q)$.
Let $\mathbb{H}^{n\times n}$ be the space of $n\times n$ matrices with quaternionic coefficients. If $M\in\mathbb{H}^{n\times n}$, we denote by $M^*$ its conjugate transpose {$(\overline M)^T$}. The quaternionic space of $n$-tuples {$\mathbf{u}=(u_1,\dots,u_n)^T$}, with $u_i\in\mathbb{H}$, will be denoted by $\mathbb{H}^n$. We shall always consider it as a {\em right} vector space over $\mathbb{H}$, endowed with the {\em Hermitian} product $\langle \mathbf{u},\mathbf{v}\rangle =\mathbf{u}^*\mathbf{v}$. Notice that $\vert \mathbf{u} \vert^2 =\langle \mathbf{u},\mathbf{u}\rangle $ is the Euclidean norm in $\mathbb{R}^{4n}$, so the {\em scalar} product is $\mathbf{u}\cdot \mathbf{v}=
\Re \langle \mathbf{u},\mathbf{v}\rangle$.
The matrix {$S\in\mathbb{H}^{n\times n}$} is {\em Hermitian} if it is self-adjoint for the Hermitian product, that is, $\langle S\mathbf{u},\mathbf{v}\rangle =\langle \mathbf{u},S\mathbf{v}\rangle $, or equivalently, $S^*=S$. The matrix {$A\in\mathbb{H}^{n\times n}$} is {\em symplectic} if the associated linear map preserves the Hermitian product, that is, $\langle A\mathbf{u},A\mathbf{v}\rangle =\langle \mathbf{u},\mathbf{v}\rangle $, or equivalently, $A^*A=AA^*=I_n$.
Two quaternions $q$ and $q'$ are {\em similar} if there exists some $ r \in \mathbb{H}$, $ r \neq 0$, such that $q'= r q r^{-1}$. Equivalently, they have the same norm and the same real part, that is, $\vert q \vert=\vert q' \vert$ and $\Re(q)=\Re(q')$ (\cite[Theorem 2.2]{ZHANG1997}). As a consequence, any quaternion $q$ is similar to a complex {number}, namely $z=t+s\,\mathbf{i}$, where $t=\Re(q)$ and $t^2+s^2=\vert q \vert^2$. Notice that {$z\in\mathbb{C}$} and its conjugate $\overline z$ are similar quaternions.
{Finally, we shall need the following result, which can be proved by a direct computation \begin{lema}\label{CONMUT}{Let $\omega,\omega^\prime$ be two quaternions such that $\vert \omega \vert=\vert \omega^\prime \vert=1$ and $\Re(\omega)=\Re(\omega^\prime)=0$. If $\omega\omega^\prime=\omega^\prime \omega$ then $\omega=\pm\omega^\prime$.} \end{lema}}
\subsection{Right eigenvalues}
The theory of right eigenvalues is well known, and has many properties in common with the complex case.
\begin{defi} The quaternion $q\in \mathbb{H}$ is a {\em right eigenvalue} of the matrix $M\in\mathbb{H}^{n\times n}$ if there exists some {vector} $\mathbf{u}\in\mathbb{H}^n$, $\mathbf{u}\neq \mathbf{0}$, such that $M\mathbf{u}=\mathbf{u} q$. \end{defi}
Notice that the {eigenvectors associated to a right eigenvalue do not form a vector subspace}. Instead, right eigenvalues are organized in similarity classes.
\begin{prop}\label{CHANGESIMIL}Let $q$ be a {right eigenvalue of $M\in \mathbb{H}^{n\times n}$,} and {let $\mathbf{u}$ be} a $q$-eigenvector. If $r\in \mathbb{H}$ is a non-zero quaternion, then $rq r^{-1}$ is {also a right eigenvalue of $M$,} and $\mathbf{u} r^{-1}$ is an $rq r^{-1}$-eigenvector. \end{prop} \begin{proof} $M(\mathbf{u} r^{-1} )= (\mathbf{u} q) r^{-1}= \mathbf{u} r^{-1} (rq r^{-1})$. \end{proof}
So, the computation of the right eigenvalues of the matrix $M$ is reduced to compute the complex representatives of their similarity classes. This can be done as follows. Each quaternion $ q$ can be written in a unique form as $ q =u+\mathbf{j} v$, with $u,v\in \mathbb{C}$ complex numbers. Then the matrix $M\in \mathbb{H}^{n\times n}$ decomposes as $M=U+\mathbf{j} V$, with $U,V\in \mathbb{C}^{n \times n}$ complex matrices. We define the associated complex matrix $$c(M)=\begin{bmatrix} U&-\overline V\,\cr V\,& \overline U\cr\end{bmatrix}.$$
It is straightforward to verify that the map $c\colon \mathbb{H}^{n\times n}\to \mathbb{C}^{2n \times 2n}$ is an injective morphism of $\mathbb{R}$-algebras and satisfies $c(M^*)=c(M)^*$.
\begin{prop} The right eigenvalues of $M$ are grouped in $n$ similarity classes $[z_1],\dots,[z_n]$. The complex representatives $z_1,\overline z_1,\dots, z_n,\overline z_n$ are the eigenvalues of the complex matrix $c(M)$. \end{prop}
See \cite[Theorem 5.5.3]{RODMAN} for a discussion of the Jordan form of $M$.
\begin{ejem}\label{EXRIGHT}Consider the matrix $M=\begin{bmatrix} 0\ &\mathbf{j} \cr \mathbf{i} & 0\cr \end{bmatrix}$. The eigenvalues of $c(M)$ are $z_1=\frac{1}{\sqrt{2}}(1+\mathbf{i})$, $z_2=\frac{1}{\sqrt{2}}(-1+\mathbf{i})$ and their conjugates $\overline z_1$, $\overline z_2$. Then, the right eigenvalues of $M$ are all the quaternions $q$ such that $\Re(q)=\pm 1/\sqrt{2}$ and $\vert q\vert =1$. \end{ejem}
Notice that, unlike the usual complex case, the matrices $M-q I$ in the latter example are invertible, for all right eigenvalues $q$. This can be easily seen by computing their kernel. This leads to the notion of {\em left} eigenvalue, {as a quaternion $\lambda\in\mathbb{H}$ such that $M-\lambda I$ is not invertible (see Section \ref{SECTLEFT})}.
\section{The Rayleigh quotient }\label{SECTquotient}
{The well known Rayleigh quotient for complex matrices can be generalized to matrices of quaternions. We now focus on Hermitian matrices. }
\subsection{Definition and first properties} Recall that the $n\times n$ quaternion matrix $S$ is {\em Hermitian} if $S=S\sp*$. As it is well known, any {\em right} eigenvalue $q$ of $S$ is real: in fact, if $S\mathbf{u}=\mathbf{u} q$ then
$$\vert \mathbf{u}\vert^2q=\mathbf{u}^*\mathbf{u} q= \mathbf{u}^*S\mathbf{u}={(\mathbf{u}^*S\mathbf{u})^*}$$ is real,
hence $\overline q=q$. Moreover, $S$ is diagonalizable \cite[Theorem 5.3.6]{RODMAN}. Let $ t_1\leq\dots\leq t_n$ be {the eigenvalues} of $S$, and let $\mathbf{u}_1,\dots,\mathbf{u}_n$ be an orthonormal basis of eigenvectors. Then $S$ diagonalizes as $S=U\mathrm{diag}[ t_1,\dots, t_n]U^*$, where $U$ is a symplectic matrix whose columns are the $\mathbf{u}_j$'s.
\begin{defi}If $S$ is a Hermitian $n\times n$ matrix, and $\mathbf{v}\in \mathbb{H}^n$ is a vector, $\mathbf{v}\neq \mathbf{0}$, the {\em Rayleigh quotient } is the real number $$R(S,\mathbf{v})=\frac{\mathbf{v}^*S\mathbf{v}}{\vert \mathbf{v}\vert^2}.$$ \end{defi}
\begin{prop}
If $ t\in\mathbb{R}$ is an eigenvalue of $S$, and {$\mathbf{u}\in\mathbb{H}^n$} is a $ t$-eigenvector, then $R(S,\mathbf{u})= t$. \end{prop}
If $\mathbf{v}\in\mathbb{H}^n$ is a vector, $\mathbf{v}\neq \mathbf{0}$, {we can} write it in coordinates with respect to the orthonormal basis $\{\mathbf{u}_j\}_{j=1,\dots,n}$ as $$\mathbf{v}={\sum_{j=1}^n \mathbf{u}_jx_j}, \quad x_j\in\mathbb{H}.$$
\begin{prop}\label{WEIGHT} The Rayleigh quotient equals the weighted mean $$R(S,\mathbf{v})=\frac{\sum_j t_j\vert x_j\vert^2}{\sum_j \vert x_j\vert^2},$$ where {the real numbers $ t_j$ are the eigenvalues of $S$}. \end{prop}
\begin{proof} {Since $\mathbf{u}_i^*\mathbf{u}_j=\delta_{ij}$}, we have $$\vert \mathbf{v} \vert^2=\mathbf{v}^*\mathbf{v}=(\sum_i \overline x_i \mathbf{u}_i^*)(\sum_j \mathbf{u}_jx_j)=\sum_{i,j}\overline x_i\mathbf{u}_i^*\mathbf{u}_jx_j=\sum_j\overline x_j x_j=\sum_j\vert x_j\vert^2.$$
Analogously, since $S\mathbf{u}_j=\mathbf{u}_j t_j$, $$\mathbf{v}^*S\mathbf{v}=(\sum_i\overline x_i \mathbf{u}_i^*)(\sum_j\mathbf{u}_j t_jx_j)= \sum_j\overline x_j t_jx_j=\sum_j t_j \vert x_j\vert^2. \qedhere$$ \end{proof} By diagonalization, we have reduced the problem of an arbitrary bilinear form to the corresponding quadratic form. As a consequence we have: \begin{prop}\label{MINMAXPROP}{The minimum value of the function $R(S,\mathbf{v})$, defined in $\mathbb{H}^n\setminus{\{\mathbf{0}\}}$, is the lowest {eigenvalue of $S$}. The maximum value {is} the {highest eigenvalue of $S$}}. \end{prop} \begin{proof} First, notice that $R(S,\mathbf{v}/\vert\mathbf{v}\vert)=R(S,\mathbf{v})$, so we can assume that $\vert \mathbf{v} \vert=1$. It is known \cite[Appendix B]{HORN} that a convex function $$\sum\limits_{j=1}^n t_j s_j, \quad {\text{with\ }}\sum\limits_{j=1}^n s_j=1, s_j\geq 0,$$ attains its maximum value at $ t_n=\max t_j$ and its minimum value at $ t_1=\min t_j$. {By taking into account Proposition \ref{WEIGHT}, the result follows}. \end{proof} In fact, {we shall compute} all the critical values of the function. This is a variational characterization of eigenvalues, which was discovered in {connection} with problems of physics \cite{HORN,BHATIA}.
\begin{prop}\label{CRITICAL}The critical values of the function $R(S,\mathbf{v})$ are the {right} eigenvalues $ t_j$ of $S$. The index of $ t_j$ equals $\sum\nolimits_{i<j} l_i$, where $l_i$ is the multiplicity of $ t_i$. \end{prop} \begin{proof}
{Even though it is possible to do a direct computation by using coordinates, we shall give a more synthetic proof.}
By differentiating the function {$R=R(S,\mathbf{v})\colon \mathbb{H}\setminus \{\mathbf{0}\} \to \mathbb{R}$} we obtain
\begin{align*} R_{*\mathbf{v}}(\mathbf{w})&=\frac{1}{\vert \mathbf{v} \vert^4}\left((\mathbf{w}^*S\mathbf{v}+\mathbf{v}^*S\mathbf{w})\vert\mathbf{v}\vert^2-(\mathbf{v}^*S\mathbf{v})(\mathbf{w}^*\mathbf{v}+\mathbf{v}^*\mathbf{w})\right)\\ &=\frac{2}{\vert\mathbf{v}\vert^4}\left(\Re(\mathbf{v}^*S\mathbf{w})\vert \mathbf{v} \vert^2-(\mathbf{v}^*S\mathbf{v})\Re(\mathbf{v}^*\mathbf{w})\right) \\ &=\frac{2}{\vert \mathbf{v}\vert^4}\left((S\mathbf{v}\cdot\mathbf{w})\vert \mathbf{v}\vert^2-(\mathbf{v}^*S\mathbf{v})( \mathbf{v}\cdot\mathbf{w})\right)\\ &=\frac{2}{\vert \mathbf{v} \vert^2}\left(( S\mathbf{v}\cdot \mathbf{w})-R(S,\mathbf{v})( \mathbf{v}\cdot \mathbf{w})\right)\\ &=\frac{2}{\vert \mathbf{v} \vert^2}\left( S\mathbf{v}-R(S,\mathbf{v})\mathbf{v}\right)\cdot\mathbf{w}, \end{align*} {where $(\cdot)$ represents the scalar product} in $\mathbb{R}^{4n}$.
Hence the gradient of $R(S,\mathbf{v})$ is $$G_\mathbf{v}=\frac{2}{\vert \mathbf{v} \vert^2}\left(S\mathbf{v}-R(S,\mathbf{v})\mathbf{v}\right).$$
{Since $R(S,\mathbf{v}/\vert\mathbf{v}\vert)=R(S,\mathbf{v})$}, we can assume that $\vert \mathbf{v} \vert=1$. Let us denote by $h$ the restriction of the Rayleigh function to the sphere $S^{4n-1}\subset \mathbb{H}^n$ of unitary vectors. We check that $G_\mathbf{v} \perp \mathbf{v}$, because $${\frac{1}{2}}\langle \mathbf{v},G_\mathbf{v} \rangle=\mathbf{v}^*(S\mathbf{v}-R(S,\mathbf{v})\mathbf{v})=\mathbf{v}^*S\mathbf{v}-R(S,\mathbf{v})\vert \mathbf{v} \vert^2=0.$$ Hence, $\mathbf{v}\cdot G_\mathbf{v}=\Re\langle \mathbf{v},G_\mathbf{v} \rangle=0$ and $G_\mathbf{v}$ is tangent to the sphere for the scalar product, so it is also the gradient of the restriction $h$.
It follows that the point $\mathbf{v}$ is critical (both for the function and its restriction) if and only if $S\mathbf{v}=R(S,\mathbf{v})\mathbf{v}$, that is, $\mathbf{v}$ is an eigenvector of the eigenvalue {$R(S,\mathbf{v})= t\in\mathbb{R}$}.
This will allow {us} to compute the Hessian \begin{align*} H_\mathbf{v}(\mathbf{w})&=(G_\mathbf{v})_*(\mathbf{w})\\ & =\frac{2}{\vert \mathbf{v} \vert^4}{\big(S\mathbf{w}-(R_{*\mathbf{v}}(\mathbf{w})\mathbf{v}+R(S,\mathbf{v})\mathbf{w})\vert \mathbf{v} \vert^2}\\ & \quad - (S\mathbf{v}-R(S,\mathbf{v})\mathbf{v})(\mathbf{w}^*\mathbf{v}+\mathbf{v}^*\mathbf{w}) \big) \\ &={\frac{2}{\vert \mathbf{v} \vert^2}}\big(S\mathbf{w}-(R_{*\mathbf{v}}(\mathbf{w})\mathbf{v}+R(S,\mathbf{v})\mathbf{w})\big)\\ &={\frac{2}{\vert \mathbf{v} \vert^2}}\big(S\mathbf{w}-R(S,\mathbf{v})\mathbf{w}\big)\\ &={ \frac{2}{\vert \mathbf{v} \vert^2}(S\mathbf{w}- t\mathbf{w})=\frac{2}{\vert \mathbf{v} \vert^2}(S- tI)\mathbf{w}. } \end{align*}
Moreover, if $\vert\mathbf{v}\vert=1$ and $\mathbf{w}\in T_\mathbf{v}{S^{4n-1}}$, that is, $\mathbf{v}\cdot\mathbf{w}= \Re(\mathbf{v}^*\mathbf{w})=0$, then {$$\mathbf{v}\cdot H_\mathbf{v}(\mathbf{w})=\Re (\mathbf{v}^*H_\mathbf{v}(\mathbf{w}))=2\Re(\mathbf{v}^*S\mathbf{w})=2\Re(\mathbf{w}^*S\mathbf{v}) =2\mathbf{w}\cdot \mathbf{v} t=0.$$} Hence $H_\mathbf{v}(\mathbf{w})\in T_\mathbf{v}{S^{4n-1}}$, and $H_\mathbf{v}(\mathbf{w})$ is also the Hessian of the restriction $h$.
Now, we compute the index of $ t=R(S,\mathbf{v})$, which is the number of negative eigenvalues of the Hessian at the critical point $\mathbf{v}$. If $\mu$ is an eigenvalue of the Hessian, we have $H_\mathbf{v}(\mathbf{w})=\mathbf{w}\mu$, for some $\mathbf{w}\neq \mathbf{0}$, that is, $$2(S- t I)\mathbf{w}=\mathbf{w}\mu,$$ so we are looking for the eigenvalues $\mu$ of the {\em shifted} matrix {$S- t I$}. Hence, the eigenvalues of the Hessian are $\mu_k= 2(t_k- t)$, {twice} the differences with the other {right} eigenvalues $ t_k$ of $S$, and the result follows. \end{proof} For instance, the {minimum} $ t_1$ has index $0$. The maximum $ t_n$ has index $n-l_n$.
\subsection{The min-max principle for eigenvalues}\label{MINMAXSECT} As in the complex case, it is possible to refine Proposition \ref{CRITICAL}. Now, we constrain $\mathbf{v}$ to a $k$-dimensional subspace, in order to obtain a quaternionic version of the so-called min-max Courant-Fischer-Weyl theorem (\cite[Theorem 4.2.6]{HORN}).
Fix some $k\in\{1,\dots,n\}$ and let $\mathbb{E}\subset \mathbb{H}^n$ be any {$\mathbb{H}$-subspace} of dimension $k$. We shall denote $\mathbb{E}^*=\mathbb{E}\setminus \{\mathbf{0}\}$.
Let $\{\mathbf{u}_1,\dots,\mathbf{u}_n\}$ be again an orthonormal basis of eigenvectors. We have that $$\mathbb{E} \cap \langle \mathbf{u}_k,\dots, \mathbf{u}_n \rangle \neq \{\mathbf{0}\},$$ due to dimension reasons. Then there exists $\mathbf{v}=\sum_{j=k}^n\mathbf{u}_jx_j\in\mathbb{E}^*$, and its Rayleigh quotient is $$R(S,\mathbf{v})= \frac{\sum_{j=k}^n t_j\vert x_j\vert^2}{\sum_{j=k}^n\vert x_j\vert^2}\geq t_k,$$ because $ t_j\geq t_k$ for all $j\geq k$.
This implies that $$M_\mathbb{E}:=\max\{R(S,\mathbf{v})\colon \mathbf{v}\in \mathbb{E}^*\}\geq t_k.$$ Since this is true for all $\mathbb{E}$ we conclude that $\min\{M_\mathbb{E}\colon \dim \mathbb{E}=k\}\geq t_k$. This is in fact an equality: \begin{teo}\label{MINMAXTEO}$ t_k=\min\{M_\mathbb{E}\colon \dim\mathbb{E}=k\}$. \end{teo}\begin{proof} It only remains to prove that $M_\mathbb{E}\leq t_k$ for some $\mathbb{E}$ with $\dim \mathbb{E}=k$. Take $\mathbb{E}=\langle \mathbf{u}_1,\dots,\mathbf{u}_k\rangle$. Then for all $\mathbf{v}\in\mathbb{E}^*$ we have $$R(S,\mathbf{v})=\frac{\sum_{j=1}^k t_j\vert x_j\vert^2}{\sum_{j=1}^k \vert x_j\vert^2}\leq t_k,$$ because $ t_j\leq t_k$ if $j\in\{1,\dots,k\}$. \end{proof}
Analogously, if we denote $$m_\mathbb{E}{:=}\min\{R(S,\mathbf{v})\colon \mathbf{v}\in\mathbb{E}^*\},$$
we have \begin{cor}\label{MAXMINCOR}
${t_{n-k+1}}=\max\{m_\mathbb{E}\colon \dim\mathbb{E}=k\}$. \end{cor} The proof is immediate if we take into account that $ t_{n-k+1}(S)=- t_k(-S)$.
As {particular cases}, for $k=1,n$ we have Proposition \ref{MINMAXPROP}.
\subsection{Mean value} We want to compute the mean value of the Rayleigh function over the sphere $\mathbb{S}^{N-1}\subset \mathbb{H}^n$, where $N=4n$.
Let $h\colon \mathbb{S}^{N-1}\to \mathbb{R}$ be the restriction given by $h(\mathbf{v})= \mathbf{v}^* S \mathbf{v}$, $ \vert \mathbf{v} \vert=1$. In order to compute the mean value of $h$, $$\mean{h}=\frac{1}{\mathrm{Vol} (\mathbb{S}^{N-1})}\int\nolimits_{\mathbb{S}^{N-1}}\mathbf{v}^*S\mathbf{v}\, \mathrm{d} \mathbf{v},$$ one can consider hyper-spherical coordinates and {undertake a long direct computation}. Another proof follows {by using} Pizzetti's formula ({\cite[Formula (11.3)]{GRAY}}). However, in order to have a similar result for the variance, we shall use {\em moments}, as explained in
{Gray's book \cite[Appendix A.2]{GRAY}.}
{For any integrable function $F=F( u_1,\dots, u_N)\colon \mathbb{R}^N \to \mathbb{R}$ we denote by $\mean{F}$ the average of $F$ over the unit sphere $\mathbb{S}^{N-1}\subset \mathbb{R}^N$.
{\begin{lema}\label{PARTICULAR} {\cite[Theorem 1.5]{GRAY}.} $$\mean{ u_i^2}=\frac{1}{N}, \quad \mean{ u_i^4}=\frac{3}{N(N+2)}, \quad \mean{ u_i^2 u_j^2}=\frac{1}{N(N+2)} \text{\ if\ } i\neq j.$$ \end{lema} }
\begin{teo}The expected value of the Rayleigh quotient $R(S,\mathbf{v})$ over the sphere $\mathbb{S}^{4n-1}\subset \mathbb{H}^n$ equals $$\mean{h}=\frac{1}{n}\mathop{\mathrm{Trace}} S.$$ \end{teo}
\begin{proof} According to Proposition \ref{WEIGHT}, $h(\mathbf{v})={\sum_{j=1}^n } t_j\vert x_j\vert^2$, where $x_j\in\mathbb{H}$. Since each $x_j$ has four real coordinates, our function can be written as $$h( u_1,\dots, u_N)= t_1( u_1^2+\cdots+ u_4^2)+\cdots+ t_n( u_{N-3}^2+\cdots+ u_N^2), \quad N=4n,$$ where $ u_1^2+\cdots+ u_N^2=1$.
Then, by {Lemma} \ref{PARTICULAR},
$$\mean{h} = 4 \frac{1}{N}( t_1+\cdots+ t_n)=\frac{1}{n}( t_1+\cdots+ t_n),$$ {which is the arithmetic mean of the eigenvalues}, and the result follows. \end{proof} A similar computation gives us the relationship between the second central moment of $h$ and the variance {of} the eigenvalues. {We denote by $$\mu=\frac{1}{n}\sum_{i=1}^n t_i$$ the mean of the eigenvalues and by $$\sigma^2=\frac{1}{n}\sum_{i=1}^n( t_i-\mu)^2$$ its variance.}
\begin{teo} {The second central moment of the Rayleigh quotient over the sphere {is proportional to} the variance of the eigenvalues, $$\mean{(h-\mu)^2}=\frac{1}{2n+1}\sigma^2.$$} \end{teo}
\begin{proof} The proof follows from the well-known identity $\mean{(h-\mu)^2}=\mean{h^2}-\mu^2$, and Lemma \ref{PARTICULAR}. \end{proof} }
\section{Left eigenvalues}\label{SECTLEFT} As mentioned in Example \ref{EXRIGHT}, {the matrix $M-q I$ can be invertible} for a right eigenvalue $q$ of $M$. {This motivates the following definition.}
\begin{defi} The quaternion $\lambda\in \mathbb{H}$ is a {\em left eigenvalue} of the matrix $M\in\mathbb{H}^{n\times n}$ if the matrix $M-\lambda I_n$ is not invertible. \end{defi}
{The existence of left eigenvalues for any quaternionic matrix was proved by Wood in \cite{WOOD}.} {Notice that the left eigenvalues of a matrix are not invariant by a change of basis {\cite[Example 7.1]{ZHANG1997}.}}
If $\lambda$ is a left eigenvalue of $M$, the set of vectors $\mathbf{v}\in\mathbb{H}^n$ such that $M\mathbf{v}=\lambda\mathbf{v}$ is a right $\mathbb{H}$-vector subspace $V(\lambda)\neq \{\mathbf{0}\}$ of $\mathbb{H}^n$. A non-null element $\mathbf{v}\neq 0$ of $V(\lambda)$ is called a {\em $\lambda$-eigenvector}. By dividing it by its norm we can always assume that
$\vert \mathbf{v} \vert=1$.
Clearly, if a right eigenvalue is a real number, then it is also a left eigenvalue. So {the problem is} to determine the non-real left eigenvalues of $M$, if any.
\subsection{$n=2$} The case $n=2$ was completely solved by Huang and So in \cite{HUANG-SO}. Let $M=\begin{bmatrix}a & b \cr c & d\cr\end{bmatrix}$. If $b=0$ or $c=0$ then the left eigenvalues are $a,d\in\mathbb{H}$, as it is straightforward to check by hand. When $bc\neq 0$, Huang and So gave explicit formulas for the left eigenvalues. In particular they proved (\cite[Theorems 2.3, {3.1 and 3.2}]{HUANG-SO}) the following result:
\begin{teo}\label{HUANGSO} If $bc\neq 0$, \begin{enumerate} \item the left eigenvalues of $M$ are given by $\lambda=a+bx$, where $x$ is any solution of the quadratic equation
$x^2+a_1x+a_0=0$,
with $ a_1 = b^{-1}(a-d)$ and $a_0 = -b^{-1}c$; \item the matrix $M$ has either one, two, or infinitely many left eigenvalues; \item the infinite case happens if and only if $a_0\in\mathbb{R}$, $a_1\in\mathbb{R}$, and $\Delta=a_1^2-4a_0<0$; \item in the latter case, the left eigenvalues can be written as \begin{equation}\label{EIGEN} \lambda= \frac{1}{2}(a+d+b\xi), \quad \Re(\xi)=0, \vert \xi\vert^2 = \vert \Delta\vert. \end{equation} \end{enumerate} \end{teo}
\subsection{Left eigenvalues of $2\times 2$ Hermitian matrices}\label{LEFTHERMITE} {We apply the previous results to the Hermitian case}. If $S$ is a $2\times 2$ Hermitian matrix, the condition $S=S^*$ means that $S=\begin{bmatrix}s & b \cr b^* & s^\prime\cr\end{bmatrix}$, where $s, s^\prime\in\mathbb{R}$. If $b=0$, the matrix $S$ has {only} two left eigenvalues, the real numbers $s, s^\prime$, {which are also right eigenvalues.}
\begin{prop}\label{MAINDOS} {When $b\neq 0$, } \begin{enumerate} \item the matrix $S$ has two real eigenvalues (which may be different or not). They can be computed as the roots of the real equation \begin{equation}\label{MOOREQ}(s- t)( s^\prime- t)-\vert b\vert^2=0. \end{equation} \item it has {also} non-real left eigenvalues if and only if $\Re(b)=0$ and $s= s^\prime$. In this case, the left eigenvalues are given by the formula \begin{equation}\label{TODOS} \lambda=s+b\omega, \quad \Re(\omega)=0, \vert \omega\vert=1. \end{equation} \end{enumerate}
\end{prop}
\begin{proof} 1. Notice that the discriminant of Equation \eqref{MOOREQ} is $$\mathrm{disc}=(s+ s^\prime)^2-4(s s^\prime-\vert b\vert^2)=(s- s^\prime)^2+4\vert b \vert^2\geq 0.$$ {It is easy to check that the two real roots
are eigenvalues. }
2. Since {there are already two real eigenvalues, we only have to consider the infinite case of Theorem \ref{HUANGSO}.} If $b\neq0$, Huang-So's conditions are \begin{align} a_1=&b^{-1}(s- s^\prime)=\frac{b^*}{\vert b\vert^2}(s- s^\prime)\in\mathbb{R},\label{A1}\\ a_0=&-b^{-1}b^*=-\frac{(b^*)^2}{\vert b \vert^2}\in\mathbb{R} \label{A2},\\ a_1^2-4a_0=&\frac{(b^*)^2}{\vert b \vert^2}\left[\frac{(s- s^\prime)^2}{\vert b\vert^2}+4\right]<0,\label{A3} \end{align} which imply $(b^*)^2\in\mathbb{R}$, by \eqref{A2}, and $(b^*)^2<0$, by \eqref{A3}.
Hence $b^2\in\mathbb{R}$ and $b^2<0$.
This implies $\Re(b)=0$, $b^*=-b$ and $b^2=-\vert b \vert^2$. But then $$\Re(a_1)=\frac{s- s^\prime}{\vert b \vert^2}\Re(b^*)=0,$$ so $s- s^\prime=0$ by \eqref{A1}.
Since $a_1=0$, $a_0=1$ and $\Delta=a_1^2-4a_0=-4$,
Formula \eqref{EIGEN} implies \eqref{TODOS}. \end{proof}
\begin{nota}\label{SEPARO} Notice that in the latter case, among the infinite left eigenvalues there are two real ones. {In fact, the two solutions of Equation \eqref{MOOREQ} are $t = s\pm \vert b \vert$. They correspond to Formula \eqref{TODOS} with $\omega=\pm \frac{b}{\vert b \vert}$.} \end{nota}
\begin{ejem}\cite[Example 2.5]{HUANG-SO} The matrix $S=\begin{bmatrix}0&1+\mathbf{i}\cr 1-\mathbf{i}&0\cr\end{bmatrix}$ has only two left eigenvalues, {$\lambda=\pm\sqrt{2}$}, which are also its right eigenvalues. \end{ejem}
\begin{ejem}\cite[Example 5.3]{ZHANG1997} Let $S=\begin{bmatrix}0&\mathbf{i}\cr -\mathbf{i}&0\cr\end{bmatrix}$. The real eigenvalues are $\pm 1$. The left eigenvalues are ${\lambda}=\mathbf{i} \,\omega$, where $\Re(\omega)=0$ and $\vert \omega\vert=1$, that is, ${\lambda} =t+y\mathbf{j}+z\mathbf{k}$, with {$t,y,z\in\mathbb{R}$,} $t^2+y^2+z^2=1$. \end{ejem}
\subsection{Relationship with the Rayleigh quotient } The previous section gives an idea of the difficulty of computing the left eigenvalues of a given (Hermitian) matrix. {In fact, no general method is known}. In this section we shall give a new relationship between left and right eigenvalues.
Let $S$ be a Hermitian matrix. Let $\lambda\in\mathbb{H}$ be a left eigenvalue of $S$ and let $\mathbf{v}$ be a $\lambda$-eigenvector, with $\vert \mathbf{v} \vert=1$. Then $R(S,\mathbf{v})=\mathbf{v}^*\lambda \mathbf{v}$ is a real number.
Notice that {$\mathbf{v}^*\overline\lambda \mathbf{v}=\mathbf{v}^*\lambda\mathbf{v}$} even if $\overline \lambda\neq \lambda$.
\begin{lema}\label{REALPART} $R(S,\mathbf{v})=\Re(\lambda)$, the real part of $\lambda$. \end{lema}
\begin{proof}If the coordinates of $\mathbf{v}$ with respect to an orthonormal basis are $x_1,\dots,x_n$, then since $\mathbf{v}^*S\mathbf{v}=\mathbf{v}^*\lambda\mathbf{v}$ is real, we have
\begin{align*}\mathbf{v}^*\lambda\mathbf{v}&=\sum \overline x_i \lambda x_i=\Re(\sum \overline x_i \lambda x_i)= \sum \Re(\overline x_i \lambda x_i)\\ &=\sum \Re(\lambda x_i\overline x_i)=\sum \Re(\lambda\vert x_i \vert^2)=\Re(\lambda)(\sum \vert x_i\vert^2)=\Re(\lambda). \qedhere \end{align*}
\end{proof}
Then, from Proposition \ref{MINMAXPROP} it follows that \begin{prop}\label{SIMPLE} If $\lambda$ is a left eigenvalue {of $S$} and $\mathbf{v}$ is a $\lambda$-eigenvector, then \begin{equation}\label{MINMAXZHANG}
t_1\leq R(S,\mathbf{v})=\Re(\lambda)\leq t_n. \end{equation} \end{prop}
The next theorem refines the latter formula, as an application of the min-max theorems of Section \ref{MINMAXSECT}. {It gives a new relationship between left and right eigenvalues.}
\begin{teo}\label{HERMITMAIN} Let $\lambda$ be a left eigenvalue of the Hermitian matrix $S$, with real eigenvalues $ t_1\leq\dots\leq t_n$. If the $\lambda$-eigenspace $V(\lambda)$ verifies
$\dim V(\lambda)\geq k$ then $$ t_k \leq \Re(\lambda) \leq t_{{n-k+1}}.$$ \end{teo}
\begin{proof} Let $\mathbb{E}=V(\lambda)$. By Lemma \ref{REALPART}, the Rayleigh function is constant on $\mathbb{E}$, so $m_\mathbb{E}=\Re(\lambda)=M_\mathbb{E}$. Let $\dim V(\lambda)=j\geq k$, then, by Theorem \ref{MINMAXTEO} and Corollary \ref{MAXMINCOR}, we have $$ t_k \leq t_j \leq \Re(\lambda) \leq t_{n-j+1} \leq t_{n-k+1}. \qedhere$$ \end{proof}
Notice that the inequality \eqref{MINMAXZHANG} is a particular case, since $\dim V(\lambda)\geq 1$.
{\begin{nota} Using orthonormal coordinates one can prove that $\vert \lambda \vert \leq \max \vert t_i\vert$, where the right term is the (right) spectral radius of the Hermitian matrix \cite{ZHANG2007}. \end{nota}}
\begin{ejem}For the Hermitian matrices $S=\begin{bmatrix}s & b \cr b^* & s\cr\end{bmatrix}$ with non-real left eigenvalues {(that is, with $\Re(b)=0$, see Section \ref{LEFTHERMITE}),} we have that $\Re(\lambda)=s$ by \eqref{TODOS}, and $\vert \lambda \vert^2=s^2 + \vert b \vert^2$, while $ t_1=s-\vert b\vert$ and $ t_2=s+\vert b \vert$. \end{ejem}
{The next Corollary shows the influence of the left eigenvalues on the right ones.}
{
\begin{cor}\label{SIZE}Assume that the $n\times n$ Hermitian matrix $S$ has a left eigenvalue $\lambda$ such that $k=\dim V(\lambda)> \ceil{n/2}$. Then, the right eigenvalues $ t_{n-k+1}=\dots= t_k$ have multiplicity $l_k\geq 2k-n$ and they are equal to $\Re(\lambda)$.
\end{cor} }
{
\begin{proof}
By Theorem \ref{HERMITMAIN}, we have $ t_k\leq \Re(\lambda)\leq t_{n-k+1}$. Moreover, $k>\ceil{n/2}$ implies $n-k+1\leq k$, hence $ t_{n-k+1}\leq t_k$. That means that $ t_{n-k+1}= t_k$,
hence $\Re(\lambda)= t_j= t_k$, for all $n-k+1\leq j \leq k$. This implies that the multiplicity $l_k$ of $ t_k$ is at least $2k-n$.
\end{proof} }
\begin{ejem}Let $S$ be a Hermitian matrix of order $5$, diagonalizable to $\mathrm{diag}[ t_1,\dots, t_5]$. Assume that $S$ has some left eigenvalue $\lambda$ with $\dim V(\lambda)=4$. Then $\Re(\lambda)= t_4= t_3= t_2$ has at least multiplicity $3$.
\end{ejem}
\begin{ejem}Let $S$ be a Hermitian matrix of order $6$, diagonalizable to $\mathrm{diag}[ t_1,\dots, t_6]$. Assume that $S$ has some left eigenvalue $\lambda$ with $\dim V(\lambda)=4$. Then $\Re(\lambda)= t_4= t_3$ has at least multiplicity $2$.
\end{ejem}
\section{Symplectic matrices}\label{SECTSYMPL} In this section we extend our results to symplectic matrices {with quaternionic coefficients}.
Recall that the $n\times n$ matrix $A$ is {\em symplectic} if $A^*A=I_n$. Its right eigenvalues have norm $1$, because if {$q$ is a right eigenvalue}, $A\mathbf{u}=\mathbf{u} q$, with $\mathbf{u}\neq \mathbf{0}$, then $$\vert \mathbf{u} \vert^2=\langle \mathbf{u},\mathbf{u}\rangle=\langle A\mathbf{u},A\mathbf{u}\rangle =\langle \mathbf{u} q,\mathbf{u} q\rangle =\overline q \mathbf{u}^*\mathbf{u} q=\vert q\vert^2 \vert \mathbf{u}\vert^2,$$ so $\vert q\vert=1$. Moreover, the matrix is diagonalizable \cite[Theorem 5.3.6]{RODMAN}.
Analogously, the left eigenvalues also have norm 1, because {if $\lambda$ is a left eigenvalue,} $A\mathbf{v}=\lambda\mathbf{v}$, with $\mathbf{v}\neq\mathbf{0}$, {then} $$\vert \mathbf{v} \vert^2=\langle \mathbf{v},\mathbf{v}\rangle=\langle A\mathbf{v},A\mathbf{v}\rangle =\langle \lambda\mathbf{v},\lambda\mathbf{v}\rangle =\mathbf{v}^*\overline \lambda \lambda \mathbf{v}={\vert \lambda \vert^2 \, \vert \mathbf{v} \vert^2,}$$ {hence $\vert \lambda \vert=1$.}
\subsection{$n=2$}For $n=2$, the authors completely characterized in \cite{MP2009} the symplectic matrices which have an infinite number of left eigenvalues.
\begin{teo}\label{DOSPORDOS}
The only $2\times 2$ symplectic matrices with an infinite number of left eigenvalues are those of the form \begin{equation}\label{MATRIX} \begin{bmatrix} r\cos\theta & -r\sin\theta\cr r\sin\theta & r\cos\theta \cr \end{bmatrix},
\quad \quad r\in\mathbb{H}, \vert r \vert=1, \quad \sin\theta\neq 0. \end{equation} \end{teo}
\begin{prop}\label{SYMPLCASE} For the matrix in \eqref{MATRIX}: \begin{enumerate} \item {The right eigenvalues are the similarity classes of {$q=r(\cos\theta\pm \sin\theta\, \rho)$}, where $\rho$ is {any of the quaternions} such that $\Re(\rho)=0$, $\vert \rho \vert=1$, and $r=s+t\rho$, with $s,t\in \mathbb{R}$.}
\item The left eigenvalues are $\lambda=r(\cos\theta+\sin\theta\,\omega)$, where $\omega$ is an arbitrary quaternion such that $\Re(\omega)=0$ and $\vert \omega\vert=1$. \end{enumerate} \end{prop}
\begin{proof} {Part (1) follows from definition, by checking the eigenvectors $(\pm \rho,1)^T$, and taking into account that $r$ and $\rho$ commute, and that $\rho^2=-1$. }
Part (2) follows from Proposition \ref{MAINDOS}. {Notice that there are two left eigenvalues which are right eigenvalues.} \end{proof}
\subsection{Rayleigh quotient of a symplectic matrix}
The Rayleigh quotient can be defined for any non-Hermitian matrix. Let us assume that $A$ is symplectic.
\begin{defi} The Rayleigh quotient of $A$ is the real function $$R(A,\mathbf{v})=\frac{\Re(\mathbf{v}^*A\mathbf{v})}{\vert \mathbf{v} \vert^2}.$$ \end{defi} As before, we shall consider its restriction $h_A$ to the sphere $S^{4n-1}\subset \mathbb{H}^n$.
\begin{lema}Let $q$ be a right eigenvalue of the symplectic matrix $A$. If $\mathbf{u}$ is a {$q$-eigenvector} then $\overline q$ is {a right eigenvalue} of $A^*$, and $\mathbf{u}$ is a $\overline q$-eigenvector. \end{lema}
\begin{proof}We have $$A\mathbf{u}=\mathbf{u} q \Rightarrow \mathbf{u}=A^{-1} (\mathbf{u} q)=(A^*\mathbf{u})q \Rightarrow \mathbf{u} q^{-1}=A^*\mathbf{u}.$$ Moreover $\vert q\vert=1$ implies $q^{-1}=\overline q${.} \end{proof}
Let $S=\frac{1}{2}(A+A^*)$ be the Hermitian part of $A$.
\begin{cor}\label{PASO}If $q$ is {a right eigenvalue} of $A$ and $\mathbf{u}$ is a $q$-eigenvector, then $\Re(q)$ is an eigenvalue of $S$, and $\mathbf{u}$ is an $\Re(q)$-eigenvector. \end{cor}
\begin{cor}The Rayleigh functions of $A$ and $S$ are equal, $h_A=h_S$. \end{cor}
\begin{proof} We have $$h_S(\mathbf{v})=\mathbf{v}^*S\mathbf{v}=\frac{1}{2}(\mathbf{v}^*A\mathbf{v}+\mathbf{v}^*A^*\mathbf{v})= \Re(\mathbf{v}^*A\mathbf{v})=h_A(\mathbf{v}). \qedhere$$ \end{proof}
\begin{cor}\label{REALEIGEN} \begin{enumerate} \item The $n$ {right} {eigenvalues} of $S$ are the real parts {$t_j=\Re(q_j)$} of the $n$ similarity classes $[q_1],\dots,[q_n]$ of the {right} eigenvalues of $A$. \item The critical values of $h_A$ are $\Re(q_1),\dots,\Re(q_n)$. \end{enumerate} \end{cor}
\subsection{Left eigenvalues}
Now, assume that $\lambda$ is a left eigenvalue of the symplectic matrix $A$. {We know that there may exist infinitely many of them}.
\begin{prop} If $\mathbf{v}$ is a {$\lambda$-eigenvector}, that is, $A\mathbf{v}=\lambda\mathbf{v}$, then $h_A(\mathbf{v})=\Re(\lambda)$. \end{prop}
\begin{proof}The proof is identical to that of {Lemma} \ref{REALPART}. Since $A$ is normal, it is diagonalizable and we can take an orthonormal basis $\mathbf{u}_1\dots,\mathbf{u}_n$ of eigenvectors \cite[Theorem 5.3.6]{RODMAN}. By taking coordinates $\mathbf{v}=\sum_j \mathbf{u}_jx_j$, we can assume that $\vert \mathbf{v} \vert^2=\sum_j\vert x_j\vert^2=1$, then $$h_A(\mathbf{v})=\Re(\mathbf{v}^*\lambda\mathbf{v})=\Re(\sum_j \overline x_j \lambda x_j)=\sum_j\Re(\lambda\vert x_j\vert^2)=\Re(\lambda). \qedhere$$
\end{proof}
\begin{nota}Since $\vert \lambda \vert=1$, notice that $A\mathbf{v}=\lambda\mathbf{v}$ implies $A^*(\lambda\mathbf{v})=\overline \lambda (\lambda\mathbf{v})$, hence $\lambda\mathbf{v}$ is a $\overline \lambda$-eigenvector of $A^*$, but we cannot conclude anything about the eigenvalues of $S$, as Example \ref{FINAL} shows. \end{nota}
\begin{cor}\label{XX}Let $A$ be a symplectic matrix whose right eigenvalues are organized in $n$ similarity classes $[q_1],\dots,[q_n]$, ordered in such a way that $\Re(q_1)\leq \dots\leq \Re(q_n)$. Let $\lambda$ be a left {eigenvalue} of $A$, such that its eigenspace verifies $\dim V(\lambda)\geq k$. {Then}
$$\Re(q_k)\leq \Re(\lambda) \leq \Re(q_{n-k+1}).$$ \end{cor}
\begin{proof} Take $\mathbf{v}\in V_A(\lambda)$, so $\Re(\lambda)=h_A(\mathbf{v})=h_S(\mathbf{v})$. This does not mean that $\lambda$ is a left eigenvalue of $S$ (see Example \ref{FINAL}). But $\Re(\lambda)=h_S(\mathbf{v})$ is the constant value of $h_S$ in the subspace $\mathbb{E}=V_A(\lambda)$, hence $m_\mathbb{E}=\Re(\lambda)=M_\mathbb{E}$. If $\dim \mathbb{E}=j\geq k$, then $$\Re(q_k)\leq \Re(q_j)\leq \Re(\lambda)\leq \Re(q_{n-j+1})\leq \Re(q_{n-k+1}),$$ from Theorem \ref{MINMAXTEO}, Corollary \ref{MAXMINCOR} {and Corollary \ref{REALEIGEN}}. \qedhere \end{proof}
{Remember that $\vert \lambda\vert=1$.}
\begin{ejem}\label{MATRIZJ} Let $ A=\frac{\sqrt{2}}{2}\mathbf{j} \begin{bmatrix} 1&-1\cr 1 &1\cr\end{bmatrix}, $ as in Proposition \ref{SYMPLCASE}, with $r=\mathbf{j}$ and $\theta=\pi/4$. We have $\rho=\mathbf{j}$, and the right eigenvalues are {the similarity classes of} $$q=\cos\theta\,\mathbf{j} \pm \sin\theta\mathbf{j}^2=\frac{\sqrt{2}}{2}(\pm 1+\mathbf{j}),$$ whose real part is $\Re(q)=\pm \sqrt{2}/{2}$.
On the other hand, the left eigenvalues are $$\lambda=\cos\theta\,\mathbf{j}+\sin\theta\, \omega\mathbf{j}=\frac{\sqrt{2}}{2}(1+\omega)\mathbf{j},$$ {where $\Re(\omega)=0$ and $\vert \omega \vert=1$}, {and their real part is $\Re(\lambda)=\frac{\sqrt{2}}{2}\Re(\omega\mathbf{j})$.}
Then, since $\vert \omega\mathbf{j}\vert=1$, it is true that $-1\leq \Re(\omega\mathbf{j})\leq 1$, hence $\Re(q_1)\leq \Re(\lambda) \leq \Re(q_2)$. \end{ejem}
\begin{ejem}\label{FINAL} For the symplectic matrix $A$ in Example \ref{MATRIZJ}, the Hermitian part is $S=\frac{\sqrt{2}}{2}\, \mathbf{j}\,\begin{bmatrix} 0&-1\cr 1 & 0\cr \end{bmatrix}$. Its right eigenvalues are $q=\pm \frac{\sqrt{2}}{2}$,
{which are the real parts} of those of $A$, as stated in Corollary \ref{PASO}.
Its left eigenvalues are, by \eqref{TODOS}, $$\lambda=\frac{\sqrt{2}}{2}\mathbf{j}\omega, \quad \Re(\omega)=0, \vert \omega\vert=1,$$ which are different from those of $A$. \end{ejem}
{Now, we shall prove a result, analogous to Corollary \ref{SIZE}, showing that the existence of left eigenvalues with a high-dimensional space of eigenvectors depends on the multiplicity of the right eigenvalues.}
\begin{cor}Let $A$ be {an} $n\times n$ symplectic matrix. Assume that there is a left eigenvalue $\lambda$ such that $k=\dim V(\lambda) >\ceil{n/2}$. Then, {the similarity classes of right eigenvalues $[q_{n-k+1}]=\dots=[q_k]$ have multiplicity $l_k\geq 2k-n$, and they are equal to the similarity class $[\lambda]$ of $\lambda$.}\end{cor}
\begin{proof} Remember that the eigenvalue classes $[q_i]$ are ordered according to their real parts, as in Corollary \ref{XX}. From {that Corollary, we know that $$\Re(q_k)\leq \Re(\lambda) \leq \Re(q_{n-k+1}).$$ But $k>\ceil{n/2}$ implies $n-k+1\leq k$, hence $\Re(q_{n-k+1})\leq \Re(q_k)$. Then} $\Re(q_{n-k+1})=\dots=\Re(q_k)=\Re(\lambda)$. But since $\vert q_j\vert=1$, for all $j$, and $\vert \lambda \vert=1$, it follows that the similarity classes are equal, so we have $[q_{n-k+1}]=\dots=[q_k]=[\lambda]$. This proves the result. \end{proof}
\section*{Funding} The first two authors are partially supported by the MINECO and FEDER research project MTM2016-78647-P. The first author was partially supported by Xunta de Galicia ED431C 2019/10 with FEDER funds.
\end{document} | arXiv | {
"id": "2012.03621.tex",
"language_detection_score": 0.5526527762413025,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Dipole-dipole interaction between orthogonal dipole moments in time-dependent geometries}
\author{Sandra Isabelle \surname{Schmid}} \email{sandra.schmid@mpi-hd.mpg.de}
\author{J\"org \surname{Evers}} \email{joerg.evers@mpi-hd.mpg.de}
\affiliation{Max-Planck-Institut f\"ur Kernphysik, Saupfercheckweg 1, D-69117 Heidelberg, Germany}
\date{\today}
\begin{abstract} In two nearby atoms, the dipole-dipole interaction can couple transitions with orthogonal dipole moments. This orthogonal coupling accounts for a number of interesting effects, but strongly depends on the geometry of the setup. Here, we discuss several setups of interest where the geometry is not fixed, such as particles in a trap or gases, by averaging over different sets of geometries. Two averaging methods are compared. In the first method, it is assumed that the internal electronic evolution is much faster than the change of geometry, whereas in the second, it is vice versa. We find that the orthogonal coupling typically survives even extensive averaging over different geometries, albeit with qualitatively different results for the two averaging methods. Typically, one- and two-dimensional averaging ranges modelling, e.g., low-dimensional gases, turn out to be the most promising model systems. \end{abstract}
\pacs{42.50.Fx, 42.50.Lc, 42.50.Ct}
\maketitle
\allowdisplaybreaks
\section{Introduction}
Two nearby atoms can interact in an energy-transfer process via the vacuum where one of the atoms is de-excited whereas the other atom is
excited~\cite{book-agarwal,thiranumachandran,book-ficek, FiTa2002}. This dipole-dipole interaction has been studied in great detail, albeit mostly for the case of two-level atoms with parallel transition dipole moments~\cite{MaKe2003,chang,RuFiDa1995,Ja1993,jump,Fi1991,VaAg1992,entanglement,bargatin,AgPa2001,pra-geometry,breakdown,dfs,strong}. It is known to modify the collective system dynamics and thus virtually all observables considerably, as was also shown in a number of related experiments~\cite{experiment,experiment2,experiment3,hettich,exp-qdot,noel,experiment4}.
Recently, it was found that a new class of effects arises from the dipole-dipole coupling between transitions with orthogonal dipole moments~\cite{AgPa2001,pra-geometry,breakdown,dfs}. This coupling is somewhat surprising since for single-atom systems, only near-degenerate non-orthogonal transitions can be coupled via the vacuum~\cite{book-ficek}. But in real atoms, e.g., transitions from one state to different Zeeman-sublevels of a different electronic state typically have orthogonal transition dipole moments. Therefore, the vacuum-coupling of such transitions in single atoms usually does not occur, which is unfortunate, since the corresponding couplings are known to give rise to many fascinating applications~\cite{book-ficek}.
In contrast, orthogonal transition dipole moments in different atoms do interact via the vacuum, with coupling coefficients dependent on the relative alignment of the atoms, see Fig.~\ref{fig-system}. It was shown in~\cite{AgPa2001} that this interaction creates coherences involving excited states that are not driven by any laser fields. This observation can be generalized by studying the two-particle master equation under rotations of the inter-atomic distance vector~\cite{breakdown}. It was found that because of the orthogonal couplings, typically complete Zeeman manifolds have to be considered in modelling the dipole-dipole interaction of two atoms, such that the usual few-level approximation is no longer possible.
The orthogonal couplings crucially influence the system dynamics. For example, the long-time dynamics of a two-atom system can strongly depend on the relative orientation of the two atoms~\cite{pra-geometry}. For a suitable laser and detector setup, undampened periodic oscillations in the fluorescence intensity are observed for some relative orientations of the two atoms, whereas the system evolves into a stationary steady state for other relative orientations. The reason for this geometry-dependence is the structure of the dipole-dipole constants. If the coupling of orthogonal transition dipole moments vanishes, then also the oscillations in the long-time limit vanish.
\begin{figure}
\caption{(Color online) In the left subfigure the geometry of our system is shown. The inter-atomic distance vector $\bm{r}_{12}$ is parameterized by the angles $\theta$ and $\phi$ and the length $r_{12}$. Atom A is located in the origin and atom B at $\bm{r}_{12}$. Each atom is a three-level system in $\Lambda$ configuration (b). The two lower states have an energy separation $\delta$. $\Omega_1$ ($\Omega_2$) is the Rabi frequency of the driving laser field coupling to transition $1\leftrightarrow 3$ ($2\leftrightarrow 3$) and the spontaneous decay rates are $\gamma_1$ and $\gamma_2$.}
\label{fig-system}
\end{figure}
In many situations of interest, however, the geometry is not fixed. For example, in a linear trap, the inter-atomic distance usually can be described classically as a sinusoidal oscillation around a mean distance. In this case, a dependence of the dynamics on the orientation of the dipole moments relative to the oscillation direction can be expected. A gas of atoms corresponds to a setup where both the orientation and the distance of any given pair changes with time. Thus the question arises, whether the geometry-dependent effects of the dipole-dipole interaction of orthogonal transition dipole moments survive an averaging over different geometries.
Therefore, here we discuss the fluorescence intensity emitted by a pair of three-level $\Lambda$-type atoms when averaged over sets of different geometries of interest, see Fig.~\ref{fig-system}. Our primary interest is the question whether the dipole-dipole couplings of orthogonal transition dipole moments survive an averaging over different geometries and thus are also of relevance if the two atoms are not fixed in space. Since the modulations in the fluorescence intensity are a direct consequence of these couplings, they are a convenient indicator and allow for a quantitative analysis.
The second major question involves the way the averaging should be treated theoretically. For comparison, we discuss two different ansatzes. First, one can assume that the internal electronic dynamics is much faster than the change of the geometrical setup. Second, we consider the case where the change of geometry is fast enough such that the atoms essentially see an averaged interaction potential. The latter approach is used, for example, in the context of ultracold quantum gases to derive the $1/r$ long-range potential from the dipole-dipole coupling of parallel dipole moments by averaging over all possible orientations of the inter-atomic distance vectors~\cite{thiranumachandran}.
We find that in general the orthogonal couplings can survive an extensive averaging over different geometries as long as the inter-particle distance remains small. The magnitude of the effects in the averaged signal, however, strongly depends on the averaging range, and also on the averaging method. Typically, one- or two-dimensional systems can be expected to show larger effects of the dipole-dipole coupling. We also show that the two averaging methods considered can give very different results when averaged over the same set of geometries. In most situations, however, the case where the change in geometry is slow as compared to the internal dynamics is more favorable.
The article is organized as follows: In Sec.~\ref{sec-model}, we present the model system, derive the equations of motion and discuss our main observable, the time-dependent fluorescence intensity. In Sec.~\ref{sec-avg}, the two averaging methods are presented and discussed. Sec.~\ref{sec-result} presents the results from the averaging for various different situations of interest. Finally, our findings are discussed and summarized in Sec.~\ref{sec-summary}.
\section{\label{sec-theory}Theory} \subsection{\label{sec-model}The model system} We consider a system consisting of two identical three-level atoms in $\Lambda$ configuration, see Fig.~\ref{fig-system}. The atomic states have energies $\hbar\omega_i$ ($i\in\{1,2,3\}$). The transition dipole moments of each individual atom are assumed perpendicular, as it is common for near-degenerate electronic states in atomic systems such as Zeeman sublevels. For simplicity, both transition dipole moments are assumed to be real; the one of the $1\!\leftrightarrow\!3$ transition $\bm{d}_1=(d_1,0,0)^T$ is orientated along the $x$ direction and that of the $2\!\leftrightarrow\!3$ transition $\bm{d}_2=(0,d_2,0)^T$ along the $y$ direction. A comparison with the case of complex dipole moments coupling to circularly polarized light was given in~\cite{AgPa2001}. It should be noted that it was found in~\cite{breakdown} that in general all Zeeman sublevels of two nearby dipole-dipole interacting multilevel atoms have to be considered in order to correctly account for the different dipole-dipole couplings occurring in the system. Couplings to certain Zeeman sublevels can be eliminated, however, in special geometries, or via a detuning between the different transition frequencies, thus recovering the well-known few-level systems. In the following, we are interested in arbitrary geometries, and are thus restricted to an elimination via detunings. A $\Lambda$-type level scheme could be realized, for example, in a four-level $J\!=\!1/2 \leftrightarrow J\!=\!1/2$ scheme~\cite{time-energy} subject to a static magnetic field, such that the energy spacing between the upper states is sufficiently large to neglect dipole-dipole coupling to one of the upper states in the four-level scheme. The frequency difference between the two lower states is denoted by $\delta$. 
Atom A is located in the origin of our coordinate system $\bm{r}_{1}=(0,0,0)^T$ and atom B at $\bm{r}_2=\bm{r}_{12}=r_{12}\, (\sin\theta\cos\phi, \sin\theta\sin\phi,\cos\theta)^T$, where the distance vector between the two atoms is $\bm{r}_{12}$. The driving laser fields propagate in $z$ direction. For this system the Hamiltonian reads
\begin{equation} H = H_a + H_f + H_{vac} + H_L\,, \end{equation}
with
\begin{subequations} \begin{align} H_a =& \sum_{\mu=1}^{2} \sum_{j=1}^{3} \hbar \omega_j \,
S_{jj}^{(\mu)} \,, \\
H_f =& \sum_{\bm{k}\lambda}\hbar \omega_{k\lambda}\:
a_{\bm{k}\lambda}^\dagger a_{\bm{k}\lambda}\,, \\
H_{vac} =& -\sum_{\mu=1}^{2}\left [ \left ( \bm{d}_1 \,S_{31}^{(\mu)}
+ \bm{d}_2 \,S_{32}^{(\mu)} \right ) \bm{E}(\bm{r}_\mu)
+ \textrm{H.c.} \right ] ,\\ H_L =& -\hbar \sum_{\mu=1}^2 \left ( \Omega_1(\bm{r}_\mu)
e^{-i\nu_1 t} S_{31}^{(\mu)} \right. \nonumber \\
&\left. + \Omega_2(\bm{r}_\mu) e^{-i\nu_2 t} S_{32}^{(\mu)}
+ \textrm{H.c.} \right )\,. \end{align} \end{subequations}
$H_a$ represents the free energy of the atomic states. The free energy of the vacuum field is described by $H_f$. $H_{vac}$ is the interaction Hamiltonian of the vacuum field, and $H_L$ is the term describing the interaction with the laser fields in rotating-wave approximation (RWA).
The laser fields have amplitudes $\mathcal{E}_i$, frequencies $\nu_i$ and polarization unit vectors $\hat{\bm{\epsilon}}_i$ ($i\in\{1,2\}$), respectively. $\Omega_i(\bm{r}) = \Omega_i \exp[i\bm{k}_i \cdot\bm{r}]$ with $\Omega_i = (\bm{d}_i \cdot \hat{\bm{\epsilon}}_i) \mathcal{E}_i / \hbar$ are the corresponding Rabi frequencies. $\bm{E}(\bm{r})$ represents the quantized vacuum field modes. Furthermore, $\omega_{k\lambda}$ is the frequency of a vacuum field mode with creation and annihilation operator $a_{\bm{k}\lambda}^\dagger$ and $a_{\bm{k}\lambda}$, respectively.
The energy of the atomic state $|i\rangle$ is $\hbar \omega_i$.
We define atomic operators \begin{equation} \label{sij}
S_{ij}^{(k)} = |i\rangle_k {}_k\langle j| \qquad (i,j\in\{1,2,3\} \textrm{ and } k\in\{1,2\})\,, \end{equation}
where $|i\rangle_k$ denotes the $i$th electronic state of atom $k$. For $i\!=\!j$, Eq.~(\ref{sij}) corresponds to a population, whereas for $i\!\neq\!j$ it is a transition operator.
Choosing a suitable interaction picture it is possible to describe the system by the master equation for the atomic density operator $\rho$ given by~\cite{pra-geometry}
\begin{align} &\frac{\partial \rho}{\partial t} =
- i\sum_{\mu=1}^{2} \sum_{j=1}^{2} \left [\Delta_j S_{jj}^{(\mu)},
\rho \right ]
\nonumber \allowdisplaybreaks[2] \\
&+ i \sum_{\mu=1}^{2} \sum_{j=1}^{2} \left [ \left (S_{3j}^{(\mu)} \Omega_j(\bm{r}_\mu) + \textrm{H.c.} \right) , \rho \right ]
\nonumber \allowdisplaybreaks[2] \\
& - \sum_{\mu=1}^{2} \sum_{j=1}^{2} \Bigl [
\gamma_j \left (
S_{33}^{(\mu)}\rho - 2 S_{j3}^{(\mu)}\rho S_{3j}^{(\mu)}
+ \rho S_{33}^{(\mu)} \right )
\nonumber \allowdisplaybreaks[2] \\
& + \Gamma_j^{dd} \left ( S_{3j}^{(\mu)} S_{j3}^{(\neg \mu)} \rho - 2 S_{j3}^{(\neg \mu)} \rho S_{3j}^{(\mu)} + \rho S_{3j}^{(\mu)} S_{j3}^{(\neg \mu)} \right ) \Bigr ]
\nonumber \allowdisplaybreaks[2] \\
&+ \sum_{j=1}^{2} \left ( i \Omega_j^{dd} \left [S_{3j}^{(1)}S_{j3}^{(2)}, \rho \right ] + \textrm{H.c.} \right )
\nonumber \allowdisplaybreaks[2] \\
&- \sum_{\mu=1}^{2} \left [ \Gamma_{vc}^{dd} \left ( S_{32}^{(\mu)} S_{13}^{(\neg \mu)}\rho - 2 S_{13}^{(\neg \mu)} \rho S_{32}^{(\mu)} \right . \right . \nonumber \\ & \left . \left . \qquad \qquad + \rho S_{32}^{(\mu)}S_{13}^{(\neg \mu)} \right ) e^{i\Delta t} + \textrm{H.c.} \right ]
\nonumber \allowdisplaybreaks[2] \\
&+ \sum_{\mu=1}^{2} \left ( i \Omega_{vc}^{dd} \left [S_{32}^{(\mu)} S_{13}^{(\neg \mu)}, \rho \right ]e^{i\Delta t} + \textrm{H.c.} \right ) \,. \label{master} \end{align}
Here, the RWA and the Born-Markov approximation were used. The first term, which contains the detunings $\Delta_i = \nu_i - (\omega_3 - \omega_i)$ of the driving laser fields, appears because of the chosen interaction picture. The interaction with the laser fields is expressed by the second summand with the Rabi frequencies $\Omega_j(\bm{r}_\mu)$. The contribution containing $\gamma_j$ represents the individual spontaneous decay of each transition in the two atoms. In our case the spontaneous decay rate on transition $3\!\leftrightarrow\!j$ is denoted as $2\gamma_j$. The term with $\Gamma_j^{dd}$ contains the dipole-dipole coupling between a dipole of one atom and the corresponding parallel dipole of the other atom. The contribution proportional to $\Omega_j^{dd}$ represents the corresponding dipole-dipole energy shift. The interaction between a dipole moment of one atom and the perpendicular one of the other atom is described by the expression containing the cross coupling constants $\Gamma_{vc}^{dd}$ and $\Omega_{vc}^{dd}$. The symbol $\neg \mu$ denotes the other atom than $\mu$, e.g., for $\mu=2$ one has $\neg \mu = 1$. Note that the interaction picture in Eq.~(\ref{master}) is chosen such that the residual explicit time dependence $\exp[\pm i\Delta t]$ which cannot be transformed away is attributed to the terms that describe dipole-dipole coupling of orthogonal transition dipole moments. This choice is motivated by the physical origin of this time dependence, which arises from these orthogonal couplings~\cite{pra-geometry}. The frequency $\Delta$ is determined by $\Delta = \delta + \Delta_2 - \Delta_1 = \nu_2 - \nu_1$.
The spontaneous decay rates are given by \begin{equation}
\gamma_i = \frac{1}{4\pi\epsilon_0} \frac{2|\bm{d}_i|^2\omega_{3i}^3}{3\hbar c^3}\,, \end{equation} and the dipole-dipole coupling constants can be calculated from~\cite{AgPa2001} \begin{subequations} \label{couplings-gen} \begin{eqnarray} \Gamma_i^{dd} &=& \frac{1}{\hbar} [\bm{d}_i \cdot \textrm{Im}
(\overset{\leftrightarrow}{\chi}) \cdot \bm{d}_i^* ]\,, \\
\Omega_i^{dd} &=& \frac{1}{\hbar} [\bm{d}_i \cdot \textrm{Re}
(\overset{\leftrightarrow}{\chi}) \cdot \bm{d}_i^* ] \,, \\
\Gamma_{vc}^{dd} &=& \frac{1}{\hbar} [\bm{d}_2 \cdot \textrm{Im}
(\overset{\leftrightarrow}{\chi}) \cdot \bm{d}_1^* ]\,, \label{gamma_vc} \\
\Omega_{vc}^{dd} &=& \frac{1}{\hbar} [\bm{d}_2 \cdot \textrm{Re}
(\overset{\leftrightarrow}{\chi}) \cdot \bm{d}_1^* ] \,. \label{omega_vc} \end{eqnarray} \end{subequations}
In evaluating these coupling constants we have approximated $\omega_{31} \approx \omega_{32} \approx \omega_0$. Re and Im denote the real and imaginary part of the tensor $\overset{\leftrightarrow}{\chi}$ whose components are given by
\begin{align} \chi_{\mu\nu}&(\bm{r}_1, \bm{r}_2) =\frac{1}{4\pi \epsilon_0}
\left [ \delta_{\mu\nu} \left ( \frac{k_0^2}{r_{12}} + \frac{i k_0}{r_{12}^2} - \frac{1}{r_{12}^3} \right )\right. - \nonumber \\[0.1cm] & \left . \frac{(\bm{r}_{12})_\mu(\bm{r}_{12})_\nu }{r_{12}^2} \left( \frac{k_0^2}{r_{12}} + \frac{3ik_0}{r_{12}^2} - \frac{3}{r_{12}^3} \right )\right ]\,e^{i k_0 r_{12}}\,. \label{chi} \end{align}
$\delta_{\mu\nu}$ is the Kronecker delta symbol.
For our choice of the atomic system, the coupling constants between orthogonal dipole moments evaluate to ($\eta = k_0\,r_{12}$) \begin{subequations} \label{couplings} \begin{align} \Gamma_{vc}^{dd} &= -\frac{3}{4}\,\sqrt{\gamma_1 \gamma_2}\, \sin(2\phi)\,\sin^2 \theta \nonumber \\ &\qquad \times \left [ \frac{\sin \eta}{\eta} + 3\left ( \frac{\cos\eta}{\eta^2} - \frac{\sin\eta}{\eta^3}\right ) \right ] \,,\\
\Omega_{vc}^{dd} &= -\frac{3}{4}\,\sqrt{\gamma_1 \gamma_2}\, \sin(2\phi)\,\sin^2 \theta \nonumber \\ &\qquad \times\left [ \frac{\cos \eta}{\eta} - 3\left ( \frac{\sin\eta}{\eta^2} + \frac{\cos\eta}{\eta^3}\right ) \right ]\,. \end{align} \end{subequations}
Our main observable is the total time-dependent fluorescence intensity emitted by the two atoms. It is assumed to be measured by a detector placed on the $y$-axis at the point $\bm{R}=R\hat{\bm{R}}$ with $\hat{\bm{R}}=(0,1,0)^T$. This intensity is proportional to the normally ordered one-time correlation function
\begin{equation} I = \langle \,:\! \bm{E}^{(-)}(\bm{R},t)\:\bm{E}^{(+)}(\bm{R},t)\!:\,\rangle\,, \end{equation}
where $\bm{E}^{(\mp)}(\bm{x},t)$ are the positive and negative frequency parts of the vacuum field $\bm {E}(\bm{x},t) = \bm{E}^{(-)}(\bm{x},t) +\bm{E}^{(+)}(\bm{x},t)$. For our arrangement of the detector, the atoms and the laser fields the fluorescence intensity reduces to~\cite{pra-geometry}
\begin{align} I_y = w_1^2
\sum_{\mu,\nu=1}^{2} \left \langle S_{31}^{(\mu)}S_{13}^{(\nu)}
\right \rangle \: e^{ik_1 \hat{\bm{R}}\cdot\bm{r}_{\mu\nu}}\,, \label{int-y} \end{align}
where $w_1=(\omega_{31}^2\,d_1)/(4\pi \epsilon_0 c^2 R)$ is a pre-factor that we neglect in the following.
\subsection{\label{sec-avg}Averaging over different geometries}
The master equation Eq.~(\ref{master}) contains an explicit time dependence which is determined by the two driving laser field frequencies. Thus, in general, it cannot be expected that the system reaches a stationary steady state. This was demonstrated in~\cite{pra-geometry}, where it was shown that for $\Delta \neq 0$ in general it depends on the relative alignment of the two atoms whether the system reaches a stationary state or not. For some geometries, the long-time limit is constant, whereas for other geometries a periodic oscillation in the fluorescence intensity is predicted. Since the relative positions of nearby atoms in many experimental situations of relevance are not fixed, the question arises whether any time dependence survives when averaging over a set of geometries. The most obvious example for this is a three-dimensional volume of gas, where arbitrary relative orientations and distances can be observed. But also other sets of geometries may be considered. For example, in~\cite{noel}, an essentially one-dimensional ultracold quantum gas was studied. In this case, an external static field can be used to vary the relative alignment of dipoles and the trap axis.
In the following, we discuss two different approaches for calculating the averaged total fluorescence intensity, which is our main observable.
\subsubsection{The adiabatic case method}
In general, we have to average over the angles $\theta, \phi$ as well as over the distance $r_{12}$. We discretize the respective interval of each geometric parameter in $N_i$ equal steps of size $\Delta_i$, respectively, where $i\in\{r, \theta, \phi\}$. This gives rise to $N_r N_\phi N_\theta$ different geometries. For each of these geometries, we evaluate the coupling constants and numerically integrate the master equation Eq.~(\ref{master}). From this, we obtain the time-dependent fluorescence intensity $\left[I_y(t) \right]_{n_r, n_{\theta}, n_{\phi}}$ for this particular geometry $(n_i \in \{1,\dots,N_i\})$. Finally, we average over all time evolutions of the different geometries using the expression
\begin{subequations} \label{Mitteln} \begin{align} \overline{\left(I_y\right)}(t) &= \frac{1}{\mathcal{Q}}\sum_{n_{r}=1}^{N_r} \sum_{n_\theta=1}^{N_\theta} \sum_{n_\phi=1}^{N_\phi} \: \mathcal{V}_{r,\theta,\phi} \: \left[I_y(t) \right]_{n_r, n_{\theta}, n_{\phi}} \,,\\
\mathcal{Q} &= \sum_{n_{r}=1}^{N_r} \sum_{n_\theta=1}^{N_\theta} \sum_{n_\phi=1}^{N_\phi} \: \mathcal{V}_{r,\theta,\phi} \,, \\
\mathcal{V}_{r,\theta,\phi} &= \Delta V_r \: \Delta V_\theta(n_r) \: \Delta V_\phi(n_r,n_\theta) \,.
\end{align} \end{subequations}
$\mathcal{Q}$ is a normalization constant. We work in a spherical coordinate system and consider not only uniform motions of the atoms. Thus an appropriate volume element $\mathcal{V}_{r,\theta,\phi}$ has to be taken into account. In the discretized form, the contributions from the different coordinates are given by
\begin{subequations} \begin{align} \Delta V_r &= \Delta_r \,, \\ \Delta V_\theta (n_r) &= r_{n_r} \Delta_\theta \,,\\ \Delta V_\phi (n_r,n_\theta)&=r_{n_r} \sin(\theta_{n_\theta})\Delta_\phi \,. \end{align} \end{subequations}
When we average over one or two parameters only we omit the other summation(s) and volume element(s).
This method of averaging describes the experimentally observable signal as long as the change of the geometric setup is slow compared to the internal dynamics of the system. Then, the internal dynamics adapts to its long-time evolution on a timescale much faster than the change of the geometry.
In the following, we will call this way of averaging the adiabatic case (AC) method because of the slow change of the geometry.
\subsubsection{The average potential method}
In our second method of averaging, a different physical situation is considered. Here, the change of the geometry is considered fast compared to the internal dynamics. Then, the time evolution of the atomic system according to the master equation Eq.~(\ref{master}) is not governed by coupling constants corresponding to a particular fixed geometry. Rather, the atom experiences an averaged coupling constant.
Therefore, in this case, we start by averaging all coupling constants Eqs.~(\ref{couplings-gen}) over the range of geometries considered. This can be done analytically without a discretization of the averaging range, but again taking into account an appropriate volume element. Then the averaged coupling constants are given by \begin{equation} \overline{\mathcal{C}}=\int_0^{2\pi}\int_0^\pi\int_0^{2\pi} \: \mathcal{C} \: dV_{r} \, dV_\theta \,dV_\phi\, , \end{equation} with $\mathcal{C}\in\left\{\Gamma_i^{dd}, \Omega_i^{dd},\Gamma_{vc}^{dd}, \Omega_{vc}^{dd}\right\}$. In order to average, e.g., over a sinusoidally oscillating distance we parameterize $r_{12}=r_m+r_a \sin\alpha$ by a mean distance $r_m$ and an oscillation amplitude $r_a$. In this case $dV_{r}=d\alpha$, $dV_\theta=r_{12} d\theta$ and $dV_\phi=r_{12} \sin\theta d\phi$. Then, the master equation is solved and the fluorescence intensity is calculated using these averaged coupling constants. Finally, the time-dependent intensity is plugged into Eq.~(\ref{int-y}). Since the expression for the fluorescence intensity Eq.~(\ref{int-y}) also depends on the orientation of the inter-atomic distance vector, we also average this expression over the same set of geometries.
In the following, this way of averaging will be referred to as averaged potential (AP) method.
\section{\label{sec-result}Results}
We now turn to a numerical study of our system as outlined in the previous section. Different ranges of averaging will be considered, according to different setups of interest. In all cases, the two ways of averaging the fluorescence intensity will be compared. We choose as initial condition both atoms to be
in state $|3\rangle$ unless noted otherwise.
\begin{figure}
\caption{(Color online) Time dependence of the fluorescence intensity averaged over $r_{12}$ by the AC method. $r_m=0.25\lambda$ and (a) $r_a=0.02\lambda$, (b) $r_a=0.14\lambda$ and (c) $r_a=0.2\lambda$. The inter-atomic distance vector is oriented such that $\phi = \pi/4$ and $\theta = \pi/2$. The laser parameters are $\Omega_1 = 3\, \gamma$, $\Omega_2 = 5\,\gamma$, $\Delta_1 = 0$, $\Delta_2 = 2\gamma$, and the two lower states are assumed degenerate $\delta = 0$.}
\label{r1}
\end{figure}
\begin{figure}
\caption{(Color online) Dependence of the amplitude of the oscillating fluorescence intensity on the oscillation amplitude $r_a$ of the inter-atomic distance for (a) $r_m=0.2\lambda$ and (b) $r_m=0.25\lambda$. The other parameters are as in Fig.~\ref{r1}.}
\label{maximaDeltaI}
\end{figure}
\subsection{Averaging over inter-particle distance} In this section, the orientation of the inter-atomic distance vector is fixed, while we assume a sinusoidal oscillation of the distance $r_{12}$ around a mean distance $r_m$ with amplitude $r_a$, i.e., $r_{12}(\alpha) = r_m + r_a\,\sin(\alpha)$ with $\alpha \in [0,2\pi]$. This corresponds to, e.g., atoms in a linear trap.
In Fig.~\ref{r1}, we choose a mean distance $r_m = 0.25\lambda$ and orientation $\theta = \pi/2$, $\phi=\pi/4$. The different curves correspond to oscillation amplitudes $0.02\lambda$, $0.14\lambda$ and $0.2\lambda$, respectively. All curves in this figure were obtained using the AC method.
It can be seen that in the averaged signal, the system does not reach a steady state in the long-time limit for any of these oscillation amplitudes. To analyze the oscillations in the long-time evolution in more detail, we determine the maximum (minimum) fluorescence intensity $I_{\textrm{max}}$ ($I_{\textrm{min}}$) in the long-time limit where the intensity undergoes periodic changes. We define an oscillation amplitude of the intensity as $\Delta I=I_{\textrm{max}}-I_{\textrm{min}}$. From Fig.~\ref{r1}, it is clear that $\Delta I$ depends on the oscillation amplitude $r_a$. This dependence is depicted in Fig.~\ref{maximaDeltaI} for small mean distances $r_m$, where it can be seen that $\Delta I$ exhibits a resonance in the plot versus the oscillation amplitude $r_a$.
This resonance can be understood as follows. First, one has to note that as long as the inter-atomic distance is not too small, typically the oscillation amplitude decreases with increasing particle distance, because the coupling constants between orthogonal dipole moments decrease. Therefore, small inter-atomic distances lead to a larger oscillation amplitude. Only for very small distances, the oscillation amplitude as well as the total fluorescence signal are attenuated because the dipole-dipole energy shifts move the atomic transitions out of resonance with the driving laser field, such that the upper state population is decreased. This explains why the averaged oscillation amplitude decreases from the resonance maximum towards smaller oscillation amplitude $r_a$. With smaller $r_a$, only larger inter-atomic distances are considered in the averaging, and thus the average oscillation amplitude decreases.
\begin{figure}
\caption{(Color online) Time-dependent fluorescence signal for different fixed distances $r_{12}$ without any averaging. (a) $r_{12} = 0.10\,\lambda$, (b) $r_{12} = 0.08\,\lambda$, (c) $r_{12} = 0.06\,\lambda$, (d) $r_{12} = 0.05\,\lambda$, (e) $r_{12} = 0.04\,\lambda$. The other parameters are as in Fig.~\ref{r1}. The vertical lines allow to easily judge the relative phase shifts of the different curves.}
\label{timedep-signal}
\end{figure}
The decrease of the oscillation amplitude $\Delta I$ from the resonance towards higher amplitudes is due to a different mechanism. In Fig.~\ref{maximaDeltaI}, for both mean distances $r_m$, this occurs if $r_a$ is large enough such that inter-atomic distances below about $0.06\,\lambda$ are included in the averaging. Some examples of unaveraged time-dependent signals for different inter-atomic distances are shown in Fig.~\ref{timedep-signal}. For distances larger than about $0.06\,\lambda$, the relevant contributions oscillate approximately in phase, see curves (a) and (b) in Fig.~\ref{timedep-signal}. For smaller distances, however, the contributions move out of phase, as can be seen from curves (c)-(e). Curves (d) and (e) approximately have maxima where curves (a) and (b) have minima, and vice versa. Curve (c) is an intermediate case. Therefore, the oscillations with different phases cancel each other in the averaging process if distances below about $0.06\,\lambda$ are included in the averaging.
In Fig.~\ref{DeltaI2}(a) we show $\Delta I$ in dependence of $r_a$ for a larger mean distance $r_m=2.25\lambda$, and over a broader range of oscillation amplitudes. It can be seen that the curve exhibits a series of resonances similar to the one shown in Fig.~\ref{maximaDeltaI}. These again occur due to an alternating destructive and constructive superposition of the different oscillations in the averaging process. The overall amplitude $\Delta I$, however, is small because of the overall larger inter-atomic distances considered in this figure.
\begin{figure}
\caption{(Color online) Dependence of the amplitude of the oscillating fluorescence intensity on the oscillation amplitude $r_a$ of the inter-atomic distance for larger mean distance $r_m=2.25\lambda$. In (a) we used the AC and in (b) the AP method. The other parameters are as in Fig.~\ref{r1}.}
\label{DeltaI2}
\end{figure}
\begin{figure}
\caption{(Color online) Comparison of the time dependence of the fluorescence averaged over $r_{12}$ by both methods. The orientation angles $\theta=0.5\pi$ and $\phi=0.25\pi$ are fixed and $r_m=0.25\lambda$. (a) AC method, $r_a=0.14\lambda$, (b) AP method, $r_a=0.14\lambda$, (c) AC method, $r_a=0.2\lambda$, (d) AP method, $r_a=0.2\lambda$. All other parameters are as in Fig.~\ref{r1}.}
\label{rvergleich}
\end{figure}
Finally, we discuss the time-averaged intensity obtained from the AP method of averaging. Some examples are shown in Fig.~\ref{rvergleich}. Curves (a) and (c) show our results from the AC method and (b) and (d) those from the AP method. The oscillation amplitudes are $r_a=0.14\lambda$ and $0.2\lambda$, respectively. All other parameters are as in Fig.~\ref{r1}. The left panel shows values for $5\leq \gamma\cdot t\leq 25$ since the stationary oscillation is reached rapidly for these parameters. Also in the AP case, the fluorescence intensity undergoes periodic changes in the long-time limit, see Fig.~\ref{rvergleich}. For small oscillation amplitudes $r_a$ there is little difference between the two methods, see curves (a) and (b). However, for larger values of $r_a$ the amplitude of the oscillations in the AP case is much larger than those obtained in the AC averaging. The dependence of the oscillation amplitude on the averaging range for the AP method is
\begin{figure}
\caption{Time dependence of the fluorescence averaged over $\theta$ by using the AC method. The distance $r_{12}=0.1\lambda$ is fixed. (a) $\phi=0.2\pi$, (b) $\phi=0.5\pi$, (c) $\phi=0.8\pi$ and (d) $\phi=\pi$. All other parameters are as in Fig.~\ref{r1}.}
\label{theta1}
\end{figure}
shown in Fig.~\ref{DeltaI2}, curve (b). As in the corresponding curve (a) for the AC averaging method, resonance structures appear. But depending on $r_a$, the two methods yield either similar or very different oscillation amplitudes. In addition, the result for the AP method seems to have a zero at about $r_a = 0.4\lambda$. A careful analysis shows, however, that this minimum is not a true zero. The reason for the minima in the AP curve is that for these oscillation amplitudes, the turning point at minimum inter-atomic distance is close to a distance where the coupling constants between orthogonal transition dipole moments are small. Then, the averaged coupling constants are small such that the oscillation amplitude has a minimum. These minima nicely show a crucial difference between the two averaging methods. In the AP method, it is easy to find averaging ranges where the averaged coupling constants are small or even vanish. In this case, the oscillation in the long-time dynamics is also negligible. The results from the AC method, however, typically remain oscillatory even for such averaging ranges, since the dynamics of each of the different geometries contributes, rather than only that of a single averaged geometry.
\subsection{\label{sec-orientation}Averaging over relative orientation}
In the following, we consider the case where the inter-atomic distance $r_{12}$ is fixed, but the relative orientation and thus the angles $\theta$ and/or $\phi$ are averaged over. A realization for this could be a Mexican-hat-like potential where one of the atoms is placed in a potential dip in the center whereas the other atom is confined to the potential minimum in the rim.
First we fix the angle $\phi$ and assume atom B to move around A on a circle in a plane which is perpendicular to the x-y-plane and includes the origin. Since $\theta$ is only defined between $0$ and $\pi$ we have to average over two semicircles with $\phi$ and $\phi+\pi$ to include the whole circle.
Some examples of our results for $r_{12}=0.1\lambda$ using the AC method of averaging are shown in Fig.~\ref{theta1}. Here, the angle $\phi$ is chosen as $0.2\pi$, $0.5\pi$, $0.8\pi$ and $\pi$, respectively. For $\phi=0.5\pi$ and for $\phi=\pi$ the system reaches a time-independent steady state in the long-time limit. That is because the cross-coupling constants are zero for these values of $\phi$, as both $\Gamma_{vc}^{dd}$ and $\Omega_{vc}^{dd}$ are proportional to $\sin(2\phi)\sin^2(\theta)$, see Eqs.~(\ref{couplings}). But even though the coupling constants are zero in both cases, the resulting intensities are not identical. This demonstrates that it is not sufficient to analyze the coupling constants alone to understand the system dynamics.
\begin{figure}
\caption{(Color online) Time dependence of the fluorescence intensity averaged over $\theta$ with fixed inter-atomic distance $r_{12}=0.1\lambda$.
(a) AC method with $\phi=0.6\pi$, (b) AP method with $\phi=0.6\pi$, (c) AC method with $\phi=0.9\pi$, and (d) AP method with $\phi=0.9\pi$. The other parameters are as in Fig.~\ref{r1}.}
\label{thetaVergleich}
\end{figure}
In addition we can see from Fig.~\ref{theta1} that there is a phase shift of $\pi$ with respect to the oscillation in the long-time limit between the two curves for $\phi=0.2\pi$ and $\phi=0.8\pi$. We found that in general curves for different values of $\phi$ split into two groups separated by such a phase shift of $\pi$. The first group contains curves for $0<\phi<\pi/2$, whereas the other consists of curves for $\pi/2 <\phi<\pi$.
Within each of these groups, the oscillation amplitude of the intensity has the same dependence on the angle $\phi$. For $\phi=0$ and $\phi=\pi/2$ the amplitude is zero. Then it increases with growing $\phi$ and reaches a maximum for $\phi=0.2\pi$ and $\phi=0.8\pi$, respectively. Thus, in Fig.~\ref{theta1} the curves with maximum oscillation amplitude are shown.
This separation is likely to appear due to the change of sign of the cross-coupling constants at $\phi=0.5\pi$. This can be understood from the explicit expressions of the coupling constants Eqs.~(\ref{couplings}). In the master equation~(\ref{master}) we can see that a sign change in the terms with the cross-couplings can be rewritten as a constant phase shift factor of $\exp[i\pi]$. It is, however, not straightforward to connect this phase shift to the phase shift seen in Fig.~\ref{theta1}, because the oscillation frequency of the time-dependent fluorescence in general does not only depend on $\Delta$, but also, e.g., on the laser field Rabi frequencies. In addition, one has to note that the geometric parameters $\theta, \phi$ enter the total fluorescence intensity as well, see Eq.~(\ref{int-y}).
But our interpretation is further supported by the fact that a change of the inter-atomic distance
$r_{12}$ has no influence on the separation of our curves into two groups. The separation also persists for different initial conditions, e.g., atom A in state $|1\rangle$ and
atom B in $|3\rangle$, and thus is not a consequence of the initial dynamics until the steady state has been reached.
\begin{figure}
\caption{(Color online) Time dependence of the fluorescence intensity averaged over $\phi$. $r_{12}$ is fixed at $0.1\lambda$. (a) $\theta=0.25\pi$, (b) $\theta=0.3\pi$, (c) $\theta=0.5\pi$ and (d) $\theta=\pi$. Curves (a-d) are obtained using the AC method. (e) shows a result using the AP method for $\theta=0.3\pi$. All other parameters are as in Fig.~\ref{r1}.}
\label{phi}
\end{figure}
Using the AP method the separation into two groups remains, but again the curves are different from our results from the other averaging method. In Fig.~\ref{thetaVergleich} we compare curves from both methods of averaging for averaging over $\theta$ with fixed angles $\phi=0.6\pi$ and $\phi=0.9\pi$. The curves resulting from the AP method have pronounced local extrema in each oscillation period in addition to the global ones, and the overall intensity is higher than in the AC case.
Next we assume atom B to move on a circle in the x-y-plane. Thus $\theta$ is fixed and we average over the angle $\phi$. The inter-atomic distance is $0.1\lambda$. Some examples of our results are shown in Fig.~\ref{phi}, where $\theta$ is chosen as $0.25\pi$, $0.3\pi$, $0.5\pi$ and $\pi$. For the AC method the system does not reach a time-independent state in the long-time limit except for the angle $\theta=\pi$. This is because for this choice of $\theta=\pi$ the coupling constants vanish since they are proportional to $\sin(2\phi)\sin^2(\theta)$, see Eqs.~(\ref{couplings}). For any different $\theta$ our system remains oscillating in the long-time limit. In case of $\theta=0.3\pi$ one can see local extrema in addition to the global extrema in the fluorescence intensity. In both cases, even the time-averaged intensity is considerably larger than in the non-oscillatory case $\theta=\pi$. Interestingly, for $\theta=\pi/2$, the absolute value of the intensity is lower than for the non-oscillatory case $\theta=\pi$. Thus, the orthogonal coupling together with the averaging can have either an enhancing or a detrimental effect on the total emitted fluorescence.
For this set of geometries, the AP method of averaging always yields a stationary long-time limit and thus behaves qualitatively different from the first method. The reason for this is that the coupling constants for the orthogonal couplings vanish upon averaging over the angle $\phi$. As discussed before, then the time dependence in the long-time limit also vanishes, see Fig.~\ref{phi}.
We now turn to the case of atom B moving around A on a sphere with radius $r_{12}$. In this case, neither of the two angles $\theta$ and $\phi$ is fixed, and we have to average over both of them while the inter-atomic
\begin{figure}
\caption{(Color online) Time dependence of the total fluorescence intensity averaged over $\theta$ and $\phi$ for the inter-atomic distances (a) $r_{12}=0.1\lambda$, (b) $r_{12}=0.15\lambda$ and (c) $r_{12}=0.2\lambda$. Curves (a-c) are obtained using the AC method. (d) is the result from the AP method for $r_{12}=0.1\lambda$. The other parameters are as in Fig.~\ref{r1}.}
\label{phitheta}
\end{figure}
distance is fixed. Some results from both methods are shown in Fig.~\ref{phitheta}. We already know that the coupling constants vanish when averaged over $\phi$. That is why the time dependence in the AP method also vanishes when we average over $\theta$ and $\phi$, see curve (d). In curves (a)-(c) obtained using the AC method, the inter-atomic distance is chosen as $0.1\lambda$, $0.15\lambda$ and $0.2\lambda$, respectively. One can see that both the oscillation amplitude and the absolute value of the fluorescence intensity decrease with increasing inter-particle distance. For the distance $0.2\lambda$ there is almost no oscillation left due to the vanishing of the coupling constants with increasing inter-atomic distance. This also explains why this curve approaches the AP method result, where the averaged coupling constants are zero. As compared to curves (a) and (b) in Fig.~\ref{phi}, curve (a) in Fig.~\ref{phitheta} shows that the additional averaging over $\theta$ does lead to a reduction of the oscillation amplitude. Still, the oscillation and thus the dipole-dipole coupling of orthogonal dipole moments can survive an averaging over all orientations, depending on the averaging case.
\subsection{Averaging over distance and orientation}
\begin{figure}
\caption{(Color online) Time dependence of the total fluorescence intensity averaged over r, $\theta$ and $\phi$ for atom B moving on a sphere around atom A with additional harmonic oscillation of the inter-atomic distance. Here, $r_m=0.2\lambda$ and $r_a=0.12\lambda$. In (a) we used the AP and in (b) the AC method. All other parameters are as in Fig.~\ref{r1}.}
\label{thetarphi}
\end{figure}
After the individual averaging over the inter-atomic distance and the relative orientation of the two atoms in the previous sections, we now consider the case of averaging over both. This situation is realized, e.g., in a gas of atoms, where the relative position of any two particles changes with time. An averaging over the two-particle configuration space is meaningful, since in a macroscopic volume of gas at any time there is a finite probability for an arbitrary geometry within the volume of the sample to be present. A different realization is a sample of atoms randomly embedded in a host material. In this case, again an averaging is in order. The two situations differ, however, since the former case corresponds to a time-dependent geometry for any two-particle subsystem, whereas the latter case can be represented by a sample of time-independent pairs.
Thus, in the following, we investigate whether in these cases any time dependence of the fluorescence intensity remains in the long-time limit by considering a system where $r_{12}$, $\theta$ and $\phi$ are variable. The three-dimensional case of course leaves several possibilities for the averaging range. In the following, we will consider two cases. In the first case, atom B moves on a sphere with atom A in its center and additionally oscillates around the mean distance $\bm{r}_{12}$ with an amplitude $r_a$. In the second case, the particle fly-by, particle B passes atom A moving with constant velocity on a straight line, see Fig.~\ref{skizze}.
In Sec.~\ref{sec-orientation} we have seen that averaging the coupling constants over $\phi$ makes them vanish, such that the system does not show any time dependence in the long-time limit when we use the AP method of averaging. This, of course also holds true for the three-dimensional averaging for atom B moving on a sphere with oscillation of the inter-atomic distance.
In contrast, the AC method of averaging still yields time-dependent fluorescence intensities. An example is shown in Fig.~\ref{thetarphi}. Here, the inter-atomic mean distance is chosen $r_m=0.2\lambda$ and the oscillation amplitude is $r_a=0.12\lambda$. We see that even if we average over all three geometric parameters, the system does not reach a time-independent state in the long-time limit, even though the oscillation amplitude is small.
\begin{figure}
\caption{(Color online) Geometry for the case of atom B flying past atom A with constant velocity on a straight line from $-z_{max}$ to $z_{max}$.}
\label{skizze}
\end{figure}
\begin{figure}
\caption{(Color online) Dependence of the oscillation amplitude of the time-dependent intensity on $z_{max}$ for impact parameter $r_{min}=0.05\lambda$. $\phi = \pi/4$, and the other parameters are as in Fig.~\ref{r1}. In (a) we used the AC and in (b) the AP method.}
\label{x1}
\end{figure}
Finally, we consider the case where atom A flies past atom B along the z-axis from $-z_{max}$ to $z_{max}$ with constant velocity, see Fig.~\ref{skizze}. The angle $\phi$ is fixed and we average over $\theta$ and $r_{12}$ considering the respective volume element. We analyzed the case $\phi = \pi/4$ and found that for both averaging methods the fluorescence intensity remains oscillatory in the long-time limit. To further study these oscillations, in Fig.~\ref{x1} we show the oscillation amplitude of the time-dependent fluorescence intensity in the long-time limit against the extent of the motion $z_{max}$. The minimum inter-atomic distance is chosen as $r_{min}=0.05\lambda$. Curve (a) shows our results from the AP and (b) those from the AC method of averaging.
One can see that in both cases the amplitude decreases with increasing $z_{max}$ for large values of $z_{max}$. This is because for large distances the dipole-dipole interaction tends to zero, and oscillations only occur if the particles are close. If the averaging interval contains increasing ranges of $z$ where there essentially is no oscillation because of the inter-atomic distance, then the oscillations in the overall signal decrease.
It is interesting to note, however, that in this averaging configuration the AP method shown in curve (a) yields much larger oscillations than the AC method shown in curve (b). Also, it can be seen that the AP method shows oscillations over a range of $z_{max}$ up to several wavelengths $\lambda$. The reason for this is as follows. In the AP method, the coupling constants are averaged over the different geometries. For $z=0$, the distance between the particles is $r_{min}=0.05\lambda$. At this position, the coupling constant $\Omega_{vc}^{dd}$ acquires a large value of more than $330 \gamma$. Of course, with increasing distance the constant $\Omega_{vc}^{dd}$ rapidly decreases down to zero. But averaging over a certain range $[-z_{max},z_{max}]$ still gives a considerable averaged coupling constant $\bar{\Omega}_{vc}^{dd}$ even for values of $z_{max}$ where the unaveraged coupling constants are negligible. This is the reason why the oscillations persist for large $z_{max}$ values in the AP case. In contrast, in the AC case, contributions from larger $z$ values do not oscillate at all such that the decrease of the oscillation amplitude with $z_{max}$ is much more rapid.
\begin{figure}
\caption{(Color online) Dependence of the oscillation amplitude of the time-dependent intensity on $z_{max}$ for smaller values of $z_{max}$. (a) AC method with $r_{min}=0.05\lambda$, (b) AP method with $r_{min}=0.05\lambda$. All other parameters are as in Fig.~\ref{x1}. }
\label{x2}
\end{figure}
We now focus on the region with smaller motion extents $z_{max}$. The corresponding results are shown in Fig.~\ref{x2} for $r_{min}=0.05\lambda$.
In the limit $z_{max}\to 0$, the time-dependent fluorescence approaches the unaveraged curves (d) and (e) in Fig.~\ref{timedep-signal}, which exhibit relatively low oscillation amplitudes. The reason is that at this small distance, the atomic states are shifted by the dipole-dipole interaction out of resonance with the laser fields, such that the overall fluorescence is low. For both methods, the intensity oscillations first increase strongly with increasing $z_{max}$, and then decrease again after passing through a maximum oscillation amplitude. The AC method results for larger $z_{max}$ essentially remain structureless. The AP results, however, exhibit some oscillations, and only then start to decay monotonically with increasing averaging range. Due to the complexity of the system, it is difficult to definitively attribute the oscillations to a property of the system. We believe, however, that they are due to a similar alternating constructive and destructive interference in the averaging as the one that led to the resonance structures in Figs.~\ref{maximaDeltaI} and \ref{DeltaI2}. Such resonances do not appear in the AC method results, because there the contributions for higher values of $z$ where the oscillations in the AP method appear are already too small.
\section{\label{sec-summary}Discussion and summary} Dipole-dipole interactions between transitions with orthogonal transition dipole moments give rise to a new class of effects in collective quantum systems. These couplings, however, strongly depend on the geometry of the setup, and even vanish for some geometries. Therefore here we have discussed different averaging schemes to answer the question whether measurable effects of the dipole-dipole coupling of orthogonal dipole moments survive if the geometry of the system under study is not fixed. As observable, we chose the easily accessible
fluorescence intensity of a pair of laser-driven $\Lambda$-type atoms, which for suitable laser parameters is known to exhibit periodic oscillations in the long-time limit due to the orthogonal couplings.
As a main result, we found that the effects of the dipole-dipole coupling of orthogonal transition dipole moments can survive extensive averaging over all three spatial dimensions. We have analyzed the obtained averaged signals, and expect our physical interpretations to carry over to other atomic level structures. Depending on the averaging range, both constructive and destructive superpositions of the contributions for the respective geometries are possible, such that a wide range of results was observed. The results also strongly depend on the method of averaging, and thus on the physical situation considered. Typically, the adiabatic case, where the geometry changes slowly as compared to the internal dynamics, is more favorable since it better preserves the intensity oscillations. In the average potential case, where the change of geometry is so fast that the atoms effectively see a dipole-dipole interaction averaged over the different geometries, some averaging ranges lead to an exact vanishing of the coupling constants. This usually does not occur in the adiabatic case. A somewhat different situation was found in the particle fly-by, where the averaging over the coupling constants in the AP method led to a much wider range of distances over which an effect of the orthogonal couplings can be observed. In general, our results show that the most pronounced effects of the orthogonal couplings in systems with variable geometry can be expected in one- or two-dimensional setups. There, it is easier to avoid detrimental averaging over extended sets of geometries, and additional control parameters such as the orientation of the dipole moments with respect to the axis of a one-dimensional sample allow one to study the system properties in more detail.
\end{document} | arXiv | {
"id": "0709.2103.tex",
"language_detection_score": 0.8361934423446655,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{\Large Structural Identifiability Analysis of Fractional Order Models with Applications in Battery Systems} \author{S.M.Mahdi Alavi, Adam Mahdi, Pierre E. Jacob, Stephen J. Payne, and David A. Howey \thanks{S.M.M. Alavi was with the Energy and Power Group, Department of Engineering Science, University of Oxford. He is now with the Brain Stimulation Engineering Laboratory, Department of Psychiatry \& Behavioral Sciences, Duke University, Durham, NC 27710, USA. Email: mahdi.alavi@duke.edu} \thanks{A. Mahdi and S.J. Payne are with the Institute of Biomedical Engineering, Department of Engineering Science, University of Oxford, Old Road Campus Research Building, Oxford, OX3 7DQ, United Kingdom. Emails: \{adam.mahdi, stephen.payne\}@eng.ox.ac.uk} \thanks{P.E. Jacob is with the Department of Statistics, Harvard University, Science Center 7th floor, 1 Oxford Street, Cambridge, MA 02138-2901, USA. Email: pierre.jacob.work@gmail.com} \thanks{D.A. Howey is with the Energy and Power Group, Department of Engineering Science, University of Oxford, Parks Road, Oxford, OX1 3PJ, United Kingdom. Email: david.howey@eng.ox.ac.uk} }
\markboth{ } {Shell \MakeLowercase{\textit{et al.}}: Bare Demo of IEEEtran.cls for Journals}
\maketitle
\begin{abstract}
This paper presents a method for structural identifiability analysis of fractional order systems by using the coefficient mapping concept to determine whether the model parameters can uniquely be identified from input-output data. The proposed method is applicable to general non-commensurate fractional order models. Examples are chosen from battery fractional order equivalent circuit models (FO-ECMs). The battery FO-ECM consists of a series of parallel resistors and constant phase elements (CPEs) with fractional derivatives appearing in the CPEs. The FO-ECM is non-commensurate if more than one CPE is considered in the model. Currently, estimation of battery FO-ECMs is performed mainly by fitting in the frequency domain, requiring costly electrochemical impedance spectroscopy equipment. This paper aims to analyse the structural identifiability of battery FO-ECMs directly in the time domain. It is shown that FO-ECMs with finite numbers of CPEs are structurally identifiable. In particular, the FO-ECM with a single CPE is structurally globally identifiable.
\end{abstract}
\begin{IEEEkeywords} System identification, Identifiability, Fractional order systems, Batteries. \end{IEEEkeywords}
\IEEEpeerreviewmaketitle
\section{Introduction} \label{sec:introduction} Parameter estimation is very important for the study and understanding of systems \cite{Astroem1971, Young1981, Ljung2010,Ljung1987, Soderstrom1989, Zhu2001}. However, because of the specific model structure or inadequate data it might not be possible to infer some (or all) of the model parameters.
\emph{Structural identifiability} analysis is a data-free concept, which determines whether the model parameters can be uniquely estimated from rich input-output data \cite{Bellman1970}. \emph{Practical identifiability}, on the other hand, takes into account the practical aspects of the problem that come with real data including noise, bias and signal quality such as the shape of excitation, its magnitude, length, frequency ranges, etc, \cite{Raue2009}. Thus, structural identifiability analysis is a prerequisite for practical identifiability.
Since the 1970's, several analytical techniques have been proposed for structural identifiability analysis, based on Taylor series expansion \cite{Pohjanpalo1978, Chappell1990}, similarity transformations \cite{Vajda1989, Anstett2008, Meshkat2014, Mahdi2014, Glover1974, DistefanoIII1977, VanDenHof1998}, differential algebra \cite{Ljung1994,Audoly2001}, and Laplace transforms (transfer functions) \cite{Cobelli1980, Bellman1970, Nazarian2010}. However, almost all of the proposed techniques deal with ordinary differential equations with integer orders. To the best of our knowledge, only reference \cite{Nazarian2010} studies the identifiability of single-input single-output (SISO) fractional commensurate-order systems in the frequency domain. In the fractional commensurate-order systems, all the orders of derivation are integer multiples of a base order, \cite{Oustaloup1995, Chen2009, Caponetto2010, Monje2010}. It was shown that SISO fractional commensurate-order systems are poorly identifiable for small values of the base order, \cite{Nazarian2010}.
This paper presents an alternative methodology to determine the structural identifiability of SISO fractional-order (FO) systems. The method is applicable to both commensurate and non-commensurate models in the time domain. This is an advantage since typically, in real-world applications, the data is given in the time domain.
Recently, there has been a significant interest in identifiability analysis of battery models. References \cite{Schmidt2010, Forman2012, Moura2014, DAmato2012} study the identifiability of electrochemical models and show that some parameters are not practically identifiable. In \cite{Sitterly2011, Rausch2013, Sharma2014} and \cite{Rothenberger2014}, the identifiability of conventional equivalent circuit models (ECMs) is addressed. Conventional ECMs only include resistors and standard capacitors based on the Randles circuit \cite{Randles1947}. Their dynamics can be expressed by ordinary differential equations with integer orders. A comprehensive survey of the conventional ECMs with integer orders has been given in \cite{Hu2012}. However, these may not accurately reflect the dynamic behaviour of real battery systems. In order to address these issues, constant phase elements (CPEs) with fractional-order dynamics are incorporated into ECMs, which results in fractional-order ECMs (FO-ECMs), \cite{Cole1941}. Compared to conventional ECMs, FO-ECMs represent distributed electrode processes more accurately, \cite{Barsoukov2005} and may give more insight into battery performance, which could be useful for monitoring and diagnostic purposes \cite{Troeltzsch2006, Richardson2014}.
The usefulness of a FO-ECM highly depends on the ability to estimate its parameters. Currently, this is done by fitting the model to frequency domain impedance spectra that are obtained through Electrochemical Impedance Spectroscopy (EIS), \cite{Troeltzsch2006, Macdonald1982, Boukamp1986}. However, parameter estimation of FO-ECMs directly from time-domain data is very appealing since conversion to the frequency domain may introduce bias in the estimation \cite{Alavi2015}.
In this paper, a structural identifiability analysis method based on the concept of coefficient map is employed, which is applicable to general non-commensurate fractional order models. The method is applied to study structural identifiability of battery FO-ECMs, which are non-commensurate if more than one CPE is considered in the model. It is shown that the structural identifiability of battery FO-ECMs depends on the solution of a set of nonlinear coupled equations. The number of equations to be solved equals the number of CPEs. It is shown that FO-ECMs with finite numbers of CPEs are structurally identifiable and the FO-ECM with a single CPE is structurally globally identifiable.
\section{Model Structure}\label{sec:model} In this section discrete-time state-space and transfer function models of fractional-order systems are derived.
A state-space representation of a SISO FO system is given by \begin{eqnarray}\label{SS:c} \begin{aligned} \frac{d^{\alpha} x(t)}{dt^{\alpha}}&= \bar{A}(\beta)\, x(t) + \bar{B}(\beta)\,u(t)\\
y(t) &= M(\beta)\, x(t)+ D (\beta)\,u(t) \end{aligned} \end{eqnarray} where $x(t) \in \ensuremath{\mathbb{R}}^n$ is the state vector; $u(t) \in \ensuremath{\mathbb{R}}$ and $y(t) \in \ensuremath{\mathbb{R}}$ are input and output signals, respectively; $\bar{A}(\beta)\in \ensuremath{\mathbb{R}}^{n \times n}$, $\bar{B}(\beta) \in \ensuremath{\mathbb{R}}^{n \times 1}$, $M(\beta) \in \ensuremath{\mathbb{R}}^{1 \times n}$ and $D(\beta) \in \ensuremath{\mathbb{R}}$ are system matrices which depend on the parameter vector $\beta$ to be identified. Moreover, \begin{equation}\label{fracvec} \frac{d^{\alpha} x(t)}{dt^{\alpha}} = \Big[\frac{d^{\alpha_1} x_1(t)}{dt^{\alpha_1}},\ldots,\frac{d^{\alpha_n} x_n(t)}{dt^{\alpha_n}}\Big]^\top \end{equation} is the vector of fractional-order derivatives with the unknown fractional-orders $\alpha_i\in(0,1)$, $i=1,\ldots,n$.
\begin{Definition} \label{Def:commensurateFOsys}A FO system is said to be commensurate if $\forall i\in\{1,\ldots, n\},~\exists \rho_i \in \ensuremath{\mathbb{N}},~\mbox{such~that}~\alpha_i = \rho_i \alpha$, where $\alpha \in \ensuremath{\mathbb{R}}$; otherwise it is said to be non-commensurate, \cite{Oustaloup1995, Chen2009, Caponetto2010, Monje2010}.
{$\square$} \end{Definition}
This paper considers general SISO non-commensurate FO systems.
A discrete-time representation of the fractional differentiation operator $d^{\alpha} x(t)/dt^{\alpha}$ is often given by the Gr\"{u}nwald-Letnikov approximation \cite{Oustaloup1995} \begin{align} \nonumber &\mbox{diag}\{T_s^{\alpha_1},\ldots,T_s^{\alpha_n}\} \frac{d^{\alpha} x(kT_s)}{dt^{\alpha}}=\\ \label{G-L derivative} & \displaystyle \sum_{j=0}^{k+1}(-1)^j\mbox{diag}\left\{\binom{\alpha_1}{j},\cdots,\binom{\alpha_n}{j}\right\} x((k+1-j)T_s), \end{align} where $T_s$ is the sample time, $k\in \ensuremath{\mathbb{Z}}^+$ is the time index, $\mbox{diag}\{\cdot\}$ denotes the diagonal matrix and $\binom{\alpha_i}{j}$ is the binomial coefficient given by \begin{equation}\label{Gamma_func} \binom{\alpha_i}{j}=\frac{\Gamma(\alpha_i+1)}{\Gamma(j+1)\Gamma(\alpha_i+1-j)}, \end{equation} where, $\Gamma(\cdot)$ denotes the gamma function \[ \Gamma(\alpha_i)=\int_{0}^{\infty} z^{\alpha_i-1} e^{-z}dz,~ \text{for} ~ \alpha_i\in\mathbb{C}~ \text{with}~ \Re(\alpha_i)>0. \] For the sake of simplicity, $T_s$ is omitted from the argument and $x(k+1-j)$ is written as $x_{k+1-j}$ hereafter. The substitution of the Gr\"{u}nwald-Letnikov approximation into equation \eqref{SS:c} gives \begin{align} \nonumber & \mbox{diag}\{T_s^{-\alpha_1},\ldots,T_s^{-\alpha_n}\} \times\\ \nonumber & \displaystyle \sum_{j=0}^{k+1}\left((-1)^j\mbox{diag}\left\{\binom{\alpha_1}{j},\cdots,\binom{\alpha_n}{j}\right\}x_{k+1-j} \right)\\ \label{aux_ss1} & \hspace{7em} = \bar{A}(\beta) x_k +\bar{B}(\beta) u_k. \end{align} Multiplying \eqref{aux_ss1} by $\mbox{diag}\{T_s^{\alpha_1},\ldots,T_s^{\alpha_n}\}$ and extracting $x_{k+1}$ from the summation gives
\begin{align} \nonumber & x_{k+1}=\\ \nonumber & \left(\mbox{diag}\{\alpha_1,\cdots,\alpha_n\}+\mbox{diag}\{T_s^{\alpha_1},\cdots,T_s^{\alpha_n}\}\bar{A}(\beta)\right) x_k+\\ \nonumber & \mbox{diag}\{T_s^{\alpha_1},\cdots,T_s^{\alpha_n}\}\bar{B}(\beta) u_k -\\ \label{aus_ss2}& \displaystyle \sum_{j=2}^{k+1}\left((-1)^j\mbox{diag}\left\{\binom{\alpha_1}{j},\cdots,\binom{\alpha_n}{j}\right\}x_{k+1-j} \right). \end{align}
Thus a discrete-time state-space model, in compact form, can be written as \begin{eqnarray}\label{SS:d} \begin{aligned}
x_{k+1}&=\displaystyle \sum_{j=0}^{k} A_j(\beta,\alpha)\, x_{k-j}+B(\beta,\alpha) u_k \\ \label{output equation discrete} y_k&=M(\beta) x_k + D(\beta) u_k. \end{aligned} \end{eqnarray} with \begin{eqnarray}\label{SS:d-matrices} \begin{aligned} &\alpha=\left[\begin{array}{ccc} \alpha_1 ~ \cdots ~ \alpha_n \end{array}\right]\\ & A_0=\mbox{diag}\{\alpha_1,\cdots,\alpha_n\}+\\ & \hspace{3em}\mbox{diag}\{T_s^{\alpha_1},\cdots,T_s^{\alpha_n}\}\bar{A}(\beta)\\ & A_j=-(-1)^{j+1}\mbox{diag}\left\{\binom{\alpha_1}{j+1},\cdots,\binom{\alpha_n}{j+1}\right\},\\ &\hspace{3em} \mbox{~for~} 1 \leq j\\
& B =\mbox{diag}\{T_s^{\alpha_1},\cdots,T_s^{\alpha_n}\}\bar{B}(\beta). \end{aligned} \end{eqnarray}
Finally, the transfer function model structure parametrised by the unknown parameters \begin{align} \theta=[\beta ~ \alpha] \end{align} is given by \begin{align}
\nonumber H(z,\theta)=&M(\theta)\left(zI-\displaystyle \sum_{j=0}^{T}z^{-j}A_j(\theta)\right)^{-1}B(\theta)+\\ \label{TF-MS-theta} & \hspace{2em} D(\theta), \end{align} where $I$ is the $n \times n$ identity matrix and $T$ represents the data length, i.e., the number of samples.
\begin{Remark} The state-space model \eqref{SS:d} implies that $x_{k+1}$ depends on all the past states, $x_0$ up to $x_k$. This means that FO systems are non-Markov\footnote{In Markov system, $x_{k+1}$ can be written as functions of $x_k$ and inputs.}. From the transfer function perspective this means that the order of a FO system's transfer function equals the data length (equation \eqref{TF-MS-theta}). These are the main distinguishing features of FO systems which make their analysis and identification challenging.
{$\square$} \end{Remark}
\section{Structural Identifiability}\label{sec:strident}
The goal of this section is to introduce rigorously the concept of structural identifiability, which will be applied in the following section to FO-ECMs.
\begin{Definition}\label{def:ide}(\cite{Ljung1987}) Consider a model $\mathcal{M}$ with the transfer function $H(z,\theta)$, parametrised by $\theta$, where $\theta$ belongs to an open subset $\mathcal{D}_{\theta_{\mathcal{M}}} \subset \ensuremath{\mathbb{R}}^q$, and consider the equation \begin{equation}\label{TFd:ide} H(z,\theta)=H(z,\theta^\ast),\qquad \text{for almost all } z, \end{equation} where $\theta, \theta^\ast\in\mathcal{D}_{\theta_{\mathcal{M}}}$. Then, model $\mathcal{M}$ is said to be \begin{itemize} \item[-] \emph{globally identifiable} if \eqref{TFd:ide} has a unique solution in $\mathcal{D}_{\theta_{\mathcal{M}}}$, \item[-] \emph{identifiable} if \eqref{TFd:ide} has a finite number of solutions in $\mathcal{D}_{\theta_{\mathcal{M}}}$, \item[-] \emph{unidentifiable} if \eqref{TFd:ide} has an infinite number of solutions in $\mathcal{D}_{\theta_{\mathcal{M}}}$. \end{itemize}
{$\square$} \end{Definition}
Instead of using the above definition of structural identifiability, which appears to be a standard definition in the engineering literature, it might be more convenient to use the concept of coefficient map, which will now be reviewed.
\begin{Definition} Consider the following monic transfer function\footnote{The coefficient of the highest order term in the denominator is 1.} \begin{equation}\label{general-tfms} H(z,\theta)=\frac{f_{n_f}(\theta)z^{n_f}+f_{n_f-1}(\theta)z^{n_f-1}+\cdots+f_{0}(\theta)}{z^{n_g}+g_{n_g-1}(\theta)z^{n_g-1}+\cdots+g_{0}(\theta)} \end{equation} where $\theta=\big[\begin{array}{ccc} \theta_1 & \cdots & \theta_q \end{array}\big]\in\mathcal{D}_{\theta_{\mathcal{M}}}$ is the parameter vector, and $\mathcal{D}_{\theta_{\mathcal{M}}}\subset \ensuremath{\mathbb{R}}^q$ is an open set. The \emph{coefficient map} $\mathcal{C}_{\mathcal{M}}:\mathcal{D}_{\theta_{\mathcal{M}}} \rightarrow \ensuremath{\mathbb{R}}^{n_f+n_g+1}$ is defined as \begin{equation}\label{cm} \mathcal{C}_{\mathcal{M}}(\theta)=(f_{n_f}(\theta),\cdots,f_{0}(\theta),g_{n_g-1}(\theta),\cdots, g_{0}(\theta)). \end{equation}
{$\square$} \end{Definition}
The following lemma illustrates the applicability of the coefficient map for studying the structural identifiability.
\begin{Lemma}[\cite{Mahdi2014, Meshkat2014}]\label{lem:Coefmap} Consider a model $\mathcal{M}$ with the transfer function \eqref{general-tfms} and the associated coefficient map \eqref{cm}. Then model $\mathcal{M}$ is \begin{itemize} \item[-] \emph{globally identifiable} if the coefficient map $\mathcal{C}_{\mathcal{M}}$ is one-to-one, \item[-] \emph{identifiable} if the coefficient map $\mathcal{C}_{\mathcal{M}}$ is finitely many-to-one, \item[-] \emph{unidentifiable} if the coefficient map $\mathcal{C}_{\mathcal{M}}$ is infinitely many-to-one. \end{itemize}
{$\square$} \end{Lemma}
An important special case of \eqref{general-tfms} is when the coefficients of the transfer function form the parameter vector, in which case the identifiability is given by the following lemma. \begin{Lemma}\label{lem:cop} Consider the following transfer function \begin{align} \label{Coprime-lemma-TF} H(z,\theta)=\frac{f_{n_f}z^{n_f}+f_{n_f-1}z^{n_f-1}+\cdots+f_{0}}{z^{n_g}+g_{n_g-1}z^{n_g-1}+\cdots+g_{0}}, \end{align} where the parameter vector only consists of the coefficients of the numerator and denominator, i.e. $\theta=[ f_{n_f}, \cdots,f_0, g_{n_g-1},\cdots, g_0]^\top$. Then, the model structure \eqref{Coprime-lemma-TF} is globally identifiable, see Section 4.6 of \cite{Ljung1987}.
{$\square$}
\end{Lemma}
The above lemma is only applicable if the parameter vector is formed by the coefficients of the transfer function. If the coefficients of the transfer function are themselves functions of some parameter vector $\theta$, then the concept of reparametrisation can be used to study the identifiability of the model.
\begin{Definition}\label{def:Rep} Consider a model structure $\mathcal{M}$ with the parameter vector $\theta_\mathcal{M}$ belonging to the open subset $\mathcal{D}_{\theta_\mathcal{M}} \subset \ensuremath{\mathbb{R}}^{q}$. A \emph{reparametrisation} of the model structure $\mathcal{M}$ with the coefficient map $\mathcal{C}_\mathcal{M}$ is a map $\mathcal{R}: \mathcal{D}_{\theta_\mathcal{M}} \to \ensuremath{\mathbb{R}}^{p}$ such that \begin{equation} Im\, (\mathcal{C}_{\mathcal{M}} \circ \mathcal{R}) = Im\, (\mathcal{C}_{\mathcal{M}}), \end{equation} where $Im$ denotes the image of the map, and `$\circ$' denotes composition.
{$\square$} \end{Definition}
Based on the above definitions and lemmas, the process of determining the structural identifiability of a model $\mathcal{M}$ can be divided into three general steps: \begin{algorithmic}[1]
\STATE Compute the transfer function of model $\mathcal{M}$.
\STATE Determine the corresponding coefficient map $\mathcal{C}_{\mathcal{M}}$.
\STATE If $\mathcal{C}_{\mathcal{M}}$ is one-to-one then model $\mathcal{M}$ is globally identifiable; if $\mathcal{C}_{\mathcal{M}}$ is finitely many-to-one then model $\mathcal{M}$ is identifiable; finally if $\mathcal{C}_{\mathcal{M}}$ is infinitely many-to-one then model $\mathcal{M}$ is unidentifiable. \end{algorithmic}
\section{Structural Identifiability in Battery Systems}\label{sec:batsi}
In this section, the proposed identifiability analysis method is applied to battery models and results are discussed. First, a simple integer-order derivative example is considered in order to facilitate the reading of the more involved fractional-order case.
\subsection{Integer-order models}
\label{exRRCstd} Consider the electric circuit as shown in Figure\,\ref{Fig:StdRRC}. This circuit was proposed by Randles in 1947 \cite{Randles1947} for modeling the kinetics of rapid electrode reactions. Since then, the model has become the basis for studying various electrochemical energy storage systems such as batteries, fuel cells and supercapacitors, \cite{Alavi2015a}. The resistor $R_\infty$ in this circuit models the battery ohmic resistance. The resistor $R_1$ and the capacitor $C_1$ denote diffusion processes or the battery charge transfer resistance (CTR) and double layer (DLC) capacitance, respectively.
\begin{figure}
\caption{The Randles circuit with standard capacitor.}
\label{Fig:StdRRC}
\end{figure}
\begin{Proposition} {\it The Randles model given in Figure\,\ref{Fig:StdRRC} with the parameter vector $\theta=[R_\infty, R_1, C_1]$ is structurally globally identifiable. } \end{Proposition}
The above proposition will be shown in three steps.
{\em Step 1:} By Kirchhoff's laws, it is simple to show that a transfer function of the circuit, parameterised by $\theta$, is \begin{align} \label{exRRCstd:tf} H(z,\theta)=\frac{f_1(\theta)z+f_0(\theta)}{z+g_0(\theta)}, \end{align} where \begin{eqnarray}\label{exRRCstd:tfcoef} \begin{aligned} & f_1(\theta)=R_\infty\\ & f_0(\theta)=-R_\infty(1-\frac{T_s}{R_1C_1})+\frac{T_s}{C_1}\\ & g_0(\theta)=-(1-\frac{T_s}{R_1C_1}). \end{aligned} \end{eqnarray}
{\em Step 2:} The coefficient map associated with the model is given by \[ \mathcal{C}: \theta \to \big(f_1(\theta),f_0(\theta),g_0(\theta)\big). \]
{\em Step 3:} For global identifiability it is sufficient to show that the coefficient map is one-to-one. Since the above function is invertible with the inverse given by \[ \mathcal{C}^{-1}: (f_1,f_0,g_0) \to \Big(f_1, \frac{T_s}{C_1(1+g_0)}, \frac{T_s}{f_0-f_1g_0}\Big), \] the model \eqref{exRRCstd:tf} is globally identifiable.
{$\square$}
More details about the structural and practical identifiability of the Randles circuit and its generalised topology are given in \cite{Alavi2015a}.
\begin{figure}
\caption{The general battery electrochemical impedance spectroscopy model.}
\label{Fig:EISgeneral}
\end{figure}
\subsection{Fractional-order models}
In this section, we study the identifiability of FO-ECMs directly from time-domain data. A general impedance schematic of the EIS FO-ECM is shown in Figure \ref{Fig:EISgeneral}. There are two main differences between the EIS FO-ECM Figure \ref{Fig:EISgeneral} and the Randles circuit Figure \ref{Fig:StdRRC}. In the EIS FO-ECM, more than one parallel pair is seen. Each parallel pair is employed to model the battery processes over a certain frequency range. The Randles model can also be developed by adding more parallel pairs as discussed in \cite{Alavi2015a}. The number of parallel pairs depends on the required accuracy for the frequency domain fitting of impedance spectra.
The second and main difference is that the impedance of the capacitor in the Randles circuit is given by the integer derivative, while in the FO-ECMs fractional derivatives are applied. As mentioned in Section \ref{sec:introduction}, these elements are referred to as constant phase elements (CPEs), \cite{Cole1941}. CPEs model diffusion processes (or CTRs/DLCs) more accurately as shown in \cite{Alavi2015}. The impedance of the $i$-th CPE is given by: \begin{align} \label{EIS-CPE} Z_{CPE_i}(s)=\frac{1}{C_is^{\alpha_i}}, \end{align} where $C_i$ is a constant, $s$ is the Laplace operator and $\alpha_i$ ($0 < \alpha_i < 1$) is the exponent value. The dimension of $C_i$ is $\mbox{Fcm}^{-2}s^{\alpha_i-1}$ \cite{Jorcin2006}. In low frequency ranges the impedance frequency response may show constant phase behaviour such that the associated parallel resistor can be considered as an open circuit. This is referred to as the Warburg term in the literature \cite{Barsoukov2005}.
It should be noted that there are techniques that approximate CPEs with ideal capacitors \cite{Plett2004}, or by a series connection of numerous R-C pairs, \cite{Andre2011, Birkl2013, Hu2011}.
The parameter vector associated with Figure \ref{Fig:EISgeneral} is defined as follows: \begin{eqnarray}\label{gen-ecm-parvec} \begin{aligned} \theta=[ R_\infty ,R_1 ,\cdots , R_n , C_1 , \cdots , C_n , \alpha_1 ,\cdots , \alpha_n]. \end{aligned} \end{eqnarray} By defining the voltage across the CPEs as the state variables, \begin{align} x \triangleq \left[\begin{array}{ccc} v_1 & \cdots & v_n\end{array}\right]^\top, \end{align} and by Kirchhoff's laws, it is easy to show that $A_j$, $B$, $M$ and $D$ in the state-space model \eqref{SS:d} are given by:
\begin{eqnarray}\label{gen-ecm-ssd} \begin{aligned} & A_j(\theta)=\mbox{diag}\left\{a_{1,j}(\theta), \cdots, a_{n,j}(\theta)\right\}\\ & B(\theta)=\left[\begin{array}{ccc} b_1(\theta) & \cdots & b_n(\theta)\end{array}\right]^\top\\ & M(\theta)=\left[\begin{array}{ccc} m_1 & \cdots & m_n\end{array}\right]\\ & D(\theta)=d(\theta), \end{aligned} \end{eqnarray} with \begin{eqnarray}\label{gen-ecm-params} \begin{aligned} & a_{i,0}(\theta)= \alpha_i-\frac{T_s^{\alpha_i}}{R_iC_i}\\%\mbox{~for~} i=1,\cdots,n\\ & a_{i,j}(\theta)= -(-1)^{j+1}\binom{\alpha_i}{j+1}\\%\mbox{~for~} i=1,\cdots,n \mbox{~and~} 1 \leq j & b_i(\theta)=\frac{T_s^{\alpha_i}}{C_i}\\%, \mbox{~for~} i=1,\cdots,n\\ & m_i=1\\%, \mbox{~for~} i=1,\cdots,n\\ & d(\theta)=R_\infty\\ & \text{for~}i=1,\cdots,n \mbox{~and~} j=1,2,\cdots,T, \end{aligned} \end{eqnarray} where $T$ is the data length.
By using \eqref{TF-MS-theta}, a transfer function model structure of the circuit FO-ECM of Figure \ref{Fig:EISgeneral} is given by:
\begin{align} \label{TF-gecm} H(z,\theta)=d(\theta)+\displaystyle \sum_{i=1}^{n}\frac{m_ib_i(\theta)z^{T}}{z^{T+1}- \sum_{j=0}^{T} a_{i,j}(\theta)z^{T-j}}. \end{align}
The structural identifiability of FO-ECMs with respectively one and two parallel R-CPE pairs is studied in the following sub-sections. The results provide useful insight into the general case with $n$ CPEs. From a practical perspective, the examples under consideration in the rest of the section are sufficient to represent the basic dynamic behaviour of a large number of electrochemical systems, \cite{Alavi2015, Waag2013}.
\subsubsection{$R_\infty - R_1||\frac{1}{C_1 s^{\alpha_1}}$ circuit}\label{exRRC}
Consider the EIS FO-ECM with a single parallel R-CPE in series with an ohmic resistor as shown in Figure \ref{Fig:fracR-RC}.
\begin{figure}\label{Fig:fracR-RC}
\end{figure}
\begin{Proposition} {\it The FO-ECM shown in Figure \ref{Fig:fracR-RC} with the parameter vector $\theta=[R_\infty,R_1, C_1, \alpha_1]$ is structurally globally identifiable.} \end{Proposition}
The proposition will be shown in three steps.
{\em Step 1:} Using \eqref{TF-gecm} the transfer function is found to be \begin{align}\label{exRRC:tf1} H(z,\theta)=d(\theta)+\frac{b_1(\theta)z^{T}}{z^{T+1}- \sum_{j=0}^{T} a_{1,j}(\theta)z^{T-j}}, \end{align} where the relationships between the transfer function coefficients and the model parameters are given in \eqref{gen-ecm-params}. In order to compute the coefficient map, \eqref{exRRC:tf1} is written as a monic rational function \begin{align} \label{exRRC:tf2}
H(z,\theta)=\frac{f_{T+1}(\theta)z^{T+1}+\cdots+ f_{0}(\theta)}{z^{T+1}+g_{T}(\theta)z^{T}+\cdots+ g_{0}(\theta)}, \end{align} where \begin{eqnarray}\label{exRRC:tfcoef} \begin{aligned} & f_{T+1}(\theta)=d(\theta)\\ & f_{T}(\theta)=b_1(\theta)-a_{1,0}(\theta)d(\theta)\\ & f_{T-j}(\theta)=-a_{1,j}(\theta)d(\theta), \mbox{~for~}1\leq j \leq T\\
& g_{T-j}(\theta)=-a_{1,j}(\theta), \mbox{~for~}0\leq j\leq T.
\end{aligned} \end{eqnarray}
{\em Step 2:} The induced coefficient map is given by \begin{equation}\label{exRRC:cm} \mathcal{C}: \theta \to \Big( f_{T+1}(\theta),\cdots,f_0(\theta),g_{T}(\theta),\cdots,g_0(\theta) \Big). \end{equation}
{\em Step 3:} Now it will be shown that the coefficient map \eqref{exRRC:cm} is one-to-one. First it is noted that it can be written as a composition of two functions \[ \mathcal{C}(\theta) = \mathcal{L} \circ \mathcal{R} (\theta), \] where \[ \mathcal{R}:\theta \to \Big(d(\theta), b_1(\theta), a_{1,T}(\theta), \cdots, a_{1,0}(\theta)\Big), \] where the components of the vector-function on the right-hand side are given in \eqref{gen-ecm-params}, and
\begin{align*} \mathcal{L}:&(d,b_1,a_{1,T},\cdots,a_{1,0}) \to \\ & \hspace{3em} \Big( f_{T+1},\ldots, f_0,g_{T},\cdots, g_0\Big), \end{align*} where the components of this vector-function are given in \eqref{exRRC:tfcoef}.
In order to show that $\mathcal{C}$ is one-to-one it is enough to prove that the maps $\mathcal{R}$ and $\mathcal{L}$, defined above, are both one-to-one.
It is claimed that $\mathcal{L}$ is invertible and thus one-to-one. The inverse map $\mathcal{L}^{-1}$ is obtained by solving the following set of algebraic equations recursively: \begin{eqnarray}\label{exRRC:gd} \begin{aligned} & d= f_{T+1}\\ & a_{1,0}=-g_{T}\\ & b_1=f_{T}-g_{T}f_{T+1}\\ & a_{1,j}=-g_{T-j}, \mbox{~for~}1\leq j\leq T. \end{aligned} \end{eqnarray}
Now it is claimed that $\mathcal{R}$ is one-to-one. First it is shown that $\alpha_1$ can be uniquely determined by solving the second equation in \eqref{gen-ecm-params} for $i=1$, i.e. \begin{equation}\label{ex:a11} a_{1,j}(\alpha_1)= (-1)^{j}\binom{\alpha_1}{j+1}\mbox{~for~} 1 \leq j \leq T, \end{equation}
where again, since $\alpha_1\in(0,1)$, the generalised binomial coefficient is computed using formula \eqref{Gamma_func}. Note that the function $a_{1,1}(\alpha_1)$ is the only function in \eqref{ex:a11} that is symmetric with respect to the vertical line $\alpha_1=0.5$. In fact it can be shown that for any $j\ne 1$, $a_{1,j}(\alpha_1)$ is nonsymmetric (see Figure~\ref{Fig:alphaja1}). The figure shows that $a_{1,j}\approx 0$ for large $j$.
Since each of the functions \eqref{ex:a11} is unimodal, its preimage does not allow for unique determination of the fractional-order $\alpha_1$. However, as shown in Figure \ref{Fig:alphaja2}, the preimages of at least two distinct coefficients $a_{1,r}(\alpha_1)$ and $a_{1,q}(\alpha_1)$, with $1\leq \{r,q\} \leq T$, are sufficient to uniquely determine $\alpha_1$. Using the notation of Figure \ref{Fig:alphaja2}, each equation $a_{1,j}(\alpha_1) = a_{j}$ gives two solutions $\alpha_1^{1,j}$ and $\alpha_1^{2,j}$, where $j \in \{1,\cdots,T\}$. Therefore, two distinct coefficients $a_{1,r}$ and $a_{1,q}$ are used to form a system of algebraic equations, whose common solution is the fractional-order $\alpha_1$. This has been shown in Figure \ref{Fig:alphaja2} for $j=25, 50, 169$ with a true value of $\alpha_1=0.3$ (for the clarity of the presentation, $a_{1,1}$ is not plotted in this figure).
\begin{figure}
\caption{$a_{1,j}$ versus $\alpha_1$ within the range $0 \leq \alpha_1 \leq 1$. It is seen that $a_{1,1}$ is symmetric, and that $a_{1,j}\approx 0$ for large $j$. }
\label{Fig:alphaja1}
\end{figure}
\begin{figure}
\caption{A system of at least two equations $a_{1,r}(\alpha_1) = a_{r}$, $a_{1,q}(\alpha_1) = a_{q}$, where $r\ne q$ and $1\leq \{r,q\} \leq T$ is sufficient in order to uniquely determine the fractional-order $\alpha_1$.}
\label{Fig:alphaja2}
\end{figure}
Having determined $\alpha_1$ the inverse map $\mathcal{R}^{-1}$ can be obtained by solving the following set of equations \[ R_\infty =d, \qquad C_1=\frac{T_s^{\alpha_1}}{b_1},\qquad R_1=\frac{T_s^{\alpha_1}}{(\alpha_1-a_{1,0})C_1}, \] which completes the claim and the proof of the proposition.
{$\square$}
Practical identification of this model using both synthetic and real data has been studied in \cite{Alavi2015}.
\subsubsection{$R_\infty - R_1||\frac{1}{C_1 s^{\alpha_1}} - R_2||\frac{1}{C_2 s^{\alpha_2}}$ circuit} \label{exRRCRC} In this part, a more complex model is considered as shown in Figure \ref{Fig:fracR-RC-RC}. The circuit includes two parallel pairs of R-CPE. If $R_2=\infty$, the CPE models the Warburg term.
\begin{figure}\label{Fig:fracR-RC-RC}
\end{figure}
\begin{Proposition} {\it The FO-ECM Figure~\ref{Fig:fracR-RC-RC} with the parameter vector $\theta=[R_\infty, R_1, R_2, C_1, C_2, \alpha_1, \alpha_2]$ is structurally identifiable.} \end{Proposition}
The above proposition will be shown in three steps.
{\em Step 1:} Using \eqref{TF-gecm}, the transfer function of the circuit is
\begin{align} \label{exRRCRC:tf1} H(z,\theta)=d(\theta)+\displaystyle \sum_{i=1}^{2}\frac{b_i(\theta)z^{T}}{z^{T+1}- \sum_{j=0}^{T} a_{i,j}(\theta)z^{T-j}}, \end{align} with the coefficients given in \eqref{gen-ecm-params}. In a rational function form \eqref{exRRCRC:tf1} is expressed as \begin{align} \nonumber &H(z,\theta)=\\ \label{exRRCRC:tf2} &\frac{f_{2T+2}(\theta)z^{2T+2}+f_{2T+1}(\theta)z^{2T+1}+\cdots+ f_{0}(\theta)}{z^{2T+2}+g_{2T+1}(\theta)z^{2T+1}+\cdots+ g_{0}(\theta)}. \end{align}
For the identifiability analysis, a number of key coefficients are given by: \begin{eqnarray}\label{exRRCRC:tfcoef} \begin{aligned}
f_{2T+2}(\theta)&=d\\
f_{2T+1}(\theta)&=b_1+b_2+d(-a_{1,0}-a_{2,0})\\
f_{2T}(\theta)&=-b_1a_{2,0}-b_2a_{1,0}+\\&\hspace{3em}d(a_{1,0}a_{2,0}-a_{1,1}-a_{2,1})\\
g_{2T+1}(\theta)&=-a_{1,0}-a_{2,0}\\
g_{2T}(\theta)&=-a_{1,1}-a_{2,1}+a_{1,0}a_{2,0}\\
g_{2}(\theta)&=a_{1,T} a_{2,T-2} + a_{1,T-1} a_{2,T-1} +\\&\hspace{3em} a_{1,T-2} a_{2,T}\\
g_{1}(\theta)&=a_{1,T}a_{2,T-1}+a_{1,T-1}a_{2,T}\\
g_{0}(\theta)&=a_{1,T}a_{2,T}. \end{aligned} \end{eqnarray}
{\em Step 2:} The induced coefficient map is given by \[ \mathcal{C}: \theta \to \Big( f_{2T+2}(\theta),\cdots,f_0(\theta),g_{2T+1}(\theta),\cdots,g_0(\theta) \Big). \]
{\em Step 3:} Now it will be shown that the above coefficient map is identifiable, i.e. is finitely many-to-one. In order to show the identifiability the following lemma will be used.
\begin{Lemma}\label{ex2:lem} {\em Consider $g_0$, $g_1$ and $g_2$, the three components of the coefficient map $\mathcal{C}$ expressed in \eqref{exRRCRC:tfcoef}. Then the following relations hold:} \begin{equation}\label{ex2:lemeq} \begin{aligned} &g_1 + g_0(T+1)\left(\frac{1}{\alpha_1-T}+\frac{1}{\alpha_2-T}\right) = 0\\ &g_2 - g_0(T+1)(\hat{a}+\hat{b}+\hat{c}) = 0, \end{aligned} \end{equation} where \begin{align*} &\hat{a}=\frac{T}{(\alpha_2-T)(\alpha_2-T+1)}\\ &\hat{b}=\frac{(T+1)}{(\alpha_1-T)(\alpha_2-T)}\\ &\hat{c}=\frac{T}{(\alpha_1-T)(\alpha_1-T+1)}. \end{align*}
{$\square$} \end{Lemma}
\noindent{\it Proof:} To prove the lemma, first it is noted that the relation between $a_{i,j}$ and $a_{i,j+1}$ is the following:
\begin{align*} a_{i,j+1}&= -(-1)^{j+2}\binom{\alpha_i}{j+2}\\ &=-(-1)^{j+2}\frac{\Gamma(\alpha_i+1)}{\Gamma(j+3)\Gamma(\alpha_i-j-1)}\\ &=-1\times -(-1)^{j+1}\frac{\Gamma(\alpha_i+1)}{(j+2)\Gamma(j+2)\frac{\Gamma(\alpha_i-j)}{(\alpha_i-j-1)}}\\&=-\frac{(\alpha_i-j-1)}{j+2}a_{i,j}. \end{align*}
Therefore, $g_1$ and $g_2$ can be re-written as follows: \begin{align*} \nonumber g_{1} &=a_{1,T}a_{2,T-1}+a_{1,T-1}a_{2,T}=\\ \nonumber & a_{1,T} \frac{-a_{2,T}(T+1)}{\alpha_2-(T-1)-1}+\frac{-a_{1,T}(T+1)}{\alpha_1-(T-1)-1}a_{2,T}\\% \label{exRRCRC:g1} & =-g_0(T+1)\left(\frac{1}{\alpha_2-T}+\frac{1}{\alpha_1-T}\right), \end{align*}
\begin{align*} \nonumber g_{2}&=a_{1,T} a_{2,T-2} + a_{1,T-1} a_{2,T-1} + a_{1,T-2} a_{2,T}\\ \nonumber &=a_{1,T}\frac{-a_{2,T-1}(T-1+1)}{\alpha_2-(T-2)-1}+ \\ \nonumber &\hspace{3em} \frac{-a_{1,T}(T+1)}{\alpha_1-(T-1)-1}\frac{-a_{2,T}(T+1)}{\alpha_2-(T-1)-1}+\\ \nonumber & \hspace{3em}\frac{-a_{1,T-1}(T-1+1)}{\alpha_1-(T-2)-1}a_{2,T}\\ \nonumber & = a_{1,T}\frac{-\frac{-a_{2,T}(T+1)}{\alpha_2-(T-1)-1}(T-1+1)}{\alpha_2-(T-2)-1} +\\ \nonumber & \hspace{3em} \frac{-a_{1,T}(T+1)}{\alpha_1-(T-1)-1}\frac{-a_{2,T}(T+1)}{\alpha_2-(T-1)-1} + \\ \nonumber &\hspace{5em}\frac{-\frac{-a_{1,T}(T+1)}{\alpha_1-(T-1)-1}(T-1+1)}{\alpha_1-(T-2)-1}a_{2,T}\\% \label{exRRCRC:g2} &=g_0(T+1)(\hat{a}+\hat{b}+\hat{c}). \end{align*}
{$\square$}
By the result of Lemma~\ref{ex2:lem} the exponent values $\alpha_1$ and $\alpha_2$ are determined by solving \eqref{ex2:lemeq}. It can be easily verified (using any computer algebra package, e.g.\ Mathematica) that these algebraic equations admit only two real solutions, which are permuted with respect to each other. Since the mathematical expressions of the solutions are quite cumbersome, they are not provided explicitly in this paper.
Finally, it is noted that from the first equation in \eqref{exRRCRC:tfcoef}, the ohmic resistor $R_\infty$ is estimated through the coefficient $f_{2T+2}$, i.e., $R_\infty=f_{2T+2}$. Then $a_{i,j}$'s are calculated recursively, backward from $j=T$ to $j=1$ for $i=1,2$. By obtaining $a_{1,1}$ and $a_{2,1}$, the parameters $a_{1,0}$ and $a_{2,0}$ can be computed from the coefficients $g_{2T}$ and $g_{2T+1}$ in \eqref{exRRCRC:tfcoef}. Then $b_1$ and $b_2$ are computed from the coefficients $f_{2T}$ and $f_{2T+1}$ in \eqref{exRRCRC:tfcoef}. And, for a given solution of \eqref{ex2:lemeq} (there are two real solutions), the parameters $C_1$ and $C_2$ are then obtained by using $b_i=T_s^{\alpha_i}/C_i$, and finally $R_1$ and $R_2$ are calculated from $a_{1,0}$ and $a_{2,0}$, respectively. This completes Step~3 and the proof of the proposition.
{$\square$}
\section{Conclusions}
A method was proposed for the structural identifiability analysis of fractional order (FO) systems based on the concept of coefficient map. The method is applicable to both commensurate and non-commensurate models and was applied to determine the structural identifiability of battery fractional-order equivalent circuit models (FO-ECMs). This study has shown that a battery FO-ECM is structurally identifiable, and the global identifiability was proved for the FO-ECM with a single constant phase element.
\end{document} | arXiv | {
"id": "1511.01402.tex",
"language_detection_score": 0.6857235431671143,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{abstract} We consider a problem on the conditions of a compact Lie group $G$ that the loop space of the $p$--completed classifying space be a $p$--compact group for a set of primes. In particular, we discuss the classifying spaces $BG$ that are $p$--compact for all primes when the groups are certain subgroups of simple Lie groups. A survey of the $p$--compactness of $BG$ for a single prime is included. \end{abstract}
\maketitle
A $p$--compact group (see Dwyer--Wilkerson \cite{DWb}) is a loop space $X$ such that $X$ is $\F_p$--finite and that its classifying space $BX$ is $\F_p$--complete (see Andersen--Grodal--M{\o}ller--Viruel~\cite{AGMV} and Dwyer--Wilkerson~\cite{DWn}). We recall that the $p$--completion of a compact Lie group $G$ is a $p$--compact group if $\pi_0(G)$ is a $p$--group. Next, if $C(\rho )$ denotes the centralizer of a group homomorphism $\rho$ from a $p$--toral group to a compact Lie group, according to \cite[Theorem~6.1]{DWb}, the loop space of the $p$--completion $\Omega (BC(\rho ))\p$ is a $p$--compact group.
In a previous article \cite{Ig2}, the classifying space $BG$ is said to be {\it $p$--compact} if $\Omega (BG)\p$ is a $p$--compact group. There are some results for a special case. A survey is given in \fullref{sec1}. It is well-known that, if $\Sigma_3$ denotes the symmetric group of order $6$, then $B\Sigma_3$ is not $3$--compact. In fact, for a finite group $G$, the classifying space $BG$ is $p$--compact if and only if $G$ is $p$--nilpotent. Moreover, we will see that $BG$ is $p$--compact toral (see Ishiguro \cite{It}) if and only if the compact Lie group $G$ is $p$--nilpotent (see Henn \cite{He}). For the general case, we have no group theoretical characterization, though a few necessary conditions are available. This problem is also discussed in the theory of $p$--local groups (see Broto, Levi and Oliver \cite{BLO,BLOs}) from a different point of view.
We consider the $p$--compactness of $BG$ for a set of primes. Let $\Pi$ denote the set of all primes. For a non--empty subset $\mathbb{P}$ of $\Pi$, we say that $BG$ is {\it $\mathbb{P}$--compact} if this space is $p$--compact for any $p \in \mathbb{P}$. If $G$ is connected, then $\Omega (BG)\p \simeq G\p$ for any prime $p$, and hence $BG$ is $\Pi$--compact. The connectivity condition, however, is not necessary. For instance, the classifying space of each orthogonal group $\mathit{O}(n)$ is also $\Pi$--compact. Since $\pi_0(\mathit{O}(n))=\Z /2$ is a $2$--group, $B\mathit{O}(n)$ is $2$--compact, and for any odd prime $p$, the $p$--equivalences $B\mathit{O}(2m) \simeq_{p} B\mathit{O}(2m+1) \simeq_{p} B\mathit{SO}(2m+1)$ tell us that $B\mathit{O}(n)$ is $\Pi$--compact.
Next let $\mathbb{P} (BG)$ denote the set of primes $p$ such that $BG$ is $p$--compact. In \cite{It} the author has determined $\mathbb{P} (BG)$ when $G$ is the normalizer $NT$ of a maximal torus $T$ of a connected compact simple Lie group $K$ with Weyl group $W(K)$. Namely $$ \mathbb{P}(BNT) = \begin{cases} \Pi &\text{if } W(K) \text{ is a 2--group,}\\
\{ p \in \Pi~|~|W(K)| \not\equiv 0 \mod p \} &\text{otherwise.} \end{cases} $$ Other examples are given by a subgroup $H\cong SU(3)\rtimes \Z/2$ of the exceptional Lie group $G_2$ and its quotient group $\Gamma_2 =H/(\Z/3)$. $$ \CD \Z /3 @= \Z /3 @>>> * \\ @VVV @VVV @VVV \\ SU(3) @>>> H @>>> \Z /2 \\
@VVV @VVV @| \\ PU(3) @>>> \Gamma_2 @>>> \Z /2 \endCD $$ A result of \cite{Ig2} implies that $\mathbb{P} (BH) =\Pi$ and $\mathbb{P} (B\Gamma_2) =\Pi -\{ 3\}$.
In this paper we explore some necessary and sufficient conditions for a compact Lie group to be $\Pi$--compact. First we consider a special case. We say that $BG$ is {\it $\mathbb{P}$--compact toral} if for each $p \in \mathbb{P}$ the loop space $\Omega (BG)\p$ is expressed as an extension of a $p$--compact torus $T\p$ by a finite $p$--group $\pi$ so that there is a fibration $(BT)\p \longrightarrow (BG)\p \longrightarrow B\pi$. Obviously, if $BG$ is $\mathbb{P}$--compact toral, the space is $\mathbb{P}$--compact. A necessary and sufficient condition that $BG$ be $p$--compact toral is given in \cite{It}. As an application, we obtain the following:
\begin{bigthm} \label{thm1} Suppose $G$ is a compact Lie group, and $G_0$ denotes its connected component with the identity. Then $BG$ is $\Pi$--compact toral if and only if the following two conditions hold: \begin{enumerate} \item[\rm(a)] $G_0$ is a torus $T$, and the group $G/G_0=\pi_0G$ is nilpotent. \item[\rm(b)] $T$ is a central subgroup of $G$. \end{enumerate} \end{bigthm}
For a torus $T$ and a finite nilpotent group $\gamma$, the product group $G=T \times \gamma$ satisfies conditions (a) and (b). Thus $BG$ is $\Pi$--compact toral. \fullref{prop2.1} will show, however, that a group $G$ with $BG$ being $\Pi$--compact toral need not be a product group.
Next we ask if $BH$ is $\mathbb{P}$--compact when $H$ is a subgroup of a simple Lie group $G$. For $\mathbb{P} =\Pi$, the following result determines certain types of $(G, H_0)$ where $H_0$ is the connected component of the identity. We have seen the cases of $(G, H)=(G, NT)$ when $W(G)=NT/T$ is a $2$--group, and of $(G, H)=(G_2, SU(3)\rtimes \Z/2)$ which is considered as a case with $(G, H_0)=(G_2, A_2)$. Recall that the Lie algebra of $SU(n+1)$ is simple of type $A_n$, and the Lie group $SU(3)$ is of $A_2$--type (see Bourbaki~\cite{Bourbaki}).
\begin{bigthm} \label{thm2} Suppose a connected compact Lie group $G$ is simple. Suppose also that $H$ is a proper closed subgroup of $G$ with $\rank(H_0)=\rank(G)$, and that the map $BH {\longrightarrow} BG$ induced by the inclusion is $p$--equivalent for some $p$. Then the following hold:
\begin{enumerate} \item[\rm(a)] If the space $BH$ is $\Pi$--compact, $(G, H_0)$ is one of the following types: \[ (G, H_0)= \left\{ \begin{array}{llll} (G, T_G) &\mbox{for $G=A_1$ \ or \ $B_2(=C_2)$ } \\ (B_n, D_n) \\ (C_2, A_1 \times A_1) \\ (G_2, A_2) \end{array} \right. \] where $T_G$ is the maximal torus of $G$.
\item[\rm(b)] For any odd prime $p$, all above types are realizable. Namely, there are $G$ and $H$ of types as above such that $BH$ is $\Pi$--compact, together with the $p$--equivalent map $BH {\longrightarrow} BG$. When $p=2$, any such pair $(G, H)$ is not realizable. \end{enumerate} \end{bigthm}
We make a remark about covering groups. Note that if $\alpha {\longrightarrow} \wt G {\longrightarrow} G$ is a finite covering, then $\alpha$ is a central subgroup of $ \wt G$. For a central extension $\alpha {\longrightarrow} \wt G {\longrightarrow} G$ and a subgroup $H$ of $G$, we consider the following commutative diagram: $$ \CD \alpha @>>> \wt G @>>> G \\
@| @AAA @AAA \\ \alpha @>>> \wt H @>>> H \endCD $$ \noindent Obviously the vertical map $H {\longrightarrow} G$ is the inclusion, and $\wt H$ is the induced subgroup of $ \wt G$. We will show that the pair $(G, H)$ satisfies the conditions of \fullref{thm2} if and only if its cover $(\wt G, \wt H)$ satisfies those of \fullref{thm2}. Examples of the type $(G, H_0)=(B_n, D_n)$, for instance, can be given by $(\mathit{SO}(2n+1), \mathit{O}(2n))$ and the double cover $(\mathit{Spin}(2n+1), \mathit{Pin}(2n))$.
For the case $(G, H_0)=(G_2, A_2)$, we have seen that $H$ has a finite normal subgroup $\Z/3$, and that for its quotient group $\Gamma_2$ the classifying space $B\Gamma_2$ is $p$--compact if and only if $p\ne 3$. So $\mathbb{P} (B\Gamma_2) \ne \Pi$. The following result shows that this is the only case. Namely, if $\Gamma$ is such a quotient group for $(G, H_0) \ne (G_2, A_2)$, then $\mathbb{P} (B\Gamma) = \Pi$.
\begin{bigthm} \label{thm3} Let $(G, H)$ be a pair of compact Lie groups as in \fullref{thm2}. For a finite normal subgroup $\nu$ of $H$, let $\Gamma$ denote the quotient group $H/\nu$. If $(G, H_0) \ne (G_2, A_2)$, then $B\Gamma$ is $\Pi$--compact. \end{bigthm}
The author would like to thank the referee for the numerous suggestions.
\section{A survey of the $p$--compactness of $BG$ } \label{sec1}
We summarize work of earlier articles \cite{Ig2,It} together with some basic results, in order to introduce the problem of $p$--compactness. For a compact Lie group $G$, the classifying space $BG$ is $p$--compact if and only if $\Omega (BG)\p$ is $\F_p$--finite. So it is a mod $p$ finite H--space. The space $B\Sigma_3$ is not $p$--compact for $p=3$. We notice that $\Omega (B\Sigma_3)\3$ is not a mod $3$ finite H--space, since the degree of the first non--zero homotopy group of $\Omega (B\Sigma_3)\3$ is not odd. Actually there is a fibration $\Omega (B\Sigma_3)\3 {\longrightarrow} (S^3)\3 {\longrightarrow} (S^3)\3$ (see Bousfield and Kan \cite{BK}).
First we consider whether $BG$ is $p$--compact toral, as a special case. When $G$ is finite, this is the same as asking if $BG$ is $p$--compact. Note that, for a finite group $\pi$, the classifying space $B\pi$ is an Eilenberg--MacLane space $K(\pi , 1)$. Since $(BT)\p$ is also Eilenberg--MacLane, for $BG$ being $p$--compact toral, the $n$--th homotopy groups of $(BG)\p$ are zero for $n \ge 3$. A converse to this fact is the following.
\begin{thm}{\rm\cite[Theorem~1]{It}}\qua Suppose $G$ is a compact Lie group, and $X$ is a $p$--compact group. Then we have the following: \begin{enumerate} \item[\rm(i)] If there is a positive integer $k$ such that $\pi_n((BG)\p )=0$ for any $n \ge k$, then $BG$ is $p$--compact toral.
\item[\rm(ii)] If there is a positive integer $k$ such that $\pi_n(BX)=0$ for any $n \ge k$, then $X$ is a $p$--compact toral group. \end{enumerate} \end{thm}
This theorem is also a consequence of work of Grodal \cite{Gt,Gs}.
A finite group $\gamma$ is $p$--nilpotent if and only if $\gamma$ is expressed as the semidirect product $\nu \rtimes \gamma_p$, where $\nu$ is the subgroup generated by all elements of order prime to $p$, and where $\gamma_p$ is the $p$--Sylow subgroup. The group $\Sigma_3$ is $p$--nilpotent if and only if $p\ne 3$. Recall that a fibration of connected spaces $F {\longrightarrow} E {\longrightarrow} B$ is said to be preserved by the $p$--completion if $F\p {\longrightarrow} E\p {\longrightarrow} B\p$ is again a fibration. When $\pi_0(G)$ is a $p$--group, a result of Bousfield and Kan \cite{BK} implies that the fibration $BG_0 {\longrightarrow} BG {\longrightarrow} B\pi_0G$ is preserved by the $p$--completion, and $BG$ is $p$--compact.
We have the following necessary and sufficient conditions that $BG$ be $p$--compact toral. \begin{thm}{\rm\cite[Theorem~2]{It}}\qua Suppose $G$ is a compact Lie group, and $G_0$ is the connected component with the identity. Then $BG$ is $p$--compact toral if and only if the following conditions hold: \begin{enumerate} \item[\rm(a)] $G_0$ is a torus $T$ and $G/G_0=\pi_0G$ is $p$--nilpotent.
\item[\rm(b)] The fibration $BT {\longrightarrow} BG {\longrightarrow} B\pi_0G$ is preserved by the $p$--completion. \end{enumerate} Moreover, the $p$--completed fibration $(BT)\p {\longrightarrow} (BG)\p {\longrightarrow} (B\pi_0G)\p$ splits if and only if $T$ is a central subgroup of $G$. \end{thm}
Next we consider the general case. What are the conditions that $BG$ be $p$--compact? For example, for the normalizer $NT$ of a maximal torus $T$ of a connected compact Lie group $K$, it is well--known that $(BNT)\p \simeq (BK)\p$ if $p$ does not divide the order of the Weyl group $W(K)$. This means that $BNT$ is $p$--compact for such $p$. Using the following result, we can show the converse.
\begin{prop}{\rm\cite[Proposition~3.1]{It}}\qua If $BG$ is $p$--compact, then the following hold: \begin{enumerate} \item[\rm(a)] $\pi_0G$ is $p$--nilpotent.
\item[\rm(b)] $\pi_1((BG)\p )$ is isomorphic to a $p$--Sylow subgroup of $\pi_0G$. \end{enumerate} \end{prop}
The necessary condition of this proposition is not sufficient, even though the rational cohomology of $(BG)\p$ is assumed to be expressed as a ring of invariants under the action of a group generated by pseudoreflections.
\begin{thm}{\rm\cite[Theorem~1]{Ig2}}\qua \label{thm04} Let $G=\Gamma_2$, the quotient group of a subgroup $SU(3)\rtimes \Z/2$ of the exceptional Lie group $G_2$. For $p=3$, the following hold: \begin{enumerate} \item[\rm(1)] $\pi_0G$ is $p$--nilpotent and $\pi_1((BG)\p )$ is isomorphic to a $p$--Sylow subgroup of $\pi_0G$.
\item[\rm(2)] $(BG)\p$ is rationally equivalent to $(BG_2)\p$.
\item[\rm(3)] $BG$ is not $p$--compact. \end{enumerate} \end{thm}
We discuss invariant rings and some properties of $B\Gamma_2$ and $BG_2$ at $p=3$. Suppose $G$ is a compact connected Lie group. The Weyl group $W(G)$ acts on its maximal torus $T^n$, and the integral representation $W(G) {\longrightarrow} GL(n, \Z )$ is obtained (see Dwyer and Wilkerson~\cite{DWgs,DWc}). It is well--known that $K(BG)\cong K(BT^n)^{W(G)}$ and $H^*(BG; \F_p)\cong H^*(BT^n; \F_p)^{W(G)}$ for large $p$. Let $W(G)^*$ denote the dual representation of $W(G)$. Although the mod 3 reductions of the integral representations of $W(G_2)$ and $W(G_2)^*$ are not equivalent, there is $\psi \in GL(2, \Z )$ such that $\psi W(G_2) \psi^{-1} = W(G_2)^*$ \cite[Lemma~3]{Ig2}. Consequently, $K(BT^2 ; \Z \3)^{W(G_2)} \cong K(BT^2 ; \Z \3)^{W(G_2)^*} $. Since $K(B\Gamma_2 ; \Z \3) \cong K(BT^2 ; \Z \3)^{W(G_2)^*} $, we have the following result.
\begin{thm}{\rm\cite[Theorem~3]{Ig2}}\qua Let $\Gamma_2$ be the compact Lie group as in \fullref{thm04}.
Then the following hold: \begin{enumerate} \item[\rm(1)] The $3$--adic K-theory $K(B\Gamma_2 ; \Z \3)$ is isomorphic to $K(BG_2 ; \Z \3)$ as a $\lambda$--ring. \item[\rm(2)] Let $\Gamma$ be a compact Lie group such that $\Gamma_0=PU(3)$ and the order of $\pi_0(\Gamma)$ is not divisible by $3$. Then any map from $(B\Gamma)\3$ to $(BG_2)\3$ is null homotopic. In particular $[(B\Gamma_2)\3, (BG_2)\3]=0$. \end{enumerate}
We recall that if a connected compact Lie group $G$ is simple, the following results hold:
\begin{enumerate} \item For any prime $p$, the space $(BG)\p$ has no nontrivial retracts (see Ishiguro~\cite{Isr}).
\item Assume $|W(G)|\equiv 0\ \text{mod} \ p$. If a self-map $(BG)\p {\longrightarrow} (BG)\p$ is not null homotopic, it is a homotopy equivalence (see M{\o}ller~\cite{Moeb}).
\item Assume $|W(G)|\equiv 0\ \text{mod} \ p$, and let $K$ be a compact Lie group. If a map $f\co (BG)\p {\longrightarrow} (BK)\p$ is trivial in mod $p$ cohomology, then $f$ is null homotopic (see Ishiguro~\cite{Is}). \end{enumerate}
Replacing $G$ by $\Gamma_2$ at $p=3$, we will see that (3) still holds. On the other hand it is not known if (1) and (2) hold, though on the level of K-theory they do.
\section{$\Pi$--compact toral groups}
Recall that a finite group $\gamma$ is {\it $p$--nilpotent} if and only if $\gamma$ is expressed as the semidirect product $\nu \rtimes \gamma_p$, where the normal $p$--complement $\nu$ is the subgroup generated by all elements of order prime to $p$, and where $\gamma_p$ is the $p$--Sylow subgroup. For such a group $\gamma$, we see $(B\gamma)\p \simeq B\gamma_p$. For a finite group $G$, one can show that
$\mathbb{P} (BG) =\{ p \in \Pi \ | \ G\ \text{is $p$--nilpotent} \} $. Consequently, if $G=\Sigma_n$, the symmetric group on $n$ letters, then $\mathbb{P} (B\Sigma_2) = \Pi$,
$\mathbb{P} (B\Sigma_3) =\Pi -\{ 3\}$, and $\mathbb{P} (B\Sigma_n) =\{ p \in \Pi \ | \ p>n \}$ for $n\ge 4$.
In \cite{He}, Henn provides a generalized definition of $p$--nilpotence for compact Lie groups. A compact Lie group $G$ is \emph{$p$--nilpotent} if and only if the connected component of the identity, $G_0$, is a torus; the finite group $\pi_0 G$ is $p$--nilpotent; and the conjugation action of the normal $p$--complement is trivial on $T$. We note that such a $p$--nilpotent group need not be a semidirect product.
Let $\gamma = \pi_0G$. Then, from the inclusion $\gamma_p {\longrightarrow} \gamma$, a subgroup $G_p$ of $G$ is obtained as follows: $$ \CD T @>>> G @>>> \gamma \\
@| @AAA @AAA \\ T @>>> G_p @>>> \gamma_p \endCD $$ A result of Henn \cite{He} shows $(BG)\p \simeq (BG_p)\p$ if and only if the compact Lie group $G$ is $p$--nilpotent.
\begin{lem} A classifying space $BG$ is $p$--compact toral if and only if the compact Lie group $G$ is $p$--nilpotent. \end{lem}
\begin{proof} If $BG$ is $p$--compact toral, we see from \cite[Theorem~2]{It} that the fibration $BT {\longrightarrow} BG {\longrightarrow} B\pi_0G$ is preserved by the $p$--completion. Let $\pi =\pi_0G$. Then we obtain the following commutative diagram: $$ \CD (BT)\p @>>> (BG)\p @>>> (B\pi)\p \\
@| @AAA @AAA \\ (BT)\p @>>> (BG_p)\p @>>> (B\pi_p)\p \endCD $$ By \cite[Theorem~2]{It}, the finite group $\pi$ is $p$--nilpotent, so the map $(B\pi_p)\p {\longrightarrow} (B\pi)\p$ is a homotopy equivalence. Thus $(BG)\p \simeq (BG_p)\p$, and hence the result of \cite{He} implies that $G$ is $p$--nilpotent. Conversely, if $G$ is $p$--nilpotent, then the following commutative diagram $$ \CD BT @>>> BG @>>> B\pi \\
@| @AAA @AAA \\ BT @>>> BG_p @>>> B\pi_p \endCD $$ tells us that $BT {\longrightarrow} BG {\longrightarrow} B\pi$ is $p$--equivalent to the fibration $$(BT)\p {\longrightarrow} (BG_p)\p {\longrightarrow} (B\pi_p)\p.$$ From \cite[Theorem~2]{It}, we see that $BG$ is $p$--compact toral. \end{proof}
\begin{proof}[Proof of \fullref{thm1}] First suppose $BG$ is $\Pi$--compact toral. Lemma 2.1 implies that $G_0$ is a torus $T$ and $G/G_0=\pi_0G$ is $p$--nilpotent for any $p$. According to \cite[Lemma~2.1]{It}, the group $\pi_0G$ must be nilpotent. We notice that for each $p$ the normal $p$--complement of $\pi_0G$ acts trivially on $T$. Thus $\pi_0G$ itself acts trivially on $T$, and $T$ is a central subgroup of $G$. Conversely, assume that conditions (a) and (b) hold. According to \cite[Proposition~1.3]{He}, we see that $G$ is $p$--nilpotent for any $p$. Therefore $BG$ is $\Pi$--compact toral. \end{proof}
We will show that a group which satisfies conditions (a) and (b) of \fullref{thm1}
need not be a product group. For instance, consider the quaternion group $Q_8$ in $SU(2)$. Recall that the group can be presented as $Q_8=\langle x, y\ |\ x^4=1, x^2=y^2, yxy^{-1}=x^{-1}\rangle$. Let $\rho\co Q_8 {\longrightarrow} U(2)$ be a faithful representation given by the following: $$ \begin{matrix} \rho (x) =\begin{pmatrix} i &0\\
0&-i\end{pmatrix} &,\ \ \ \rho (y)=\begin{pmatrix} 0&-1\\ 1&0 \end{pmatrix} \end{matrix} $$ \noindent Let $S$ denote the center of the unitary group $U(2)$ and let $G$ be the subgroup of $U(2)$ generated by $\rho (Q_8 )$ and $S$. Then we obtain the group extension $S {\longrightarrow} G {\longrightarrow} \Z /2 \oplus \Z /2$. Since $S\cong S^1$, this group $G$ satisfies conditions (a) and (b). On the other hand, we see that the non--abelian group $G$ can not be a product group. This result can be generalized as follows:
\begin{prop} \label{prop2.1} Suppose $\rho \co \pi {\longrightarrow} U(n)$ is a faithful irreducible representation for a non--abelian finite nilpotent group $\pi$. Let $S$ be the center of the unitary group $U(n)$ and let $G$ be the subgroup of $U(n)$ generated by $\rho (\pi )$ and $S$ with group extension $S {\longrightarrow} G {\longrightarrow} \pi_0G$. Then this extension does not split, and $G$ satisfies conditions (a) and (b) of \fullref{thm1}. \end{prop}
\begin{proof} First we show that $G$ satisfies conditions (a) and (b) of \fullref{thm1}. Since $\pi$ is nilpotent, so is the finite group $\pi_0G \cong G/S$. Recall that the center of the unitary group $U(n)$ consists of scalar matrices, and is isomorphic to $S^1$. Thus we obtain the desired result.
Next we show that the group extension $S {\longrightarrow} G {\longrightarrow} \pi_0G$ does not split. If this extension did split, then we would have $G \cong S \rtimes \pi_0G$. Since the action of $\pi_0G$ on the center $S$ is trivial, it follows that $G$ is isomorphic to the product group $S \times \pi_0G$. Let $Z(\pi )$ denote the center of $\pi$. Since the representation $\rho \co \pi {\longrightarrow} U(n)$ is irreducible and faithful, Schur's Lemma implies $S\cap \rho (\pi ) =Z(\rho (\pi )) \cong Z(\pi )$. Thus we obtain the following commutative diagram: $$ \CD S @>>> G @>>> \pi_0G \\
@AAA @AAA @| \\ Z(\pi ) @>>> \pi @>q >> \pi_0G \endCD $$ Regarding $\pi$ as a subgroup of $G=S \times \pi_0G$, an element $y\in \pi$ can be written as $y=(s, x)$ for $s\in S$ and $x\in \pi_0G$. Notice that $\pi_0G$ is nilpotent and this group has a non--trivial center, since $\pi$ is non--abelian. The map $q \co \pi {\longrightarrow} \pi_0G$ is an epimorphism. Consequently we can find an element $y_0=(s_0, x_0)$ where $s_0 \in S$ and $x_0$ is a non--identity element of $Z(\pi_0G )$. This means that $y_0$ is contained in $Z(\pi )$, though $q(y_0)$ is a non--identity element. This contradiction completes the proof. \end{proof}
\section{$\Pi$--compact subgroups of simple Lie groups }
We will need the following results to prove \fullref{thm2}.
\begin{lem} \label{lem3.1} Let $K$ be a compact Lie group, and let $G$ be a connected compact Lie group. If $(BK)\p \simeq (BG)\p$ for some $p$, we have a group extension as follows: $$1{\longrightarrow} W(K_0){\longrightarrow} W(G) {\longrightarrow} \pi_0K {\longrightarrow} 1$$ \end{lem}
\begin{proof} It is well--known that $H^*((BG)\p ; \Q) = H^*((BT_G)\p ; \Q)^{W(G)}$, and since $(BK)\p \simeq (BG)\p$, it follows that $H^*((BG)\p ; \Q) = H^*((BK)\p ; \Q)$. Notice that $H^*((BK)\p ; \Q) = \smash{H^*((BK_0)\p ; \Q)^{\pi_0K}}
= \smash{(H^*((BT_{K_0})\p ; \Q)^{W(K_0)})^{\pi_0K}}$. Galois theory for the invariant rings (see Smith \cite{Si}) tells us that $W(K_0)$ is a normal subgroup of $W(G)$ and that the quotient group $W(G)/W(K_0)$ is isomorphic to $\pi_0K$. This completes the proof. \end{proof}
\begin{lem} \label{lem3.2} For a compact Lie group $K$, suppose the loop space of the $p$--completion $\Omega (BK)\p$ is a connected $p$--compact group. Then $p$ does not divide the order of $\pi_0K$. \end{lem}
\begin{proof} Since $BK$ is $p$--compact, $\pi_0K$ is $p$--nilpotent. So, if $\pi$ denotes a $p$--Sylow subgroup of $\pi_0K$, then $(B\pi_0K)\p \simeq B\pi$. Notice that $(BK)\p$ is 1--connected. Hence the map $(BK)\p {\longrightarrow} (B\pi_0K)\p$ induced from the epimorphism $K {\longrightarrow} \pi_0K$ is a null map. Consequently the $p$--Sylow subgroup $\pi$ must be trivial. \end{proof}
For $K=NT$, the normalizer of a maximal torus $T$ of a connected compact simple Lie group, the converse of \fullref{lem3.2} is true, though it does not hold in general. Note that $\pi_0\Gamma_2 =\Z/2$ and that $B\Gamma_2$ is not $3$--compact \cite{Ig2}.
\begin{proof}[Proof of \fullref{thm2}] (1)\qua Since $(BH)\p \simeq (BG)\p$ for some $p$, \fullref{lem3.1} says that the Weyl group $W(H_0)$ is a normal subgroup of $W(G)$. First we show that $W(H_0) \ne W(G)$. If $W(H_0) = W(G)$, the inclusion $H_0 {\longrightarrow} G$ induces the isomorphism $H^*(BH_0 ; \Q) \cong H^*(BG ; \Q)$, since $\rank(H_0)=\rank(G)$. Hence $BH_0 \simeq_0 BG$. Consequently if $\wt{H}_0$ and $\wt{G}$ denote the universal covering groups of $H_0$ and $G$ respectively, then $\wt{H}_0 \cong \wt{G}$. The maps $B\wt{H}_0 {\longrightarrow} BH_0$ and $B\wt{G} {\longrightarrow} BG$ are rational equivalences. According to \cite[Lemma~2.2]{Ia}, we would see that $H_0 = H = G$. Since $H$ must be a proper subgroup of $G$, we obtain the desired result.
We now see that $W(H_0)$ is a proper normal subgroup of $W(G)$. If $W(H_0)$ is a nontrivial group, a result of Asano~\cite{A} implies that $(G, H_0)$ is one of the following types: $$ (G, H_0)= \begin{cases} (B_n, D_n) \\ (C_n, A_1 \times \cdots \times A_1) \\ (G_2, A_2) \\ (F_4, D_4) \end{cases} $$ According to \cite[Lemma~2.1 and Proposition~3.1]{It}, we notice $\pi_0H = W(G)/W(H_0)$ is a nilpotent group since $BH$ is $\Pi$--compact. Recall that $W(C_n)/W(A_1 \times \cdots \times A_1) \cong \Sigma_n$ and $W(F_4)/W(D_4) \cong \Sigma_3$. For $n \ge 3$, we notice that the symmetric group $\Sigma_n$ is not nilpotent. Hence the group $W(G)/W(H_0)$ is nilpotent only when $W(G)/W(H_0) \cong \Z /2$. Consequently, we see the following: $$ (G, H_0)= \left\{ \aligned &(B_n, D_n) \\ &(C_2, A_1 \times A_1) \\ &(G_2, A_2) \endaligned \right. $$ It remains to consider the case that $W(H_0)$ is a trivial group. In this case $\pi_0H = W(G)$, and $W(G)$ is a nilpotent group. From \cite[Proposition~3.4]{It}, we see that $G=A_1\ \text{or}\ B_2(=C_2)$.
(2)\qua We first show that, for any odd prime $p$, all types of the pairs are realized for some $G$ and $H$. To begin with, we consider the case $(G, T_G)$ for $G=A_1$. Take $(G, H)=(\mathit{SO}(3), \mathit{O}(2))$. Since $\pi_0(\mathit{O}(2))=\Z /2$ and $B\mathit{O}(2) \simeq_p B\mathit{SO}(3)$ for odd prime $p$, the space $B\mathit{O}(2)$ is $\Pi$--compact. In the case $G=B_2$, take $(G, H)=(G, NT_G)$ for $G=\mathit{Spin}(5)$. Then $\pi_0H$ is a $2$--group and $BNT_G \simeq_p BG$ for odd prime $p$, and hence $BNT_G$ is $\Pi$--compact.
In the case of $(B_n, D_n)$, take $(G, H)=(\mathit{SO}(2n+1), \mathit{O}(2n))$. Since $\pi_0(\mathit{O}(2n))=\Z /2$ and $B\mathit{O}(2n) \simeq_p B\mathit{SO}(2n+1)$ for odd prime $p$, the space $B\mathit{O}(2n)$ is $\Pi$--compact. For $(C_2, A_1 \times A_1)$, take $G=\mathit{Sp}(2)$ and $H=(\mathit{Sp}(1) \times \mathit{Sp}(1))\rtimes \Z /2\langle a \rangle$ where $a=\bigl(\begin{smallmatrix} 0 &1\\ 1&0 \end{smallmatrix}\bigr) \in \mathit{Sp}(2)$. For complex numbers $z$ and $w$, we see that $$ \begin{matrix} \begin{pmatrix} 0 &1\\ 1&0 \end{pmatrix} \begin{pmatrix} z&0\\ 0&w \end{pmatrix} \begin{pmatrix} 0 &1\\ 1&0\end{pmatrix} &= \begin{pmatrix} w&0\\ 0& z \end{pmatrix} \end{matrix}. $$ \noindent Thus the action of $\Z /2\langle a\rangle$ is given by $\bigl(\begin{smallmatrix} 0 &1\\ 1&0 \end{smallmatrix}\bigr) $. We note that $$ W(\mathit{Sp}(2))=D_8=\left\langle \begin{matrix} \begin{pmatrix} -1 &0\\ 0&1 \end{pmatrix}, \begin{pmatrix} 1 &0\\ 0&-1 \end{pmatrix}, \begin{pmatrix} 0 &1\\ 1&0 \end{pmatrix} \end{matrix} \right\rangle. $$ Consequently $\pi_0H$ is a $2$--group and $BH \simeq_p BG$ for odd prime $p$, and hence $BH$ is $\Pi$--compact. Finally, for $(G_2, A_2)$, as mentioned in the introduction, take $G=G_2$ and $H=SU(3)\rtimes \Z/2$. Then $BH$ is $\Pi$--compact.
It remains to consider the case $p=2$. Note that $|W(G)/ W(H_0)|$ for each such pair $(G, H_0)$ is a power of $2$. \fullref{lem3.1} implies that the finite group $\pi_0H$ must be a $2$--group. \fullref{lem3.2} says that $|\pi_0H|$ is not divisible by $2$, since $(BH)\2 \simeq (BG)\2$. Thus $H$ is connected, and hence $H=G$. This completes the proof. \end{proof}
Any proper closed subgroup of $G$ which includes the normalizer $NT$ satisfies the assumption of \fullref{thm2}. So, this theorem shows, once again, that almost all $BNT$ are not $\Pi$--compact \cite{It}. Furthermore, for any connected compact Lie group $G$, it is well-known that $(BNT)\p \simeq (BG)\p$ if $p$ does not divide the order of the Weyl group $W(G)$, hence $BNT$ is $p$--compact for such $p$. The converse is shown in \cite{It}.
\begin{lem} \label{lem3.3} Let $\alpha {\longrightarrow} \wt G {\longrightarrow} G$ be a central extension of compact Lie groups. Then $BG$ is $p$--compact if and only if $B\wt G$ is $p$--compact. \end{lem}
\begin{proof} First assume that $BG$ is $p$--compact. Since $\alpha {\longrightarrow} \wt G {\longrightarrow} G$ is a central extension, the fibration $B\alpha {\longrightarrow} B\wt G {\longrightarrow} BG$ is principal. Thus we obtain a fibration $B\wt G {\longrightarrow} BG {\longrightarrow} K(\alpha ,2)$. The base space is $1$--connected, so the fibration is preserved by the $p$--completion, and hence we obtain the fibration $$(B\alpha )\p {\longrightarrow} (B\wt G)\p {\longrightarrow} (BG)\p.$$ Since the loop spaces $\Omega (B\alpha )\p$ and $\Omega (BG)\p$ are $\F_p$--finite, so is $\smash{\Omega (B\wt G)\p}$. Thus $\smash{B\wt G}$ is $p$--compact.
Conversely we assume that $B\wt G$ is $p$--compact. Consider the fibration $$\Omega (BG)\p {\longrightarrow} (B\alpha )\p {\longrightarrow} (B\wt G)\p.$$ Since the map $(B\alpha )\p {\longrightarrow} (B\wt G)\p$ is induced from the inclusion $\alpha \hookrightarrow \wt G$, it is a monomorphism of $p$--compact groups. Hence its homotopy fiber $\Omega (BG)\p$ is $\F_p$--finite, and therefore $BG$ is $p$--compact. \end{proof}
\begin{cor} Let $\alpha {\longrightarrow} \wt G {\longrightarrow} G$ be a central extension of compact Lie groups, and let $H$ be a subgroup of $G$ so that there is the commutative diagram: $$ \CD \alpha @>>> \wt G @>>> G \\
@| @AAA @AAA \\ \alpha @>>> \wt H @>>> H \endCD $$ \noindent Then the pair $(G, H)$ satisfies the conditions of \fullref{thm2} if and only if so does the pair $(\wt G, \wt H)$. \end{cor}
\begin{proof} \fullref{lem3.3} implies that $BH$ is $\Pi$--compact if and only if $B\wt H$ is $\Pi$--compact. It is clear that $\rank(H_0)=\rank(G)$ if and only if $\rank(\wt H_0)=\rank(\wt G)$. Finally we see $(BH)\p \simeq (BG)\p$ if and only if $(B\wt H)\p \simeq (B\wt G)\p$ from the following commutative diagram of fibrations: $$ \CD (B\alpha )\p @>>> (B\wt G)\p @>>> (BG)\p \\
@| @AAA @AAA \\ (B\alpha )\p @>>> (B\wt H)\p @>>> (BH)\p \endCD $$ This completes the proof. \end{proof}
\begin{lem} \label{lem3.4} Let $M {\longrightarrow} K {\longrightarrow} L$ be a short exact sequence of groups. If $\nu$ is a normal subgroup of $K$, the kernel $\nu '$ of the composition $\nu {\longrightarrow} K {\longrightarrow} L$ is a normal subgroup of $M$. \end{lem}
\begin{proof} We consider the following commutative diagram: $$ \CD \nu ' @>>> M \\
@VVV @VVV \\ \nu @>>> K \\
@VVV @VV q V \\ q(\nu) @>>> L \endCD $$ For $x \in \nu '$ and $m \in M$, it follows that $$ \begin{array}{rcl}
q(mxm^{-1})&=q(m)q(x)q(m^{-1})\\
&=q(m)q(m)^{-1}=e \end{array} $$ Thus $mxm^{-1} \in \ker q$. Since $\nu ' \subset \nu $, $M \subset K$, and $\nu \vartriangleleft K$, we see that $mxm^{-1} \in \nu$. So $mxm^{-1} \in \ker q\cap \nu =\nu '$, and therefore $\nu ' \vartriangleleft M$. \end{proof}
\begin{proof}[Proof of \fullref{thm3}] First suppose $(G, H_0)=(B_n, D_n)$ or $(C_2, A_1 \times A_1)$. Let $\nu '$ be the kernel of the composition $\nu {\longrightarrow} H {\longrightarrow} \pi_0H$. Consider the following commutative diagram: $$ \CD \nu ' @>>> H_0 \\
@VVV @VVV \\ \nu @>>> H \\
@VVV @VV q V \\ q(\nu) @>>> \pi_0H \endCD $$ \fullref{lem3.4} says that $\nu ' \vartriangleleft H_0$. Since $\nu '$ is a finite normal subgroup of $H_0$, it is a finite $2$--group. As we have seen in the proof of \fullref{thm2}, $\pi_0H = W(G)/W(H_0)$ is a $2$--group, and hence so is $q(\nu)$. Consequently $\nu$ is a $2$--group.
Now consider the following commutative diagram: $$ \CD \nu ' @>>> \nu @>>> q(\nu) \\
@VVV @VVV @VVV \\ H_0 @>>> H @>>> \pi_0H \\
@VVV @VVV @VVV \\ \Gamma_0 @>>> \Gamma @>>> \pi_0 \Gamma \endCD $$ Since $\pi_0 \Gamma$ is a $2$--group, the fibration $B\Gamma_0 {\longrightarrow} B\Gamma {\longrightarrow} B\pi_0 \Gamma$ is preserved by the $2$--completion (see Bousfield and Kan \cite{BK}). Hence $B\Gamma$ is $2$--compact. Next, for odd prime $p$, we see that $(B\Gamma)\p \simeq (BH)\p$, since $\nu$ is a $2$--group. We see also that $G$ has no odd torsion and $H^*(BH ; \F_p) = H^*(BH_0 ; \F_p)^{\pi_0H} \cong H^*(BG ; \F_p)$. Consequently the space $(B\Gamma)\p$ is homotopy equivalent to $(BG)\p$. Therefore $B\Gamma$ is $\Pi$--compact.
It remains to consider the case $(G, H_0)=(G, T_G)$ for $G=A_1$ or $G=B_2(=C_2)$. Since $H_0=T_G$ and $H_0 \vartriangleleft H$, we see that $H$ is a subgroup of the normalizer $NT_G$. Consider the following commutative diagram: $$ \CD T_G @>>> NT_G @>>> W(G) \\
@| @AAA @AAA \\ T_G @>>> H @>>> \pi_0H \endCD $$ Since the map $BH {\longrightarrow} BG$ is $p$--equivalent for some $p$, it follows that $\pi_0H=W(G)$. Consequently $H=NT_G$.
If $\nu$ is a finite normal subgroup of $NT_G$, then $B\nu$ is contained in the kernel of the map $(BG)\p \simeq (BNT_G)\p {\longrightarrow} (B\Gamma)\p$. Since $G$ is simple and $G\ne G_2$, according to \cite{Is,Ir}, the group $\nu$ is included in the center of $G$. Thus $\nu$ is a $2$--group. Therefore $(B\Gamma)\p \simeq (BNT_G)\p \simeq (BG)\p$ for odd prime $p$, and hence $B\Gamma$ is $p$--compact for such $p$. Finally we note that $W(G)$ is a $2$--group, and hence $B\Gamma$ is $2$--compact. \end{proof}
We will discuss a few more results. Basically we have been looking at three Lie groups $H_0 \subset H \subset G$. The following shows a property of the (non--connected) middle group $H$.
\begin{prop} \label{prop3.1} Suppose $G$ is a connected compact Lie group, and $H$ is a proper closed subgroup of $G$ with $\rank(H_0)=\rank(G)$. If the order of $\pi_0H$ is divisible by a prime $p$, so is the order of $W(G)/W(H_0)$. \end{prop}
\begin{proof}
Assuming $|W(G)/W(H_0)| \not \equiv 0\ \text{mod}\ p$, we will show $|\pi_0H| \not \equiv 0\ \text{mod}\ p$. Notice that we have the following commutative diagram $$ \CD T @>>> N_GT @>>> W(G) \\
@| @AAA @AAA \\ T @>>> N_{H_0}T @>>> W(H_0), \endCD $$ where the vertical maps are injective, since $\rank(H_0)=\rank(G)$. We recall, from Jackowski, McClure and Oliver~\cite{JMO}, that the Sylow theorem for compact Lie groups $G$ holds. Namely, $G$ contains maximal $p$--toral subgroups, all of which are conjugate to $N_pT$, where $N_pT/T$ is a $p$--Sylow subgroup of $N(T)/T=W(G)$.
Suppose $K$ is a $p$--toral subgroup of $H$. Since $|W(G)/W(H_0)| \not \equiv 0\ \text{mod}\ p$, we see that $K$ is a subgroup of $H_0$ up to conjugation. Consequently, the composite map $K \hookrightarrow H {\longrightarrow} \pi_0H$ must be trivial. Since $H {\longrightarrow} \pi_0H$ is surjective, the $p$--part of $\pi_0H$ is trivial. \end{proof}
For each pair mentioned in the part (a) of \fullref{thm2}, we note that $|W(G)/W(H_0)|$ is a power of $2$. \fullref{prop3.1} says, for instance, that $\pi_0H$ is a $2$--group for any $(G, H)$
such that $|W(G)/W(H_0)|$ is a power of $2$. As an application, one can show that if $H$ is a non--connected proper closed subgroup of $\mathit{SO}(3)$ with $H_0=\mathit{SO}(2)$, then $H$ is isomorphic to $\mathit{O}(2)$. A proof may use the fact that $H$ is $2$--toral, and that a maximal $2$--toral subgroup in $H$ is $2$--stubborn \cite{JMO}. A $2$--compact version of this result also holds. Suppose $X$ is a $2$--compact group such that there are two monomorphisms of $2$--compact groups $B\mathit{SO}(2)\2 {\longrightarrow} BX$ and $BX {\longrightarrow} B\mathit{SO}(3)\2$. Then, along the line of a similar argument, one can also show that $BX$ is homotopy equivalent to $B\mathit{O}(2)\2$ if $X$ is not connected. In the case of $X$ being connected, the classifying space $BX$ is either $B\mathit{SO}(2)\2$ or $B\mathit{SO}(3)\2$.
In \fullref{thm2}, Lie groups of type $(C_2, A_1 \times A_1)$ have been discussed. An example is given by $\mathit{Sp}(1) \times \mathit{Sp}(1) \subset (\mathit{Sp}(1) \times \mathit{Sp}(1))\rtimes \Z /2 \subset \mathit{Sp}(2)$. The middle group can be regarded as the wreath product $\mathit{Sp}(1)\smallint \Sigma_n$ for $n=2$. We ask for what $n$ and $p$ its classifying space is $p$--compact. Note that $\mathit{Sp}(1) \smallint \Sigma_n$ is a proper closed subgroup of $\mathit{Sp}(n)$.
\begin{prop} Let $\Gamma (n)$ denote the wreath product $\mathit{Sp}(1)\smallint \Sigma_n$. Then $$ \mathbb{P} (B\Gamma (n)) = \begin{cases} \Pi &\text{if } n=2\\
\{ p \in \Pi ~|~ p>n \} &\text{if } n\ge 3 \end{cases} $$ \end{prop}
\begin{proof} When $n=2$, the desired result has been shown in our proof of the part (b) of \fullref{thm2}. Recall from \cite{It} that if $B\Gamma (n)$ is $p$--compact, then $\pi_0\Gamma (n)=\Sigma_n$ must be $p$--nilpotent. For $n \ge 4$, it follows that $\Sigma_n$ is $p$--nilpotent if and only if $p>n$. Since the group $\Gamma (n)$ includes the normalizer of a maximal torus of $\mathit{Sp}(n)$, we see
$B\Gamma (n) \simeq_{p} B\mathit{Sp}(n)$ if $p>n$. Thus $\mathbb{P} (B\Gamma (n)) =\{ p \in \Pi \ | \ p>n \}$ for $n \ge 4$.
For $n=3$, note that $\Sigma_3$ is $p$--nilpotent if and only if $p\ne 3$. So it remains to prove that $B\Gamma (3)$ is not $2$--compact. We consider a subgroup $H$ of $\Gamma (3)$ which makes the following diagram commutative: $$ \CD \displaystyle \prod_{}^3Sp(1) @= \displaystyle \prod_{}^3Sp(1) @>>> * \\
@VVV @VVV @VVV \\ H @>>> \Gamma (3) @>>> \Z /2 \\
@VVV @VVV @| \\ \Z /3 @>>> \Sigma_3 @>>> \Z /2 \endCD $$ The fibration $BH {\longrightarrow} B\Gamma (3) {\longrightarrow} B\Z /2$ is preserved by the completion at $p=2$. Hence, if $B\Gamma (3)$ were $2$--compact, the space $\Omega (BH)\2$ would be a connected $2$--compact group so that the cohomology $H^*(BH ; \Q \2)$ should be a polynomial ring (see Dwyer and Wilkerson~\cite[Theorem~9.7]{DWb}). Though $H^*\bigl(B\prod_{}^3Sp(1) ; \Q \2\bigr)$ is a polynomial ring, its invariant ring $H^*(BH;\Q\2)= H^*\bigl(B\prod_{}^3Sp(1) ; \Q \2\bigr)^{\Z /3}$ is not a polynomial ring, since the group $\Z /3$ is not generated by reflections. This contradiction completes the proof. \end{proof}
For $(G, H)=(\mathit{Sp}(n), \mathit{Sp}(1)\smallint \Sigma_n)$, we note that $(G, H_0)$
is a type of $(C_n, A_1 \times \cdots \times A_1)$. This is one of the cases in which the Weyl group $W(H_0)$ is a normal subgroup of $W(G)$ (see Asano~\cite{A}) discussed in our proof of the part (a) of \fullref{thm2}. Finally we talk about the only remaining case $(G, H_0)=(F_4, D_4)$. An example is given by $\mathit{Spin}(8) \subset \mathit{Spin}(8)\rtimes \Sigma_3 \subset F_4$. Let $\Gamma$ denote the middle group $\mathit{Spin}(8)\rtimes \Sigma_3$. Then we can show that $\mathbb{P} (B\Gamma ) =\{ p \in \Pi \ | \ p>3 \}$. To show that $B\Gamma $ is not $2$--compact, one might use the fact (see Adams~\cite[Theorem~14.2]{Ad}) that $W(F_4)=W(\mathit{Spin}(8)) \rtimes \Sigma_3$, and that its subgroup $W(\mathit{Spin}(8)) \rtimes \Z /3$ is not a reflection group.
\end{document} | arXiv | {
"id": "0903.4585.tex",
"language_detection_score": 0.715185821056366,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Entanglement of pair cat states and teleportation} \author{Shang-Bin Li, Ru-Kuan Wu, Qin-Mei Wang, Jing-Bo Xu\\ Chinese Center of Advanced Science and Technology (World Labor-\\ atory), P.O.Box 8730, Beijing, People's Republic of China;\\ Zhejiang Institute of Modern Physics and Department of Physics,\\ Zhejiang University, Hangzhou 310027, People's Republic of China\thanks{Mailing address}} \date{}
\maketitle
\begin{abstract} {\normalsize The entanglement of pair cat states in the phase damping channel is studied by employing the relative entropy of entanglement. It is shown that the pair cat states can always be distillable in the phase damping channel. Furthermore, we analyze the fidelity of teleportation for the pair cat states by using joint measurements of the photon-number sum and phase difference. \\
PACS number: 03.67.-a, 03.65.Ud, 42.50.Dv}
\end{abstract}
\section * {I. INTRODUCTION} \hspace*{8mm}Quantum entanglement plays an important role in various fields of quantum information, such as quantum computation [1], quantum cryptography [2], quantum teleportation [3,4], dense coding [5] and quantum communication [6], etc. It has been recognized [7,8] that quantum teleportation can be viewed as an achievable experimental technique to quantitatively investigate quantum entanglement. There exists a class of states called maximally correlated states, which have an interesting property, i.e., the PPT distillable entanglement is exactly the same as the relative entropy of entanglement [9]. Both two-mode squeezed vacuum states and pair cat states [10] belong to this class. In continuous variable teleportation the entanglement resource is usually the two-mode squeezed state, or the Einstein-Podolsky-Rosen (EPR) states. Continuous variable quantum teleportation of arbitrary coherent states has been realized experimentally by employing a two-mode squeezed vacuum state as an entanglement resource [7]. Theoretical proposals of teleportation scheme based on the other continuous variable entangled states have already been discussed [11]. Update now, little attention has been paid to the entanglement properties of the pair cat state and its possible application of quantum information. Gou et al. have proposed a scheme for generating the pair cat state of motion in a two-dimensional ion trap [12]. In their scheme, the trapped ion is excited bichromatically by five laser beams along different directions in the X-Y plane of the ion trap. Four of these have the same frequency and can be derived from the same source, reducing the demands on the experimentalist. It is shown that if the initial vibrational state is given by a two-mode Fock state, pair cat states are realized when the system reaches its steady state. 
Their work motivates us to investigate the entanglement properties of the pair cat state and its possible application in quantum information processes, such as quantum teleportation. The motivation is two-fold: (1) the storage of continuous variable entangled states in two-dimensional motional states of trapped ions is feasible in current experimental techniques. (2) the mapping of steady state entanglement to optical beams is also realizable [13].\\ \hspace*{8mm}On the other hand, quantum entanglement is a fragile nature, which can be destroyed by the interaction between the real quantum system and its environment. This effect, called decoherence, is the most serious problem for all entanglement manipulations in quantum information processing. There have several proposals for entanglement distillation and purification in continuous variable systems [14]. In this paper, we firstly investigate the relative entropy of entanglement of pair cat states in the phase damping channel, and show that the pair cat states can always be distillable in the phase damping channel. Then, we explore possible application of pair cat states in quantum information processing, such as quantum teleportation. The fidelity of teleportation protocol in which the mixed pair cat state is used as a entangled resource, is analyzed.\\ \hspace*{8mm}This paper is organized as follows. In section II, based on the exact solution of the master equation describing phase damping, we give the numerical calculations of relative entropy of entanglement for pair cat states in the phase damping channel and investigate the influence of the initial parameters of these states on the relative entropy of entanglement. In section III, we analyze the fidelity of teleportation for the pair cat states by using joint measurements of the photon-number sum and phase difference. The influence of phase damping on the fidelity is discussed. A conclusion is given in section IV.\\
\section * {II. RELATIVE ENTROPY OF ENTANGLEMENT OF PAIR CAT STATES IN PHASE DAMPING CHANNEL} \hspace*{8mm}The relative entropy of entanglement is a good measure of quantum entanglement, it reduces to the Von Neumann entropy of the reduced density operator of either subsystems for pure states. For a mixed state $\rho$, the relative entropy of entanglement [15] is defined by $E_R(\rho)=\min_{\sigma\in{D}}S(\rho\parallel\sigma)$, where $D$ is the set of all disentangled states, and $S(\rho\parallel\sigma)=\textrm{Tr}[\rho(\log_2\rho-\log_2\sigma)]$ is the quantum relative entropy. It is usually hard to calculate the relative entropy of entanglement for mixed states. Recently, it has been shown [27] that the relative entropy of entanglement for a class of mixed states characterized by the following density matrix $$
\rho=\sum_{n_1,n_2}a_{n_1,n_2}|\phi_{n_1},\psi_{n_1}\rangle\langle\phi_{n_2},\psi_{n_2}| \eqno{(1)} $$ can be written as $$ E_R(\rho)=-\sum_{n}a_{n,n}\log_{2}a_{n,n}+\textrm{Tr}(\rho\log_2\rho). \eqno{(2)} $$ The separate state $\rho^{\ast}$ that minimizes the quantum relative entropy $S(\rho\parallel\rho^{\ast})$ is $$
\rho^{\ast}=\sum_{n}a_{n,n}|\phi_n,\psi_n\rangle\langle\phi_n,\psi_n|, \eqno{(3)} $$
where, $|\phi_n\rangle$ and $|\psi_n\rangle$ are orthogonal states of each subsystem. The states in Eq.(1) are also called maximally correlated states and are known to have some interesting properties. For example, the PPT distillable entanglement is exactly the same as the relative entropy of entanglement [9].\\ \hspace*{8mm}Now, we consider the phase damping model. The density matrix satisfies the following master equation in the interaction picture $$ \frac{d}{dt}\rho(t)=(L_1+L_2)\rho(t), \eqno{(4)} $$ with $$ L_i\rho=\frac{\gamma_i}{2}[2a^{\dagger}_{i}a_i{\rho}a^{\dagger}_{i}a_i -(a^{\dagger}_{i}a_i)^2\rho-\rho{(a^{\dagger}_{i}a_i)^2}], \eqno{(5)} $$ where, $\gamma_i$($i=1,2$) is the ith mode phase damping coefficient, and $a^{\dagger}_i$ ($a_i$) is the creation (annihilation) operator of the ith mode field. For arbitrary initial states described by the density matrix $\rho_0$, the solution of Eq.(4) can be obtained, $$ \rho(t)=\sum^{\infty}_{k_1=0}\sum^{\infty}_{k_2=0}\frac{1}{{k_1}!{k_2}!} (\gamma_1{t})^{k_1}(\gamma_2{t})^{k_2}\Lambda_{k_1,k_2}(t)\rho_0\Lambda_{k_1,k_2}(t), \eqno{(6)} $$ where $$ \Lambda_{k_1,k_2}(t)=(a^{\dagger}_1a_1)^{k_1}(a^{\dagger}_2a_2)^{k_2} \exp[-\frac{\gamma_1t}{2}(a^{\dagger}_1a_1)^2-\frac{\gamma_2t}{2}(a^{\dagger}_2a_2)^2]. \eqno{(7)} $$ If we assume the initial density matrix $\rho_0$ is arbitrary two-mode continuous variable pure states, i.e., $$
\rho_0=\sum_{n,m}\sum_{n^{'},m^{'}}a_{n,m}a^{\ast}_{n^{'},m^{'}}|n,m\rangle\langle{n^{'}},m^{'}|, \eqno{(8)} $$
where $|n,m\rangle$ is two-mode particle number state. Then, the time-evolution density matrix with the initial condition is calculated as $$ \rho(t)=\sum_{n,m}\sum_{n^{'},m^{'}}a_{n,m}a^{\ast}_{n^{'},m^{'}}\exp[-\frac{\gamma_1t}{2}
(n-n^{'})^2-\frac{\gamma_2t}{2}(m-m^{'})^2]|n,m\rangle\langle{n^{'}},m^{'}|. \eqno{(9)} $$ If the density matrix $\rho(t)$ in Eq.(9) can be expressed as the similar form of Eq.(1), i.e., $$
\rho(t)=\sum_{n_1,n_2}c_{n_1,n_2}(t)|\phi^{'}_{n_1},\psi^{'}_{n_1}\rangle\langle
\phi^{'}_{n_2},\psi^{'}_{n_2}|, \eqno{(10)} $$
where, $|\phi^{'}_n\rangle$ and $|\psi^{'}_n\rangle$ are orthogonal states of modes 1 and 2, the relative entropy of entanglement of $\rho(t)$ in Eq.(10) can be expressed as, $$ E_R(\rho(t))=-\sum_{n}c_{n,n}(t)\log_{2}c_{n,n}(t)+\textrm{Tr}(\rho(t)\log_2\rho(t)), \eqno{(11)} $$ and the separate state $\rho^{\ast}$ that minimizes the quantum relative entropy is $$
\rho^{\ast}(t)=\sum_{n}c_{n,n}(t)|\phi^{'}_n,\psi^{'}_n\rangle\langle\phi^{'}_n,\psi^{'}_n|. \eqno{(12)} $$
In what follows, we investigate the relative entropy of entanglement of pair cat states in phase damping channel. Firstly, we will briefly outline the definition of pair cat states and the closely related pair coherent states. For two independent boson annihilation operators $\hat{a}_1$, $\hat{a}_2$, a pair coherent state $|\xi,q\rangle$ is defined as an eigenstate of both the pair annihilation operator $\hat{a}_1\hat{a}_2$ and the number difference operator $\hat{Q}=\hat{a}^{\dagger}_1\hat{a}_1-\hat{a}^{\dagger}_2\hat{a}_2$ [16], i.e., $$
\hat{a}_1\hat{a}_2|\xi,q\rangle=\xi|\xi,q\rangle,~~~\hat{Q}|\xi,q\rangle=q|\xi,q\rangle, \eqno{(13)} $$ where $\xi$ is a complex number and $q$ is a fixed integer. Without loss of generality, we may set $q\geq0$ and the pair coherent states can be explicitly expanded as a superposition of the two-mode Fock states, i.e., $$
|\xi,q\rangle=N_q\sum^{\infty}_{n=0}\frac{\xi^n}{\sqrt{n!(n+q)!}}|n+q,n\rangle, \eqno{(14)} $$
where $N_q=[|\xi|^{-q}I_{q}(2|\xi|)]^{-1/2}$ is the normalization constant and $I_q$ is the modified Bessel function of the first kind of order $q$. It has been suggested by Reid and Krippner that the non-degenerate parametric oscillator transiently generates pair coherent states, in the limit of very large parametric nonlinearity and high-Q cavities[17]. Recently, Munro \textit{et al.} have shown that the pair coherent states can be used to improve the detection sensitivity of weak forces[18]. Pair cat states $|\xi,q,\phi\rangle$ are proposed by Gerry and Grobe [10], which are defined as superposition of two different pair coherent states, i.e., $$
|\xi,q,\phi\rangle=N_{\phi}[|\xi,q\rangle+e^{i\phi}|-\xi,q\rangle], \eqno{(15)} $$ where the normalization constant $N_{\phi}$ is given by $$
N_{\phi}=\frac{1}{\sqrt{2}}[1+N^2_q\cos\phi\sum^{\infty}_{n=0}\frac{(-1)^n|\xi|^{2n}} {n!(n+q)!}]^{-\frac{1}{2}}. \eqno{(16)} $$
It is easy to verify that the states $|\xi,q,\phi\rangle$ are eigenstates of the operator $(\hat{a}_1\hat{a}_2)^2$ with eigenvalue $\xi^2$. Gou et al. have proposed a scheme for generating the pair cat state of motion in a two-dimensional ion trap [12]. In their scheme, the trapped ion is excited bichromatically by five laser beams along different directions in the X-Y plane of the ion trap. Four of these have the same frequency and can be derived from the same source, reducing the demands on the experimentalist. It is shown that if the initial vibrational state is given by a two-mode Fock state, pair cat states are realized when the system reaches its steady state. Our following calculations show that pair cat states hold controllable entanglement. So, it is reasonable to regard the controlled two-dimensional trapped ion as a reliable source of entanglement. Recent achievements concerning the transfer of entangled states have provided us with a possible way to map the pair cat state of the motional freedom of two-dimensional trapped ions into freely propagating optical fields [13]. When the free photon propagates in the optical fibre, one of the encountered decoherence mechanisms is phase damping. In the following, we discuss the entanglement of pair cat states in the phase damping channel. We assume that the initial state is prepared in pair cat states
$|\xi,q,\phi\rangle$. By making use of Eqs.(6) and (7), we obtain $$ \rho(t)=N^2_{\phi}N^{2}_q\sum^{\infty}_{n=0}\sum^{\infty}_{m=0}\frac{\exp[-\frac{\gamma_1 +\gamma_2}{2}t(n-m)^2]\xi^n\xi^{\ast{m}}(1+(-1)^ne^{i\phi})(1+(-1)^me^{-i\phi})}
{\sqrt{n!m!(n+q)!(m+q)!}}|n+q,n\rangle\langle{m+q},m|. \eqno{(17)} $$ The relative entropy of entanglement for $\rho(t)$ is calculated as $$
E(t,\xi)=-\sum_nN^2_{\phi}N^2_q\frac{|1+(-1)^ne^{i\phi}|^2|\xi|^{2n}}{n!(n+q)!}
\log_2N^2_{\phi}N^2_q\frac{|1+(-1)^ne^{i\phi}|^2|\xi|^{2n}}{n!(n+q)!} $$ $$ ~~~+\textrm{Tr}(\rho(t)\log_2\rho(t)). \eqno{(18)} $$ \begin{figure}
\caption{The relative entropy of entanglement $E$ of the pair cat state as a function of the parameter $|\xi|$ and the degree of damping $d$ for $q=0$ with $\phi=\pi$. }
\label{Fig.1}
\end{figure} \begin{figure}
\caption{The relative entropy of entanglement $E$ of the pair cat state as a function of the degree of damping $d$ and the parameter
$\phi$ for $q=0$ with $|\xi|=2$. }
\label{Fig.2}
\end{figure} \begin{figure}
\caption{The relative entropy of entanglement $E$ of the pair cat state as a function of the parameter $|\xi|$ and the parameter $\phi$ for $q=0$ with $d=0$. }
\label{Fig.3}
\end{figure} \begin{figure}
\caption{The relative entropy of entanglement $E$ of the pair cat state as a function of the parameter $\phi$ for three values of
$q=0,10$ and $30$ with $d=0$ and $|\xi|=2$. }
\label{Fig.4}
\end{figure} In numerical computations throughout this paper, the parameters $\gamma_1=\gamma_2=\gamma$, $d=\gamma{t}$ are chosen and the truncated photon number has been taken to be
$\max(n)=\max(m)=100$, the value of which is sufficiently large for numerical convergence. Figures 1, 2 and 3 show that the relative entropy of entanglement $E$ of the pair cat state increases with $|\xi|$ and decreases with the degree of damping $d$, and can be controlled by adjusting the relative phase $\phi$. These results can be explained as follows: the entanglement of pair cat states heavily depends on the photon number distribution, which can be modified by the relative phase via the interference. Similar results have been obtained in Ref.[19]. In Fig.4, we plot the relative entropy of entanglement $E$ of the pair cat state as a function of the relative phase $\phi$ for three values of the parameter $q$. Recently, Hiroshima has numerically calculated the relative entropy of entanglement of two-mode squeezed vacuum states, defined by
$|\psi(r)\rangle=\exp[-r(a^{\dagger}_1a^{\dagger}_2-a_1a_2)]|vac\rangle$, in phase damping channel [20]. It has been shown [21] that the two-mode squeezed vacuum state in phase damping channel is always distillable (and inseparable). In the following, we show that the pair cat states are always distillable (and inseparable) in phase damping channel.\\ \hspace*{8mm}For two-mode continuous variable states
$|\psi\rangle$, $$
|\psi\rangle=\sum_nf_n|\phi_n,\psi_n\rangle, \eqno{(19)} $$
where $|\phi_n\rangle$ and $|\psi_n\rangle$ are orthogonal particle number states of each subsystem and $f_n$ satisfy the normalization condition $\sum_n|f_n|^2=1$. The density matrix with the initial condition $\rho(0)=|\psi\rangle\langle\psi|$ can be written as $$ \rho(t)=\sum_{n,m}f_nf^{\ast}_m\exp[-\frac{\gamma_1}{2}t(\phi_n-\phi_m)^2
-\frac{\gamma_2}{2}t(\psi_n-\psi_m)^2]|\phi_n,\psi_n\rangle\langle\phi_m,\psi_m|. \eqno{(20)} $$ According to Ref.[22], if operator $\Omega(t)=\textrm{Tr}_D\rho(t)\otimes{I}-\rho(t)$ is not positive definite, there is always a scheme to distill $\rho(t)$. Here, we find
$\textrm{Tr}_D\rho(t)=\sum_n|f_n|^2|\phi_n\rangle\langle\phi_n|$. If there are two nonzero $f_i$, $f_j$, it is always possible to choose four vectors
$|W_1\rangle=\frac{1}{\sqrt{2}}(|\phi_i,\psi_i\rangle+|\phi_j,\psi_j\rangle)$,
$|W_2\rangle=\frac{1}{\sqrt{2}}(|\phi_i,\psi_i\rangle-|\phi_j,\psi_j\rangle)$,
$|W_3\rangle=\frac{1}{\sqrt{2}}(|\phi_i,\psi_i\rangle+i|\phi_j,\psi_j\rangle)$,
$|W_4\rangle=\frac{1}{\sqrt{2}}(|\phi_i,\psi_i\rangle-i|\phi_j,\psi_j\rangle)$. Then, we have $$
\Omega_1(t)\equiv\langle{W}_1|\Omega(t)|W_1\rangle=-\exp[-\frac{\gamma_1}{2}t(\phi_i -\phi_j)^2-\frac{\gamma_2}{2}t(\psi_i-\psi_j)^2]\textrm{Re}(f_if^{\ast}_j), $$ $$
\Omega_2(t)\equiv\langle{W}_2|\Omega(t)|W_2\rangle=\exp[-\frac{\gamma_1}{2}t(\phi_i -\phi_j)^2-\frac{\gamma_2}{2}t(\psi_i-\psi_j)^2]\textrm{Re}(f_if^{\ast}_j), $$ $$
\Omega_3(t)\equiv\langle{W}_3|\Omega(t)|W_3\rangle=\exp[-\frac{\gamma_1}{2}t(\phi_i -\phi_j)^2-\frac{\gamma_2}{2}t(\psi_i-\psi_j)^2]\textrm{Im}(f_if^{\ast}_j), $$ $$
\Omega_4(t)\equiv\langle{W}_4|\Omega(t)|W_4\rangle=-\exp[-\frac{\gamma_1}{2}t(\phi_i -\phi_j)^2-\frac{\gamma_2}{2}t(\psi_i-\psi_j)^2]\textrm{Im}(f_if^{\ast}_j), \eqno{(21)} $$ which satisfy $$ \Omega_1(t)+\Omega_2(t)\equiv0,~~~\Omega_3(t)+\Omega_4(t)\equiv0, $$ $$ \Omega_1(t)+i\Omega_4(t)=-\exp[-\frac{\gamma_1}{2}t(\phi_i-\phi_j)^2 -\frac{\gamma_2}{2}t(\psi_i-\psi_j)^2](f_if^{\ast}_j)\neq0, $$ $$ \Omega_2(t)+i\Omega_3(t)=\exp[-\frac{\gamma_1}{2}t(\phi_i-\phi_j)^2 -\frac{\gamma_2}{2}t(\psi_i-\psi_j)^2](f_if^{\ast}_j)\neq0, \eqno{(22)} $$ Eqs.(22) show that there is at least one of $\Omega_k(t)$ ($k=1,2,3,4$), which is negative for any finite $\frac{\gamma_1+\gamma_2}{2}t$. From the above, we obtain the following conclusion: the two-mode continuous variable state
$|\psi\rangle=\sum_nf_n|\phi_n,\psi_n\rangle$, in which
$|\phi_n\rangle$ and $|\psi_n\rangle$ are orthogonal particle number states of each subsystem, is always distillable (and inseparable) in the phase damping channel, if there are at least two nonzero values of the coefficients $f_n$. Obviously, pair coherent states and pair cat states, which belong to the family of states in Eq.(20), are always distillable in the phase damping channel. It should be interesting to consider a slightly modified purification protocol similar to the protocol in Ref.[14] to distill maximally entangled states from the mixed pair cat states or mixed pair coherent states due to phase damping.\\
\section * {III. FIDELITY OF TELEPORTATION VIA PAIR CAT STATES IN PHASE DAMPING CHANNEL} \hspace*{8mm}Recently, Cochrane et al. have presented a teleportation protocol by making use of joint measurements of the photon number sum and phase difference on two field modes [11]. Various kinds of two-mode entangled states used as the entanglement resource have been discussed and the respective teleportation fidelities have been investigated. In this section, we adopt the protocol of Cochrane et al. to investigate the fidelity of teleportation, in which the pair cat state is utilized as the entanglement resource. The influence of phase damping on the fidelity is also discussed.\\ \hspace*{8mm}Consider an arbitrary target state sent by Alice to Bob $$
|\psi\rangle_T=\sum^{\infty}_{k=0}d_k|k\rangle_T, \eqno{(23)} $$
where $|k\rangle_T$ is the Fock state. Initially, Alice and Bob share the two-mode fields in the pair cat state. Then, the total state is $$
|\psi\rangle=N_{\phi}N_{q}\sum^{\infty}_{n=0}\sum^{\infty}_{k=0} \frac{d_k\xi^n[1+(-1)^ne^{i\phi}]}{\sqrt{n!(n+q)!}}
|n+q\rangle_a|n\rangle_b|k\rangle_T. \eqno{(24)} $$ The whole operation of this teleportation protocol can be decomposed as two steps: Alice makes a joint measurement of the photon number sum and phase difference of the target state and her component of the pair cat state; The results of the joint measurement are sent to Bob via the classical channel, and Bob reproduce the target state after appropriate amplification and phase shift operations according to the results of the joint measurement. The joint measurement of the photon number sum and phase difference has attracted much attention due to its extensive potential applications both in quantum optics and quantum information [23,24]. In Ref.[24], Luis et al. introduced the hermitian phase-difference operator $$
\hat{\Theta}_{12}=\sum^{\infty}_{N=0}\sum^{N}_{r=0}\theta^{N}_r|\theta^N_r
\rangle\langle\theta^N_r|, \eqno{(25)} $$ with $$
|\theta^N_r\rangle=\frac{1}{\sqrt{N+1}}\sum^{N}_{n=0}e^{in\theta^N_r}|n\rangle_1|N-n\rangle_2, \eqno{(26)} $$ and $$ \theta^N_r=\vartheta+\frac{2\pi{r}}{N+1}, \eqno{(27)} $$ where $\vartheta$ is an arbitrary angle. It is obvious that the joint measurement projects the two-mode quantum state onto
$|\theta^N_r\rangle$. In Ref.[25], a physical scheme of the joint measurement of the photon number sum and phase difference of two-mode fields was proposed, in which only the linear optical elements and single-photon detector are involved.\\ \hspace*{8mm}If Alice measures the number sum $\hat{N}_a+\hat{N}_T$ of the target and her component of the pair cat state with the result $N$, the state of the total system is projected onto $$
|\psi^{(N)}\rangle=[\frac{P(\xi,q,\phi,N)}{N^2_qN^2_{\phi}}]^{-1/2}\sum^{N}_{l=q} \frac{d_{N-l}\xi^{l-q}[1+(-1)^{l-q}e^{i\phi}]}
{\sqrt{l!(l-q)!}}|l\rangle_a|l-q\rangle_b|N-l\rangle_T, \eqno{(28)} $$ where $$
P(\xi,q,\phi,N)=N^2_{\phi}N^2_{q}\sum^{N}_{l=q}\frac{2|d_{N-l}
\xi^{l-q}|^2[1+(-1)^{l-q}\cos\phi]}{l!(l-q)!}, \eqno{(29)} $$ is the probability of obtaining the result $N$. Further measurement of phase difference with the result $\theta_{-}$ performed by Alice will project Bob's mode onto the pure state $$
|\psi^{(N,\theta_{-})}\rangle=[\frac{P(\xi,q,\phi,N)}{N^2_qN^2_{\phi}}]^{-1/2} \sum^{N-q}_{n=0}\frac{d_{N-q-n}(e^{-i\theta_{-}}\xi)^n[1+(-1)^ne^{i\phi}]}
{\sqrt{n!(n+q)!}}|n\rangle_b. \eqno{(30)} $$
Alice sends the values $N$ and $\theta_{-}$ to Bob, and then Bob amplifies his mode so that $|n\rangle_b\rightarrow|N-q-n\rangle_b$ [26] and applies an operation $e^{-i\hat{N}_b\theta_{-}}$ for phase shifting his mode. The teleportation protocol is then completed and Bob finally has the state $$
|\psi^{(N)}\rangle_{b}=[\frac{P(\xi,q,\phi,N)}{N^2_qN^2_{\phi}}]^{-1/2}
\sum^{N-q}_{n=0}\frac{d_{N-q-n}\xi^n[1+(-1)^ne^{i\phi}]}{\sqrt{n!(n+q)!}}|N-q-n\rangle_b. \eqno{(31)} $$ The fidelity of this protocol depends on the result $N$ and can be obtained as follows $$
F(\xi,q,\phi,N)=[\frac{P(\xi,q,\phi,N)}{N^2_qN^2_{\phi}}]^{-1}|\sum^{N-q}_{n=0}
\frac{|d_{N-q-n}|^2\xi^n[1+(-1)^ne^{i\phi}]}{\sqrt{n!(n+q)!}}|^2. \eqno{(32)} $$ The average fidelity defined by $\bar{F}(\xi,q,\phi)\equiv\sum^{\infty}_{N=q}P(\xi,q,\phi,N)F(\xi,q,\phi,N)$ is $$
\bar{F}(\xi,q,\phi)=N^2_qN^2_{\phi}\sum^{\infty}_{N=0}|\sum^{N}_{n=0}
\frac{|d_{N-n}|^2\xi^n[1+(-1)^ne^{i\phi}]}{\sqrt{n!(n+q)!}}|^2. \eqno{(33)} $$ Let the target state be a coherent state
$|\psi\rangle_T=|\alpha\rangle_T$. Then, the average fidelity can be expressed as $$
\bar{F}(\xi,q,\phi,\alpha)=N^2_qN^2_{\phi}e^{-2|\alpha|^2}\sum^{\infty}_{N=0}|
\alpha|^{4N}|\sum^{N}_{n=0}\frac{|\alpha|^{-2n}\xi^n[1+(-1)^ne^{i\phi}]}{(N-n)!
\sqrt{n!(n+q)!}}|^2. \eqno{(34)} $$ \begin{figure}
\caption{The average fidelity is plotted as a function of the parameter $\xi$ with $q=0$ and $\phi=\pi/2$ for different values of $|\alpha|$, $|\alpha|=1$ (Solid Line), $|\alpha|=0.5$ (Dash Line), $|\alpha|=0.1$ (Dot Line). }
\label{Fig.5}
\end{figure}
In Fig.5, we have plotted the average fidelity as a function of the parameter $\xi$ for different values of $|\alpha|$. It is shown that the average fidelity increases with the value of $\xi$. Furthermore, the average fidelity defined above heavily depends on the teleported states. If the teleported state is a coherent state, the smaller the amplitude of coherent states, the higher the average fidelity. Its physical reason can be elucidated by two facts: one fact is that this protocol works perfectly if the target is a number state [11]; the other fact is that the smaller the amplitude of a coherent state, the closer the state distance between the coherent state and a specific number state, i.e., the vacuum state. In what follows, we discuss the influence of phase damping on the fidelity of the above teleportation protocol. In this case, the state of the total system can be written as follows $$ \rho_T=\sum^{\infty}_{k,l,n,m=0} \frac{d_kd^{\ast}_l\xi^n\xi^{\ast{m}}[1+(-1)^ne^{i\phi}][1+(-1)^me^{-i\phi}]
e^{-\gamma{t}(n-m)^2}}{\sqrt{n!m!(n+q)!(m+q)!}}|k\rangle_{TT}\langle{l}|
\otimes|n+q,n\rangle\langle{m+q},m|. \eqno{(35)} $$ After completing the protocol described above, Bob finally achieves the state in his mode expressed by $$ \rho_{b}=[\frac{P(\xi,q,\phi,N)}{N^2_qN^2_{\phi}}]^{-1}\sum^{N^{\prime}}_{n,m=0} \frac{d_{N^{\prime}-n}d^{\ast}_{N^{\prime}-m}\xi^n\xi^{\ast{m}}[1+(-1)^ne^{i\phi}][1+(-1)^me^{-i\phi}]
e^{-\gamma{t}(n-m)^2}}{\sqrt{n!m!(n+q)!(m+q)!}}|N^{\prime}-n\rangle_{bb}\langle{N^{\prime}-m}|, \eqno{(36)} $$ where $N^{\prime}=N-q$ and $N$ is the measured number sum of Alice's joint measurement. Then, the fidelity of this protocol depends on the result $N$ and is $$ F(\xi,q,\phi,N,\gamma{t})=[\frac{P(\xi,q,\phi,N)}{N^2_qN^2_{\phi}}]^{-1}\sum^{N^{\prime}}_{n,m=0}
\frac{|d_{N^{\prime}-n}|^2|d_{N^{\prime}-m}|^2\xi^n\xi^{\ast{m}}[1+(-1)^ne^{i\phi}][1+(-1)^me^{-i\phi}] e^{-\gamma{t}(n-m)^2}}{\sqrt{n!m!(n+q)!(m+q)!}}. \eqno{(37)} $$ The average fidelity is $$ \bar{F}(\xi,q,\phi,\gamma{t})=N^2_qN^2_{\phi}\sum^{\infty}_{N^{\prime}=0}\sum^{N^{\prime}}_{n,m=0}
\frac{|d_{N^{\prime}-n}|^2|d_{N^{\prime}-m}|^2\xi^n\xi^{\ast{m}}[1+(-1)^ne^{i\phi}][1+(-1)^me^{-i\phi}] e^{-\gamma{t}(n-m)^2}}{\sqrt{n!m!(n+q)!(m+q)!}}. \eqno{(38)} $$
For a coherent state $|\alpha\rangle$ teleported by Alice, the average fidelity can be rewritten as $$
\bar{F}(\xi,q,\phi,|\alpha|,\gamma{t})=N^2_qN^2_{\phi}e^{-2|\alpha|^2}\sum^{\infty}_{N^{\prime}=0}|\alpha|^{4N^{\prime}}\sum^{N^{\prime}}_{n,m=0}
\frac{|\alpha|^{-2n-2m}\xi^n\xi^{\ast{m}}[1+(-1)^ne^{i\phi}][1+(-1)^me^{-i\phi}] e^{-\gamma{t}(n-m)^2}}{(N^{\prime}-n)!(N^{\prime}-m)!\sqrt{n!m!(n+q)!(m+q)!}}. \eqno{(39)} $$ In Fig.6, the average fidelity for the coherent states is plotted as a function of $\gamma{t}$ for different values of amplitude
$|\alpha|$. It is shown that the phase damping deteriorates the average fidelity of teleportation based on the pair cat state, which is qualitatively consistent with the behavior of its relative entropy of entanglement in the phase damping channel. As mentioned above, the essential parts of this teleportation protocol are the preparation of the pair cat state and the joint measurement of the number sum and phase difference. The direct preparation of pair cat states in two-mode optical fields is still an open question. However, we can map the pair cat state of the motional freedom of two-dimensional trapped ions into freely propagating optical fields. The details will be discussed elsewhere. Recently, the physical implementation of joint measurement of photon number sum and phase difference of two-mode optical fields is shown to be possible by using only the linear optical elements and the single-photon detector [25]. So we can conclude that the physical realization of this teleportation protocol is feasible with present technology.
\section * {IV. CONCLUSION} \hspace*{8mm}In this paper, we investigate the entanglement of pair cat states in the phase damping channel by employing the relative entropy of entanglement. We give the numerical calculations of the relative entropy of entanglement of this state \begin{figure}
\caption{The average fidelity is plotted as a function of the phase damping coefficient $\gamma{t}$ with $\xi=30$, $q=0$ and
$\phi=\pi/2$ for different values of $|\alpha|$, $|\alpha|=1$
(Solid Line), $|\alpha|=0.5$ (Dash Line), $|\alpha|=0.1$ (Dot Line). }
\label{Fig.6}
\end{figure} in the phase damping channel and study the influence of the parameters on the relative entropy of entanglement. We find that the relative phase of the pair cat state can control the relative entropy of entanglement. Then, we show that the pair cat states can always be distillable in the phase damping channel. Finally, we analyze the fidelity of teleportation for the pair cat states by using joint measurements of the photon-number sum and phase difference. The influence of phase damping on the fidelity is discussed. The behavior of average fidelity for teleporting a coherent state based on the pair cat state is qualitatively consistent with its relative entropy of entanglement. It is interesting to investigate the entanglement and teleportation fidelity of pair cat states in the amplitude damping channel. \section * {ACKNOWLEDGMENTS} This project was supported by the National Natural Science Foundation of China (Project NO.10174066).
\end{document} | arXiv | {
"id": "0506217.tex",
"language_detection_score": 0.7337049245834351,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{abstract} We prove that a complex surface $S$ with irregularity $q(S)=5$ that has no irrational pencil of genus $>1$ has geometric genus $p_g(S)\ge 8$. As a consequence, one is able to classify minimal surfaces $S$ of general type with $q(S)=5$ and $p_g(S)<8$. This result is a negative answer, for $q=5$, to the question asked in \cite{mr} of the existence of surfaces of general type with irregularity $q\ge 4$ that have no irrational pencil of genus $>1$ and with the lowest possible geometric genus $p_g=2q-3$. This gives some evidence for the conjecture that the only irregular surface with no irrational pencil of genus $>1$ and $p_g=2q-3$ is the symmetric product of a genus three curve.
\noindent{\em 2000 Mathematics Subject Classification:} 14J29 \end{abstract} \title{On surfaces of general type with $q=5$}
\section{Introduction}
Let $S$ be a smooth complex projective surface with irregularity $q(S):=h^0(\Omega^1_S)\ge 3$. The existence of a fibration $f\colon S\to B$ with $B$ a smooth curve of genus $b>1$ (``an irrational pencil of genus $b>1$'') gives much geometrical information on $S$ (cf. the survey \cite{survey}). However, surfaces with an irrational pencil of genus $b>1$ can hardly be regarded as ``general'' among the irregular surfaces of general type: for instance, for $b<q(S)$ the Albanese variety of such a surface $S$ is not simple.
By the classical Castelnuovo-De Franchis theorem, if $S$ has no irrational pencil of genus $>1$ then the inequality $p_g(S)\ge 2q(S)-3$ holds, where $p_g(S):=h^0(K_S)$ is, as usual, the geometric genus. Note that this inequality has been recently generalized in \cite{PareschiPopa} to K\"ahler varieties of arbitrary dimension.
The surfaces of general type $S$ for which the equality $p_g(S)=2q(S)-3$ holds are studied in \cite{mr}. There those with an irrational pencil of genus $>1$ are classified and the inequality $K^2_S\ge 7\chi(S)-1$ is proven for $S$ minimal. However, the question of the existence of surfaces with $p_g(S)=2q(S)-3$ having no irrational pencil of genus $b>1$ is widely open. At present, the state of the art is as follows: \begin{itemize} \item for $q=3$, the only such surfaces are (the minimal desingularization of) a theta divisor in a principally polarized abelian threefold (\cite{HaconPardini}, \cite{Pirola}); \item for $q=4$, if $S$ is minimal then $K^2_S=16,17$ (\cite{bnp},\cite{cau}); \item for $q\ge 4$, no example is known. \end{itemize}
One is led to conjecture that the only irregular surface with no irrational pencil of genus $>1$ and $p_g=2q-3$ is the symmetric product of a genus three curve. In this note we settle the case $q=5$: \begin{thm}\label{noq5} Let $S$ be a smooth projective complex surface with $q(S)=5$
that has no irrational pencils of genus $>1$. Then:
$$p_g(S)\geq 8.$$ \end{thm}
As a consequence we obtain the following classification theorem: \begin{thm}\label{irrpencil} Let $S$ be a minimal complex surface of general type with $q(S)=5$ and $p_g(S)\le 7$. Then either: \begin{enumerate} \item $p_g(S)=6$, $K_S^2=16$ and $S$ is the product of a curve of genus 2 and a curve of genus 3; or
\item $p_g(S)=7$, $K_S^2=24$ and $S=(C\times F)/\mathbb Z_2$, where $C$ is a curve of genus $7$ with a free $\mathbb Z_2$-action, $F$ is a curve of genus 2 with a $\mathbb Z_2$-action such that $F/\mathbb Z_2$ has genus 1 and $\mathbb Z_2$ acts diagonally on $C\times F$. The map $f\colon S \to C/\mathbb Z_2$ induced by the projection $C\times F\to C$ is an irrational pencil of genus $4$ with general fibre $F$ of genus 2. \end{enumerate}
\end{thm}
The idea of the proof of Theorem \ref{noq5} is to obtain contradictory upper and lower bounds for $K^2_S$ under the assumption that $p_g(S)<8$ and $S$ is minimal.
For fixed $q$ and $p_g$, by Noether's formula giving an upper bound for $K^2$ is the same as giving a lower bound for the topological Euler characteristic $c_2$. More precisely, it is the same as giving a lower bound for $h^{1,1}$, the only Hodge number which is not determined by $p_g$ and $q$.
In our situation, the upper bound follows directly from the result of \cite{cau} that if $S$ is a surface of general type with $q=5$, having no irrational pencils, then $h^{1,1}\geq 11+t$, where $t$ is greater than or equal to the number of curves contracted by the Albanese map.
If the canonical system $|K_S|$ has no fixed components, one can apply the results of \cite{bnp} to get a lower bound for $K^2_S$ which is enough to rule out this possibility. Hence the bulk of the proof consists in obtaining a lower bound for $K^2_S$ under the assumption that $|K_S|$ has a fixed part $Z>0$. This is done in \S \ref{sec:reider}, where we improve by 1 in the case $Z>0$ a well known inequality for surfaces with birational bicanonical map due to Debarre (cf. Corollary \ref{canonical}). The proof is based on a subtle numerical analysis of the intersection properties of the fixed and moving part of $|K_S|$ that is, we believe, of independent interest.
It would be possible to generalize Theorem \ref{noq5} for $q\geq 6$, if a good lower bound for $h^{1,1}(S)$ could be established. Unfortunately it is very difficult to extend the methods of \cite{cau} for $q\geq 6$. Recently, a lower bound on $h^{1,1}$ has been obtained in \cite{lapo} by completely different methods, but it is not strong enough for our purposes.
\noindent {\em Acknowledgments:\/} This research was partially supported by FCT (Portugal) through program POCTI/FEDER and Project PTDC/MAT/099275/2008 and by MIUR (Italy) through project PRIN 2007 \emph{``Spazi di moduli e teorie di Lie''}.
We wish to thank Letterio Gatto and Stavros Papadakis for very helpful conversations.
\noindent{\bf Notation and conventions:} a {\em surface} is a smooth complex projective surface. We use the standard notation for the invariants of a surface $S$: $p_g(S):=h^0(\omega_S)=h^2(\mathcal O_S)$ is the {\em geometric genus}, $q(S):=h^0(\Omega^1_S)=h^1(\mathcal O_S)$ is the {\em irregularity} and $\chi(S):=p_g(S)-q(S)+1$ is the {\em Euler--Poincar\'e} characteristic.
An {\em irrational pencil of genus $b$} of a surface $S$ is a fibration $f\colon S\to B$, where $B$ is a smooth curve of genus $b>0$.
We use $\equiv$ to denote linear equivalence and $\sim$ to denote numerical equivalence of divisors.
\section{Reider divisors} \label{sec:reider}
Let $S$ be a surface and let $M$ be a nef and big divisor on $S$ such that $M^2\geq 5$. By Reider's theorem, if a point $P$ of $S$ is a base point of $|K_S+M|$, then there is an effective divisor $E$ passing through $P$ such that either: \begin{itemize} \item $E^2=-1$, $ME=0$ or \item $E^2=0$, $ME=1$.\end{itemize} This suggests the following definition: \begin{defn} Let $M$ be a nef and big divisor on a surface $S$. An effective divisor $E$ such that $E^{2}=k $ and $EM=s$ is called a $(k,s)$ divisor of $M$. \end{defn} By \cite[(0.13)]{ccm}, the $(-1,0)$ divisors and the $(0,1)$ divisors are $1$-connected.
In addition, if $E$ is a $(-1,0)$ divisor, using the index theorem one shows that the intersection form on the components of $E$ is negative definite. In particular, there exist only finitely many $(-1,0)$ divisors of $M$ on $S$.
\begin{lem}\label{lem:01divisor} Let $M$ be a nef and big divisor on a projective surface $S$. Then:
\begin{enumerate}
\item if $C$ is an irreducible component of a reducible $(0,1)$ divisor $E$ of $M$, then $C^2<0$;
\item if $E_1,E_2$ are two distinct $(0,1)$ divisors of $M$, then $E_1E_2= 0$ and $E_1$ and $E_2$ are disjoint. \end{enumerate} \end{lem} \begin{proof} Let $E$, $C$ be as in (i). The index theorem gives $C^2<0$ if $MC=0$ and $C^2\le 0$ if $MC=1$. Assume that $C^2=0$. Then $EC=(E-C)C>0$, since $E$ is $1$-connected, and therefore $(E+C)^2\ge 2$. Since $M^2\geq 5$ and $M(C+E)=2$ we have a contradiction to the index theorem. Hence $C^2<0$.
Next we prove (ii). We have:
$$M^2\geq 5,\quad M(E_1+E_2)=2, \quad M(E_1-E_2)=0,$$
hence by the index theorem we obtain:
$$2E_1E_2=(E_1+E_2)^2\leq 0, \quad -2E_1E_2=(E_1-E_2)^2\le 0.$$ So $E_1E_2=0$. By 1-connectedness of $E_1$, $E_2$ we conclude that neither divisor is contained in the other. Then we can write $E_1=A+B$, $E_2=A+C$ where $A\geq 0$, $B, C>0$ and $B$ and $C$ have no common components.
Since $M$ is nef and $ME_i=1$, we have $1\geq MB(=MC)$ and so $B^2\leq 0, C^2\leq 0$. Then, since $0=(E_1-E_2)^2=(B-C)^2$, we conclude that $B^2=C^2=BC=0$. Hence $B$ and $C$ are disjoint, $MB=MC=1$ and $B$ is numerically equivalent to $C$. Since $B$ is also a $(0,1)$ divisor, $BE_1=0$ and so, by 1-connectedness of $E_1$ we conclude that $A=0$. \end{proof}
\begin{lem}\label{rational} Let $S$ be a surface and let $M$ be a nef and big divisor such that the linear system $|M|$ has no fixed components.
Let $E$ be a $(0,1)$ divisor of $M$ and let $C$ be the only irreducible component of $E$ such that $MC=1$. Then either $|M|$ has a base point on $C$ or $C$ is a smooth rational curve. \end{lem}
\begin{proof} Suppose $|M|$ has no base points on $C$. Then, since $MC=1$ the restriction map $H^0(M)\to H^0(C, M)$ has image of dimension at least $2$. It follows that $C$ is a smooth rational curve. \end{proof}
\begin{prop}\label{fixed}Let $X$ be a non ruled surface and let $M$ be a divisor of $X$ such that:
\begin{itemize}
\item $M^2\geq 5$,
\item the system $|M|$ has no fixed component and maps $X$ onto a surface. \end{itemize}
Let $C$ be an irreducible curve contained in the fixed locus of $|K_X+M|$. Then either: \begin{enumerate} \item $C$ is contained in a $(-1,0)$ divisor of $M$, $MC=0$ and $C^2<0$;
or \item $C$ is contained in a $(0,1)$ divisor of $M$, $MC\leq 1$ and $C^2\leq 0$. \end{enumerate}
\end{prop} \begin{proof}
Let $P\in C$ be a point. By Reider's theorem, there is a $(-1,0)$ divisor or a $(0,1)$ divisor of $M$ passing through $P$.
Assume for contradiction that $C$ is not a component of any $(-1,0)$ or $(0,1)$ divisor of $M$. Since there are only finitely many distinct $(-1,0)$ divisors of $M$ in $S$, we can assume that there is a $(0,1)$ divisor passing through a general point $P$ of $C$.
It follows that there are infinitely many $(0,1)$ divisors on $S$. Recall that two distinct $(0,1)$ divisors are disjoint by Lemma \ref{lem:01divisor}. Thus, since $|M|$ has a finite number of base points, by Lemma \ref{rational} $X$ is ruled, against the assumptions.
So $C$ is contained in a $(-1,0)$ divisor or a $(0,1)$ divisor $E$ of $M$. In the first case, $M$ being nef implies that $MC=0$ and so $C^2<0$ by the index theorem. In the second case, again by nefness $MC\leq 1$ and again by the index theorem $C^2\leq 0$. \end{proof}
\begin{lem}\label{EL}
Let $S$ be a surface and let $M$ be a nef and big divisor of $S$ and let $E$ be a $(0,1)$ divisor of $M$. If $L$ is a divisor such that $(M-L)^2>0$ and $M(M-L)>0$, then $EL\leq 0$.
\end{lem} \begin{proof}
Write $\gamma:=M(M-L)$. Then $M(\gamma E-(M-L))=0$. Since $(M-L)^2>0$ and $E^2=0$, $\gamma (M-L)\not\sim E$. Thus, by the index theorem $0> (\gamma E-(M-L))^2= -2\gamma E(M-L)+(M-L)^2$.
So $E(M-L)>0$, and therefore $EL\leq 0$.\end{proof}
\begin{prop}\label{2M} Let $S$ be a smooth minimal surface of general type and let $M$ be a divisor such that
\begin{itemize}
\item $Z:=K_S-M>0$;
\item the linear system $|M|$ has no fixed components and maps $S$ onto a surface.
\end{itemize}
Then the following hold:
\begin{enumerate}
\item if $M^2\geq 5+KZ$, then $h^0(2M)<h^0(K_S+M)$;
\item if $M^2\geq 5$, $(M-Z)^2>0$ and $M(M-Z)>0$, then there are no $(0,1)$ divisors of $M$. Furthermore $h^0(2M)<h^0(K_S+M)$ and every irreducible fixed component $C$ of $|K_S+M|$ satisfies $MC=0$.
\end{enumerate}\end{prop} \begin{proof}
We observe first of all that $h^{0}(2M)=h^{0}(K_S+M)$ if and only if $Z$ is the fixed part of $|K_S+M|$.
{\rm (i)} Assume for contradiction that
$h^{0}(2M)=h^{0}(K_S+M)$.
Let $C$ be an irreducible component of $Z$. By Proposition \ref{fixed}, $C^{2}\leq 0$ and $MC\leq 1.$ Now $$-2\leq C^{2}+KC\leq C^{2}+KZ,$$ and hence $C^{2}\geq -2-KZ.$ It follows $$(M-C)^{2}=M^{2}-2MC +C^2\geq M^{2}-2 -2 -KZ=M^2-4-KZ>0.$$ In addition, we have: $$M(M-C)=(M-C)^2+C(M-C)\ge (M-C)^2-C^2\ge (M-C)^2>0.$$
Since $MZ\geq 2$ by the 2-connectedness of canonical divisors, there is at least a component $D$ of $Z$ such that $MD>0$. By Proposition \ref{fixed}, we have $MD=1$ and $D$ is contained in a $(0,1)$ divisor $E$ of $M$. Then Lemma \ref{EL} gives $EC\le 0$ for all the components of $Z$, and so $EZ\le 0.$
But now since $ME=1$ and $E^{2}=0$ we obtain that $KE=1+EZ\le 1$. On the other hand, $K_SE$ is $>0$ by the index theorem and it is even by the adjunction formula, hence we have a contradiction.
{\rm (ii)} Let $E$ be a $(0,1)$ divisor of $M$. Then we have $EZ\le 0$ by Lemma \ref{EL} and we get a contradiction as above. So there are no $(0,1)$ divisors of $M$ on $S$.
Hence by Proposition \ref{fixed} every irreducible fixed curve of $|K_S+M|$ satisfies $MC=0$. Since $MZ\geq 2$ by the 2-connectedness of the canonical divisors, not every component of $Z$ can be a fixed component of $|K_S+M|$ and therefore $h^0(K_S+M)>h^0(2M)$. \end{proof}
As a consequence, we obtain the following refinement of Thm. 3.2 and Rem. 3.3 of \cite{deb1}:
\begin{cor}\label{canonical} Let $S$ be a minimal surface of general type whose canonical map is not composed with a pencil. Denote by $M$ the moving part and by $Z$ the fixed part of $|K_S|$. If $Z>0$ and $M^2\geq 5+K_SZ$, then $$K_S^2+\chi(S)=h^0(K_S+M)+K_SZ+MZ/2 \geq h^0(2M)+K_SZ+MZ/2+1.$$
Furthermore, if $h^0(K_S+M)=h^0(2M)+1$ then $|K_S+M|$ has base points and there is a $(-1,0)$ divisor or a $(0,1)$ divisor $E$ of $M$ such that $EZ\geq 1$.
\end{cor}
\begin{proof} Since $M$ is nef and big, by Kawamata-Viehweg vanishing $h^0(K_S+M)=\chi(K_S+M)$, hence the equality follows by the Riemann-Roch theorem whilst the inequality is Proposition \ref{2M}, (i).
For the second assertion it suffices to notice that $h^0(K_S+M)=h^0(2M)+1$ means that the image of the restriction map $H^0(K_S+M)\to H^0(Z, (K_S+M)|_Z)$ is 1-dimensional. Since $(K_S+M) Z\geq 2$, the system $|K_S+M|$ has necessarily base points. Thus there is a $(-1,0)$ divisor or a $(0,1)$ divisor $E$ of $M$. By adjunction $K_SE\equiv E^2$(mod 2) and so necessarily $EZ\geq 1$. \end{proof}
\section{Proofs of Theorem \ref{noq5} and Theorem \ref{irrpencil}} \begin{proof}[Proof of Theorem \ref{noq5}] Let $a\colon S\to A$ be the Albanese map of $S$. Notice that by the classification of surfaces the assumptions that $q(S)=5$ and $S$ has no irrational pencil of genus $>1$ imply that $S$ is of general type and $a$ is generically finite onto its image. Without loss of generality we may assume that $S$ is minimal. By \cite{appendix}, an irregular surface of general type having no irrational pencils of genus $>1$ satisfies $p_g\geq 2q-3$. We assume for contradiction that $p_g(S)=7=2q(S)-3$, so that $\chi(S)=3$. We denote by $\varphi_K\colon S\to \mathbb P^7$ the canonical map and by $\Sigma$ the canonical image. Since $q(S)>2$, $\Sigma$ is a surface by \cite{xiaoirreg}.
We denote by $t$ the rank of the cokernel of the map $a^{\ast}\colon \NS(A) \to \NS (S)$. Note that $t$ is bigger than or equal to the number of irreducible curves contracted by the Albanese map.
Denote as usual by $b_i(S)$ the $i$-th Betti number and by $c_2(S)$ the second Chern class of $S$.
By \cite[Thm.1,(3)]{cau}, we have $b_2(S)\ge 31+t$, namely $c_2(S)\ge 13+t$. By Noether's formula this is equivalent to:
\begin{equation}\label{causin}
K^2_S\le 23-t
\end{equation}
Denote by $\mathbb G^{\vee}$ the Grassmannian of $2$-planes of $H^0(\Omega^1_S)^{\vee}$ and by $\mathbb G$ the Grassmannian of $2$-planes in $H^0(\Omega^1_S)$. By the Castelnuovo--De Franchis theorem, the kernel of the map
$\rho\colon \bigwedge ^2H^{0}(\Omega^1_S) \to H^{0}(K_S)$ does not contain any nonzero simple tensor. Hence $\rho$ induces a morphism $\mathbb G^{\vee}\to\mathbb P(H^0(K_S))$ which is finite onto its image. Since $\dim\mathbb G^{\vee}=6$, it follows that $\ker \rho$ has dimension $3$, $\rho$ is surjective and it induces a finite map $\mathbb G^{\vee}\to \mathbb P(H^0(K_S))$. As a consequence, we have the following facts:
\begin{itemize} \item[(a)] the surface $S$ is generalized Lagrangian, namely there exist independent $1$-forms $\eta_1,\dots,\eta_4\in H^0(\Omega^1_S)$ such that $\eta_1\wedge\eta_2+\eta_3\wedge\eta_4=0$. In addition, we may assume that $\eta_1\wedge \eta_2$ is a general $2$-form of $S$. In that case, the fixed part of the linear system $\mathbb P(\wedge^2V)$, where $V=\langle\eta_1, \dots, \eta_4\rangle$, coincides with the fixed part of the canonical divisor (cf. \cite[\S 3]{severi}). \item[(b)] the canonical image $\Sigma$ is contained in the intersection of $\mathbb G$ with the codimension $3$ subspace $T=\mathbb P(\im \rho^t) \subset \mathbb P^9=\mathbb P(\bigwedge ^2H^{0}(\Omega^1_S))$,
\item[(c)] since $\mathbb G^{\vee}$ is the dual variety of $\mathbb G$, the space $T$ is not contained in a hyperplane tangent to $\mathbb G$, hence $Y:=\mathbb G\cap T$ is a smooth threefold.
\end{itemize}
Using the Lefschetz hyperplane section theorem, we see that $\Pic(Y)$ is generated by the class of a hyperplane. Then $\Sigma$ is the scheme-theoretic intersection of $Y$ with a hypersurface of degree $m\geq 2$ of $\mathbb P^6$. Thus, since $\mathbb G$ has degree 5 (cf. \cite[Cor.1.11]{mukai}), it follows that $\deg\Sigma=5m$ and $\omega_{\Sigma}=\mathcal O_{\Sigma}(m-2)$.
By the proof of Thm. 1.2 of \cite{mr}, the degree $d$ of $\varphi_K$ is different from $2$. Since $K^2_S\le 23$ by \eqref{causin}, the inequality $K^2_S\ge d\deg\Sigma=5dm$ gives $d=1$, namely $\varphi_K$ is birational onto its image. So we have $m\ge 3$, since $\omega_{\mathbb G}=\mathcal O_{\mathbb G}(-5)$ (cf. \cite[ Prop. 1.9]{mukai}) and $\Sigma$ is of general type.
Write $|K_S|=|M|+Z$, where $Z$ is the fixed part and $M$ is the moving part. If $Z=0$, then in view of (a) we have $K_S^2\geq 8\chi=24$ by \cite[Thm.1.2]{bnp}. This would contradict \eqref{causin}, hence $Z>0$.
Since $m>2$, every quadric that
contains $\Sigma$ must contain $Y$. Recall that $Y$ is obtained from $\mathbb G$ by intersecting with 3 independent linear sections. Denote by $R$ the homogeneous coordinate ring of $\mathbb G$. Since $R$ is Cohen--Macaulay and $Y$ has codimension $3$ in $\mathbb G$, these 3 linear sections form an $R$-regular sequence. As a consequence (cf. \cite[Prop.1.1.5]{bruns}) the (vector) dimension of the space of quadrics of $\mathbb P^6$ containing $Y$ is the same as the (vector) dimension of the space of quadrics of $\mathbb P^9$ containing $\mathbb G$.
Since the latter dimension is $5$ (cf. \cite[Prop.1.2]{mukai}), it follows that:
$$h^0(2M)\geq h^0(\mathcal O_{\mathbb P^6}(2))-5=23.$$
Then by \eqref{causin} and Corollary \ref{canonical} we have:
\begin{equation}\label{eq:23}
26-t\geq K_S^2+\chi(S)=h^0(K_S+M)+K_SZ+MZ/2 \geq 23+K_SZ+MZ/2+1.
\end{equation} So $K_SZ+MZ/2\leq 2-t$. Recall that $MZ\ge 2$ by the $2$-connectedness of canonical divisors.
Assume $K_SZ=0$. Then every component of $Z$ is an irreducible smooth rational curve with self-intersection $-2$ and as such it is contracted by the Albanese map. Since $K_SZ+MZ/2\leq 2-t$, the only possibility is
$t=1$ and $MZ=2$. Hence $Z=rA$, where $A$ is a $-2$-curve. Since $MZ=2$ and $K_SZ=0$, we have $Z^2=-2$ and so $r=1$. Hence $Z$ is a $-2$-cycle of type $A_1$. Then, again by (a) and \cite[Thm.1.2]{bnp}, we get $K^2\geq 8\chi=24$, a contradiction.
So $K_SZ>0$. Then by \eqref{eq:23} necessarily $K_SZ=1$, $MZ=2$ (yielding $Z^2=-1$) and $h^0(K_S+M)= 23=h^0(2M)+1$. Then by Corollary \ref{canonical}, there is a $(-1,0)$ or a $(0,1)$ divisor $E$ of $M$, and, since the hypotheses of Proposition \ref{2M}, (ii) are satisfied, $E$ must be a $(-1,0)$ divisor of $M$.
Then $M(E+Z)=2$ and so by the algebraic index theorem $M^2(E+Z)^2- 4\leq 0$, yielding $(E+Z)^2\leq 0$. Since $(E+Z)^2=-2+2EZ$ and, by Corollary \ref{canonical}, $EZ\geq 1$, the only possibility is $EZ=1$ and $(E+Z)^2=0$. In this case $K_S(E+Z) =2$ and this is impossible
by \cite[Proposition 8.2]{bnp}, where it is shown that a minimal irregular surface with $q\geq 4$, having no irrational pencils of genus $>1$, cannot have effective divisors of arithmetic genus 2 and self-intersection $0$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{irrpencil}] By \cite{appendix}, a surface of general type $S$ with $q(S)=5$ has $p_g(S)\ge 6$ and, in addition, if $p_g(S)=6$ then $S$ is the product of a curve of genus $2$ and a curve of genus $3$.
Now statement (ii) is a consequence of Theorem \ref{noq5} and \cite[Thm.1.1]{mr}.
\end{proof}
\begin{minipage}{13.0cm} \parbox[t]{6.5cm}{Margarida Mendes Lopes\\ Departamento de Matem\'atica\\ Instituto Superior T\'ecnico\\ Universidade T{\'e}cnica de Lisboa\\ Av.~Rovisco Pais\\ 1049-001 Lisboa, PORTUGAL\\ mmlopes@math.ist.utl.pt
}
\parbox[t]{5.5cm}{Rita Pardini\\ Dipartimento di Matematica\\ Universit\`a di Pisa\\ Largo B. Pontecorvo, 5\\ 56127 Pisa, Italy\\ pardini@dm.unipi.it}
\vskip1.0truecm
\parbox[t]{5.5cm}{Gian Pietro Pirola\\ Dipartimento di Matematica\\ Universit\`a di Pavia\\ Via Ferrata, 1 \\
27100 Pavia, Italy\\ \email{gianpietro.pirola@unipv.it}} \end{minipage}
\end{document} | arXiv | {
"id": "1003.5991.tex",
"language_detection_score": 0.7874655723571777,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{On the Refinement of Certain Statistics on Alternating Words}
\begin{abstract}
In this paper, we investigate statistics on alternating words under correspondence between ``possible reflection paths within several layers of glass'' and ``alternating words''. For $v=(v_1,v_2,\cdots,v_n)\in\mathbb{Z}^{n}$, we say $P$ is a path within $n$ glass plates corresponding to $v$, if $P$ has exactly $v_i$ reflections occurring at the $i^{\rm{th}}$ plate for all $i\in\{1,2,\cdots,n\}$.
We give a recursion for the number of paths corresponding to $v$ satisfying $v \in \mathbb{Z}^n$ and $\sum_{i\geq 1} v_i=m$. Also, we establish recursions for statistics around the number of paths corresponding to a given vector $v\in\mathbb{Z}^n$ and a closed form for $n=3$.
Finally, we give an equivalent condition for the existence of a path corresponding to a given vector $v$.
\end{abstract}
\section{Introduction}
There are several results about reflections across glass plates. For example, if $a_m$ is the number of distinct paths within $3$ glass plates, such that there are exactly $m$ reflections, then the sequence $\{a_m\}_{m\in\mathbb{N}}$ satisfies the recursion $a_m=a_{m-1}+a_{m-2}$. In \cite{junge1973polynomials}, Junge and Hoggatt generalized the statistic $a_m$ to $n$ plates, and studied a related matrix equation and its characteristic polynomial. In \cite{hoggatt1979reflections}, Hoggatt considered statistics on paths of fixed length. In the present paper, we will focus on other statistics which are also quite natural. \begin{definition}\label{def} Within $n$ glass plates: \begin{enumerate}
\item We say $P$ is a \emph{path between $n$ glass plates} if $P$ is a polygonal curve with peaks and valleys lying in some plates. In this case, peaks and valleys are called reflections.
\item For $i<n$, we say a path $P$ \emph{starts at $i^{\rm{th}}$ plate} if the first reflection occurs at $j^{\rm{th}}$ plate such that $i<j$.
\item We say $P$ is a path \emph{corresponding to a given vector $v=(v_1,v_2,\cdots,v_n)\in\mathbb{Z}^n$} if $P$ starts at first plate and has exactly $v_i$ reflections at the $i^{\rm{th}}$ plate for all $i\in\{1,2,\cdots,n\}$. On the other hand, we call $v(P)$ the vector determined by path $P$.
\item For $v \in \mathbb{Z}^n$, $N(v)$ is the number of paths $P$ such that $v(P)=v$.
\item $a^n_m$ is the number of paths $P$ such that $\sum v(P)=m$.
\item $b^n_m$ is the number of $v$ satisfying $v=v(P)$ and $\sum v(P)=m$ for some path $P$.
\end{enumerate} \end{definition}
\begin{example}\label{exa:a^3_4} The first two rows of figure \ref{fig:a^3_4} show the distinct $a^3_4=8$ paths and their corresponding vectors within $3$ glass plates with $4$ reflections. Note that the vector $(2,1,1)$ corresponds to $2$ different paths, so we have $N(2,1,1)=2$. Moreover, since the $8$ paths correspond to only $6$ different vectors, we get $b^3_4=6$. \end{example}
\begin{figure}
\caption{Paths within $3$ glass plates with $4$ reflections}
\label{fig:a^3_4}
\end{figure}
A word $w=w_1 w_2 \cdots w_n$ is (down-up) alternating if $w_1>w_2<w_3>w_4<\cdots$. If $N_{n,m}$ is the number of alternating words of length $m$ over alphabet $\{1,2,\cdots,n\}$, then $N_{n,m}=a^n_m$ for all $m\ge2$, $n\ge 2$ because of one-to-one correspondence between paths within plates and alternating words (see figure \ref{fig:a^3_4}). Moreover, in view of alternating words, $b^n_m$ is the number of equivalence classes where $w_1$ and $w_2$ are related if the numbers of each alphabet in $w_1$ and $w_2$ are the same respectively. In figure \ref{fig:a^3_4}, we see $2131$ and $3121$ are equivalent since they both contain two ``$1$'', one ``$2$'' and one ``$3$''. $N(v)$ is the number of words with $i$ occurring $v_i$ times. See \cite{gao2016pattern} for more information about alternating words.
\section{A closed form of $a^n_m$} \begin{definition} We define $a^n_{m,j}$ as the number of paths within $n$ glass plates which start at the first plate with $m$ reflections, and ``the last reflection'' occur at the $j^{\rm{th}}$ plate. \end{definition}
From definition, we know that $a^n_m=\sum_{j=1}^n a^n_{m,j}$. Moreover, it is easy to see $a^n_{2i,j}=\sum_{j<j'} a^n_{2i-1,j'}$ and $a^n_{2i+1,j}=\sum_{j'<j} a^n_{2i,j'}$ which lead to the following theorem.
\begin{theorem} For positive integers $n\ge 2$, $m\ge 1$, and $1\le j\le n$, we have \[a^n_{m,j}=\sum_{\ell=0}^{\lfloor{\frac{m-1}{2}}\rfloor}(-1)^{\ell+{\lfloor{\frac{m-1}{2}}\rfloor}}a_{\ell} C(n-j+\lfloor{\frac{m-1}{2}}\rfloor-\ell,m-1-2\ell)\] where $a_{\ell}=\sum_{i=1}^{\ell}(-1)^{i+1}C(n+i-1,2i)\times a_{\ell-i}$ with $a_0=1$, and $C(\alpha,\beta)={\alpha !}/[\beta !(\alpha-\beta)!]$. \end{theorem}
\begin{proof} We give a proof by induction on $m$. It is easy to see that $a^n_{1,j}=C(n-j,0)$ and $a^n_{2,j}=C(n-j,1)$ which satisfy the formula. If $m$ is even, say $m=2r$, then by induction hypothesis and the recursion mentioned above, we have \begin{equation}\nonumber
\begin{aligned}
a^n_{2r,j}&=\sum_{j'=j+1}^n a^n_{2r-1,j'}=\sum_{j'=j+1}^n\sum_{\ell=0}^{r-1}(-1)^{\ell+r-1}a_{\ell} C(n-j'+r-1-\ell,2r-2-2\ell)\\
&=\sum_{\ell=0}^{r-1}(-1)^{\ell+r-1}a_{\ell} \sum_{j'=j+1}^n C(n-j'+r-1-\ell,2r-2-2\ell)\\
&=\sum_{\ell=0}^{r-1}(-1)^{\ell+r-1}a_{\ell} C(n-j+r-1-\ell,2r-1-2\ell).
\end{aligned}
\end{equation} Similarly, if $m$ is odd, say $m=2r+1$, we have \begin{equation}\nonumber
\begin{aligned}
a^n_{2r+1,j}&=\sum_{\ell=0}^{r-1}(-1)^{\ell+r-1}a_{\ell} \sum_{j'=1}^{j-1} C(n-j'+r-1-\ell,2r-1-2\ell)\\
&=\sum_{\ell=0}^{r-1}(-1)^{\ell+r-1}a_{\ell} [C(n+r-\ell-1,2r-2\ell)-C(n-j+r-\ell,2r-2\ell)]\\
&=\sum_{\ell=0}^{r-1}(-1)^{\ell+r}a_{\ell} C(n-j+r-\ell,2r-2\ell)+\sum_{\ell=0}^{r-1}(-1)^{\ell+r-1}a_{\ell} C(n+r-\ell-1,2r-2\ell)\\
&=\sum_{\ell=0}^{r-1}(-1)^{\ell+r}a_{\ell} C(n-j+r-\ell,2r-2\ell)+a_r\\
&=\sum_{\ell=0}^{r}(-1)^{\ell+r}a_{\ell} C(n-j+r-\ell,2r-2\ell).
\end{aligned}
\end{equation} \end{proof}
\section{The generating function and recursions of $N(v)$} In this section, our goal is that for $v=(v_1,v_2,\cdots,v_n)\in\mathbb{Z}^n$, we want to find the generating function $D$ of $N(v)$ such that \[D(t_1,t_2,\cdots,t_n)=\sum_{(v_1,v_2,\cdots,v_n)\in\mathbb{Z}^n}N(v_1,v_2,\cdots,v_n)\cdot t_1^{v_1}t_2^{v_2}\cdots t_n^{v_n}.\] For this purpose, we have the following definitions. \begin{definition} We define $D^n_i$ to be the generating function corresponding to the number of possible paths within $n$ plates starting at the $i^{\rm{th}}$ plate. \end{definition}
\begin{definition} For every function of form $D(t_1,t_2,\cdots,t_n)$, we define \[D'(t_1,t_2,\cdots,t_n)=D(t_n,t_{n-1},\cdots,t_1).\] \end{definition}
\begin{lemma}\label{lem:D^2_1} The formula of the generating function $D_1^2$ is given by $$D^2_1=\frac{1+t_2}{1-t_1t_2}.$$ \end{lemma}
\begin{proof} We classify all paths into two classes: the paths which have no reflection, and those with first reflection occurring at the $2^{\rm{nd}}$ plate. Then we have \begin{equation}\label{D^2_1}
D_{1}^{2} = 1+t_{2} D_{1}^{2}{'}. \end{equation} and, taking prime on both sides, \begin{equation}\label{D^2_1'}
D^2_1{'}=1+t_1D^2_1. \end{equation} From (\ref{D^2_1}) and (\ref{D^2_1'}), we have \[D^2_1=1+t_2(1+t_1D^2_1)\] and the conclusion follows. \end{proof}
\begin{remark} Note that $D^2_1=({1+t_2})/({1-t_1t_2})=(1+t_2)\sum_{i\ge 0}t_{1}^{i}t_{2}^{i}$. It implies that each of the vectors $(0,0),(0,1),(1,1),(1,2),(2,2),(2,3),\cdots$ corresponds to exact one path respectively and the others have no corresponding path. Figure \ref{fig:D^2_1} justifies this result. \end{remark}
\begin{figure}
\caption{Paths within $2$ glass plates}
\label{fig:D^2_1}
\end{figure}
The technique in the proof of Lemma \ref{lem:D^2_1} can be applied to compute $D^n_1$ for $n\ge 3$. However, this process can be very complicated for large $n$. Next, we will provide another recursion for later discussion instead.
\begin{lemma}\label{lem:D^n_i} For positive integers $n\ge 2$, $i\le n$, the generating function $D^n_i$ satisfies \[D^n_i=D^n_1-\sum_{j=2}^{i}t_j-\sum_{1\le a<b\le i}t_b t_a D^n_a.\] \end{lemma}
\begin{proof} The set of all paths starting at the $i^{\rm{th}}$ plate, is equal to the set of all paths starting at the first plate but without those paths with first reflection occurring at $j^{\rm{th}}$ plate for some $j<i$. Moreover, the set of paths with first reflection occurring at $j^{\rm{th}}$ plate for some $j<i$ consists of paths with only one reflection and those have at least two reflections. So we have $D^n_i=D^n_1-(\sum_{j=2}^{i}t_j+\sum_{1\le a<b\le i}t_b t_a D^n_a)$ \end{proof}
\begin{theorem}\label{thm:D^n_1} For a positive integer $n\ge 2$, the generating function $D^{n}_1$ satisfies \[D^{n}_{1}=\frac{nu(D^{n-1}_{1})+t_{n}D_{1}^{n\prime}}{de (D^{n-1}_1)}\] where $nu(D^{n-1}_1)$ is the numerator of $D^{n-1}_1$, and $de(D^{n-1}_1)$ is the denominator of $D^{n-1}_1$. \end{theorem}
\begin{proof} Iterating lemma \ref{lem:D^n_i}, for $n\ge 2$, we can see that each term of $nu(D^n_1)$ except for $1$ has odd degree, and each term of $de(D^n_1)$ has even degree.
Now we classify paths in $D^n_1$ into two classes: paths containing a reflection which occurs at the $n^{\rm{th}}$ plate, and paths whose reflections do not occur at the $n^{\rm{th}}$ plate. A path in the first class must be a path with even reflections in $D^{n-1}_1$, connecting with a bottom up path starting at the $n^{\rm{th}}$ plate. So the generating function of this class is \[e(D^{n-1}_1)t_n D^{n\prime}_1\] where $e(D^{n-1}_1)$ is the even degree terms of $D^{n-1}_1$. The generating function of the second class is $D^{n-1}_1$. Then we get \begin{equation}\nonumber
\begin{aligned}
D^{n}_1\times de(D^{n-1}_1)
&=[D^{n-1}_1 +e(D^{n-1}_1)t_n D^{n\prime}_1]\times de(D^{n-1}_1)\\
&=D^{n-1}_1\times de(D^{n-1}_1)+e(D^{n-1}_1)t_n D^{n\prime}_1\times de(D^{n-1}_1)\\
&=nu(D^{n-1}_1)+t_n D^{n\prime}_1 [e(D^{n-1}_1)\times de(D^{n-1}_1)].
\end{aligned}
\end{equation} Since $D^{n-1}_1\times de(D^{n-1}_1)=nu(D^{n-1}_1)$ and each term of $de(D^{n-1}_1)$ has even degree, we know that the sum of even degree terms in $nu(D^{n-1}_1)$ is $e(D^{n-1}_1)\times de(D^{n-1}_1)$. The only term of $nu(D^{n-1}_1)$ of even degree is $1$, and then we have $e(D^{n-1}_1)\times de(D^{n-1}_1)=1$. \end{proof}
\begin{example}\label{exa:D^3_1} The above theorem gives a recursion to compute $D^{n}_1$. For example, by Lemma \ref{lem:D^2_1}, we know $D^2_1=(1+t_2)/(1-t_1t_2)$; then, applying Theorem \ref{thm:D^n_1}, \begin{equation}\label{D^3_1}
D^3_1=\frac{1+t_2+t_3 D^{3\prime}_1}{1-t_1 t_2} \end{equation} and taking prime on both sides, \begin{equation}\label{D^3_1'}
D^{3\prime}_1=\frac{1+t_2+t_1 D^{3}_1}{1-t_3 t_2}. \end{equation} From (\ref{D^3_1}) and (\ref{D^3_1'}), we have \[D^3_1=\frac{1+t_2+t_3 [\frac{1+t_2+t_1 D^{3}_1}{1-t_3 t_2}]}{1-t_1 t_2}.\] Hence \[D^3_1=\frac{1+t_2+t_3-t_2^2 t_3}{1-t_1 t_2 -t_2 t_3 -t_3 t_1 +t_1 t_2^2 t_3}.\] \end{example}
\begin{theorem} For $v\in\mathbb{Z}^n$, we define $m^n_v=N(v)$. Let $v\in\mathbb{Z}^n$ with $v_1 v_n\neq 0$, $n\ge 2$. If $v\cdot\vec{1}$ is odd, then
\[m^n_v=m^n_{v-e_1 -e_2}+m^n_{v-e_n}+\sum_{\substack{u\in\mathbb{Z}^{n-2}\\ u\cdot\vec{1}\ \rm{is}\ \rm{odd}}}m^{n-2}_u m^n_{v-\overline{u}-e_1}+\sum_{\substack{0\neq u\in\mathbb{Z}^{n-2}\\ u\cdot\vec{1}\ \rm{is}\ \rm{even}}}m^{n-2}_u m^n_{v-\overline{u}-e_n}\] and, if $v\cdot\vec{1}$ is even, then
\[m^n_v=m^n_{v-e_n -e_{n-1}}+m^n_{v-e_1}+\sum_{\substack{u\in\mathbb{Z}^{n-2}\\ u\cdot\vec{1}\ \rm{is}\ \rm{odd}}}m^{n-2}_u m^n_{v-r(\overline{u})-e_n}+\sum_{\substack{0\neq u\in\mathbb{Z}^{n-2}\\ u\cdot\vec{1}\ \rm{is}\ \rm{even}}}m^{n-2}_u m^n_{v-r(\overline{u})-e_1}\] where $\overline{u}=(0,u,0)$, $\{e_i\}$ is the standard basis, and $r(u)$ is the reverse of $u$. \end{theorem}
\begin{proof} Suppose $v\cdot\vec{1}$ is odd. First, we classify all paths within $n$ glass plates into four classes (see figure \ref{fig:recursion of N(v)}): the paths whose first reflection occurs at $2^{\rm{nd}}$ plate; the paths whose first reflection occurs at $n^{\rm{th}}$ plate; the paths, with last reflection occurring at $1^{\rm{st}}$ plate and other reflections occurring between $2^{\rm{nd}}$ and $(n-1)^{\rm{th}}$ plates, connecting with a path starting at $1^{\rm{st}}$ plate, and the paths not belonging to the previous three classes. This partition gives us \begin{equation}\label{eq:recursion of m} m^n_v=m^n_{v-e_1-e_2}+m^n_{r(v-e_n)}+\sum_{\substack{u\in\mathbb{Z}^{n-2}\\ u\cdot\vec{1}\ \rm{is}\ \rm{odd}}}m^{n-2}_u m^n_{v-\overline{u}-e_1}+\sum_{\substack{0\neq u\in\mathbb{Z}^{n-2}\\ u\cdot\vec{1}\ \rm{is}\ \rm{even}}}m^{n-2}_u m^n_{r(v-\overline{u}-e_n)}. \end{equation}
Note that if $v'\cdot\vec{1}$ is even, then ``paths corresponding to $v'$'' and ``paths corresponding to $r(v')$'' are in one-to-one correspondence, which implies $m^n_{v'}=m^n_{r(v')}$. Therefore, we have $m^n_{r(v-e_n)}=m^n_{v-e_n}$ and $m^n_{r(v-\overline{u}-e_n)}=m^n_{v-\overline{u}-e_n}$, and together with equation (\ref{eq:recursion of m}) the conclusion follows. The proof in the case where $v\cdot\vec{1}$ is even is similar. \end{proof}
\begin{figure}
\caption{Paths within $n$ glass plates with $v_1 v_n\neq 0$}
\label{fig:recursion of N(v)}
\end{figure}
\begin{remark} If $v_1 v_n=0$, say $v_n=0$, then we reduce the problem to case $v\in\mathbb{Z}^{n-1}$. \end{remark}
\section{A closed form of $N(v)$ within $3$ glass plates} From now on, we focus on the case $n=3$. Recalling example \ref{exa:D^3_1}, we have \[D^3_1=\frac{1+t_2+t_3-t_2^2 t_3}{1-t_1 t_2 -t_2 t_3 -t_3 t_1 +t_1 t_2^2 t_3}.\] Note that the coefficient of the term $t_1^x t_2^y t_3^z$ in $D^3_1$ is $N(x,y,z)$. To find a closed form, we compute the Taylor series of $D^3_1$, and put the coefficient of the term $t_1^n t_2^i t_3^j$ into matrices. \begin{definition} We define the matrix $M_n$ as the following: the $(i,j)$ entry of $M_n$ is $m^n_{i,j}=N(n,i,j)$, and we require that the indices $i$ and $j$ start from $0$. Notice that we do not determine the size of $M_n$ at this moment. \end{definition}
\begin{remark} It is easy to observe that matrix $M_n$ has $2n+3$ nonzero sequences which are parallel to the main diagonal (See figure 4 to figure 7). \end{remark}
\begin{figure}
\caption{Matrix $M_0$}
\caption{Matrix $M_1$}
\end{figure}
\begin{figure}
\caption{Matrix $M_2$}
\caption{Matrix $M_3$}
\end{figure}
\begin{definition} In matrix $M_n$, \emph{the} $i^{\rm{th}}$ \emph{sequence} $S^n_i$ is the $i^{\rm{th}}$ nonzero sequence from the bottom of $M_n$ which is parallel to the diagonal. Moreover, we require the sequence starting from the first nonzero term. Figure $8$ marks the $1^{\rm{st}}$ to the $9^{\rm{th}}$ sequences of $M_3$. \end{definition}
\begin{definition} Given a sequence $S=\{a_n\}_{n\ge 0}$, we call itself the \emph{difference sequence of $S$ of order $0$}, and by recurrence, we define the \emph{difference sequence of $S$ of order $i$} to be $d(S,i)=\{c_{n+1}-c_n\}_{n\ge 0}$ where $\{c_n\}_{n\ge 0}$ is the difference sequence of $S$ of order $i-1$. Moreover, a sequence $\{a_n\}_{n\ge 0}$ is called an \emph{arithmetic sequence of order} $0$ if it is a constant sequence. A sequence $\{a_n\}_{n\ge 0}$ is called an \emph{arithmetic sequence of order} $k$ if the sequence $\{a_{n+1}-a_n\}_{n\ge 0}$ is an arithmetic sequence of order $k-1$. Figure $9$ gives an arithmetic sequence of order $3$. Finally, the leading term of a sequence $S$ is denoted by $LT(S)$. \end{definition}
\begin{figure}
\caption{Arithmetic sequence of higher order}
\end{figure}
Observing figure $4$ to figure $7$, we can see that $LT(S^n_1)=1$ and the other terms of $S^n_1$ are $0$. We also observe that \begin{equation*}
LT(S^n_i)=
\begin{cases}
C(n+1,\frac{i-1}{2}) & \textrm{if $i$ is odd}\\
C(n,\frac{i}{2}-1) & \textrm{if $i$ is even}.
\end{cases} \end{equation*} Finally, for integers $n\ge k\ge 0$, $S^n_{2k+2}$ and $S^n_{2k+3}$ are arithmetic sequences of order $k$. These properties will derive a closed form for any entry of $M_n$. We will prove these observations in up-coming theorems.
\begin{lemma}[Tick Lemma] \label{lem:tick} The elements $m^n_{i,j}$ of $M_n$ satisfy the recursion \[m^n_{i,j}=m^{n-1}_{i-1,j}+\sum_{k\ge 0}m^{n-1}_{i-k,j-1-k}.\] \end{lemma}
\begin{proof} We classify all paths into two classes: paths with first reflection occurring at $2^{\rm{nd}}$ plate, and paths with first reflection occurring at $3^{\rm{rd}}$ plate. Hence we have \[N(n,i,j)=N(n-1,i-1,j)+N'(n,i,j-1)\] where $N'(c_1,c_2,c_3)$ is the number of bottom-up paths starting at the $3^{\rm{rd}}$ plate in which the $i^{\rm{th}}$ plate has exactly $c_i$ reflections. Inductively, we get \begin{equation}\nonumber \begin{aligned} N(n,i,j)&=N(n-1,i-1,j)+N'(n,i,j-1)\\ &=N(n-1,i-1,j)+N(n-1,i,j-1)+N'(n,i-1,j-2)\\ &=N(n-1,i-1,j)+N(n-1,i,j-1)+N(n-1,i-1,j-2)+N'(n,i-2,j-3)\\ &\hspace{3.25cm}\vdots\\ &=N(n-1,i-1,j)+\sum_{k\ge 0}N(n-1,i-k,j-1-k). \end{aligned} \end{equation} \end{proof}
\begin{remark}
By Lemma \ref{lem:tick}, the $(3,4)$ entry in figure $11$ satisfies \[m^3_{3,4}=m^2_{2,4}+m^2_{3,3}+m^2_{2,2}+m^2_{1,1}+m^2_{0,0},\] which is the sum of some elements forming a tick in figure $10$. In general, $m^n_{i,j}$ is equal to the sum of a tick in $M_{n-1}$. \end{remark}
\begin{figure}
\caption{Matrix $M_2$}
\caption{Matrix $M_3$}
\end{figure}
\begin{theorem} For nonnegative integer $n\ge 0$, $i\in\{1,2,\cdots,2n+3\}$, $S^n_i$ are the only $2n+3$ nonzero sequences parallel to diagonal in $M_n$. Moreover, each term of $S^n_1$ is $0$ except $LT(S^n_1)=1$, and for any $n\ge k\ge 0$, $S^n_{2k+2}$ and $S^n_{2k+3}$ are arithmetic sequences of order $k$. \end{theorem}
\begin{proof} We give a proof by induction on $n$. It is trivial to verify the case $n=0,1$. For $n\ge 2$, for any $n-1\ge k\ge 1$, $S^n_{2k+2}=\{m^n_{(n-1-k+i),(k+i)}\}_{i\ge 0}$, then lemma \ref{lem:tick} states that \[m^n_{(n-1-k+i),(k+i)}=m^{n-1}_{(n-2+k+i),(k+i)}+\sum_{j\ge 0}m^{n-1}_{(n-1-k+i-j),(k+i-1-j)}.\] By induction hypothesis, for any $i\ge 0$, $\{m^{n-1}_{(n-1-k+i-j),(k+i-1-j)}\}_{j\ge 0}$ is an arithmetic sequence of order $k-1$, so $\{\sum_{j\ge 0}m^{n-1}_{(n-1-k+i-j),(k+i-1-j)}\}_{i\ge 0}$ is an arithmetic sequence of order $k$. Applying hypothesis again, $\{m^{n-1}_{(n-2+k+i),(k+i)}\}_{i\ge 0}$ is an arithmetic sequence of order $k$. Hence we know $S^n_{2k+2}$ is an arithmetic sequence of order $k$. A similar argument works for other cases. \end{proof}
\begin{theorem} \label{thm:anti-diagonal} For integers $n\ge0$, $2n+3\ge i\ge 1$, there holds \begin{equation*}
LT(S^n_i)=
\begin{cases}
C(n+1,\frac{i-1}{2}) & \textrm{if $i$ is odd}\\
C(n,\frac{i}{2}-1) & \textrm{if $i$ is even}.
\end{cases} \end{equation*}
\end{theorem}
\begin{proof} In matrix $M_n$, since $m^n_{i,j}$ is the number of paths corresponding to $(n,i,j)$, the entries $m^n_{a,b}$ in the first nonzero sequence parallel to the anti-diagonal must satisfy $a+b=n$, and the entries $m^n_{c,d}$ in the second sequence must satisfy $c+d=n+1$. Therefore, whenever $c+d=n+1$, $m^n_{c,d}=C(n+1,c)$ because it is the number of permutations of $c$ v-shaped paths whose reflection occurs at the second plate and $d$ v-shaped paths whose reflection occurs at the third plate. Figure $12$ shows all paths corresponding to $(3,2,2)$. Similarly, we have $m^n_{a,b}=C(n,a)$ for $a+b=n$. \end{proof}
\begin{figure}
\caption{Paths corresponding to $(3,2,2)$}
\end{figure}
To obtain a closed form of $m^n_{i,j}$, we need one more property about matrices $M_n$: figure $13$ shows $d(S^4_9,j)$ for $0\le j\le 3$. Note that the ratio of $LT(d(S^4_9,j))$ is $1:3:3:1$. Similarly, we have the ratio of $LT(d(S^5_{11},j))$ is $1:4:6:4:1$ (see figure $14$). In general, in matrix $M_n$, for any $i\ge 2$, ratio of the leading terms of difference sequences of $S^n_i$ is the binomial coefficients.
\begin{figure}
\caption{The difference sequences of $S^4_9$}
\caption{The difference sequences of $S^5_{11}$}
\end{figure}
\begin{lemma} \label{lem:LT} In matrix $M_n$, the leading term of the difference sequence of the $i^{\rm{th}}$ sequence of order $j$ satisfies the recursion: \[LT(d(S^n_i,j))=LT(d(S^{n-1}_i,j))+LT(d(S^{n-1}_{i-2},j))+LT(d(S^{n-1}_{i-2},j-1)).\] \end{lemma}
\begin{proof} Let $\{a_j\}_{j\ge 0}$ and $\{b_j\}_{j\ge 0}$ be the $i^{\rm{th}}$ and the $(i-2)^{\rm{th}}$ sequence of $M_{n-1}$ respectively. By lemma \ref{lem:tick}, we see \[LT(d(S^n_i,j))=\sum_{k=0}^{j}(-1)^k C(j,k)a_{j-k}+\sum_{k=0}^{j-1}(-1)^k C(j-1,k)b_{j-k}.\] Furthermore, it is also trivial to compute \[LT(d(S^{n-1}_i,j))=\sum_{k=0}^{j}(-1)^k C(j,k)a_{j-k}\] and \begin{equation}\nonumber \begin{aligned}
LT(d(S^{n-1}_{i-2},j))+LT(d(S^{n-1}_{i-2},j-1))&=\sum_{k=0}^{j}(-1)^k C(j,k)b_{j-k}+\sum_{k=0}^{j-1}(-1)^k C(j-1,k)b_{(j-1)-k}\\
&=\sum_{k=0}^{j-1}(-1)^k C(j-1,k)b_{j-k} \end{aligned} \end{equation} which completes the proof. \end{proof}
\begin{theorem} \label{thm:LT} In matrix $M_n$, for $i\ge 2$, $0\le j\le \lfloor{i/2}\rfloor-1$, there exists a constant $k_{n,i}$ such that \begin{equation*}
LT(d(S^n_i,j))=
\begin{cases}
k_{n,i}C(\frac{i}{2}-1,j) \textrm{ if $i$ is even}\\
k_{n,i}C(\frac{i-1}{2}-1,j) \textrm{ if $i$ is odd.}
\end{cases} \end{equation*} \end{theorem}
\begin{proof} We give a proof by induction on $n$. First, it is trivial to verify the case $n=0$. For $n\ge 1$, $i\ge 2$, we can apply lemma \ref{lem:LT} to get \[LT(d(S^n_i,j))=LT(d(S^{n-1}_i,j))+LT(d(S^{n-1}_{i-2},j))+LT(d(S^{n-1}_{i-2},j-1)).\] If $i$ is even, by induction hypothesis, we have \begin{equation}\nonumber \begin{aligned}
LT(d(S^n_i,j))&=LT(d(S^{n-1}_i,j))+LT(d(S^{n-1}_{i-2},j))+LT(d(S^{n-1}_{i-2},j-1))\\
&=k_{n-1,i}C(\frac{i}{2}-1,j)+k_{n-1,i-2}C(\frac{i-2}{2}-1,j)+k_{n-1,i-2}C(\frac{i-2}{2}-1,j-1)\\
&=k_{n-1,i}C(\frac{i}{2}-1,j)+k_{n-1,i-2}C(\frac{i-2}{2},j)\\
&=[k_{n-1,i}+k_{n-1,i-2}]C(\frac{i}{2}-1,j). \end{aligned} \end{equation} Similar, if $i$ is odd, we have \begin{equation}\nonumber \begin{aligned}
LT(d(S^n_i,j))&=LT(d(S^{n-1}_i,j))+LT(d(S^{n-1}_{i-2},j))+LT(d(S^{n-1}_{i-2},j-1))\\
&=k_{n-1,i}C(\frac{i-1}{2}-1,j)+k_{n-1,i-2}C(\frac{i-3}{2}-1,j)+k_{n-1,i-2}C(\frac{i-3}{2}-1,j-1)\\
&=k_{n-1,i}C(\frac{i-1}{2}-1,j)+k_{n-1,i-2}C(\frac{i-3}{2},j)\\
&=[k_{n-1,i}+k_{n-1,i-2}]C(\frac{i-1}{2}-1,j). \end{aligned} \end{equation} \end{proof}
\begin{example}\label{exa:closed form} The number of paths corresponding to $(7,5,7)$ is $N(7,5,7)=840$. \end{example}
\begin{proof} By definition, $N(7,5,7)=m^7_{5,7}$ and $m^7_{5,7}$ is the $3^{\rm{rd}}$ term of $S^7_{11}$ (see figure $15$). Theorem \ref{thm:anti-diagonal} says that $LT(S^7_{11})=C(8,3)=56$, and ratio of leading terms of difference sequences of $S^7_{11}$ is $1:4:6:4:1$ by theorem \ref{thm:LT}. So we get a diagram of difference sequences in figure $16$ where the question mark is $m^7_{5,7}$. Finally, $S^7_{11}$ is an arithmetic sequence of order $4$, which implies $m^7_{5,7}=56+224\times 2+336=840$. \end{proof}
\begin{figure}
\caption{The matrix $M_7$}
\caption{Difference sequences of $S^7_{11}$}
\end{figure}
Generalizing the method in example \ref{exa:closed form}, we obtain the following closed form of $N(v)$: \begin{theorem} \label{thm:closed form} For integers $n,i,j\ge 0$, the number of paths corresponding to $(n,i,j)$ is \[N(n,i,j)=m^n_{i,j}=C(k,\frac{k+j-i}{2})\times C(\lfloor{\frac{\ell-2}{2}}\rfloor+\lfloor{\frac{i+j-k}{2}}\rfloor,\lfloor\frac{\ell-2}{2}\rfloor)\] where $k=n-1+\gcd(2,i+j+n+1)$, $\ell=k+j-i+\gcd(2,i+j+n)$, and $C(\alpha,-1)=1$ if $\alpha =-1$ and $C(\alpha,-1)=0$ if $\alpha\neq -1$. \end{theorem}
\begin{proof} In the matrix $M_n$, by theorem \ref{thm:anti-diagonal}, the numbers of the first two nonzero sequences parallel to the anti-diagonal are $C(n,t)$, $t\in\{0,1,\cdots,n\}$ and $C(n+1,s)$, $s\in\{0,1,\cdots,n+1\}$ respectively. Suppose that $m^n_{i,j}$ is in $S^n_{\ell}$, $\ell\ge 2$; then the leading term $LT(S^n_{\ell})=m^n_{x,y}$ satisfies \[2 \mid [(x+y)-(i+j)].\] Hence $LT(S^n_{\ell})=C(k,(k+j-i)/2)$ where \[k=n-1+\gcd(2,i+j+n+1) \quad\textrm{and}\quad \ell=k+j-i+\gcd(2,i+j+n).\] Finally, since $S^n_{\ell}$ is an arithmetic sequence of order $\lfloor(\ell-2)/2\rfloor$ and $m^n_{i,j}$ is the $\{\lfloor(i+j-k)/2\rfloor+1\}^{\rm{th}}$ term of $S^n_{\ell}$, we have \begin{equation}\nonumber \begin{aligned}
m^n_{i,j}&=C(k,\frac{k+j-i}{2})\sum_{p\ge 0}C(\lfloor{\frac{i+j-k}{2}}\rfloor,p)\times C(\lfloor{\frac{\ell-2}{2}}\rfloor,\lfloor{\frac{\ell-2}{2}}\rfloor-p)\\
&=C(k,\frac{k+j-i}{2})\times C(\lfloor{\frac{\ell-2}{2}}\rfloor+\lfloor{\frac{i+j-k}{2}}\rfloor,\lfloor\frac{\ell-2}{2}\rfloor). \end{aligned} \end{equation} \end{proof}
\section{Existence of a path corresponding to a given vector} Given a vector $(x,y,z)\in\mathbb{Z}^3$, there is a path corresponding to $(x,y,z) $ if and only if $m^x_{y,z}=N(x,y,z)\neq 0$. So we can use theorem \ref{thm:closed form} to determine whether there is a path corresponding to a given vector or not. However, in this section, we will give a better criterion.
\begin{definition} For every alternating word $w$, let $P_w$ denote the path corresponding to $w$. \end{definition}
\begin{definition} For alternating words $w_1,w_2$, we say $P_{w_1}$ and $P_{w_2}$ are \emph{connectable} if $w_1 w_2$ is an alternating word. In this case, we denote the path corresponding to $w_1 w_2$ by $[P_{w_1},P_{w_2}]$. \end{definition}
\begin{lemma}\label{lem:connected} The following pairs of paths are connectable: \[(P_{21},P_{32}),\ (P_{32},P_{31}),\ (P_{21},P_{21}),\ (P_{32},P_{32}),\ (P_{31},P_{31}),\ (P_{21},P_{31}).\] \end{lemma}
\begin{proof} $2132,3231,2121,3232,3131$, and $2131$ are alternating words. Figure $17$ gives the connected paths of these pairs in order. \end{proof}
\begin{figure}
\caption{Connected paths of $6$ pairs of paths}
\end{figure}
\begin{theorem} \label{thm:even case} For $(a_1,a_2,a_3)\in\mathbb{Z}^3$ with $a_1,a_2,a_3\ge 0$ and $a_1+a_2+a_3$ being even, there is a path corresponding to $(a_1,a_2,a_3)$ if and only if \begin{equation} \begin{cases} a_1+a_2\ge a_3\\ a_2+a_3\ge a_1\\ a_3+a_1\ge a_2. \end{cases} \label{cond:even} \end{equation} \end{theorem}
\begin{proof} Suppose $(a_1,a_2,a_3)$ is a vector satisfying the above condition. Consider the equation \[ x(1,1,0)+y(0,1,1)+z(1,0,1)=(a_1,a_2,a_3)\] and its only solution \[\begin{cases} x=(a_1+a_2-a_3)/2\\ y=(-a_1+a_2+a_3)/2\\ z=(a_1-a_2+a_3)/2. \end{cases}\] The hypothesis implies that $x,y,z$ are nonnegative integers. By lemma \ref{lem:connected}, we can construct a path by connecting $x$ copies of $P_{21}$, $y$ copies of $P_{32}$ and $z$ copies of $P_{31}$.
Conversely, let $P$ be a path with $2k$ reflections and $v(P)=(a_1,a_2,a_3)$. Notice that $P$ can be written in the form \[[P_{w_1},P_{w_2},\cdots,P_{w_k}],\] where $w_i$ is an alternating word of length $2$ over the alphabet $\{1,2,3\}$. Since $v(P_{w_i})=(1,1,0)$ or $(0,1,1)$ or $(1,0,1)$, there are integers $x,y,z\ge 0$ such that \[(a_1,a_2,a_3)=x(1,1,0)+y(0,1,1)+z(1,0,1).\] Hence \[\begin{cases} a_1+a_2=2x+y+z\ge y+z=a_3\\ a_2+a_3=x+2y+z\ge x+z=a_1\\ a_1+a_3=x+y+2z\ge x+y=a_2. \end{cases}\] \end{proof}
\begin{corollary}\label{thm:odd case} For $(a_1,a_2,a_3)\in\mathbb{Z}^3$ with $a_1,a_2,a_3\ge 0$ and $a_1+a_2+a_3$ being odd, there is a path corresponding to $(a_1,a_2,a_3)$ if and only if one of the following holds: \begin{enumerate}
\item $\begin{cases}
a_1+a_2\ge a_3\\
a_2+a_3\ge a_1\\
a_3+a_1\ge a_2.
\end{cases}$
\item There exists an integer $\alpha\ge 0$ such that $(a_1,a_2,a_3)=(\alpha,\alpha+1,0)$. \end{enumerate} \end{corollary}
Recall from definition \ref{def} that $b^n_m$ is the number of distinct vectors corresponding to some paths with exactly $m$ reflections. Using theorem \ref{thm:even case} and corollary \ref{thm:odd case}, we obtain closed forms of the statistic $b^3_m$.
\begin{theorem} For integer $n\ge 1$, we have \begin{enumerate}
\item $b^3_{2n}=C(n+2,2)$.
\item $b^3_{2n+1}=C(n+2,2)+1$. \end{enumerate} \end{theorem}
\begin{proof} By theorem \ref{thm:even case}, a vector $(a_1,a_2,a_3)$ corresponds to a path with $2n$ reflections if and only if \[\begin{cases}a_1+a_2\ge a_3\\ a_2+a_3\ge a_1\\ a_3+a_1\ge a_2 \end{cases}\] which is equivalent to $a_i\le n$ for $i=1,2,3$. In this case, by the principle of inclusion and exclusion, the number of such vectors $(a_1,a_2,a_3)$ is \[C(2n+2,2)-3C(n+1,2)=(n+1)(2n+1)-3\frac{(n+1)n}{2}=C(n+2,2).\] A similar argument works for $b^3_{2n+1}$. \end{proof}
\end{document} | arXiv | {
"id": "2106.06864.tex",
"language_detection_score": 0.6381264925003052,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{abstract} We discuss combinatorial conditions for the existence of various types of reductions between equivalence relations, and in particular identify necessary and sufficient conditions for the existence of injective reductions. \end{abstract}
\title{Combinatorics of Reductions between Equivalence Relations}
\section{Introduction}
Let $E$ and $F$ be equivalence relations on sets $X$ and $Y$, respectively. A \emph{homomorphism} from $E$ to $F$ is a function $\phi:X\to Y$ such that for all $x,x'\in X$, $x\mathrel{E}x'$ implies $\phi(x)\mathrel{F}\phi(x')$. A homomorphism $\phi$ from $E$ to $F$ induces a map $\tilde{\phi}:X/E\to Y/F$ between the quotients defined by $\tilde{\phi}([x]_E)=[\phi(x)]_F$. We obtain special kinds of homomorphisms by requiring $\phi$ or $\tilde{\phi}$ to have certain properties such as being one-to-one or onto. For instance if $\tilde{\phi}$ is one-to-one, then $\phi$ is called a \emph{reduction}. In this note we study the combinatorics of reductions between equivalence relations, and attempt to identify necessary and sufficient conditions for the existence of reductions of various natural types. We will see that certain types admit simple combinatorial characterizations while others do not. Our main results are a necessary and sufficient condition for the existence of an injective reduction from $E$ to $F$ and a complete diagram of implications between the various types of reducibility that we consider. We work in the purely set-theoretic context without making any definability assumptions on equivalence relations or reductions.
Many of the combinatorial problems we consider may be viewed as special instances of the general matching problem addressed in \cite{ANS}. However, it is not easy to apply the abstract framework of \cite{ANS} to our context, and we give a comparatively simple proof of Theorem \ref{thm:main} below.
\section{Reductions of Equivalence Relations}
We now define the various types of homomorphisms that we will consider. Let $E$ and $F$ be equivalence relations on sets $X$ and $Y$, respectively, let $\phi:X\to Y$ be a homomorphism from $E$ to $F$, and let $\tilde{\phi}$ be the induced map on classes. We consider the following properties of the maps $\phi$ and $\tilde{\phi}$: \begin{enumerate}
\item[(i)] $\phi$ is one-to-one;
\item[(ii)] $\phi$ is onto;
\item[(iii)] $\tilde{\phi}$ is one-to-one;
\item[(iv)] $\tilde{\phi}$ is onto;
\item[(v)] $\mbox{ran}(\phi)$ is \emph{$F$-invariant}; i.e., if $y\in\mbox{ran}(\phi)$ and $y\mathrel{F}y'$ then $y'\in\mbox{ran}(\phi)$. \end{enumerate} It is straightforward to check that the only implications holding between these properties are those following from the fact that $\phi$ is onto if and only if $\tilde{\phi}$ is onto and $\mbox{ran}(\phi)$ is $F$-invariant. It follows that there are 16 distinct Boolean combinations of these properties. Since we will always take $\phi$ to be a reduction (i.e., we assume (iii) holds), this reduces the number of distinct combinations to 8. We now introduce terminology and notation for these 8 types of reductions.
\begin{definition} Let $E$, $F$, $\phi$, and $\tilde{\phi}$ be as above. \begin{enumerate}
\item $\phi$ is a \emph{reduction} if (iii) holds;
\item $\phi$ is an \emph{embedding} if (i) and (iii) hold;
\item $\phi$ is a \emph{surjective reduction} if (ii) -- (v) hold;
\item $\phi$ is an \emph{isomorphism} if (i) -- (v) hold
\item $\phi$ is an \emph{invariant reduction} if (iii) and (v) hold;
\item $\phi$ is a \emph{full reduction} if (iii) and (iv) hold;
\item $\phi$ is an \emph{invariant embedding} if (i), (iii), and (v) hold;
\item $\phi$ is a \emph{full embedding} if (i), (iii), and (iv) hold. \end{enumerate} \end{definition}
\begin{definition} If $E$, $F$ are equivalence relations on sets $X$, $Y$, we say that $E$ is \emph{reducible} to $F$ and write $E\leq F$ if there is a reduction from $E$ to $F$, and we say that $E$ and $F$ are \emph{bireducible} and write $E\sim F$ if $E\leq F$ and $F\leq E$. We introduce analogous terminology and notation for the other types of reductions as follows: \[ \hspace{-52mm} \arraycolsep=8mm \begin{array}{lcc} \text{(1) \ \emph{reducible}} & \leq & \sim \\ \text{(2) \ \emph{embeddable}} & \sqsubseteq & \approx \\ \text{(3) \ \emph{surjectively reducible}} & \ensuremath{\preccurlyeq} & \ensuremath{\preccurlyeq\succcurlyeq} \\ \text{(4) \ \emph{isomorphic}} & \cong & \cong \\ \text{(5) \ \emph{invariantly reducible}} & \leq^i & \sim^i \\ \text{(6) \ \emph{fully reducible}} & \leq^f & \sim^f \\ \text{(7) \ \emph{invariantly embeddable}} & \sqsubseteq^i & \approx^i \\ \text{(8) \ \emph{fully embeddable}} & \sqsubseteq^f & \approx^f \end{array} \] \end{definition}
We display all the direct implications between these relations in Figures \ref{FIG1} and \ref{FIG2}, and we include a proof of Proposition \ref{prop:complete} at the end of the paper.
\begin{proposition}\label{prop:complete} The diagrams in Figures \ref{FIG1} and \ref{FIG2} are complete; that is, in each diagram, for every pair of nodes $A$ and $B$, the implication $A\Rightarrow B$ holds if and only if it is implied by the arrows in the diagram. \end{proposition}
\noindent Note, however, that certain implications involving more than two relations may not be displayed in the figures; for instance, the fact that $E\leq F\wedge F\leq E\Rightarrow E\leq^fF$ is not displayed in Figure \ref{FIG1}.
\begin{figure}
\caption{Implications between types of reducibility}
\label{FIG1}
\end{figure}
\begin{figure}
\caption{Implications between equivalences on the class of equivalence relations}
\label{FIG2}
\end{figure}
\section{The Main Theorem}
Now we consider the problem of finding necessary and sufficient combinatorial conditions for the existence of reductions of the various types between equivalence relations.
\begin{definition} Given an equivalence relation $E$ and a (possibly finite) cardinal $\kappa$, let $\ensuremath{\mathfrak{n}}_\kappa(E)$ be the number of $E$-classes of cardinality $\kappa$. Similarly, let $\ensuremath{\mathfrak{n}}_{\geq\kappa}(E)$ be the number of $E$-classes of size at least $\kappa$ and $\ensuremath{\mathfrak{n}}_{\leq\kappa}(E)$ the number of $E$-classes of size at most $\kappa$. \end{definition}
\begin{theorem} Let $E$ and $F$ be equivalence relations on sets $X$ and $Y$, respectively. Then \begin{enumerate}
\item $E\leq F \ \Longleftrightarrow \ |X/E|\leq |Y/F|$;
\item $E\sqsubseteq F \ \Longleftrightarrow \ (\forall\kappa) \, \ensuremath{\mathfrak{n}}_{\geq\kappa}(E)\leq\ensuremath{\mathfrak{n}}_{\geq\kappa}(F)$;
\item $E\ensuremath{\preccurlyeq} F \ \Longrightarrow \ (\forall\kappa)\, [\,\ensuremath{\mathfrak{n}}_{\leq\kappa}(E)\leq\ensuremath{\mathfrak{n}}_{\leq\kappa}(F) \; \wedge \; \ensuremath{\mathfrak{n}}_{\geq\kappa}(E)\geq\ensuremath{\mathfrak{n}}_{\geq\kappa}(F)\,]$;
\item $E\cong F \ \Longleftrightarrow \ (\forall\kappa)\, \ensuremath{\mathfrak{n}}_{\kappa}(E)=\ensuremath{\mathfrak{n}}_{\kappa}(F)$;
\item $E\leq^i F \ \Longrightarrow \ (\forall\kappa)\, \ensuremath{\mathfrak{n}}_{\leq\kappa}(E)\leq\ensuremath{\mathfrak{n}}_{\leq\kappa}(F)$;
\item $E\leq^f F \ \Longleftrightarrow \ |X/E|=|Y/F|$;
\item $E\sqsubseteq^i F \ \Longleftrightarrow \ (\forall\kappa)\, \ensuremath{\mathfrak{n}}_{\kappa}(E)\leq\ensuremath{\mathfrak{n}}_{\kappa}(F)$;
\item $E\sqsubseteq^f F \ \Longleftrightarrow \ F\ensuremath{\preccurlyeq} E$. \end{enumerate} \label{thm:main} \end{theorem}
The bi-implications (1), (4), (6), and (7) are trivial to prove, as are the forward implications in (2), (3), and (5). The backward direction of (2) appears to be somewhat harder, and is our main result. Additionally we will show that the necessary conditions given in (3) and (5) are not sufficient, and we argue that there are no simple combinatorial conditions characterizing the surjective or invariant reducibility of $E$ to $F$.
Now we present our proof of (2), which will make use of the following lemma.
\begin{lemma}\label{lem} Let $\kappa$ be an infinite cardinal, and $A$ the class of ordinals that can be partitioned into $\kappa$ many cofinal subsets. Then $A$ is closed. \end{lemma}
\begin{proof} Let $\gamma$ be a limit point of $A$, and let $\langle \gamma_\alpha : \alpha < \textnormal{cf}( \gamma ) \rangle$ be a continuous increasing sequence of elements of $A$ with limit $\gamma$. For each $\alpha < \textnormal{cf}(\gamma)$, let $\{ P_\nu^\alpha : \nu < \kappa \}$ be a partition of $\gamma_\alpha$ into $\kappa$ many cofinal subsets. For each $\nu < \kappa$, define \[ P_\nu := \bigcup_{\alpha < \textnormal{cf}(\gamma)} (P_\nu^{\alpha+1} - \gamma_\alpha). \] The set $\{ P_\nu : \nu < \kappa \}$ is a partition of $\gamma$ into $\kappa$ many cofinal subsets. \end{proof}
Note that for an ordinal $\gamma$ and infinite cardinal $\kappa$, $\gamma$ may be partitioned into $\kappa$ many cofinal subsets iff $\gamma=\kappa\cdot\alpha$ for some ordinal $\alpha$.
\begin{proof}[Proof of Theorem \ref{thm:main} (2)] The forward direction is clear. For the backward direction, we must show that there exists an injective function $\phi : X \to Y$ such that \[ (\forall x, x' \in X)\; x \ E \ x' \Leftrightarrow \phi(x) \ F \ \phi(x') \] under the assumption that \[ (\forall \kappa)\;\ensuremath{\mathfrak{n}}_{\ge \kappa}(E) \, \le \, \ensuremath{\mathfrak{n}}_{\ge \kappa}(F). \]
Let us begin by fixing an enumeration $\langle C_\xi : \xi < \alpha \rangle$ of the $E$-classes such that for all $\xi < \eta < \alpha$, $|C_\xi| \le |C_\eta|$, as well as an enumeration $\langle D_\xi : \xi < \beta \rangle$ of the $F$-classes such that for all $\xi < \eta < \beta$, $|D_\xi| \le |D_\eta|$. Notice that since $\ensuremath{\mathfrak{n}}_{\ge 1}(E) \le \ensuremath{\mathfrak{n}}_{\ge 1}(F)$, we have $|\alpha| \leq |\beta|$.
It is not difficult to see that there exists an appropriate injection as long as $|\alpha|$ is finite,
so for the remainder of the proof we assume $|\alpha|$ is infinite. Indeed, as an inductive hypothesis, assume we have proven the theorem for every pair of equivalence relations $(E', F')$ satisfying \[ (\forall \kappa)\; \ensuremath{\mathfrak{n}}_{\ge \kappa}(E') \, \le \, \ensuremath{\mathfrak{n}}_{\ge \kappa}(F') \]
such that the number of $E'$-classes is $< |\alpha|$.
Since $|\alpha| \le |\beta| \le \beta$, there is at least one ordinal $\gamma \le \beta$ that can be partitioned into $|\alpha|$ many cofinal subsets. By Lemma \ref{lem}, there is a largest such $\gamma \le \beta$, which we fix. We first claim that $|\beta - \gamma| < |\alpha|$. If not, let $\delta$ be the least ordinal such that $\gamma+\delta=\beta$, so that $|\delta|=|\beta-\gamma|$. Then \[
\gamma+|\alpha| \ \leq \ \gamma+|\beta-\gamma| \ = \ \gamma+|\delta| \ \leq \ \gamma+\delta \ = \ \beta, \] contradicting the choice of $\gamma$.
Let $\sigma < \alpha$ be the least ordinal such that $(\forall \xi < \gamma)\,|C_\sigma| > |D_\xi|$ if such an ordinal exists, and let $\sigma = \alpha$ otherwise. Hence, for each $\nu < \sigma$ there is some $\xi' < \gamma$ such that $|C_\nu| \le |D_{\xi'}|$. Let $\{ P_\nu : \nu < \sigma \}$ be a partition of $\gamma$ into cofinal subsets (such a partition exists because $\gamma$ can be partitioned into $|\alpha|$ many cofinal subsets and $\sigma \le \alpha$).
Given any $\nu < \sigma$, we may pick a $\xi' < \gamma$ such that $|C_\nu| \le |D_{\xi'}|$, and then we may pick a $\xi \in P_\nu$ such that $\xi' \le \xi$ (so $|D_{\xi'}| \le |D_{\xi}|$). Hence, \[
(\forall \nu < \sigma)(\exists \xi \in P_\nu)\; |C_\nu| \le |D_\xi|. \] Because of this, we may easily define an injection $\phi_1$ from $X_1 := \bigcup_{\nu < \sigma} C_\nu$ to $Y_1 := \bigcup_{\xi < \gamma} D_\xi$ such that \[ (\forall x,x' \in X_1)\; x \ E \ x' \ \Leftrightarrow \ \phi_1(x) \ F \ \phi_1(x'). \]
If $\sigma = \alpha$ we are done, so assume $\sigma < \alpha$. Consider the sets \[ X_2 := \bigcup_{\sigma \le \nu < \alpha} C_\nu \quad \mbox{and} \quad Y_2 := \bigcup_{\gamma \le \xi < \beta} D_\xi. \]
Let $E' := E \restriction X_2$ and $F' := F \restriction Y_2$. Since $|\beta - \gamma| < |\alpha|$, by the definition of $\sigma$ and the hypothesis that $\ensuremath{\mathfrak{n}}_{\ge |C_\sigma|}(E) \le \ensuremath{\mathfrak{n}}_{\ge |C_\sigma|}(F)$ we have that $|\alpha - \sigma| < |\alpha|$. That is, there are $< |\alpha|$ many $E'$-classes. Also notice that $(\forall \kappa)\, \ensuremath{\mathfrak{n}}_{\ge \kappa}(E') \le \ensuremath{\mathfrak{n}}_{\ge \kappa}(F')$. We may now apply the inductive hypothesis to obtain an injective reduction $\phi_2$ from $E'$ to $F'$. At this point we are finished, since \[ \phi\, :=\, \phi_1 \cup \phi_2 \]
is an injective reduction from $E$ to $F$. \end{proof}
\section{Counterexamples}
In this section we present some examples to show that the necessary conditions given in Theorem \ref{thm:main} for the existence of invariant and surjective reductions are not sufficient, and we argue that for these types of reducibility, no nice necessary and sufficient conditions exist.
\begin{example}\label{ex:1} Let $E$ and $F$ be equivalence relations each having exactly one equivalence class of size $n$ for each $1\leq n<\omega$ and no additional classes except that $E$ has exactly one class of size $\aleph_0$. Then for all cardinals $\kappa$ we have $\ensuremath{\mathfrak{n}}_{\leq\kappa}(E)\leq\ensuremath{\mathfrak{n}}_{\leq\kappa}(F)$ and $\ensuremath{\mathfrak{n}}_{\geq\kappa}(E)\geq\ensuremath{\mathfrak{n}}_{\geq\kappa}(F)$, but clearly there can be no invariant reduction from $E$ to $F$. \end{example}
To dispel the impression that finite cardinals are the sole source of the problem, we give another counterexample in which $\ensuremath{\mathfrak{n}}_\kappa(E)$ and $\ensuremath{\mathfrak{n}}_\kappa(F)$ are either 0 or infinite for all $\kappa$. Our construction uses Fodor's Lemma, which is typical for the uncountable case of the matching problem (see, for instance, \cite[Lemma 4.9]{ANS}).
\begin{example} There exist equivalence relations $E$ and $F$ such that \begin{itemize} \item[(1)] for all cardinals $\kappa$, $\ensuremath{\mathfrak{n}}_\kappa(E)$ and $\ensuremath{\mathfrak{n}}_\kappa(F)$ are either 0 or $\aleph_0$; \item[(2)] $(\forall \kappa)\, \ensuremath{\mathfrak{n}}_{\le \kappa}(E) = \ensuremath{\mathfrak{n}}_{\le \kappa}(F)$; \item[(3)] $(\forall \kappa)\, \ensuremath{\mathfrak{n}}_{\ge \kappa}(E) = \ensuremath{\mathfrak{n}}_{\ge \kappa}(F)$; \item[(4)] $E\not\leq^iF$, and hence also $E\not\ensuremath{\preccurlyeq} F$. \end{itemize} \label{ex:2} \end{example}
\begin{proof} It suffices to specify $\ensuremath{\mathfrak{n}}_\kappa(E)$ and $\ensuremath{\mathfrak{n}}_\kappa(F)$ for each cardinal $\kappa$. Let $\ensuremath{\mathfrak{n}}_1(E) = \aleph_0$ and $\ensuremath{\mathfrak{n}}_{\aleph_{\alpha}}(E) = \aleph_0$ for every limit ordinal $\alpha < \omega_1$, and let $\ensuremath{\mathfrak{n}}_\kappa(E) = 0$ for every other cardinal $\kappa$. Let $\ensuremath{\mathfrak{n}}_1(F) = \aleph_0$ and $\ensuremath{\mathfrak{n}}_{\aleph_{\alpha+1}}(F) = \aleph_0$ for every limit ordinal $\alpha < \omega_1$, and let $\ensuremath{\mathfrak{n}}_\kappa(F) = 0$ for every other cardinal $\kappa$.
It is clear that conditions (1) through (3) are satisfied. Suppose, towards a contradiction, that $\phi$ is an invariant reduction from $E$ to $F$. For every limit ordinal $\alpha < \omega_1$, $\phi$ maps each $E$-class of size $\aleph_{\alpha}$ onto an $F$-class of size $< \aleph_{\alpha}$. For each limit ordinal $\alpha < \omega_1$, arbitrarily pick some $E$-class $C_\alpha$ of size $\aleph_\alpha$. Hence, the function $\phi$ maps each class $C_\alpha$ onto some $F$-class of size $\aleph_{g(\alpha)}$ for some $g(\alpha) < \alpha$. We have now defined a regressive function $g$ from the (stationary) set of limit ordinals less than $\omega_1$ to $\omega_1$. By Fodor's Lemma, $g$ is constant on some stationary set. This means that there is some $\beta < \omega_1$ such that $\phi$ maps $\omega_1$ many $E$-classes onto $F$-classes of size $\aleph_\beta$. Since there are at most $\aleph_0$ many $F$-classes of size $\aleph_\beta$, we have a contradiction. \end{proof}
Examples \ref{ex:1} and \ref{ex:2} suggest that in general there is no ``nice'' combinatorial characterization of the existence of an invariant or surjective reduction from one equivalence relation to another, and we now describe one way of making this precise. Define a \emph{nice condition} to be a conjunction of statements of the form ``for all cardinals $\kappa$, $a\mathrel{R}b$,'' where $a$ is one of the four terms \[
\ensuremath{\mathfrak{n}}_{\kappa}(E), \ \ensuremath{\mathfrak{n}}_{\leq\kappa}(E), \ \ensuremath{\mathfrak{n}}_{\geq\kappa}(E), \ |X/E|, \] $b$ is one of the four terms \[
\ensuremath{\mathfrak{n}}_{\kappa}(F), \ \ensuremath{\mathfrak{n}}_{\leq\kappa}(F), \ \ensuremath{\mathfrak{n}}_{\geq\kappa}(F), \ |Y/F|, \] and $R$ is one of the six relations \[ \leq, \ \geq, \ =, \ \ne, \ <, \ >. \] The proof of the following proposition is tedious but not difficult, and we omit it.
\begin{proposition} Every nice condition which is implied by $E\leq^iF$ follows from the condition \[ (\forall\kappa)\; \ensuremath{\mathfrak{n}}_{\leq\kappa}(E) \, \leq \, \ensuremath{\mathfrak{n}}_{\leq\kappa}(F), \] and every nice condition which is implied by $E\ensuremath{\preccurlyeq} F$ follows from the condition \[ (\forall\kappa)\; [\,\ensuremath{\mathfrak{n}}_{\leq\kappa}(E)\leq\ensuremath{\mathfrak{n}}_{\leq\kappa}(F) \; \wedge \; \ensuremath{\mathfrak{n}}_{\geq\kappa}(E)\geq\ensuremath{\mathfrak{n}}_{\geq\kappa}(F)\,]. \] \end{proposition}
\noindent In this sense parts (3) and (5) of Theorem \ref{thm:main} are optimal, and Examples \ref{ex:1} and \ref{ex:2} show that none of the relations $E\ensuremath{\preccurlyeq} F$, $E\leq^iF$, and $E\sqsubseteq^fF$ can be characterized by a nice condition.
\section{Completeness of the Diagrams}
In this final section we prove Proposition \ref{prop:complete}.
\noindent \emph{Proof that the diagram in Figure \ref{FIG1} is correct and complete}. All displayed implications follow immediately from the definitions, so we need only show that there are no additional implications. We will show that for every node $A$ in the diagram, there is no implication of the form $A\Rightarrow B$ that is not displayed. For the top node $E\cong F$ this is vacuous. By symmetry, it will suffice to consider the seven nodes on the left half of the diagram. We will accomplish this using the following seven pairs of equivalence relations, which are described as follows: $\langle n_1,\ldots,n_m\rangle$ denotes the equivalence relation having for each $1\leq k\leq m$ exactly $n_k$ equivalence classes of size $k$ and no others.
\begin{enumerate}
\item $E=\langle 1\rangle$, $F=\langle 2\rangle$;
\item $E=\langle 1\rangle$, $F=\langle 0,1\rangle$;
\item $E=\langle 0,1\rangle$, $F=\langle 1\rangle$;
\item $E=\langle 1\rangle$, $F=\langle 0,2\rangle$;
\item $E=\langle 0,1\rangle$, $F=\langle 2\rangle$;
\item $E=\langle 1,0,1\rangle$, $F=\langle 0,2\rangle$;
\item $E=\langle 1,0,1\rangle$, $F=\langle 0,3\rangle$. \end{enumerate}
\noindent (1) shows that $E\sqsubseteq^iF$ does not imply $F\leq E$. (2) shows that $E\sqsubseteq^fF$ implies neither $E\leq^iF$ nor $F\sqsubseteq E$. (3) shows that $E\ensuremath{\preccurlyeq} F$ implies neither $E\sqsubseteq F$ nor $F\leq^iE$. (4) shows that $E\sqsubseteq F$ implies neither $E\leq^iF$ nor $F\leq E$. (5) shows that $E\leq^iF$ implies neither $E\sqsubseteq F$ nor $F\leq E$. (6) shows that $E\leq^fF$ implies none of $E\sqsubseteq F$, $E\leq^iF$, $F\sqsubseteq E$, and $F\leq^iE$. Finally, (7) shows that $E\leq F$ implies none of $E\sqsubseteq F$, $E\leq^iF$, and $F\leq E$. These observations suffice to establish the completeness of the diagram in Figure \ref{FIG1}.
\noindent \emph{Proof that the diagram in Figure \ref{FIG2} is correct and complete}. That $E\sim F\Rightarrow E\sim^fF$ is clear, and the fact that $E\approx^iF\Rightarrow E\cong F$ is well-known and follows from the standard Schr\"{o}der-Bernstein argument. The remaining displayed implications follow immediately from the implications in Figure \ref{FIG1}, so it only remains to show that there are no additional implications. For this it suffices to show the following: \[ \begin{array}{llll} (1) & E\ensuremath{\preccurlyeq\succcurlyeq} F & \not\Rightarrow & E\cong F; \\ (2) & E\approx F & \not\Rightarrow & E\sim^iF; \\ (3) & E\sim^i F & \not\Rightarrow & E\approx F; \\ (4) & E\sim F & \not\Rightarrow & E\sim^iF; \\ (5) & E\sim F & \not\Rightarrow & E\approx F. \end{array} \] This may be done using the following equivalence relations, which have no classes other than those described. \begin{enumerate}
\item $E$ has one class of size $n$ for each even integer $n$, $F$ has one class of size $n$ for each odd integer $n\geq 3$, and both $E$ and $F$ have $\aleph_0$ many classes of size 1.
\item $E$ has $\aleph_0$ many classes of size $\aleph_0$ and one class of size 1; $F$ has $\aleph_0$ many classes of size $\aleph_0$ and one class of size 2.
\item Both $E$ and $F$ have $\aleph_0$ many classes of size 1, and $E$ has one class of size 2.
\item $E$ has one class of size 1, $F$ has one class of size 2.
\item Same as (4). \end{enumerate}
\thebibliography{99}
\bibitem[ANS]{ANS} R.\ Aharoni, C.\ St.\ J.\ A.\ Nash-Williams, and S.\ Shelah, \textit{A general criterion for the existence of transversals}, Proceedings of the London Mathematical Society (3) 47 (1983), 43--68.
\end{document} | arXiv | {
"id": "1409.2810.tex",
"language_detection_score": 0.6975107192993164,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\thispagestyle{empty} \rightline{\large\baselineskip16pt\rm\vbox to20pt{\hbox{OCHA-PP-53}
\vss}} \vskip15mm \begin{center} {\fontA Spontaneous dissipation from generalized radiative corrections } \end{center} \begin{center} {\sc Masahiro~MORIKAWA } \\[2mm] {\it Department of Physics, Ochanomizu university \\[2mm] 1-1, Otsuka 2, Bunkyo-ku, Tokyo 112 JAPAN \\ e-mail: hiro@phys.ocha.ac.jp} \vskip1truecm ({\it received ~~~~~~~~~~~~~~~ 1995}) \end{center} \begin{abstract} We derive dissipative effective Hamiltonian for the unstable Lee model without any ad hoc coarse graining procedure. Generalized radiative corrections, utilizing the in-in formalism of quantum field theory, automatically yield irreversibility as well as the decay of quantum coherence. Especially we do not need to extend the ordinary Hilbert space for describing the intrinsically dissipative system if we use the generalized in-in formalism of quantum field theory. \hfil\break\noindent pacs numbers: 05.70.Ln,~~ 03.70.+k ,~~ 82.20.Mj,~~ 11.10.Wx
\end{abstract} \vskip1cm
\section{Introduction}
Understanding the irreversibility in the macroscopic world is one of the most attractive issues in physics. Especially a consistent derivation of the irreversible dynamics from the more fundamental microscopic laws of physics would be the central issue. Our ill fortune is that the most microscopic physics are strictly reversible and a simple application of them never yields irreversibility. \par A popular approach to obtain irreversibility will be to consider an open system\cite{li86}: We decompose the total closed system into a relevant system and the remaining environmental degrees of freedom. Then by {\it coarse graining} the environmental degrees of freedom with appropriate initial conditions (projection), we obtain effective dynamics for the relevant system. The irreversibility stems from the information loss of the system into the environment. Although this pragmatic procedure is widely used in the literature, the qualitative dissipative nature in general depends on how we set the separation of the total system and on the coarse graining procedure. Surely this is an unfavorable feature of the theory; the irreversibility is an intrinsic nature of the system and should not be affected by the method of description. In this paper, we would like to demonstrate that for certain systems, it is possible to derive intrinsic irreversibility without specifying the separation and the coarse graining methods.
\par It is obvious that not all the systems show irreversibility. Then what is the essential difference between reversible and irreversible systems? According to our experience, a system should, at least, be {\it unstable} for it to show irreversibility. It is manifest that a stable state cannot change further and shows no irreversibility. On the other hand the unstable system cannot persist on the initial state and eventually decays into much stable state if any. However instability will not be the sufficient condition for the irreversibility. If the stable-unstable transition is simple and the system has finite recursion time, then the system cannot be irreversible. In order to obtain the infinite recursion time, we need {\it infinite number of degrees of freedom or chaos}\cite{kubotani95}. Another necessary condition for irreversibility will be a {\it natural averaging} procedure whatever it is implicit or explicit. One such averaging procedure will be the radiative corrections in quantum theory. Esp
a) instability b) infinite degrees of freedom and c) natural averaging, are sufficient conditions for the irreversibility, we try to use a simple model which satisfies all the above conditions. It is the unstable Lee model\cite{lee54}. This simple model of quantum field theory is exactly solved and was used in the argument of renormalizations. In this article, we use this unstable Lee model and demonstrate a possible origin of the intrinsic dissipative dynamics without any ad hoc coarse graining procedure. \par The {\it instability} of the system is considered to be an essential ingredient for the emergence of intrinsic dissipativity. However, an unstable system can be described by hermitian Hamiltonian which includes no dissipativity nor irreversibility at least in appearance. There is a long history of the study on unstable states and its decay in quantum mechanics, in particle physics \cite{nakanishi58} \cite{sudarshan78} and in statistical mechanics \cite{petrosky91} \cite{antoniou93}. They faced with the complex eigenvalue for the Hamiltonian, and therefore extended the Hilbert space so that to maintain the hermiticity of the Hamiltonian. In this procedure they abandon the usual Hilbert space (a space of square integrable functions) and introduced a rigged Hilbert space (a space of distributions). According to this method, a pair of dual spaces $\Psi_+^{\dagger}$ and $\Psi_-^{\dagger}$ \cite{antoniou93} is necessary. They are dual of the space of boundary functions which are analytic in the lower (upper
\par We will not use this extended Hilbert space approach in the present article in order to describe the intrinsic dissipative nature of the unstable system. However, we use the generalized in-in formalism of quantum field theory \cite{schwinger61} \cite{keldysh64} \cite{chou85}, in which dissipative and irreversible properties are consistently incorporated through radiative corrections\cite{morikawa86} \cite{morikawa95}. In the ordinary quantum field theory (in-out formalism), ultra-violet divergence in the quantum system of infinite degrees of freedom necessitates the renormalization of the parameters in the Hamiltonian through the process of radiative corrections. This radiative correction is a special kind of averaging, which does not directly yield irreversibility in the stable theory. If the system is unstable, this process yields complex poles in the retarded propagator. The location of a pole is interpreted as the decay strength or the inverse of the lifetime. This dissipativity was not present in
\par In this paper, we first review the unstable Lee model in the usual treatment in the next section \S 2. Then we study the same model in the in-in formalism of quantum field theory in \S 3 and derive a Langevin equation for the fields. We further derive the effective Hamiltonian which describes irreversible dynamics in \S 4 and show that the linear entropy automatically increases. The last section \S 5 is for discussions and summary of our work.
\section{Unstable Lee model}
\par Let us begin our argument with the unstable Lee model in the usual treatment. The system is composed of two kinds of non-relativistic fermions ${\bf N}$ and ${\bf V}$ and one boson field ${\mbox{\boldmath $\theta$}}$.
Hamiltonian of the Lee model is given by \begin{eqnarray} {\bf H}&=&{\bf H}_0 + {\bf H}_{int} \nonumber \\ {\bf H}_0&=&m_V^0 \int {d\vec p \over (2\pi)^3} {\bf V}_{\vec p}^{\ast} {\bf V}_{\vec p} + m_N \int {d\vec p \over (2\pi)^3}{\bf N}_{\vec p}^{\ast} {\bf N}_{\vec p} +\int {d\vec k \over (2\pi)^3} \omega_{\vec k}\mbox{\boldmath $\theta$}_{\vec k}^{\ast} \mbox{\boldmath $\theta$}_{\vec k}\nonumber \\ {\bf H}_{\rm int}&=&\lambda_0 \int {d\vec k \over (2\pi)^3 \sqrt{2 \omega_{\vec k}}} \int {d\vec p \over (2\pi)^3} {f(\omega_{\vec k})} ({\bf V}_{\vec p}^{\ast} {\bf N}_{\vec p -\vec k}\mbox{\boldmath $\theta$}_{\vec k} + {\bf h.c.}), \label{bhamiltonian} \end{eqnarray} where $\omega_{\vec k}=\sqrt{\vec k^2 +\mu^2}$. The states
$| N_{\vec p}\rangle \equiv {\bf N}_{\vec p}^{\ast}|0\rangle$ and
$| a_{\vec k}\rangle \equiv {\bf a}_{\vec k}^{\ast}|0\rangle$ are the eigenstates of the total Hamiltonian; however, the state
$| V_{\vec p}\rangle \equiv {\bf V}_{\vec p}^{\ast}|0\rangle$ is not.
\par In order to construct the eigenstate $| {\tilde V}_{\vec p}\rangle$, we have to superpose the states:
$| V_{\vec p}\rangle$ and
$| a_{\vec k} N_{\vec p-\vec k}\rangle$ as \begin{equation}
| {\tilde V}_{\vec p}\rangle=\sqrt{Z_V}
\left( | V_{\vec p}\rangle + \int {d\vec k \over (2\pi)^3} g_{\vec k} | a_{\vec k} N_{\vec p-\vec k}\rangle \right). \label{vstate} \end{equation} Then the weight $g_{\vec k}$ in this form is determined from the eigenstate equation \begin{equation}
{\bf H}| {\tilde V}_{\vec p}\rangle = m_V | {\tilde V}_{\vec p}\rangle, \label{eigensteq} \end{equation} as \begin{equation} g_{\vec k} ={1 \over m_V-m_N-\omega_{\vec k}} { \lambda_0 f(\omega_{\vec k}) \over \sqrt{2\omega_{\vec k}} }. \label{weight} \end{equation} The determination of the eigenvalue $m_V$ is equivalent to looking for a root of the retarded propagator: \begin{equation} G_R(E)=\left[ E- m_V^0+ i \epsilon +\int {d\vec k \over (2 \pi)^3} {1 \over m_N+\omega_{\vec k}-E-i \epsilon} { \lambda_0^2 f^2(\omega_{\vec k}) \over 2\omega_{\vec k} }\right]^{-1}. \label{invret}
\end{equation} A real root is found for a stable $V$- particle. However if unstable, the root becomes complex: $E=m_V- i \gamma/2$. Usually $m_V$ is interpreted as the observable mass and $1/\gamma$ as the life-time of the physical $V$ particle\cite{gamow28}. However, the state $|
{\tilde V}_{\vec p}\rangle$ turns out to have zero norm $\langle {\tilde V}_{\vec p}| {\tilde V}_{\vec p}\rangle=0$ simply because the Hamiltonian is a hermitian operator. Nakanishi and others \cite{nakanishi58} \cite{sudarshan78} expressed the eigenstate corresponding to this eigenvalue by introducing the notion of {\it complex distribution}. On the other hand in papers \cite{petrosky91} \cite{antoniou93}, they extended the ordinary Hilbert space introducing the notion of the {\it rigged Hilbert space}.
\section{Unstable Lee model in the in-in quantum field theory}
\par We will take a conservative approach. Instead of extending the ordinary Hilbert space, we express the evolution of the unstable particle in the in-in formalism of quantum field theory \cite{schwinger61} \cite{keldysh64} \cite{chou85}, which is the most appropriate formalism for describing the unstable quantum system\cite{morikawa86}. This is just a simple extension of the ordinary quantum field theory with the doubled time-contour of integration.
In this formalism, it is possible to express the statistical dissipation and fluctuations consistently with quantum field theory. Leaving the detail of this formalism for the other paper \cite{morikawa95}, we briefly explain this formalism here. \par The time contour of integration in the in-in quantum field theory is generalized to run from $-\infty$ to $+\infty$ and then back to $-\infty$ again. All the arguments of fields ${\bf \Phi}(x)$ are doubled according to this new time contour. Moreover, the Hamiltonian ${\bf H}[{\bf \Phi}]$ of the system is generalized to $\hat {\bf H}[{\bf \Phi}^{\pm}]={\bf H}[{\bf \Phi}^+]-{\bf H}[{\bf \Phi}^-]$ where $X^+(x)$ and $X^-(x)$ mean the field quantity $X(x)$ restricted on the forward and backward time branches, respectively. In the same manner, the Lagrangian density ${\cal L}$ is generalized to $\hat{\cal L}={\cal L}[{\bf \Phi}^+]-{\cal L}[{\bf \Phi}^-]$. Because the standard Pauli equation for the density matrix
$i \partial \mbox{\boldmath $\rho$}(t)/\partial t=[{\bf H}, \mbox{\boldmath $\rho$}(t)]$ is expressed in the coordinate representation ($\langle \Phi_+|\mbox{\boldmath $\rho$}(t)| \Phi_-\rangle=\rho[\Phi^{\pm},t]$) as \begin{equation} i {\partial \rho[\Phi^{\pm},t] \over \partial t}=(H[\Phi^+]-H[\Phi^-])\rho[\Phi^{\pm},t], \label{pauli} \end{equation} this generalized Hamiltonian, in the coordinate representation, $\hat H$ is thought to be the time translation operator for the density matrix. \par The partition function is defined in the usual way except that the time-integration contour is doubled. \begin{eqnarray} \hat Z[J]&\equiv& {\rm Tr}[ T_C({\rm exp}[i\ \int_C d^4 x { J(x) {\bf \Phi}(x) }] )\rho] \nonumber \\ &=&{\rm Tr}[ T_+({\rm exp}[i\ \int d^4 x{ J_+(x) {\bf \Phi}_+(x) }])T_-({\rm exp}[-i\ \int d^4 x{ J_-(x) {\bf \Phi}_-(x) })])\rho] \label{pfunction} \end{eqnarray} where the suffix $C$ in the integral means that the time integration contour is generalized so that it runs from minus infinity to plus infinity and then back to the minus infinity again. The symbol $\rho$ is the initial density matrix.
The symbol ${\bf \Phi}(x)$ represents all the quantum fields in Heisenberg picture. Generalized effective action $\hat\Gamma[\Phi]$ is defined simply as the Legendre transformation of the above partition function $\hat Z[J]$. Perturbation method using generalized propagators is available for calculating various quantities. \par Back to the unstable Lee model, we calculate the generalized effective action. Because there is only one loop correction for the $V$- particle propagator and no correction for the $N$- and boson- particle propagators, the calculation is exactly done and the result is \begin{eqnarray} \hat\Gamma&=&S_N[N_+]-S_N[N_-]+S_{\theta}[\theta_+]-S_{\theta}[\theta_-] +S_{\rm int}[N_+, V_+,\theta_+]-S_{\rm int}[N_-, V_-,\theta_-] \nonumber\\ &+&\int d^4x \int d^4x' \left( \Phi_V^+, \Phi_V^-\right)^{\ast}_x \left( {\matrix{D-iB&i(B-A)\cr i(B+A)&-D+iB\cr }} \right)_{x,x'} \left( {\matrix{ \Phi_V^+\cr \Phi_V^-}} \right)_{x'} \label{effaction} \end{eqnarray} where all $N$ and $V$ variables are Grassmann valued fields. In the above, the first line represents the bare actions corresponding to Eq(\ref{bhamiltonian}) but with renormalized coupling constant $\lambda$ instead of $\lambda_0$. The last line is the radiatively corrected $V$-particle part and is further rewritten as \begin{equation} \int d^4x \int d^4x' \left( \Phi_V^{\Delta}, \Phi_V^C\right)^{\ast}_x \left( {\matrix{iB&D+iA\cr D-iA&0\cr }} \right)_{x,x'} \left( {\matrix{ \Phi_V^{\Delta}\cr \Phi_V^C}} \right)_{x'}, \label{veffaction} \end{equation} where $\Phi_{\Delta}=\Phi_+-\Phi_-$ and $\Phi_C=(\Phi_++\Phi_-)/2$. Kernels A, B, and D are induced from radiative corrections and are exactly calculated. 
Their Fourier transforms are given by \begin{eqnarray} D(E)&=&E-m_V^0+\int {d \vec k \over (2\pi)^3} {\lambda_0^2 f^2(\omega_{\vec k}) \over 2 \omega_{\vec k}} {{\cal P} \over m_N+\omega_{\vec k}- E}, \nonumber\\ B(E)&=&Z_V^{-1}\theta(E-m_N-\mu) \sqrt{(E-m_N)^2-\mu^2} \lambda^2 f(E-m_N)/(4 \pi), \nonumber\\ A(E)&=&{\rm sign}(E)B(E), \label{kernels} \end{eqnarray} in the momentum representation but the three momentum is suppressed. $D(E)$ part yields the infinite mass correction and the wave function renormalization as \begin{equation} D(E)=(1+C_1)E-(m_V^0-C_0 +C_1 m_V)=Z_V^{-1} E -m_V, \label{dpart} \end{equation} where \begin{equation} C_0= \int {d\vec k \over (2 \pi)^3} { \lambda_0^2 f^2(\omega_{\vec k}) \over 2\omega_{\vec k} } {{\cal P} \over m_N+\omega_{\vec k}-m_V}, ~~ C_1=\int {d\vec k \over (2 \pi)^3} { \lambda_0^2 f^2(\omega_{\vec k}) \over 2\omega_{\vec k} } {{\cal P} \over (m_N+\omega_{\vec k}-m_V)^2}. \label{coeff} \end{equation} The renormalized coupling constant $\lambda$ and the wave function renormalization $Z_V$ appeared in Eq.(\ref{kernels}) are defined as \begin{equation} Z_V^{-1}=1+C_1, ~~~ \lambda^2=Z_V \lambda_0^2. \label{renormalization} \end{equation} The $B(E)$ part comes from the quantum cross correlation between the forward time branch and the backward time branch. This term was absent in the usual in-out formalism and is new in the in-in formalism. The $A(E)$ part is also specific to the in-in formalism and it breaks time reversal symmetry because it is odd in the argument $E$. The time irreversible term $A(E)$ appears simply because we are considering specific boundary condition: We have taken {\it initial} density matrix $\rho$ in Eq.(\ref{pfunction}). If we took the {\it final} density matrix there, the signature of the $A(E)$ term would be reversed. \par The above kernels are in general non-local. 
Here we take the local approximation (setting $E \rightarrow m_V$) with the limit $m_V \gg m_N, \mu$ just for simplicity.\footnote{ This non-locality means that the retarded effect has finite time scale and it turns out later that the natural noise associated with the system is colored. } Then the kernels $A$ and $B$ become \begin{equation} A(E) \approx {\lambda^2 f(m_V) \over 4\pi} E, ~~~~ B(E) \approx {\lambda^2 f(m_V) \over 4\pi}m_V, \label{localapp} \end{equation} where we have set $Z_V=1$. Note that the effective action becomes complex reflecting the fact that the system is unstable.\footnote{ Because $A(E)$ is odd ($A(-E)=-A(E)$) and $B(E)$ is even ($B(-E)=B(E)$), their Fourier transforms $A(t)$ and $B(t)$ are pure imaginary and real, respectively. } The pure imaginary term, which is proportional to $B(t)$, is however symmetric for the exchange of the variables $\Phi_V^+ \leftrightarrow \Phi_V^-$; all other terms are anti-symmetric. Therefore if we define the hermitian conjugate including the operation of the exchange of $\Phi_V^{\pm}$, the generalized effective action $\hat\Gamma$ becomes hermitian. In fact, this hermiticity is explicitly realized later in the effective Hamiltonian. \par Now we derive the generalized equations of motion for the fields. Because only the $V$- filed shows irreversibility and dissipativity, we concentrate on this field and suppress the suffix $V$ for the moment. The above effective action can be re-expressed in a cute form which manifestly represents dissipativity if we introduce auxiliary fields $\xi(t)$ and $\xi^{\ast}(t)$ which are also Grassmann valued fields. If we decompose the effective action as $ \Gamma ={\rm Re} \Gamma +i{\rm Im} \Gamma $, then the imaginary part is even in the variable $\Phi _\Delta (x) $: \begin{equation} {\rm Im} \hat\Gamma [\Phi _c,\Phi _\Delta,\Phi^{\ast} _c,\Phi^{\ast} _\Delta ] =\int\!\!\!\int {\Phi^{\ast}_\Delta(x)B(x-y)\Phi _\Delta (y)}. 
\label{imeaction} \end{equation} We can rewrite this expression by introducing auxiliary fields $\xi(x)$ and $\xi^{\ast}(x)$ which are Grassmann valued fields \begin{equation} {\rm exp}[i\hat\Gamma [\Phi, \Phi^{\ast} ]] =\int {[d\xi] [ d\xi^{\ast} ]}P[\xi, \xi^{\ast} ] {\rm exp}[i {\rm Re} \Gamma +\int ({i\Phi^{\ast} _\Delta \xi-i \xi^{\ast}\Phi_\Delta})] \label{decomposition} \end{equation} where, \begin{equation} P[\xi, \xi^{\ast} ]= ({\rm det}B){\rm exp}[\int \!\! \int {\xi^{\ast} B^{-1}\xi }] \label{weightp} \end{equation} is a normalizable positive kernel for the fields $\xi(x)$ and $\xi^{\ast}(x)$. Note that this weight function is purely Gaussian. It means that we may be able to interpret $P[\xi, \xi^{\ast}]$ as a statistical weight for the random fields $\xi(x), \xi^{\ast}(x)$(stochastic part). Therefore it is possible to interpret Eq.(\ref{decomposition}) that the total effective action $\Gamma$ is a statistical average of the individual effective actions ${\rm Re} \Gamma -\int {\xi^{\ast} \Phi _\Delta }+\int {\Phi^{\ast} _\Delta \xi }$. Application of the variational principle on this individual effective action yields an equation of motion for $\Phi_C(x)$ as \begin{eqnarray} 0&=& \left({{\delta {\rm Re} \Gamma -\int {\xi^{\ast} \Phi _\Delta }+\int {\Phi^{\ast} _\Delta \xi }}
\over {\delta \Phi^{\ast}_{\Delta} (x)}} \right)_{\Phi_{\Delta}=0} \nonumber \\ &=& \left( (i-\gamma)\partial_t-m_V+{\nabla^2 \over 2m_V}\right)\Phi_C + V'+ \xi, \label{langevin} \end{eqnarray} where we have used the local approximation and $V'$ is the interaction term in the total Hamiltonian $H_{\rm int}$. We have set $J=0$, which means there is no external source. The symbol $\gamma$ is $\lambda_0^2 f(m_V)/(4\pi)$. This is a renormalized Langevin type stochastic differential equation with friction and random force terms. According to this equation, the evolution of the field $\Phi_C(x)$ is partially deterministic and partially stochastic. The former part is governed by the action $ {\rm Re} \Gamma [\Phi ]$ which include the damping effect and the latter part is induced by the random field $\xi(x)$ whose statistical properties are completely determined by ${\rm Im} \Gamma [\Phi ]$. Actually if we define the statistical average as \begin{equation} \left\langle {\cdots} \right\rangle _{\xi,\xi^{\ast}} \equiv \int {[d\xi ][d\xi^{\ast} ]}P[\xi, \xi^{\ast} ]\cdots, \label{statav} \end{equation} then we obtain the correlation function for the random field \begin{equation} \left\langle {\xi^{\ast} (x)\xi (y)} \right\rangle _\xi =B(y-x), \label{correlation} \end{equation} which becomes white noise if we take the local approximation Eq.(\ref{localapp}). The same variational principle yields the equations of motion for the other fields $N$ and $\theta$. However there appear no new terms which show dissipativity and irreversibility even after full radiative corrections.
\section{Effective Hamiltonian and entropy increase}
\par We can express the dissipativity of the system in another form by constructing the effective Hamiltonian. If we apply the local approximation, then the effective action reduces to the local form $\hat \Gamma_V=\int d^4x \hat{\cal L}_V$, where ${\cal L}_V$ is the generalized effective Lagrangian for the $V$-particle part: \begin{eqnarray} \hat {\cal L}_V &=&i (\Phi^{\Delta\ast}_V \dot\Phi^C_V +\Phi^{C\ast}_V\dot \Phi^{\Delta}_V ) +\Phi^{\Delta\ast}_V {\nabla^2 \over 2m_V} \Phi^C_V +\Phi^{C\ast}_V {\nabla^2 \over 2m_V} \Phi^{\Delta\ast}_V \nonumber\\ &-&\gamma (\Phi^{\Delta\ast}_V \dot\Phi^C_V-\Phi^{C\ast}_V \dot\Phi^{\Delta\ast}_V) +i \gamma m_V \Phi^{\Delta\ast}_V \Phi^{\Delta}_V. \label{efflagrangian} \end{eqnarray} The canonical momenta are defined by $p_V^{\pm}\equiv \pm \partial\hat{\cal L}/\partial \dot\Phi_V^{\pm}$ or \begin{equation} p_V^{\Delta} \equiv {\partial \hat{\cal L} \over \partial\dot\Phi_V^{\Delta}} =(i-\gamma) \Phi_V^{C\ast}, ~~ p_V^C \equiv {\partial \hat{\cal L} \over \partial\dot\Phi_V^C} =(i+\gamma) \Phi_V^{\Delta\ast}. \label{canmom} \end{equation} Then the generalized effective Hamiltonian for the total system becomes \begin{eqnarray} \hat H&=& \int d^3 x [p_V^{\Delta}\dot\Phi_V^{\Delta}+p_V^C\dot\Phi_V^C - \hat {\cal L}] \nonumber\\ &=& H[\Phi_N^+,\Phi_{\theta}^+]-H[\Phi_N^-,\Phi_{\theta}^-] +H_{\rm int}[\Phi^+]-H_{\rm int}[\Phi^-] \nonumber\\ &-&\Phi^{\Delta\ast}_V {\nabla^2 \over 2m_V} \Phi^C_V -\Phi^{C\ast}_V {\nabla^2 \over 2m_V} \Phi^{\Delta\ast}_V -i \gamma m_V \Phi^{\Delta\ast}_V \Phi^{\Delta}_V. \label{effhamiltonian} \end{eqnarray} where $H[\Phi_N^{\pm},\Phi_{\theta}^{\pm}]$ and $H_{\rm int}[\Phi^{\pm}]$ are, respectively, the free $N~\theta$- particle part and the interaction part of the original Hamiltonian Eq.(\ref{bhamiltonian}). There is no radiative corrections for these parts except $Z_V$ and $\lambda$. 
Note that the effective Hamiltonian, even after the local approximation, is hermitian in the sense $H[\Phi^-,\Phi^+]^*=H[\Phi^+, \Phi^-]$. Therefore if we write down the generalized Pauli equation as $i {\partial \rho[\Phi^{\pm}] / \partial t}=\hat H[\Phi^{\pm}]\rho[\Phi^{\pm}]$, the total probability is conserved (${\rm Tr}\rho={\rm const.}$). We now demonstrate this in an operator form of the Pauli equation. \par We rewrite the above Pauli equation for the density matrix in an operator form.
Remember that we have been using the representation:
${\bf \Phi}(\vec x)| \Phi\rangle=\Phi(\vec x) | \Phi\rangle$,
$\langle \Phi_+| \mbox{\boldmath $\rho$}(t) | \Phi_-\rangle=\rho[\Phi^{\pm},t]$, and so on. Therefore for example, the operator form of $\Phi_{\Delta}~\rho[\Phi^{\pm},t]$ is $[{\bf \Phi}, \mbox{\boldmath $\rho$}(t)]$. In the similar way, the operator form of the Pauli equation becomes \begin{equation} i{\partial \mbox{\boldmath $\rho$}(t)\over \partial t} =[{\bf H}_R, \mbox{\boldmath $\rho$}(t)] -i \gamma m_V [{\bf \Phi}^{\ast}_V , [ {\bf \Phi}_V, \mbox{\boldmath $\rho$}(t)]], \label{opform} \end{equation} where $H_R$ is the total Hamiltonian Eq.(\ref{bhamiltonian}) with renormalizations. It is easy to see the conservation of probability from this equation: \begin{equation} {\partial \over \partial t}{\rm Tr} \mbox{\boldmath $\rho$}(t)=0. \label{consprob} \end{equation} Further we define the linear entropy $S(t) \equiv -{\rm Tr} \mbox{\boldmath $\rho$}(t)^2$, which has values in $[-1,0]$. This measures how the system possesses coherence or the amount of classicality; $S(t)=-1$ for a pure quantum state, and the coherence is much destroyed for larger values of $S(t)$\cite{morikawa90}. Time evolution of $S(t)$ is governed by the last term on the RHS in Eq.(\ref{opform}), \begin{eqnarray} {\partial S(t)\over \partial t}&=&2\gamma m_V {\rm Tr}[ \mbox{\boldmath $\rho$}(t), {\bf \Phi}^{\ast}_V][{\bf \Phi}_V, \mbox{\boldmath $\rho$}(t)] \nonumber\\
&=&2\gamma m_V {\rm Tr}|[{\bf \Phi}_V, \mbox{\boldmath $\rho$}(t)]|^2 > 0. \label{lentropy} \end{eqnarray} Therefore the linear entropy perpetually increases. This manifestly shows irreversibility of the system. We emphasize that we did not use any ad hoc averaging method such as partial trace on the environment; we do not have any environment at all. Therefore the irreversibility represented by Eq.(\ref{lentropy}) is intrinsic to the system. Moreover this result does not rely upon ad hoc approximations such as the truncation of the sequence of correlation functions\footnote{ We have used non-relativistic approximations, neglected the recoil of the heavy $V$-particle, and took local approximations. These are all our approximations. }; the Lee model is exactly solved. This decoherence term stems from the quantum interference between the fields on the forward time branch and those on the backward time branch, and did not exist in the usual quantum field theory of in-out formalism.
\section{Discussions and summary}
\par In this article, we have arrived at the dissipative expressions Eq.(\ref{langevin})Eq.(\ref{effhamiltonian})Eq.(\ref{opform}) for the dynamics of unstable Lee model. We would like to emphasize the following points for the origin of intrinsic irreversibility of the model. \begin{enumerate} \item The starting point of our study has been the bare Hamiltonian Eq.(\ref{bhamiltonian}), in which no dissipativity is manifest. However this bare Hamiltonian itself does not correctly describe the real system; we need to take into account the radiative corrections and remove the divergences in the theory. These radiative corrections and renormalizations are essential for defining a feasible theory. {\it At the same time}, these radiative corrections automatically induce the dissipative kernels in Eq.(\ref{kernels}) if we use the in-in formalism of quantum field theory. Note that there is {\it no ad hoc coarse graining process} at all in this procedure of radiative corrections. Actually no information included in the bare Hamiltonian is lost in the process of radiative corrections. \item The system can be {\it consistently expressed in the density matrix formalism with the ordinary Hilbert space.} We do not have to extend the original Hilbert space anymore. We found that the $V$-particle state spontaneously decohers. This is reasonable because the decay of a $V$- particle means not only a diffusion of energy but also a diffusion of information. \end{enumerate} \par It will be interesting to compare our approach with the others concerning irreversibility and dissipativity in quantum theory. \begin{enumerate} \item T. Petrosky et al. \cite{petrosky91} and I. E. Antoniou et al. \cite{antoniou93} proposed the extension of the ordinary Hilbert space in order to express the semi-group property of the evolution of unstable state with hermitian Hamiltonian. 
In our case, instead of extending the representation space, we extended the field variables making the time integration contour double and introduced the density matrix in the in-in formalism of quantum field theory. The generalized Hamiltonian Eq.(\ref{effhamiltonian}) is guaranteed to be hermitian even if the system is dissipative and irreversible. A new feature, which was not discussed in the work \cite{petrosky91} \cite{antoniou93}, is the destruction of quantum coherence associated with the instability as is expressed in the last term of Eq.(\ref{efflagrangian}) and Eq(\ref{effhamiltonian}). This term directly increases the entropy of the system as we have seen in Eq.(\ref{lentropy}).\footnote{ In general, friction term {\it reduces} the entropy and the term which induce the quantum decoherence (diffusion term) increases the entropy.\cite{morikawa95b}. } \par In the work \cite{petrosky91} \cite{antoniou93}, they tried to connect the deterministic time-reversible theory and the statistical time-irreversible theory by a star-unitary operator. In our case, this kind of transformation is the process of the generalized radiative corrections in the in-in formalism of quantum field theory. According to the work \cite{petrosky91} \cite{antoniou93}, a pair of dual spaces was necessary in order to separately represent the future-decaying and past-decaying states. In our case, this pair corresponds to the extension of the variables introducing the doubled time contour; the time evolution on the forward-time branch represents the future-decaying state, and vise versa for past-decaying state. \item Laplae et al. \cite{laplae65} and Umezawa \cite{umezawa93} introduced a notion of dynamical map which relates bare fields and the radiatively corrected asymptotic fields. This map specifies, among many equivalent representation of the canonical commutation relation, one representation suitable for the description of the actual system. 
From this point of view, the dynamical map, in the in-in formalism of quantum field theory, gives an ensemble of equivalent representations as we see in Eq.(\ref{decomposition}); the total effective action $\hat \Gamma[\Phi, \Phi^{\ast}]$, which corresponds to the effective Hamiltonian Eq.(\ref{effhamiltonian}), is the statistical average of dynamics each of which has deterministic evolution. In this sense, our dynamical map yields one-to-many correspondence instead of one-to-one. \item Arimitsu et al. \cite{arimitsu87} derived a general effective Hamiltonian in the thermo-field dynamics. Our effective Hamiltonian Eq.(\ref{effhamiltonian}) is similar to that derived by Arimitsu et al. \cite{arimitsu87}. In their formalism, it was necessary to double the dynamical degrees of freedom, $\Phi$ fields and $\tilde\Phi$ fields, in order to describe dissipative quantum field theory within the ordinary Hilbert space. The situation is almost the same in our case; we had to introduce $\Phi_+$ as well as $\Phi_-$ fields in order to describe dissipative quantum field theory within the ordinary Hilbert space. \item A usual method to derive quantum-dissipative dynamics is to use the influence functional method\cite{feynman63}\cite{caldeira83}. The influence functional is the action induced by a partial trace over environmental degrees of freedom and is technically almost the same as our Eq.(\ref{effaction}). In our case, we simply considered radiative corrections for all the fields (full trace in Eq.(\ref{pfunction})) not introducing the environment. Moreover the dissipative properties we obtained have nothing to do with the truncation of the BBGKY hierarchy of Green's functions because the Lee model is exactly solved; the one-loop graph is the whole radiative correction. These facts also suggest that the dissipative properties are intrinsic to the system. \end{enumerate} \par We summarize our work. 
We studied the unstable Lee model constructing the radiatively corrected effective action Eq.(\ref{effaction}) and Hamiltonian Eq.(\ref{effhamiltonian}) in the in-in formalism of quantum field theory. {}From the effective action Eq.(\ref{effaction}), we derived a Langevin equation for the $V$-field Eq.(\ref{langevin}) which explicitly shows damping and fluctuation of the state. {}From the effective Hamiltonian Eq.(\ref{effhamiltonian}), we derived perpetually increasing entropy Eq.(\ref{lentropy}). The irreversibility and dissipativity were not manifest in the original bare Hamiltonian Eq.(\ref{bhamiltonian}). However we have to make radiative corrections (dynamical map) which is an indispensable process to define the asymptotic fields and a feasible theory. Through this process we obtained the effective action and Hamiltonian in which the irreversibility and dissipativity are manifest. Technically the radiative correction process is regarded as an averaging process. This averaging process yields the dissipativity. However this averaging process is the unique procedure and does not include any arbitrariness in principle. Therefore the dissipativity and irreversibility we derived are intrinsic to the unstable Lee model. The increase of entropy is due to the last term in Eq.(\ref{effhamiltonian}) which destroys the quantum coherence (=decoherence) . This term appears due to the interference of fields $\Phi_+$ and $\Phi_-$, which is specific to the in-in formalism of quantum field theory. In this way irreversibility is consistently described within the ordinary Hilbert space. If we force to ascribe a wave function for the unstable state, then the norm of the wave function would vanish. We introduced a density matrix and allowed mixed state for the unstable particle. Then the probability is conserved despite the irreversibility. \par We would like to report extension of our formalism of intrinsic irreversibility and dissipativity in our future publications.
\hfil\break\noindent{\bf Acknowledgement} \par The author is grateful to Hiroto Kubotani, Izumi Ojima, and Akio Sugamoto for valuable discussions and useful comments. He also would like to thank The Kurata Foundation for the Promotion of Science for financial support.
\end{document} | arXiv | {
"id": "9511015.tex",
"language_detection_score": 0.7919979095458984,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\begin{abstract} It is conjectured that the Kashiwara-Vergne Lie algebra $\widehat{\mathfrak{krv}}_2$ is isomorphic to the direct sum of the Grothendieck-Teichm\"uller Lie algebra $\mathfrak{grt}_1$ and a one-dimensional Lie algebra. In this paper, we use the graph complex of internally connected graphs to define a nested sequence of Lie subalgebras of $\widehat{\mathfrak{krv}}_2$ whose intersection is $\mathfrak{grt}_1$, thus giving a way to interpolate between these two Lie algebras. \end{abstract} \keywords{Grothendieck-Teichm\"uller Lie algebra, Kashiwara-Vergne Lie algebra} \subjclass[2010]{17B65, 81R99} \address{ Matteo Felder\\ Dept. of Mathematics\\ University of Geneva\\ 2-4 rue du Li\`evre\\ 1211 Geneva 4\\ Switzerland\\ Matteo.Felder@unige.ch }
\title{Internally connected graphs and the Kashiwara-Vergne Lie algebra}
\section*{Introduction}
The Kashiwara-Vergne Lie algebra $\widehat{\mathfrak{krv}}_2$ was introduced by A. Alekseev and C. Torossian in \cite{Alekseev2012}. It describes the symmetries of the Kashiwara-Vergne problem \cite{Kashiwara1978} in Lie theory. It has been shown in \cite{Alekseev2012} to contain the Grothendieck-Teichm\"uller Lie algebra $\mathfrak{grt}_1$ as a Lie subalgebra. Conjecturally though, \begin{equation*} \widehat{\mathfrak{krv}}_2\cong \mathfrak{grt}_1\oplus \mathbb{K}t=:\mathfrak{grt} \end{equation*} where $t$ is a generator of degree 1. The aim of this work is to define a nested sequence of Lie subalgebras of $\widehat{\mathfrak{krv}}_2$ whose intersection is $\mathfrak{grt}$. This infinite family therefore interpolates between these two Lie algebras. Our hope is that this construction will provide the framework for a more systematic approach to tackle the conjecture. The technical tool used for this construction is the operad of internally connected graphs $\mathsf{ICG}$ introduced by P. \v Severa and T. Willwacher in \cite{Severawillwacher2011}. Elements of $\mathsf{ICG}(n)$ are linear combinations of (isomorphism classes of) graphs with $n$ ``external'' and an arbitrary number of ``internal'' vertices satisfying some connectivity condition. On these spaces, there are (among others) two natural operations. One is given by splitting internal (external) vertices into two internal (an external and an internal) vertices connected by an edge. The other splits external vertices into two external vertices. In both cases, we sum over all ways of reconnecting the ``loose'' edges (see Figure \ref{figure:operations}). While the former defines a differential $d$ on $\mathsf{ICG}(n)$, the latter, denoted by $\delta$, maps $\mathsf{ICG}(n)$ to $\mathsf{ICG}(n+1)$ and is therefore of a more simplicial nature.
\begin{figure}
\caption{A schematic description of the operators $d$ and $\delta$. Black vertices represent ``internal", white vertices ``external" vertices. For simplicity, we omit all signs. }
\label{figure:operations}
\end{figure}
The central character throughout this story will be the equation \begin{equation}\label{eq:TOP} dX=\delta Y \end{equation} where solutions $X$ and $Y$ should lie in $\mathsf{ICG}(n)$ and $\mathsf{ICG}(n-1)$, respectively. Note that $\mathsf{ICG}(n)$ is filtered by the number of the internal loops (i.e. loops that do not contain any external vertices). While the simplicial differential $\delta$ preserves this number, the differential $d$ might increase it.
Let us now trace the connection to A. Alekseev and C. Torossian's work. It is given by the identification of internally trivalent trees in $\mathsf{ICG}(n)$ modulo some relation with the Lie algebra of special derivations $\mathfrak{sder}_n$ of the free Lie algebra in $n$ variables. This construction first appeared in some form in V. Drinfeld's famous paper \cite{Drinfeld1991}. Also, one-loop graphs in $\mathsf{ICG}(n)$ modulo some relations may be identified with a subspace of the graded vector space $\mathfrak{tr}_n$ of cyclic words in $n$ letters. Both $\widehat{\mathfrak{krv}}_2$ and $\mathfrak{grt}_1$ are Lie subalgebras of $\mathfrak{sder}_2$, meaning that their description as graphs should be in terms of (equivalence classes of) trees. For this, let $x$ be the internally trivalent tree part of $X\in \mathsf{ICG}(2)$ which solves equation \eqref{eq:TOP} for some $Y\in \mathsf{ICG}(1)$ only up to internal loop order $1$, i.e. \begin{equation*} dX=\delta Y \mod 2 \text{ internal loops.} \end{equation*} Then the one-loop part of this equation (which only involves the tree part $x$ of $X$ on the left hand side) can be viewed as an identity in the space of cyclic words in two letters. In fact, it encodes exactly the defining relation of the Kashiwara-Vergne Lie algebra, where the differential $d$ takes the role of the ``divergence" map, $\text{div}:\mathfrak{sder}_2\rightarrow \mathfrak{tr}_2$, and $\delta$ corresponds to A. Alekseev and C. Torossian's simplicial operator $\mathfrak{tr}_1\rightarrow \mathfrak{tr}_2$. We may therefore identify $\widehat{\mathfrak{krv}}_2$ with (equivalence classes of) internally trivalent trees which correspond to the tree part of a solution to equation \eqref{eq:TOP} up to loop order $2$. On the other hand, the Grothendieck-Teichm\"uller Lie algebra is related to graph complexes through T. 
Willwacher's result \cite{Willwacher2014} \begin{equation*} \mathfrak{grt}_1\cong H^0(\mathsf{GC}_2) \end{equation*} where $\mathsf{GC}_2$ is a version of M. Kontsevich's graph complex. Surprisingly, the algorithm describing the isomorphism $H^0(\mathsf{GC}_2)\rightarrow \mathfrak{grt}_1$, produces first a pair $(X,Y)\in \mathsf{ICG}(2)\times \mathsf{ICG}(1)$ which satisfies \begin{equation*} dX=\delta Y \text{ for any loop order}, \end{equation*} for which the tree part of $X$ eventually represents the desired $\mathfrak{grt}_1$-element. Thus, it appears as if the Lie algebras $\mathfrak{grt}_1$ and $\widehat{\mathfrak{krv}}_2$ live on opposite ends of a chain described in terms of solutions to equation \eqref{eq:TOP} up to a certain loop order. More precisely, we consider solutions to the equation \begin{equation*} dX=\delta Y \mod k+1 \text{ internal loops}. \end{equation*} and set $\widehat{\mathfrak{krv}}^{(k)}_2$ to consist of the tree part of such $X$. Then, to summarize, our main result may be rephrased as follows. \begin{theorem} There exists a family of subspaces $\{\widehat{\mathfrak{krv}}^{(k)}_2\}_{k\in \mathbb{N}}$ of $\mathfrak{sder}_2$ satisfying: \begin{enumerate} \item{For all $k\geq 1$, $\widehat{\mathfrak{krv}}^{(k)}_2$ is a Lie subalgebra of $\mathfrak{sder}_2$} \item{They define an infinite nested sequence between $\widehat{\mathfrak{krv}}_2$ and $\mathfrak{grt}_1$, that is, \begin{equation*} \mathfrak{grt}_1 \subset \dots\subset \widehat{\mathfrak{krv}}_2^{(k+1)}\subset \widehat{\mathfrak{krv}}^{(k)}_2\subset \dots \subset \widehat{\mathfrak{krv}}_2^{(1)}=\widehat{\mathfrak{krv}}_2. \end{equation*}} \item{Their intersection is $\bigcap\limits_{k\geq 1} \widehat{\mathfrak{krv}}^{(k)}_2\cong \mathfrak{grt}_1 \oplus \mathbb{K}t$.} \end{enumerate} \end{theorem}
The proofs of (1) and (3) are non-trivial and require several results from the theory of graph complexes. Additionally, we recall a similar construction which stems from the work of P. \v Severa and T. Willwacher \cite{Severawillwacher2011} for the kernel of the divergence map, $\ker (\text{div}:\mathfrak{sder}_n\rightarrow \mathfrak{tr}_n)=:\mathfrak{krv}_n$ (which is also referred to as the Kashiwara-Vergne Lie algebra). More precisely, we show that there exists a nested sequence of Lie subalgebras $\{\mathfrak{krv}^{(k)}_n\}_{k\geq 1}$ of $\mathfrak{sder}_n$ extending the notion of the Lie algebra $\mathfrak{krv}_n$. In this instance, the intersection of these Lie subalgebras is the Drinfeld-Kohno Lie algebra $\mathfrak{t}_n$.
\begin{figure}
\caption{A pair $(X,Y)\in \mathsf{ICG}(2)\times \mathsf{ICG}(1)$ solving equation \eqref{eq:TOP} in loop order $1$. }
\label{figure:sol}
\end{figure}
\section{Preliminaries: Results from homotopy theory}\label{section:preliminaries} In this first section, we recall some well-known facts from homotopy theory. Throughout the text, we work over a field $\mathbb{K}$ of characteristic zero. \begin{definition} Let $f$ and $g$ be chain maps between two chain complexes $(V,d_V)$ and $(W,d_W)$. A \emph{homotopy} between $f$ and $g$ is a map $h:V\rightarrow W$ of degree $-1$ such that \begin{equation*} f-g=d_Wh+hd_V. \end{equation*} We say $f$ is homotopic to $g$. \end{definition}
\begin{definition} A \emph{homotopy retract} consists of the following data: \begin{itemize} \item{two chain complexes $(W,d_W)$ and $(V,d_V)$,} \item{chain maps \begin{align*} i:(W,d_W)&\overset{\sim}{\longrightarrow} (V,d_V)\\ p:(V,d_V)&\longrightarrow (W,d_W) \end{align*} where $i$ is a quasi-isomorphism,} \item{a homotopy $h$ between $\mathrm{Id}$ and $ip$.} \end{itemize} Sometimes, it is more convenient to say $(W,d_W)$ is a homotopy retract of $(V,d_V)$. \end{definition}
\begin{proposition}\label{prop:quasi-iso} Let $(V,d)$ denote a differential graded vector space. If $\pi:V \rightarrow V$ is a projection ($\pi^2=\pi$) and $h:V \rightarrow V$ is a map of degree $-1$ such that $\mathrm{Id}-\pi=dh+hd$ (i.e. $h$ is a homotopy between $\mathrm{Id}$ and $\pi$), then $\pi(V)\hookrightarrow V$ is a quasi-isomorphism. \end{proposition}
\begin{proof} Denote the inclusion map by $i:\pi(V)\hookrightarrow V$. Notice that \begin{align*}
\pi i =&\mathrm{Id}|_{\pi(V)}\\ i \pi=& \pi. \end{align*} Moreover, since $\pi$ is homotopic to the identity $\mathrm{Id}_V$, the induced maps on cohomology coincide, i.e. $\pi^{*}=\mathrm{Id}_V^{*}=\mathrm{Id}_{H(V)}$. Also \begin{align*} i^{*}:H(\pi(V))&\rightarrow H(V)\\ \pi^{*}: H(V)&\rightarrow H(\pi(V)) \end{align*} satisfy $i^{*}\pi^{*}=\pi^{*}=\mathrm{Id}_{H(V)}$, $\pi^{*} i^{*}=\mathrm{Id}_{H(\pi(V))}$. Thus $i^{*}$ is an isomorphism. \end{proof}
\begin{proposition} Let $(V,d)$ be as above. There exist graded subspaces $H$, $U$, $U' \subset V$ such that $d(H)=0$, $H\cong H(V,d)$, $d$ restricted to $U$ is an isomorphism onto $U'$, i.e. $d:U \overset{\sim}{\rightarrow} U'$ and $V$ decomposes as $V=H\oplus U \oplus U'$. \end{proposition} \begin{proof}
Let $Z:=\{v\in V| dv=0\}$ be the subset of closed elements. Let $U\subset V$ be some complement of $Z$ in $V$, so that $V=Z\oplus U$. Define $U':=dU\subset Z$ and let $H\subset Z$ be some complement of $U'$ in $Z$, so that $Z=H\oplus U'$. Then $V=H\oplus U \oplus U'$. By construction, $dH=dU'=0$ and $d|_U:U\rightarrow U'$ is surjective. Since $U\cap Z=\{0\}$, it is also injective. Clearly, $H\cong H(V,d)$ as graded vector spaces. \end{proof}
\begin{corollary}\label{cor:existence} Let $(V,d)$ be as above. Then there exist a projection $\pi$ and a homotopy $h$ between $\mathrm{Id}$ and $\pi$ (i.e. $\mathrm{Id}-\pi=dh+hd$) satisfying \begin{equation}\label{eq:properties} d \pi=\pi d =0 \text{ and }h^2=\pi h =h \pi=0. \end{equation} For every such $\pi$ and $h$, we have $\pi(V)\cong H(V,d)$ as graded vector spaces. \end{corollary}
\begin{proof}
We have $V=H\oplus U\oplus U'$, with $d|_U:U\rightarrow U'$ an isomorphism, and $dH=dU'=0$. Let $\pi$ be the projection onto $H$ and $h:U'\rightarrow U$ be an inverse for $d|_U$, i.e. $d h|_{U'}= \mathrm{Id}$. Extend $h$ to $H$ and $U$ by $0$. Note that this way, $h:V\rightarrow V$ is a right inverse to $d:V\rightarrow V$. All requested relations are now easily checked.
Given such $\pi$ and $h$, the relation $d\pi=0$ implies $\pi(V)\subset \ker(d)$. Let \begin{equation*}
W:=\ker(\pi)\cap \ker(d)=\{v\in V| dv=\pi v=0\}. \end{equation*} Then \begin{equation*} \ker(d)=\pi(V)\oplus W. \end{equation*} We claim that $W=\mathrm{im}(d)$. Let $v=du\in \mathrm{im}(d)$. Then, $dv=d^2u=0=d\pi(v)$, i.e. $v\in W$. On the other hand, if $w\in W$, then $(\mathrm{Id}-\pi)(w)=w=(dh+hd)(w)=dhw\in \mathrm{im}(d)$. Now $\ker(d)=\pi(V)\oplus \mathrm{im}(d)$ implies $\pi(V)\cong H(V,d)$. \end{proof}
Suppose $(V,d)$ is a complex and $\{V_n\}_{n\in \mathbb{Z}}$ a family of subsets of $V$ such that $V\cong \prod\limits_{n\in \mathbb{Z}}{V_n}$ as graded vector spaces. Assume that the differential decomposes as $d=d_0+d_1+d_2+\dots$ with $d_j:V_n\rightarrow V_{n+j}$ for all $n$. Note that $V$ is bigraded. The degree within the complex will be denoted by a superscript. Moreover, suppose that the $V_n$ are bounded in this degree, that is for every degree $j$, there is an $\tilde{n}(j)$ such that $V_n^j=0$ for all $n<\tilde{n}(j)$. With this setting, we have a bounded above, complete and descending filtration $\mathcal{F}^p V:= \prod\limits_{n\geq p}{V_n}$. Note that as complexes, the completed associated graded complex $\hat{gr}V$ with differential $d_0$ is isomorphic to $(V,d_0)$, i.e. $(V,d_0)\cong (\hat{gr} V,d_0)$.
\begin{proposition}\label{prop:homotopy} Suppose $(V,d)$ is a complex as above. Let $\pi_0:V\rightarrow V$ be a projection (i.e. $\pi_0^2=\pi_0$) and $h_0$ be a homotopy between $\mathrm{Id}$ and $\pi_0$ for $d_0$ (i.e. $\mathrm{Id}-\pi_0=d_0h_0+h_0d_0$) such that \begin{align*} d_0\pi_0=&\pi_0d_0=0\\ h_0^2=&\pi_0h_0=h_0\pi_0=0. \end{align*} Then \begin{equation*} h:=h_0-h_0d'h_0+h_0d'h_0d'h_0-\dots=h_0\cdot \frac{1}{1+d'h_0}=\frac{1}{1+h_0d'}\cdot h_0 \end{equation*} and \begin{equation*} \pi:=\mathrm{Id}-(dh+hd) \end{equation*} where $d'=d-d_0$ satisfy \begin{enumerate}[label=(\roman*)] \item{$\pi^2=\pi$} \item{$d \pi=\pi d$} \item{$h^2=0$} \item{$h \pi=\pi h=0$} \item{$\mathrm{Id}-\pi=dh+hd$} \end{enumerate} \end{proposition}
\begin{proof} By definition, we have $\mathrm{Id}-\pi=dh+hd$ and since $h_0^2=0$, it clearly follows that $h^2=0$. Moreover, \begin{equation*} d\pi=d(\mathrm{Id}-dh-hd)=d-dhd=(\mathrm{Id}-dh-hd)d=\pi d. \end{equation*} Using $h_0d_0 h_0= h_0(\mathrm{Id}-\pi_0-h_0d_0)=h_0$, a cumbersome computation shows $hdh=h$.
Hence, \begin{equation*} \pi h=(\mathrm{Id}-dh-hd)h=h-hdh=0=h(\mathrm{Id}-dh-hd)=h\pi, \end{equation*} and as $(dh+hd)^2=dhdh+hdhd=dh+hd$, we find $(\mathrm{Id}-\pi)^2=\mathrm{Id}-\pi \Leftrightarrow \pi^2=\pi.$ \end{proof}
\begin{corollary}\label{Cor:isom} Let $(V,d)$ and $\pi$ be as a in the proposition above. Then $(\pi(V),d)$ is a quasi-isomorphic subcomplex of $(V,d)$. Moreover, as a graded vector space, $\pi(V)$ is isomorphic to $H^{\bullet}(V,d_0)\cong H^{\bullet}(\hat{gr}V,d_0)$. \end{corollary}
\begin{proof} That $(\pi(V),d)\hookrightarrow (V,d)$ is a quasi-isomorphism follows directly from Proposition \ref{prop:quasi-iso}. From Corollary \ref{cor:existence}, we get that $\pi_0(V)\cong H^{\bullet}(V,d_0)$ as graded vector spaces. To prove that $H^{\bullet}(V,d_0)\cong \pi(V)$ as graded vector spaces, we show that \begin{equation*}
\pi_0|_{\pi(V)}: \pi(V)\leftrightarrows \pi_0(V) :\pi|_{\pi_0(V)} \end{equation*} are mutual inverses.
Note that as $\pi_0 h=h \pi_0=0$, $\pi_0\pi\pi_0=\pi_0(\mathrm{Id}-dh-hd)\pi_0=\pi_0^2=\pi_0$ and therefore $\pi_0\pi|_{\pi_0(V)}=\mathrm{Id}_{\pi_0(V)}$. The other direction is more technical. First of all, note that $h_0h=hh_0=0$ (as $h_0^2=0$) and $d_0h_0d_0=(\mathrm{Id}-\pi_0-h_0d_0)d_0=d_0$. Also, a somewhat tedious, but elementary calculation shows \begin{align*} h_0dh =&h_0d_0h-h+h_0\\ hdh_0=&hd_0h_0-h+h_0. \end{align*}
Using these identities, a lengthy algebraic manipulation produces the desired result, $\pi \pi_0 \pi=\pi$.
\end{proof}
\begin{lemma}\label{lemma:G-action} Let $G$ be a finite group acting on a chain complex $(V,d)$ (i.e. the action commutes with the differential). Then there exists a projection $\pi$ and a homotopy $h$ between $\mathrm{Id}$ and $\pi$ which satisfy the equations \eqref{eq:properties} as in Corollary \ref{cor:existence} and commute with the action of $G$. Moreover, for every such $\pi$ and $h$, we have $\pi(V)\cong H(V,d)$ as graded $G$-vector spaces. \end{lemma}
\begin{proof}
We need to adapt the proof of Corollary \ref{cor:existence} slightly. We have $V=H\oplus U\oplus U'$, with $d|_U:U\rightarrow U'$ an isomorphism, and $dH=dU'=0$. Let $\pi$ be the projection onto $H$. This is a $G$-equivariant map. Let $h_0:V\rightarrow V$ be any right inverse to $d$, i.e. $dh_0=\mathrm{Id}$. To construct a $G$-equivariant map out of $h_0$, define
h:=\frac{1}{|G|}\sum\limits_{g\in G}{g h_0 g^{-1}}. \end{equation*} This is still a right inverse to $d$ as \begin{equation*}
dh=\frac{1}{|G|}\sum\limits_{g\in G}{g dh_0 g^{-1}}=\frac{1}{|G|}\sum\limits_{g\in G}{g \mathrm{Id} g^{-1}}=\frac{|G|}{|G|}\mathrm{Id}=\mathrm{Id} \end{equation*} and it is $G$-equivariant. Let $k\in G$, then \begin{equation*}
k. h= \frac{1}{|G|}\sum\limits_{g\in G}{kg h_0 g^{-1}}=\frac{1}{|G|}\sum\limits_{g':=kg \in G}{g' h_0 (k^{-1} g')^{-1}}=\frac{1}{|G|}\sum\limits_{g' \in G}{g' h_0 g'^{-1} k}=h k. \end{equation*} To show that $\pi(V)\cong H(V,d)$ as $G$-vector spaces, we need to find a $G$-equivariant right inverse $i:H\rightarrow V$ to $\pi:V\rightarrow H\cong H(V,d)$. For this, let $i_0$ be any right inverse to $\pi$ (which exists as $\pi$ is surjective). By the same averaging trick as above, we define \begin{equation*}
i:=\frac{1}{|G|}\sum\limits_{g\in G}{g i_0 g^{-1}}. \end{equation*} That $\pi i=\mathrm{Id}$ and $i$ is $G$-invariant is checked in exactly the same way as for $h$. The map $i$ can then be extended to $U\oplus U'$ by $0$, thus giving a $G$-equivariant inverse to $\pi$ and $\pi(V)\cong H(V,d)$ as graded $G$-vector spaces. \end{proof}
The following homotopy transfer theorem for $L_\infty$-algebras can be found in Chapter 10.3 of J.-L. Loday and B. Vallette's book \cite{lodayvallette2012}. Another good introductory survey is B. Vallette's text \cite{vallette2014}. \begin{theorem}\label{thm:homotopytransfer} (\cite{lodayvallette2012}, Theorem 10.3.5) Let $(W,d_W)$ be a homotopy retract of $(V,d_V)$ with maps $i:W\overset{\sim}{\rightarrow}V$, $p:V\rightarrow W$ and homotopy $h:V\rightarrow V$. Moreover, let $\{l_n:V^{\otimes n}\rightarrow V\}_{n\geq 2}$ be an $L_\infty$-structure on $V$. This $L_\infty$-structure can be transferred to an $L_\infty$-structure on $W$ such that $i$ extends to an $L_\infty$-quasi-isomorphism. The transferred structure $\{m_n:W^{\otimes n}\rightarrow W\}_{n\geq 2}$ is given by \begin{equation}\label{eq:transferredstructure} m_n:=\sum\limits_{t\in RT_n}\pm p t(l,h) i^{\otimes n} \end{equation} where the sum runs over rooted trees $t$ with $n$ leaves and where the notation $t(l, h)$ stands for the $n$-multilinear operation on $V$ defined by the composition scheme $t$ with vertices labeled by the $l_k$ and internal edges labeled by $h$. \end{theorem}
\begin{remark}\label{thm:Kontsevich-Soibelman}(\cite{lodayvallette2012}, Theorems 10.3.11. and 10.3.15) Both maps $i$ and $p$ may be extended to $L_\infty$-morphisms $\tilde{i}=(i,i_2,i_3,\dots)$ and $\tilde{p}=(p,p_2,p_3,\dots)$ between the $L_\infty$-algebras $(V,d_V,\{l_n\}_{n\geq 2})$ and $(W,d_W,\{m_n\}_{n\geq 2})$. The higher arity maps $\{i_n\}_{n\geq 2}$ and $\{p_n\}_{n\geq 2}$ are constructed using composition schemes involving only $i$, $p$, $h$ and $\{l_n\}_{n\geq 2}$. For instance, from \cite{vallette2014} \begin{equation*} i_n:=\sum\limits_{t\in RT_n}\pm h t(l,h) i^{\otimes n} \end{equation*} where the notation is as in equation \eqref{eq:transferredstructure}. \end{remark}
\section{The $L_\infty$-algebra of internally connected graphs}
We follow P. \v Severa and T. Willwacher's work \cite{Severawillwacher2011}. In their paper, we learn that the tools to define the $L_\infty$-algebra of internally connected graphs are based on M. Kontsevich's graph complex which can be found in \cite{Kontsevich1999} and \cite{Lambrechts2014}. Fix $n\geq 1$. \begin{definition}\label{def:icg} An \emph{admissible graph} is an unoriented graph $\Gamma$ with labeled vertices $1,2,\dots, n$ (called external), possibly other vertices (unlabeled and called internal) satisfying the following properties: \begin{enumerate} \item{There is a linear order on the set of edges.} \item{$\Gamma$ has no double edges, nor simple loops (edges connecting a vertex with itself).} \item{Every internal vertex is at least trivalent.} \item{Every internal vertex can be connected by a path with an external vertex.} \end{enumerate} \end{definition}
Let $\mathsf{graphs}(n)$ be the vector space spanned by finite linear combinations of admissible graphs with $n$ external vertices, modulo the relation $\Gamma^\sigma=(-1)^{|\sigma|} \Gamma$, where $\Gamma^{\sigma}$ differs from $\Gamma$ by a permutation $\sigma$ on the order of edges. Define the degree by \begin{equation*} \deg\Gamma=\#\text{edges} - 2\#\text{internal vertices} \end{equation*} and let the differential be given by vertex splitting. More precisely, an external vertex splits into an external and an internal vertex connected by an edge, and we sum over all possible ways of reconnecting the ``loose'' edges to the two newly created vertices, while only keeping admissible graphs. Similarly, an internal vertex splits into two internal vertices, before summing over all ways of reconnecting the edges previously connected to the split vertex. \begin{definition} A graph in $\mathsf{graphs}(n)$ which is connected after we cut off all external vertices is called \emph{internally connected}. Denote by $\mathsf{ICG}(n)$ the space spanned by internally connected graphs modulo sign relations obtained from the order of edges. Define the grading on $\mathsf{ICG}(n)$ to be \begin{equation*} \deg\Gamma=1-\#\text{edges}+2\#\text{internal vertices}. \end{equation*} Set the differential on $\mathsf{ICG}(n)$ to be given by vertex splitting. \end{definition}
Since any graph in $\mathsf{graphs}(n)$ may be written as the disjoint union of its internally connected components (after identifying the external vertices), the internally connected graphs freely generate $\mathsf{graphs}(n)$ as a coalgebra. We therefore have an isomorphism of cocommutative coalgebras \begin{equation*} \mathsf{graphs}(n)\cong S(\mathsf{ICG}(n)[1]). \end{equation*} By definition, the differential on $\mathsf{graphs}(n)$ defines the following $L_\infty$-structure on the graded vector space $\mathsf{ICG}(n)$. The $k$-ary bracket $[\Gamma_1,\dots,\Gamma_k]$ is given by gluing the $\Gamma_i$'s at the corresponding external vertices, applying the differential in $\mathsf{graphs}(n)$, and keeping only the graphs that are internally connected (we thus necessarily split only external vertices, and only in ways that connect all $\Gamma_i$'s together).
Finally, note that both $\mathsf{graphs}$ and $\mathsf{ICG}$ form operads in the category of cochain complexes. The operadic composition in $\mathsf{graphs}$ (and also in $\mathsf{ICG}$) is given by insertion. That is, for $\Gamma_1\in \mathsf{graphs}(r)$, $\Gamma_2\in \mathsf{graphs}(s)$, \begin{equation*} \Gamma_1 \circ_j \Gamma_2 \end{equation*} is constructed by replacing the $j$th external vertex by $\Gamma_2$, summing over all possible ways of reconnecting the ``loose" edges (which were previously adjacent to vertex $j$) to vertices of $\Gamma_2$, and keeping only admissible graphs (in the case of $\mathsf{ICG}$, we only keep the internally connected ones).
\subsection{A natural filtration on $\mathsf{ICG}(n)$}
On $\mathsf{ICG}(n)$, there is a natural descending filtration given by the number of internal loops (loops that do not contain any external vertices). For $p\in \mathbb{N}_0$, we denote by $\mathcal{F}^p:=\mathcal{F}^p\mathsf{ICG}(n)$ the subspace of $\mathsf{ICG}(n)$ having \emph{at least} $p$ internal loops. Clearly, \[\dots \subset\mathcal{F}^{p+1}\subset\mathcal{F}^{p}\subset\dots \subset \mathcal{F}^0=\mathsf{ICG}(n).\] The completed associated graded with respect to this filtration is \[\hat{gr}\mathsf{ICG}(n)=\prod_{p\geq0}\mathcal{F}^p/\mathcal{F}^{p+1}.\] Remark that the $p$-th piece of the associated graded, $\mathcal{F}^p/\mathcal{F}^{p+1}$, is the space of graphs having \emph{exactly} $p$ internal loops. Also note that the differential $d$ on $\mathsf{ICG}(n)$ can be decomposed into a sum $d=d_0+d_1+d_2+\dots$ where applying $d_i$ creates $i$ new internal loops in the vertex splitting. Note that thus $d_0$ splits internal vertices only. All other components come from splitting external vertices.
\begin{remark} Occasionally, we drop the word internal. It should be noted that by loops we always mean internal loops. \end{remark}
\begin{proposition} There exists a projection $\pi:\mathsf{ICG}(n)\rightarrow \mathsf{ICG}(n)$ and a homotopy $h$ between $\mathrm{Id}$ and $\pi$ which satisfy the equations (i) to (v) as in Proposition \ref{prop:homotopy} and are such that $(\mathrm{im}(\pi),d)$ is a quasi-isomorphic subcomplex of $(\mathsf{ICG}(n),d)$. Moreover, $\mathrm{im}(\pi)\cong H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)$ as graded vector spaces. \end{proposition}
\begin{proof} Denote by $\pi_0:\mathsf{ICG}(n)\rightarrow \mathsf{ICG}(n)$ the projection onto $H^\bullet(\mathsf{ICG}(n),d_0)\cong H^\bullet(\hat{gr}\mathsf{ICG}(n),d_0)$, and by $h_0$ a homotopy between $\mathrm{Id}$ and $\pi_0$ for $d_0$. These exist by Corollary \ref{cor:existence}. Then Proposition \ref{prop:homotopy} ensures the existence of $\pi$ and the rest of the statement is an immediate consequence of Corollary \ref{Cor:isom}. \end{proof}
\begin{proposition} On $H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)$, one can define a differential $\nabla$ in such a way that the complex $(H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0),\nabla)$ is quasi-isomorphic to $(\mathsf{ICG}(n),d)$. \end{proposition}
\begin{proof} Denote the isomorphism of graded vector spaces relating $H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)$ to $\mathrm{im}(\pi)$ by $\Phi$, \begin{equation*} \Phi:H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)\overset{\cong}{\longrightarrow} \mathrm{im}(\pi) \end{equation*} To turn this into an isomorphism of chain complexes, we define a differential on $H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)$ by \begin{equation*} \nabla:=\Phi^{-1}\circ d\circ \Phi \end{equation*} Defined this way, $\nabla^2=0$, $\Phi$ commutes with the differentials and \begin{equation*} (H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0),\nabla)\overset{\Phi}{\cong} (\mathrm{im}(\pi),d) \end{equation*} as chain complexes. Since $(\mathrm{im}(\pi),d)\overset{incl}{\hookrightarrow} (\mathsf{ICG}(n),d)$ is a quasi-isomorphism, $(H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0),\nabla)$ is quasi-isomorphic to $(\mathsf{ICG}(n),d)$ as well.
\end{proof}
\begin{remark} Note that the differential $\nabla$ splits as $\nabla=\nabla_1+\nabla_2+\dots$ where applying $\nabla_i$ creates $i$ new internal loops. \end{remark}
\begin{remark}\label{remark:composition} Denote the compositions by \begin{align*} i:&H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)\cong \mathrm{im}(\pi) \overset{incl}{\hookrightarrow} \mathsf{ICG}(n)\\ p:&\mathsf{ICG}(n)\overset{\pi}{\longrightarrow} \mathrm{im}(\pi)\cong H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0). \end{align*} The compositions $pi$ and $ip$ are \begin{align*}
pi=&\Phi^{-1} \circ \pi \circ incl \circ \Phi=\mathrm{Id} \text{ as } \pi |_{\mathrm{im}(\pi)}=\mathrm{Id}\\ ip=&incl \circ \Phi \circ \Phi^{-1} \circ \pi= incl \circ \pi. \end{align*} Note that $i$ and $p$ are chain maps, that is they satisfy \begin{align*} i \nabla =& d i\\ \nabla p =& p d. \end{align*} Moreover, Proposition \ref{prop:homotopy} ensures that there is a homotopy $h$ between $\mathrm{Id}$ and $ip$, i.e. \begin{equation*} \mathrm{Id}-ip=dh+hd \end{equation*} In the setting above, $(H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0),\nabla)$ together with the chain maps $i$, $p$ and the homotopy $h$ form a homotopy retract of $(\mathsf{ICG}(n),d)$. \end{remark}
Applying the homotopy transfer theorem \ref{thm:homotopytransfer}, we readily obtain the following result. \begin{proposition} The $L_\infty$-structure on $\mathsf{ICG}(n)$ may be transferred to an $L_\infty$-structure on $H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)$ such that the map $i:H^\bullet(\hat{gr}\mathsf{ICG}(n),d_0)\hookrightarrow \mathsf{ICG}(n)$ may be extended to an $L_\infty$-quasi-isomorphism. \end{proposition}
\subsection{$\mathsf{ICG}$ and $H^{\bullet}(\hat{gr}\mathsf{ICG},d_0)$ as cosimplicial objects}
The family of $L_\infty$-algebras $\{\mathsf{ICG}(n)\}_{n\geq 1}$ together with the strict $L_\infty$-morphisms $\{\delta_j:\mathsf{ICG}(n)\rightarrow \mathsf{ICG}(n+1)\}_{j=0}^{n+1}$ and $\{s_j:\mathsf{ICG}(n)\rightarrow \mathsf{ICG}(n-1)\}_{j=1}^{n}$ for all $n\geq 0$ given by \begin{itemize} \item{$\delta_0$ (and $\delta_{n+1}$): add an additional external vertex labeled by $1$ ($n+1$) and raise the labels of the other external vertices by one (leave the labels invariant). } \item{$\delta_j$ for $j\neq 0, n+1$: split the $j$th vertex into two (rename them by $j$ and $j+1$) and sum over all ways of reconnecting the ``tangling" loose edges. The labels of the external vertices which were greater than $j$ are all raised by one. } \item{$s_j$: delete the $j$th external vertex and all edges connected to it. All labels of external vertices greater than $j$ get lowered by one. } \end{itemize} form a cosimplicial object in the category of $L_\infty$-algebras. Operadically, for $\Gamma\in \mathsf{ICG}(n)$, $\delta_j(\Gamma)=\Gamma \circ_j (\circ \hspace{0.3cm} \circ)$. For all $n$, we define a \emph{cosimplicial differential} $\delta:\mathsf{ICG}(n)\rightarrow \mathsf{ICG}(n+1)$ by \begin{equation*} \delta:=\sum\limits_{j=0}^{n+1}{(-1)^j \delta_j}. \end{equation*}
\begin{proposition}\label{prop:cosimplicial} On $\{H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)\}_{n\geq 1}$ we may define $L_\infty$-morphisms $\{\delta'_j:H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)\rightarrow H^{\bullet}(\hat{gr}\mathsf{ICG}(n+1),d_0)\}_{j=0}^{n+1}$ and $\{s'_j:H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)\rightarrow H^{\bullet}(\hat{gr}\mathsf{ICG}(n-1),d_0)\}_{j=1}^{n}$ for all $n\geq 0$ which turn $\{H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)\}_{n\geq 1}$ into a cosimplicial object in the category of $L_\infty$-algebras. \end{proposition}
\begin{lemma}\label{lemma:commutative} For all $n$, there exists a homotopy $h_n$ on $(\mathsf{ICG}(n),d)$ between $\mathrm{Id}$ and $ip$ that commutes with the cosimplicial maps $\{s_j\}_{j=1}^{n}$ and $\{\delta_j\}_{j=0}^{n+1}$, i.e. \begin{align*} h_{n+1}\delta_j=&\delta_j h_n\\ s_j h_n=& h_{n-1} s_j. \end{align*} \end{lemma}
Let us prove Proposition \ref{prop:cosimplicial} using Lemma \ref{lemma:commutative}.
\begin{proof}[Proof of Proposition \ref{prop:cosimplicial}] Lemma \ref{lemma:commutative} tells us that we have a family of homotopies $\{h_n\}_{n\geq 1}$ between $\mathrm{Id}$ and $ip$ that commute with the cosimplicial maps. To simplify notation, we shall omit the index $n$ for the homotopy. By Remark \ref{thm:Kontsevich-Soibelman} the maps $i$, $p$ may be extended to $L_\infty$-morphisms $\tilde{i}$, $\tilde{p}$. Note that $\tilde{i}\tilde{p}$ contains only compositions of the maps $h$, $\{l_n\}_{n\geq 2}$ and the composition $ip$, all of which commute with the cosimplicial maps. As $L_\infty$-maps they thus satisfy \begin{align*} \delta_j \tilde{i}\tilde{p}=\tilde{i}\tilde{p} \delta_j\\ s_j\tilde{i} \tilde{p}=\tilde{i}\tilde{p} s_j. \end{align*} Possible candidates for the cosimplicial maps on $ H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)$ are \begin{align*} \delta'_j:=&\tilde{p}\circ \delta_j \circ \tilde{i}\\ s'_j:=& \tilde{p}\circ s_j \circ \tilde{i}, \end{align*} where the composition is composition as $L_\infty$-maps. We need to check whether they satisfy the cosimplicial relations, i.e. for $i<j$ \begin{align*} \delta'_j\delta'_i=&\tilde{p}\circ \delta_j \circ \tilde{i}\circ \tilde{p}\circ \delta_i \circ \tilde{i}=\tilde{p}\circ \tilde{i} \circ \tilde{p}\circ \delta_j \circ \delta_i \circ \tilde{i}=\tilde{p}\circ \tilde{i} \circ \tilde{p}\circ \delta_i \circ \delta_{j-1} \circ \tilde{i}\\ =&\tilde{p}\circ \delta_i \circ \tilde{i} \circ \tilde{p}\circ \delta_{j-1} \circ \tilde{i}=\delta'_i\delta'_{j-1}. \end{align*} Analogously, for $i\leq j$, $s'_j s'_i=s'_is'_{j+1}$. The relations \begin{equation*} s'_j \delta'_i = \begin{cases} \delta'_i s'_{j-1} &\mbox{if } i<j \\ \mathrm{Id} & \mbox{if } i=j \text{ or } i=j+1\\ \delta'_{i-1} s'_j & \mbox{if } i>j+1 \end{cases} \end{equation*} follow from a similar easy computation. \end{proof}
\begin{proof}[Proof of Lemma \ref{lemma:commutative}] Fix $n,k\in \mathbb{N}$. Consider the space $\mathsf{ICG}^{1-val}(k)$ of internally connected graphs with $k$ \emph{univalent} external vertices. There is an obvious $S_k$-action which permutes the labels of the $k$ external vertices. This action extends to the direct sum \begin{equation*} J_{k,n}:=\bigoplus_{\substack{k_1\geq 0,\dots,k_n\geq 0 \\ \sum{k_i}=k}}{\mathsf{ICG}^{1-val}(k)}. \end{equation*} By Lemma \ref{lemma:G-action}, on the chain complex $(J_{k,n},d_0)$ there exists a projection $\pi_0$ and a homotopy $h_0$ between $\mathrm{Id}$ and $\pi_0$ which commute with this $S_k$-action. In particular, $\pi_0$ and $h_0$ restrict to \begin{equation*} I_{k,n}:=\bigoplus_{\substack{k_1\geq 0,\dots,k_n\geq 0 \\ \sum{k_i}=k}}{(\mathsf{ICG}^{1-val}(k))^{S_{k_1}\times\cdots\times S_{k_n}}} \end{equation*} Here, the action of $S_{k_1}\times\cdots\times S_{k_n}\subset S_k$ is obviously the induced one. We take a partition $\{k_1,\dots,k_n\}$ of the $k$ external edges and each $S_{k_i}$ will act only on the $k_i$ part by permutation. To see that $\pi_0$ and $h_0$ restrict to this space, let $\Gamma\in I_{k,n}$ and $\sigma\in S_{k_1}\times\cdots\times S_{k_n}$. Then, \begin{align*} \sigma . h_0(\Gamma)=h_0(\sigma. \Gamma)=h_0(\Gamma)&\Rightarrow h_0(\Gamma)\in I_{k,n}\\ \sigma . \pi_0(\Gamma)=\pi_0(\sigma. \Gamma)=\pi_0(\Gamma)&\Rightarrow \pi_0(\Gamma)\in I_{k,n}. \end{align*} In particular, this means that $\pi_0$ and $h_0$ preserve each $S_{k_i}$-invariant part. Denote by $\mathsf{ICG}(n)(k)$ the space of graphs with $n$ external vertices and $k$ edges connecting internal and external vertices. There is an isomorphism of chain complexes \begin{equation*} Sym: (\mathsf{ICG}(n)(k),d_0)\longrightarrow (I_{k,n},d_0) \end{equation*} Abbreviate the group $S_{k_1}\times\cdots\times S_{k_n}=:G(k_1,\dots,k_n)$. 
The map is given by \begin{equation*} Sym(\Gamma):=\frac{1}{k_1!\dots k_n!}\sum\limits_{\sigma \in G(k_1,\dots,k_n)}{\sigma.\tilde{\Gamma}} \end{equation*} where $\tilde{\Gamma}$ is obtained by assigning an external vertex to each edge connecting an internal vertex to an external one. An external vertex $i$ is thus sent to $k_i$ univalent external vertices, labeled by following the order of the $k_i$ incoming edges (for the symmetrization, the order in which the $k_i$ external vertices are labeled is actually irrelevant). Note that, because $d_0$ splits only internal vertices, $Sym$ is indeed an isomorphism of chain complexes, i.e. \begin{equation*} Sym(d_0\Gamma)=d_0 Sym(\Gamma). \end{equation*} For $j\in \{1,\dots,n\}$, the cosimplicial maps $\delta_j:\mathsf{ICG}(n)(k)\rightarrow \mathsf{ICG}(n+1)(k)$ are given by splitting the $j$-th external vertex and summing over all ways of reconnecting the ``tangling loose" edges. On $I_{k,n}$, the corresponding operations are given by maps $\tilde{\delta}_j$ satisfying \begin{equation*} \tilde{\delta}_j Sym(\Gamma)=Sym (\delta_j \Gamma) \end{equation*} for $\Gamma\in \mathsf{ICG}(n)(k)$. Explicitly, the right hand side is given by \begin{equation*} Sym (\delta_j \Gamma)=\sum\limits_{l=0}^{k_j}{\sum\limits_{\tau\in Unsh(l,k_j-l)}{\frac{1}{k_1!\dots l!(k_j-l)!\dots k_n!}\sum\limits_{\sigma\in G(k_1,\dots,k_{j-1},l,k_j-l,k_{j+1},\dots ,k_n)}{\sigma. \tau. \tilde{\Gamma}} }} \end{equation*} With this formula at hand, it is easy to see that $\pi_0$ and $h_0$ commute with $\tilde{\delta}_j$ on $I_{k,n}$. For this, let $\Gamma' \in I_{k,n}$. Then there exists a $\Gamma\in \mathsf{ICG}(n)(k)$ satisfying $\Gamma'=Sym(\Gamma)$ and \begin{align*} h_0 \tilde{\delta}_j(\Gamma') =& h_0\tilde{\delta}_j (Sym (\Gamma))=h_0 Sym(\delta_j \Gamma)\\ =& \sum\limits_{l=0}^{k_j}{\sum\limits_{\tau\in Unsh(l,k_j-l)}{\frac{1}{k_1!\dots l!(k_j-l)!\dots k_n!}\sum\limits_{\sigma\in G(k_1,\dots,k_{j-1},l,k_j-l,k_{j+1},\dots ,k_n)}{h_0 \sigma. \tau. 
\tilde{\Gamma}} }}\\ =&\sum\limits_{l=0}^{k_j}{\sum\limits_{\tau\in Unsh(l,k_j-l)}{\frac{1}{k_1!\dots l!(k_j-l)!\dots k_n!}\sum\limits_{\sigma\in G(k_1,\dots,k_{j-1},l,k_j-l,k_{j+1},\dots ,k_n)}{\sigma. \tau. h_0 \tilde{\Gamma}} }}\\ =&\tilde{\delta}_j h_0 (Sym(\Gamma))=\tilde{\delta}_j h_0 (\Gamma'). \end{align*} The proof that $\pi_0$ commutes with $\tilde{\delta}_j$ is analogous. Next, define a projection $\pi$ and a homotopy $h$ on $(\mathsf{ICG}(n)(k),d_0)$ via \begin{align*} \pi:=&Sym^{-1} \pi_0 Sym\\ h:=&Sym^{-1} h_0 Sym. \end{align*} Because $Sym$ is a chain map, $h$ is a homotopy between $\mathrm{Id}$ and $\pi$ with respect to the differential $d_0$. Moreover, $\pi$ and $h$ commute with the cosimplicial maps $\delta_j$. For $\Gamma \in \mathsf{ICG}(n)(k)$, \begin{align*} &h_0\tilde{\delta}_j Sym(\Gamma)=h_0 Sym(\delta_j \Gamma)=Sym(h\delta_j\Gamma)\\ =&\tilde{\delta}_j h_0 Sym(\Gamma)=\tilde{\delta}_j Sym(h \Gamma)=Sym(\delta_j h \Gamma) \end{align*} Using the fact that $Sym$ is an isomorphism, we find, \begin{equation*} \delta_j h=h \delta_j. \end{equation*} Analogously, one can show \begin{equation*} \pi\delta_j=\delta_j\pi. \end{equation*} Remark that because $h_0$ and $\pi_0$ preserve the $S_{k_i}$-invariant parts of some $Sym(\Gamma)\in (\mathsf{ICG}^{1-val}(k))^{S_{k_1}\times\cdots\times S_{k_n}}$, $h$ and $\pi$ will preserve the $k_i$ edges connected to the $i$th external vertex of $\Gamma$, for all $i$ (as in, after applying $h$ or $\pi$ the images of these $k_i$ edges will be connected to the image of the external vertex $i$). Also note that $\pi$ and $h$ correspond to the chain complex $(\mathsf{ICG}(n),d_0)$ (note that $\mathsf{ICG}(n)$ is the direct product over $k\geq 1$ of all $\mathsf{ICG}(n)(k)$), and \emph{not} to $(\mathsf{ICG}(n),d)$. However, by Proposition \ref{prop:homotopy}, we can extend these two maps to $(\mathsf{ICG}(n),d)$. Call them $H$ and $P$. These extensions are constructed using only maps which commute with the $\delta_j$. 
Therefore the extended projection and homotopy will still commute with the cosimplicial maps.
Note that $\pi$ and $h$ preserve the $k_j$ edges connecting internal to external vertices. Thus for $j=0$, \begin{equation*} h\delta_0 \Gamma=h (\underset{1}{\circ} \hspace{0.3cm} \Gamma)=\underset{1}{\circ} \hspace{0.3cm} h \Gamma= \delta_0 h \Gamma \end{equation*} and analogously for $\pi$. Therefore $h \delta_0=\delta_0 h$, $\pi \delta_0=\delta_0 \pi$. Similarly, this holds also for $j=n+1$.
The $s_j$ maps are given by simply forgetting the $j$th external vertex and all edges connected to it. Again, as the homotopy $h$ and the projection preserve the edges connected to external vertices, \begin{align*} h s_j=&s_j h\\ \pi s_j=& s_j \pi \end{align*} for all $j\in\{1,\dots,n\}$. Also, by construction, the extended homotopy $H$ and projection $P$ commute with the maps $s_j$.
As in Remark \ref{remark:composition}, denote by $i$ and $p$ the compositions \begin{align*} i:&H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0)\cong \mathrm{im}(P) \overset{incl}{\hookrightarrow} \mathsf{ICG}(n)\\ p:&\mathsf{ICG}(n)\overset{P}{\longrightarrow} \mathrm{im}(P)\cong H^{\bullet}(\hat{gr}\mathsf{ICG}(n),d_0). \end{align*} The extended homotopy $H$ is a homotopy between $\mathrm{Id}$ and $ip$ satisfying the properties requested in Lemma \ref{lemma:commutative}. \end{proof}
\section{The Kashiwara-Vergne Lie algebra} \subsection{A spectral sequence leading to the Kashiwara-Vergne Lie algebra} Consider the spectral sequence obtained through the filtration by internal loops. We find that the first page $E_1^{\bullet,\bullet}$ is exactly the aforementioned cohomology of the associated graded complex with respect to the differential $d_0$, that is, \begin{equation*} E_1^{p,q}=H^{p+q}(gr \mathsf{ICG}(n)^p,d_0). \end{equation*} P. Severa and T. Willwacher explain in \cite{Severawillwacher2011} that $H^{0}(\hat{gr}\mathsf{ICG}(n)^0,d_0)$, which consists of internally trivalent trees in $\mathsf{ICG}(n)$ modulo the IHX relation, can be identified (as a Lie algebra) with the Lie algebra of special derivations (for an introduction see \cite{Alekseev2012}). In formulas, \begin{equation*} E_1^{0,0}=H^{0}(\hat{gr}\mathsf{ICG}(n)^0,d_0)\cong \mathfrak{sder}_n. \end{equation*} We give the isomorphism in the appendix. This result already appeared in some form in V. Drinfeld's paper \cite{Drinfeld1991}. The Lie bracket on $E_1^{0,0}$ is given by identifying external vertices, summing over all ways of splitting external vertices without creating new internal loops and then keeping only internally connected, internally trivalent trees. This is justified in the following remark.
\begin{remark} Denote by $m_2:E_1^{\bullet,\bullet}\otimes E_1^{\bullet,\bullet} \rightarrow E_1^{\bullet,\bullet}$ the arity two component of the $L_\infty$-structure on the cohomology of the associated graded. In terms of the structure on $\mathsf{ICG}(n)$, it is given by \begin{equation*} m_2=p\circ [-,-]\circ i^{\otimes 2}. \end{equation*} Denote by $[-,-]_{Ih}$ the projection to $E_1^{0,0}$ of the image of $m_2$ restricted to $E_1^{0,0}\otimes E_1^{0,0}$. As it corresponds to the bracket on $\mathfrak{sder}_n$ (which is sometimes named after Y. Ihara), we shall refer to it as the Ihara bracket. It is thus a map \begin{equation*} [-,-]_{Ih}:E_1^{0,0}\otimes E_1^{0,0}\rightarrow E_1^{0,0}. \end{equation*} For $\overline{x_0},\overline{y_0}\in E_1^{0,0}$, $[\overline{x_0},\overline{y_0}]_{Ih}=\overline{[x_0,y_0]_0}$. Here $[-,-]_0$ is the term of the bracket $[-,-]$ on $\mathsf{ICG}(n)$ that does not create any new loops. To see this, first note that \begin{align*} i(\overline{x_0})&=x_0+x_1+\dots\\ i(\overline{y_0})&=y_0+y_1+\dots \end{align*} Then \begin{equation*} [i(\overline{x_0}),i(\overline{y_0})]=[x_0,y_0]_0+[x_1,y_0]_0+[x_0,y_1]_0+\dots \end{equation*} Applying the projection $p$ we obtain $m_2(\overline{x_0}\otimes \overline{y_0})=\overline{[i(\overline{x_0}),i(\overline{y_0})]}\in E_1^{\bullet,\bullet}$. The only term in $E_1^{0,0}$ is $\overline{[x_0,y_0]_0}$, which therefore equals $[\overline{x_0},\overline{y_0}]_{Ih}$ by definition. Since $[-,-]_0$ is a Lie bracket on $\mathsf{ICG}(n)$, $[-,-]_{Ih}$ defines a Lie bracket on $E_1^{0,0}$. \end{remark}
Furthermore, also from \cite{Severawillwacher2011}, we know that the internally trivalent one-loop part of $\mathsf{ICG}(n)$ modulo IHX (given by $H^{1}(\hat{gr}\mathsf{ICG}(n)^1,d_0)$) is isomorphic to cyclic words in $n$ letters, denoted by $\mathfrak{tr}_n$ in \cite{Alekseev2012}, modulo the relation \begin{equation*} tr(w)=-(-1)^{length(w)}tr(\tilde{w}) \end{equation*} where $\tilde{w}$ corresponds to the word $w$ but read backwards. We will denote the space of cyclic words modulo this relation by $\mathfrak{tr}_n^{(1)}$, i.e. \begin{equation*} E_1^{1,0}=H^{1}(\hat{gr}\mathsf{ICG}(n)^1,d_0)\cong \mathfrak{tr}_n^{(1)}. \end{equation*} Moreover, \v Severa and Willwacher show in (\cite{Severawillwacher2011} Proposition 5.) that there is an injective map $\mathfrak{tr}_n^{(1)}\hookrightarrow \mathfrak{tr}_n$ (see the appendix) making the diagram \begin{center} \begin{tikzcd}
E_1^{0,0}\arrow{r}{\cong}\arrow{d}{\nabla_1}
& \mathfrak{sder}_n \arrow{d}{\text{div}} \\
E_1^{1,0} \arrow[hookrightarrow]{r}
& \mathfrak{tr}_n. \end{tikzcd} \end{center} commute. Here, $\text{div}:\mathfrak{sder}_n\rightarrow \mathfrak{tr}_n$ is the ``divergence map'' defined by A. Alekseev and C. Torossian in \cite{Alekseev2012}. In particular, $E_2^{0,0}=\ker\nabla_1\cong \ker\text{div}$ is a Lie algebra, as shown in \cite{Alekseev2012}.
\begin{definition} The \emph{Kashiwara-Vergne Lie algebra} is \begin{equation*}
\mathfrak{krv}_n:=\ker\text{div}=\{x\in \mathfrak{sder}_n| \text{div}(x)=0\in \mathfrak{tr}_n\}. \end{equation*} \end{definition}
Since $\mathfrak{krv}_n\cong \ker\nabla_1=\{\overline{x}\in E_1^{0,0}|\nabla_1 (\overline{x})=0\in E_1^{1,0}\}$, all information determining $\mathfrak{krv}_n$ is given by an equation involving internally trivalent trees and internally trivalent one-loop graphs. In what follows, we extend this notion to higher loop orders. Note that for $r\in \mathbb{N}$ (see for instance \cite{weibel1995}) \begin{equation*}
E_r^{0,0}=\dfrac{\{x=x_0+x_1+x_2+\dots \in \mathsf{ICG}(n)| x_i \text{ graph with exactly } i \text{ loops}, \text{ } \deg(x)=0, \text{ } dx=0 \mod r \text{ loops}\}}{Q} \end{equation*} where \begin{align*}
Q:=&\{dy| y\in \mathsf{ICG}(n), \text{ } \deg(y)=-1\}\\
+&\{ x=x_1+x_2+\dots \in \mathcal{F}^1\mathsf{ICG}(n)|x_i \text{ graph with exactly } i \text{ loops}, \text{ } \deg(x)=0, \text{ } dx=0 \mod r \text{ loops}\}. \end{align*}
\begin{lemma} The map \begin{align*} i_{r}:E_r^{0,0}&\rightarrow E_1^{0,0}\cong \mathfrak{sder}_n\\ \overline{x}=\overline{x_0+x_1+\dots} &\mapsto \overline{x_0} \end{align*} is injective. \end{lemma}
\begin{proof} Let $x=x_0+x_1+\dots\in \mathsf{ICG}(n)$, $\deg(x)=0$, $dx=0 \mod r$ loops and assume $i_r(\overline{x})=\overline{x_0}=\overline{0}$, that is, $x_0=d_0y_0$ for some tree $y_0$ of degree $-1$. Set $\tilde{x}:=x-dy_0$. It satisfies $d\tilde{x}=dx-0=0 \mod r$ loops and $\tilde{x}=x_0+x_1+\dots-d_0y_0 \in \mathcal{F}^1\mathsf{ICG}(n)$ (all elements have at least one internal loop). Therefore $x=x-dy_0+dy_0=\tilde{x}+dy_0$ and $\overline{x}=\overline{0}\in E_r^{0,0}$. \end{proof}
\begin{definition} We set \begin{equation*} \mathfrak{krv}_n^{(k)}:=i_{k+1}(E_{k+1}^{0,0}). \end{equation*} \end{definition} More explicitly, $\mathfrak{krv}_n^{(k)}$ consists of classes $\overline{x_0}\in E_1^{0,0}$ for which there are graphs $x_1,x_2,\dots \in \mathsf{ICG}(n)$ (where $x_i$ has exactly $i$ loops) of degree zero such that $x=x_0+x_1+\cdots \in \mathsf{ICG}(n)$ satisfies $dx=0\mod k+1$ loops. \begin{lemma} The new definition extends our previous notion of the Kashiwara-Vergne Lie algebra in the sense that $\mathfrak{krv}_n^{(1)}\cong \mathfrak{krv}_n$. \end{lemma}
\begin{proof} Note that $\mathfrak{krv}_n^{(1)}$ consists of $\overline{x_0}\in E_1^{0,0}$ which may be extended to a degree zero element $x=x_0+x_1+x_2+\dots \in \mathsf{ICG}(n)$ satisfying $dx=0 \mod 2$ internal loops. This equation is equivalent to $d_0x_0=0$ (which is satisfied by the definition of $x_0$) and $d_1x_0+d_0x_1=0 \in \mathsf{ICG}(n)$. To prove the statement, let $\overline{x_0}\in E_1^{0,0}$. Then \begin{align*} \nabla \overline{x_0}=&\nabla_1 \overline{x_0}+ \nabla _2 \overline{x_0}+\dots=\nabla p i \overline{x_0}=p d i \overline{x_0}=pd(x_0+x_1+x_2+\dots)\\ =&p (d_1x_0+d_0x_1+\dots)=\overline{d_1x_0+d_0x_1+\dots} \end{align*} This is an equation in $E_1^{\bullet,\bullet}$. Consider its $E_1^{1,0}$ component. It is given by \begin{equation*} \nabla_1\overline{x_0}=\overline{d_1x_0+d_0x_1}=\overline{d_1x_0}. \end{equation*} Therefore, $\ker \text{div}\cong\ker\nabla_1\cong \mathfrak{krv}_n^{(1)}$. \end{proof}
We obtain a sequence of inclusions \begin{equation*} \dots \subset \mathfrak{krv}^{(k)}_n\subset \mathfrak{krv}_n^{(k-1)}\subset \dots \subset \mathfrak{krv}^{(2)}_n\subset \mathfrak{krv}_n\subset \mathfrak{sder}_n. \end{equation*}
\begin{proposition} (\cite{Severawillwacher2011}) The subspaces $\mathfrak{krv}^{(k)}_n$ are Lie subalgebras of $\mathfrak{sder}_n$ for all $k\geq 1$. \end{proposition}
\begin{proof} The Ihara bracket of $\overline{x_0},\overline{y_0}\in E_1^{0,0}$ coincides with $\overline{[x_0,y_0]_0}$, where $[-,-]_0$ is the component of the bracket on $\mathsf{ICG}(n)$ which does not produce any new loops. To prove the claim, let $\overline{x_0},\overline{y_0}\in \mathfrak{krv}_n^{(k)}$. Denote their extensions by $x=x_0+x_1+\dots$ and $y=y_0+y_1+\dots$. We claim that $[x,y]$ is a suitable extension of the bracket $[\overline{x_0},\overline{y_0}]_{Ih}$. Indeed, \begin{equation*} [x,y]=[x_0,y_0]_0+[x_1,y_0]_0+[x_0,y_1]_0+\dots, \end{equation*} where $[x_1,y_0]_0+[x_0,y_1]_0$ are already graphs of loop order 1, and \begin{equation*} d[x,y]=[dx,y]+[x,dy]=0+0 \mod k+1 \text{ loops}. \end{equation*} \end{proof}
\begin{definition} The \emph{Drinfeld-Kohno Lie algebra} $\mathfrak{t}_n$ is generated by elements $t^{i,j}=t^{j,i}$, where $1\leq i,j \leq n$ and relations \begin{align*} [t^{i,j},t^{k,l}]=&0 \text{ if } \#\{i,j,k,l\}=4,\\ [t^{i,j}+t^{i,k},t^{j,k}]=&0 \text{ for } \#\{i,j,k\}=3. \end{align*} \end{definition}
\begin{remark} As shown in \cite{Severawillwacher2011}, the aforementioned spectral sequence converges to the Drinfeld-Kohno Lie algebra, more precisely $\mathfrak{t}_n\cong E_\infty^{0,0}$. A generator $t^{i,j}$ is mapped to the equivalence class represented by the graph with no internal vertices and one edge connecting the external vertices $i$ and $j$. In particular, this implies that \begin{equation*} \bigcap\limits_{k\geq 1} \mathfrak{krv}^{(k)}_n\cong \mathfrak{t}_n. \end{equation*} \end{remark}
\begin{remark} Most of the material presented in this section already appeared in some form in P. \v{S}evera and T. Willwacher's paper \cite{Severawillwacher2011}. Our aim was to give an explicit description of the Lie algebras $\mathfrak{krv}_n^{(k)}$ at which they hinted in their work. Moreover, the techniques developed here will be useful in the next section. \end{remark}
\subsection{The extended Kashiwara-Vergne Lie algebra}
For $n=2$, A. Alekseev and C. Torossian defined in \cite{Alekseev2012} the following extension of $\mathfrak{krv}_2$, \begin{equation*}
\widehat{\mathfrak{krv}}_2:=\{x\in \mathfrak{sder}_2| \text{div} (x)=tr(f(u)-f(u+v)+f(v)) \text{ for some } f(u)=\sum\limits_{k=2}^\infty{f_k u^k}\}. \end{equation*} They show that this is a Lie subalgebra of $\mathfrak{sder}_2$. In fact, $[\widehat{\mathfrak{krv}}_2,\widehat{\mathfrak{krv}}_2]\subset \mathfrak{krv}_2$. Moreover, they prove that for $x\in \widehat{\mathfrak{krv}}_2$ the corresponding power series $f$ is odd, i.e. $f_k=0$ for $k$ even. In particular this implies that $tr(f)$ corresponds to some linear combination of internally trivalent one-loop graphs under the injective map $E_1^{1,0}\cong \mathfrak{tr}_1^{(1)}\hookrightarrow \mathfrak{tr}_1$. On the level of graphs, it is not difficult to see that the map $\delta_{AT}:tr(f)\mapsto tr(f(u)-f(u+v)+f(v))$ corresponds to applying the cosimplicial differential $\delta':=p\circ \delta \circ i: \mathfrak{tr}_1^{(1)}\rightarrow \mathfrak{tr}_2^{(1)}$ to the graph associated to $tr(f)$. Including the vertex splitting differential $d$ and $\nabla$, the global picture is encoded in the following commutative diagram.
\begin{displaymath}
\xymatrix{ \mathsf{ICG}(2) \ar@<2pt>[r]^p \ar[d]^{d_0+d_1} & E_1^{0,0} \ar@<2pt>[l]^i \ar[d]^{\nabla_1} \ar[r]^{\cong} & \mathfrak{sder}_2 \ar[d]^{\text{div}}\\
\mathsf{ICG}(2) \ar@<2pt>[r]^p & E_1^{1,0}\cong \mathfrak{tr}_2^{(1)} \ar@<2pt>[l]^i \ar@{^{(}->}[r] & \mathfrak{tr}_2\\
\mathsf{ICG}(1) \ar@<2pt>[r]^p \ar[u]_\delta & E_1^{1,0}\cong \mathfrak{tr}_1^{(1)} \ar@<2pt>[l]^i \ar@{^{(}->}[r] \ar[u]_{\delta'} & \mathfrak{tr}_1\ar[u]_{\delta_{AT}} } \end{displaymath} The diagram implies the following equalities. \begin{align*}
\widehat{\mathfrak{krv}}_2=&\{x\in \mathfrak{sder}_2| \text{div} (x)=tr(f(u)-f(u+v)+f(v)) \text{ for some } f(u)=\sum\limits_{k=2}^\infty{f_k u^k}\}\\
=&\{\overline{x}\in E_1^{0,0}\cong \mathfrak{sder}_2| \nabla_1 (\overline{x})=\delta'(f) \text{ for some } f\in \mathfrak{tr}_1^{(1)}\}\\
=&\{x\in \mathfrak{sder}_2| \exists X \in \mathsf{ICG}(2): \deg(X)=0 \text{, }X=x+x_1+\dots\\
&\text{ and } d_1x+d_0x_1=\delta Y \text{ for some } Y \in \mathsf{ICG}(1)\}\\
=&\{x\in \mathfrak{sder}_2| \exists X \in \mathsf{ICG}(2): \deg(X)=0 \text{, }X=x+x_1+\dots\\
&\text{ and } dX=\delta Y \mod 2 \text{ internal loops} \text{ for some } Y \in \mathsf{ICG}(1)\}. \end{align*}
As an extension of $\widehat{\mathfrak{krv}}_2$ we suggest, \begin{equation*}
\widehat{\mathfrak{krv}}_2^{(k)}:=\{x\in\mathfrak{sder}_2|\exists X \in \mathsf{ICG}(2): \deg(X)=0 \text{, }[X]=x
\text{ and } dX=\delta Y \mod k+1 \text{ internal loops} \text{ for some } Y \in \mathsf{ICG}(1)\} \end{equation*} By $[X]=x$ we mean that the tree part of $X$ is $x$ (for some choice of representative of the class of $x\in \mathfrak{sder}_2$, by abuse of notation), i.e. $X$ may be decomposed as \begin{equation*} X=x+x_1+x_2+x_3+\dots \end{equation*} with $x_i$ having $i$ internal loops. The equation $dX=\delta Y \mod k+1 \text{ internal loops}$ means that the equation holds up to loop order $k+1$, i.e. we discard all graphs having more than $k$ internal loops appearing on either side of the equation. Note that $\widehat{\mathfrak{krv}}_2=\widehat{\mathfrak{krv}}_2^{(1)}$. Again, there is a filtration \begin{equation*} \cdots\subset \widehat{\mathfrak{krv}}_2^{(k)}\subset \widehat{\mathfrak{krv}}_2^{(k-1)}\subset \cdots \subset \widehat{\mathfrak{krv}}^{(2)}_2\subset \widehat{\mathfrak{krv}}_2\subset \mathfrak{sder}_2. \end{equation*} Our main result is \begin{theorem}\label{Thm:krvhat} For all $k\geq 1$, $\widehat{\mathfrak{krv}}^{(k)}_2$ is a Lie subalgebra of $\mathfrak{sder}_2$. \end{theorem} For the proof we need a few additional tools and results from the theory of graph complexes.
\subsection{The graph complex $\mathsf{GC}_2$} The graph complex $\mathsf{GC}_2$ is a variant of M. Kontsevich's graph complex (\cite{Kontsevich1993},\cite{Kontsevich1994},\cite{Kontsevich1997}). We follow T. Willwacher's paper \cite{Willwacher2014}. \begin{definition} Let $\Gamma$ be an undirected graph with $N$ labeled vertices and $k$ edges satisfying the following properties: \begin{enumerate}
\item{All vertices have valence at least three.}
\item{There is a linear order on the set of edges.}
\item{$\Gamma$ has no simple loops.} \end{enumerate}
We denote by $\mathsf{Gra_2}(N,k)$ the graded vector space spanned by isomorphism classes of connected graphs satisfying the conditions above, modulo the relation $\Gamma\cong(-1)^{|\sigma|}\Gamma^{\sigma}$, where $\Gamma^\sigma$ differs from $\Gamma$ just by a permutation $\sigma\in S_k$ on the order of the edges. The degree of such a graph $\Gamma$ is given by \begin{equation*} \deg_{\mathsf{Gra_2}}\Gamma=-k. \end{equation*} \end{definition} Set, \begin{equation*} \mathsf{Gra_2}(N):=\bigoplus\limits_{k\geq 0}\mathsf{Gra_2}(N,k). \end{equation*} The collection $\{\mathsf{Gra_2}(N)\}_{N\geq 1}$ naturally defines an operad $\mathsf{Gra_2}$ in the category of graded vector spaces. For $\Gamma\in \mathsf{Gra_2}(N)$, the $S_N$-action permutes the labels of the vertices. For $r,s\geq 1$, $\Gamma_1\in \mathsf{Gra_2}(r)$ and $\Gamma_2\in \mathsf{Gra_2}(s)$, the operadic composition $\Gamma_1\circ_j\Gamma_2\in \mathsf{Gra_2}(r+s-1)$ is given by inserting the graph $\Gamma_2$ at vertex $j$ of $\Gamma_1$ and summing over all ways of reconnecting the edges incident to vertex $j$ in $\Gamma_1$ to vertices of $\Gamma_2$. As in the case of $\mathsf{ICG}$, we ask that the order on the set of edges of $\Gamma_1\circ_j \Gamma_2$ is such that all edges of $\Gamma_1$ come before those of $\Gamma_2$ while the respective orderings are left unaltered. Next, define, \begin{equation*} \mathsf{GC}_2:=\prod\limits_{N\geq 1} \left( \mathsf{Gra_2}(N)[2-2N]\right)^{S_N}. \end{equation*} The space $\mathsf{GC}_2$ carries the structure of a differential graded Lie algebra. The degree of a graph $\Gamma\in \mathsf{GC}_2$ with $k$ edges and $N$ vertices is \begin{equation*} \deg\Gamma=-2-k+2N. \end{equation*} For the Lie bracket, consider the operadic pre-Lie product on $\mathsf{Gra_2}$, \begin{equation*} \Gamma_1\circ\Gamma_2=\sum\limits_{j=1}^{r} \Gamma_1\circ_j\Gamma_2. 
\end{equation*} Using this, the Lie bracket on $\mathsf{GC}_2$ is defined on homogeneous elements via, \begin{equation*} [\Gamma_1,\Gamma_2]:=\Gamma_1\circ\Gamma_2-(-1)^{\deg\Gamma_1\cdot\deg\Gamma_2}\Gamma_2\circ\Gamma_1. \end{equation*} The differential $d$ is given by vertex splitting, where again we ask that the newly created edge is placed last in the ordering of the edges.
\begin{remark}
More generally, one defines $\mathsf{Gra}_n$ for any $n$ by setting the degree of each edge to be $1-n$. Thus, a graph $\Gamma\in \mathsf{Gra}_n(N,k)$ has degree $\deg_{\mathsf{Gra}_n}\Gamma=(1-n)k$. Also, the equivalence relation given by the ordering on the set of edges becomes $\Gamma\cong (-1)^{|\sigma|(n-1)}\Gamma^{\sigma}$. Thus, when $n$ is odd, permuting the order of the edges does not produce any signs. However, in the $n$ odd case, we additionally ask that the edges are directed. For $\Gamma\in \mathsf{Gra}_n(N,k)$, there is then a natural $S_2^k$-action given by flipping the directions of the edges. In this case, we identify a graph with an edge direction flipped with minus the original graph. Moreover, one then defines, \begin{equation*} \mathsf{GC}_n:=\begin{cases} \prod\limits_{N\geq 1} \left( \mathsf{Gra}_n(N)[n(1-N)]\right)^{S_N} & n \text{ even,} \\ \prod\limits_{N\geq 1} \left( \mathsf{Gra}_n(N)\otimes sgn_N [n(1-N)]\right)^{S_N} & n \text{ odd}. \end{cases} \end{equation*} Here, $sgn_N$ denotes the one-dimensional representation of $S_N$. We will only be interested in the $n=2$ case. For more details, we refer to (\cite{Willwacher2014}, Section 3.). \end{remark}
\begin{remark} There is a map \begin{align}\label{eq:onemap} (-)_1:\mathsf{GC}_2&\rightarrow \mathsf{graphs}(1)\\ \nonumber\gamma&\mapsto \gamma_1 \end{align} given by marking vertex $1$ as ``external". For $\Gamma_1\in \mathsf{graphs}(1)$ and $\Gamma_r\in \mathsf{graphs}(r)$, $r\in \mathbb{N}$, let \begin{equation*} \Gamma_1\cdot \Gamma_r:=\Gamma_1\circ_1\Gamma_r-(-1)^{\deg \Gamma_1\cdot \deg \Gamma_r}\sum\limits_{j=1}^{r}{\Gamma_r\circ_j\Gamma_1.} \end{equation*} be an action of $\mathsf{graphs}(1)$ on $\mathsf{graphs}(r)$. \end{remark}
\begin{lemma}\label{lemma:operad} The action defined above satisfies the identity \begin{equation} \gamma\cdot (\gamma' \cdot \Gamma)-(-1)^{\deg \gamma\cdot \deg \gamma'}\gamma' \cdot (\gamma \cdot \Gamma)= (\gamma \cdot \gamma' -(-1)^{\deg \gamma\cdot \deg \gamma'}\gamma'\cdot \gamma)\cdot \Gamma \end{equation} for all $\gamma,\gamma' \in \mathsf{graphs}(1)$ and $\Gamma \in \mathsf{graphs}(r)$, $r\in \mathbb{N}$. \end{lemma}
\begin{remark} Note that for any operad in the category of cochain complexes $\mathcal{P}$, $\mathcal{P}(1)$ together with the operadic composition forms a graded algebra. Moreover, $\mathcal{P}(1)$ acts on $\mathcal{P}$ via \begin{equation*} a\cdot b := a \circ_1 b - (-1)^{\deg a\cdot \deg b}\sum\limits_{j=1}^r {b\circ_j a} \end{equation*} for any $r\in \mathbb{N}$. The identity in Lemma \ref{lemma:operad} holds also in this case. Its proof is a simple computation and we refer to (\cite{dolgushev2012}, Section 6.1.). \end{remark}
Let $r\in\mathbb{N}$. Following \cite{Willwacher2014}, we define an action of $\mathsf{GC}_2$ on $\mathsf{graphs}(r)$ by \begin{equation*} \gamma \bullet \Gamma:=\gamma_1\cdot \Gamma +\sum\limits_{v}\Gamma \circ_v \gamma=\gamma_1\circ_1\Gamma-(-1)^{\deg \gamma_1\cdot \deg\Gamma}\sum\limits_{j=1}^{r}{\Gamma\circ_j\gamma_1}+\sum\limits_{v}\Gamma \circ_v \gamma, \end{equation*} for $\gamma\in \mathsf{GC}_2$ and $\Gamma\in \mathsf{graphs}(r)$. The composition $\Gamma\circ_v \gamma$ is constructed by ``inserting'' $\gamma$ into the internal vertex $v$ in $\Gamma$ and summing over all ways of reconnecting edges incident to $v$ to vertices of $\gamma$. This action is compatible with the differentials on $\mathsf{graphs}$ and $\mathsf{GC}_2$, i.e. \begin{equation*} d(\gamma \bullet \Gamma)=(d\gamma_1)\cdot \Gamma+\gamma_1\cdot (d\Gamma) +\sum\limits_{v}(d\Gamma) \circ_v \gamma+\sum\limits_{v}\Gamma \circ_v (d\gamma). \end{equation*} \begin{remark}\label{remark:irreducible} Denote by $\mathsf{GC}_2^{1-vi}$ the subcomplex of $(\mathsf{GC}_2,d)$ spanned by 1-vertex irreducible graphs (that is, graphs which remain connected after deletion of any of their vertices). As shown in \cite{Conant2005}, the subcomplex $\mathsf{GC}_2^{1-vi}$ is quasi-isomorphic to $\mathsf{GC}_2$. Also, note that the map $(-)_1$ restricted to $\mathsf{GC}_2^{1-vi}$ maps to internally connected graphs $\mathsf{ICG}(1)$.
\begin{remark}\label{remark:C} Denote by $(C,d)$ the subcomplex of $(\mathsf{ICG}(1),d)$ spanned by graphs having only one edge incident to the unique external vertex.
It follows from (\cite{Willwacher2014}, Proposition 6.13.) that \begin{equation*} H^0(\mathsf{GC}_2,d)\cong H^2(C,d). \end{equation*} On the level of the corresponding cochain complexes, the map inducing this isomorphism has the simple combinatorial form \cite{Willwacher2014} \begin{align*} F: \mathsf{GC}_2^{1-vi}&\rightarrow C\\ \Gamma&\mapsto (\underset{1}{\circ} \text{---} \underset{2}{\circ}) \circ_2 \Gamma. \end{align*} It preserves the number of loops and thus if we denote by $H^2(C,d)^{(l)}$ and $H^0(\mathsf{GC}_2,d)^{(l)}$ the $l$-loop parts, we still have an isomorphism \begin{equation} H^0(\mathsf{GC}_2,d)^{(l)}\cong H^2(C,d)^{(l)} \end{equation} for all $l\geq 1$. In particular, we have the following. \end{remark}
\begin{lemma}\label{lemma:loops} For $l\geq 1$, given $Z\in\mathsf{ICG}(1)$ satisfying \begin{equation*} Z \mod l+1 \text{ loops } \in C\text{, } \deg(Z)=2\text{, } dZ=0\mod l+1 \text{ loops,} \end{equation*} there exist a $Z'\in C$ and a (1-vertex irreducible) $\Gamma\in H^0(\mathsf{GC}_2)$ such that $Z+dZ' =(\underset{1}{\circ} \text{---} \underset{2}{\circ}) \circ_2 \Gamma \mod l+1$ loops. \end{lemma}
\begin{proof} The conditions on $Z$ imply that it represents a cohomology class in $\bigoplus\limits_{k=1}^{l}{H^2(C,d)^{(k)}}.$ This class corresponds to the class of some $\Gamma\in \mathsf{GC}_2$ of degree $0$ in $\bigoplus\limits_{k=0}^{l}{H^0(\mathsf{GC}_2,d)^{(k)}}\subset H^0(\mathsf{GC}_2,d)$ under the isomorphism which sends $\Gamma$ to $(\underset{1}{\circ} \text{---} \underset{2}{\circ}) \circ_2 \Gamma$. Therefore, there must be some $Z' \in C$ such that $Z+dZ'=(\underset{1}{\circ} \text{---} \underset{2}{\circ}) \circ_2 \Gamma \mod l+1$ loops. By Remark \ref{remark:irreducible}, we may assume that $\Gamma$ is 1-vertex irreducible. \end{proof}
\begin{lemma}\label{lemma:kerdelta} It is true that $\ker(\delta:\mathsf{ICG}(1)\rightarrow \mathsf{ICG}(2))=C$. \end{lemma}
\begin{proof} An easy graphical calculation shows that $C \subset \ker\delta$. For the other inclusion, let $f\in \ker \delta$, and let the external vertex be of valence $k$. Then, $\delta_0 f+\delta_2 f=\delta_1 f$. Define a linear map $\Delta:\mathsf{ICG}(2)\rightarrow \mathsf{ICG}(1)$ given by simply merging the two external vertices into one (and keeping all incident edges). Applying this map to our equation yields, $2^k f=2 f$. This implies $k=1$, and thus $f\in C$. \end{proof}
\begin{lemma}\label{lemma:crucial} Fix $k\geq 1$. Let $x\in \widehat{\mathfrak{krv}}^{(k)}_2$. By definition, there exists an $X\in \mathsf{ICG}(2)$ such that $[X]=x$ and $dX=\delta Y \mod k+1$ internal loops for some $Y\in \mathsf{ICG}(1)$ . Denote by $(-)_1: \mathsf{GC}_2 \rightarrow \mathsf{graphs}(1)$ the map defined in equation \eqref{eq:onemap}. It is given by marking vertex $1$ as ``external". In this setting, there exist an $X'\in \mathsf{ICG}(2)$ and a $\Gamma\in \mathsf{GC}_2$ (1-vertex irreducible, of degree $0$ and satisfying $d\Gamma=0$) such that \begin{align*} [X']=&x\\ dX'=&\delta (\Gamma)_1 \mod k+1 \text{ internal loops}. \end{align*} \end{lemma}
\begin{proof} It follows from Lemma \ref{lemma:loops} that there is a $Y'\in \mathsf{ICG}(1)$ satisfying $dX=\delta Y'\mod k+1$ internal loops and a 1-vertex irreducible $\Gamma \in \mathsf{GC}_2$ such that $d\Gamma=0$ and $dY'=(\underset{1}{\circ} \text{---} \underset{2}{\circ}) \circ_2 \Gamma \mod k+1$ internal loops. To see this, note that the equation $dX=\delta Y \mod k+1$ loops implies in particular via \begin{equation*}
0=d^2X=d\delta Y=-\delta dY \mod k+1 \text{ internal loops } \end{equation*} that $dY\mod k+1 \text{ loops }=:Z$ is in $\ker(\delta)$. Lemma \ref{lemma:kerdelta} implies $Z \mod k+1 \text{ loops }=Z \in C$. Moreover, we have $\deg(Z)=2$ and $dZ=0 \mod k+1$ loops. By Lemma \ref{lemma:loops} there exists a $\Gamma \in \mathsf{GC}_2$ of degree $0$ such that $d\Gamma=0$ and a $Z'\in C$ such that $Z+dZ'=(\underset{1}{\circ} \text{---} \underset{2}{\circ}) \circ_2 \Gamma \mod k+1$ loops. Set \begin{equation*} Y':=Y+Z'. \end{equation*} It satisfies $\delta Y'=\delta Y+\delta Z'=\delta Y=d X \mod k+1$ loops as $\delta Z'=0$. Also, $dY'=dY +dZ'=Z+dZ'=(\underset{1}{\circ} \text{---} \underset{2}{\circ}) \circ_2 \Gamma \mod k+1\text{ loops }=F(\Gamma)\mod k+1 \text{ loops}$.
Next, note that (\cite{Tamarkin1998}, \cite{Willwacher2014} section 6.4.) \begin{equation}\label{eq:F} F(\Gamma)=d(\Gamma)_1-(d\Gamma)_1. \end{equation} As $d\Gamma=0$, we have $F(\Gamma)=d(\Gamma)_1$, and since modulo $k+1$ loops, $F(\Gamma)=dY'$, we obtain \begin{equation*} d(Y'-(\Gamma)_1)=0 \mod k+1 \text{ loops}. \end{equation*} In \cite{Severawillwacher2011}, it is proven that $\mathfrak{t}_n\cong H(\mathsf{ICG}(n),d)$ holds for all $n\in \mathbb{N}$. The isomorphism is given by mapping generators $t^{i,j}$ to graphs with no internal vertex and one edge connecting the external vertices $i$ and $j$. In particular, this implies $H^k(\mathsf{ICG}(n))=0$ for $k\neq 0$. Therefore, as $H^1(\mathsf{ICG}(1))=0$ and $Y'-(\Gamma)_1\in \ker(d)=\mathrm{im}(d)$, there is a $W\in \mathsf{ICG}(1)$ of degree $0$ such that \begin{equation*} Y'-(\Gamma)_1=dW \mod k+1 \text{ loops}. \end{equation*} For degree reasons, $W$ will not have a tree part. At this point, set \begin{equation*} X':=X+\delta W. \end{equation*} It does indeed satisfy the required relations. As $W$ does not contribute to the tree part, clearly $[X']=x$. Moreover, everything modulo $k+1$ loops, \begin{equation*} dX'=dX+d\delta W=\delta Y'+d \delta W=\delta (\Gamma)_1+\delta d W +d \delta W=\delta (\Gamma)_1 \end{equation*} as $\delta dW =-d\delta W$. \end{proof}
\begin{remark} The condition for $\Gamma$ to be 1-vertex irreducible ensures that $(\Gamma)_1$ is internally connected. \end{remark}
\begin{remark}\label{remark:delta} For $\Gamma \in \mathsf{graphs}(1)$, \begin{equation*} \Gamma\cdot (\circ \hspace{0.3cm} \circ)= -\delta\Gamma. \end{equation*} Additionally, for $\gamma \in \mathsf{GC}_2$ \begin{equation*} \sum\limits_{v}{ (\Gamma \circ_v \gamma)\cdot (\circ \hspace{0.3cm} \circ)}=\sum\limits_{v}{(\Gamma\cdot (\circ \hspace{0.3cm} \circ))\circ_v \gamma} \end{equation*} where the sum runs over internal vertices of $\Gamma$. \end{remark}
\begin{proof}[Proof of Theorem \ref{Thm:krvhat}] Fix $k\geq 1$. Let $x_1,x_2\in \widehat{\mathfrak{krv}}^{(k)}_2$. By Lemma \ref{lemma:crucial}, there exist $X_1, X_2 \in \mathsf{ICG}(2)$ and $\Gamma_1, \Gamma_2 \in \mathsf{GC}_2$ (1-vertex irreducible, of degree $0$ and satisfying $d\Gamma_1=d\Gamma_2=0$) such that for $i=1,2$ \begin{enumerate}[label=(\roman*)] \item{$[X_i]=x_i$} \item{$dX_i=\delta (\Gamma_i)_1 \mod k+1 \text{ internal loops}$.} \end{enumerate} We need to find an $X\in \mathsf{ICG}(2)$ which extends the bracket $[x_1,x_2]_{Ih}$ and a $Y\in \mathsf{ICG}(1)$ such that $dX=\delta Y \mod k+1 \text{ internal loops}$. As an extension of $[x_1,x_2]_{Ih}$ we suggest the element \begin{equation} X:=\Gamma_1\bullet X_2 - \Gamma_2 \bullet X_1+d (X_1\wedge X_2)\mod k+1 \text{ internal loops} \in \mathsf{graphs}(2). \end{equation} The notation $X_1\wedge X_2$ means that we identify the corresponding external vertices. The edges of the new graph are ordered by preserving their order in $X_1$ and $X_2$ and by $e_1<e_2$ whenever $e_1$ is an edge of $X_1$ and $e_2$ is an edge of $X_2$. Remark that a priori $X$ might not be internally connected. It is a linear combination of graphs containing at most $k$ loops. The higher loop part is set to zero. There are several things to check.
\begin{enumerate}[label=(\roman*)] \item{$[X]=[x_1,x_2]_{Ih}$: The tree part of $X$ comes only from $d(X_1\wedge X_2)$ as $\Gamma_1\bullet X_2$ and $\Gamma_2\bullet X_1$ both contain loops. Moreover, this tree part exactly coincides with the bracket $[x_1,x_2]_{Ih}$ which is given by gluing $x_1$ and $x_2$ (the tree parts of $X_1$ and $X_2$) at the corresponding external vertices, applying the differential and only keeping the loop-free internally connected graphs.} \item{$dX=\delta Y \mod k+1 \text{ internal loops}$: The differential is compatible with the action of $\mathsf{GC}_2$ on $\mathsf{graphs}(2)$. Therefore, everything modulo $k+1$ internal loops, \begin{align*} dX=&\underbrace{(d\Gamma_1)}_{=0} \bullet X_2+ \Gamma_1\bullet \underbrace{(dX_2)}_{=\delta (\Gamma_2)_1}-\underbrace{(d\Gamma_2)}_{=0 } \bullet X_1-\Gamma_2\bullet \underbrace{(dX_1)}_{=\delta (\Gamma_1)_1}\\ =&\Gamma_1\bullet (\delta (\Gamma_2)_1)-\Gamma_2\bullet (\delta (\Gamma_1)_1)\\ =&-\Gamma_1\bullet ((\Gamma_2)_1\cdot (\circ \hspace{0.3cm} \circ))+\Gamma_2\bullet ((\Gamma_1)_1 \cdot (\circ \hspace{0.3cm} \circ))\\ =&-(\Gamma_1)_1\cdot ((\Gamma_2)_1 \cdot (\circ \hspace{0.3cm} \circ))+(\Gamma_2)_1\cdot ((\Gamma_1)_1\cdot (\circ \hspace{0.3cm} \circ))\\ -&\sum\limits_{v}{((\Gamma_2)_1\cdot (\circ \hspace{0.3cm} \circ))\circ_v \Gamma_1}+\sum\limits_{v'}{((\Gamma_1)_1\cdot (\circ \hspace{0.3cm} \circ))\circ_{v'} \Gamma_2}. \end{align*} Remark \ref{remark:delta} above, together with Lemma \ref{lemma:operad} enable us to write this as \begin{align*} =&(-(\Gamma_1)_1\cdot (\Gamma_2)_1) \cdot (\circ \hspace{0.3cm} \circ)+((\Gamma_2)_1\cdot (\Gamma_1)_1)\cdot (\circ \hspace{0.3cm} \circ)\\ -&\sum\limits_{v}{((\Gamma_2)_1\circ_v \Gamma_1)\cdot (\circ \hspace{0.3cm} \circ)}+\sum\limits_{v'}{((\Gamma_1)_1\circ_{v'} \Gamma_2)\cdot (\circ \hspace{0.3cm} \circ)}\\ =&\underbrace{(\Gamma_2\bullet (\Gamma_1)_1- \Gamma_1 \bullet (\Gamma_2)_1)}_{=:-Y}\cdot (\circ \hspace{0.3cm} \circ)\\ =&\delta Y. 
\end{align*} } \item{$X\in \mathsf{ICG}(2)$: Denote by $k_i$ the number of edges of $X_i$. Remark that the signs in the wedge product $\wedge$ behave as follows, \begin{equation*} X_1\wedge X_2=(-1)^{k_1 k_2}X_2\wedge X_1. \end{equation*} As $0=\deg(X_i)=1-k_i+2\#\text{internal vertices}$, we have that $k_i=2\#\text{internal vertices}+1$ is odd. Therefore, \begin{equation*} X_1\wedge X_2=-X_2\wedge X_1. \end{equation*} We find that the non-internally connected part of $\Gamma_1\bullet X_2$ is \begin{equation*} -(\delta (\Gamma_1)_1)\wedge X_2. \end{equation*} To see this, consider, \begin{equation*} \Gamma_1\bullet X_2=(\Gamma_1)_1\circ_1 X_2-\sum\limits_{j=1}^{2}{X_2\circ_j(\Gamma_1)_1+\sum\limits_{v}X_2 \circ_v \Gamma_1}. \end{equation*} The last sum will consist of internally connected graphs since we insert $\Gamma_1\in \mathsf{GC}_2$ into the internal vertices of $X_2$. When $X_2$ is inserted in the unique external vertex of $(\Gamma_1)_1$, the non-internally connected terms will arise when the edges of $(\Gamma_1)_1$ which were previously connected to the external vertex are distributed on the two external vertices. This corresponds to the expression $\delta_1 (\Gamma_1)_1\wedge X_2$. On the other hand, when $(\Gamma_1)_1$ is inserted in the first external vertex of $X_2$ we find the non-internally connected graphs by connecting all edges of $X_2$ previously connected to external vertex 1 to the unique external vertex of $(\Gamma_1)_1$. This yields $X_2\wedge \delta_0 (\Gamma_1)_1$. Similarly, we obtain $X_2\wedge \delta_2 (\Gamma_1)$ when considering the second external vertex of $X_2$. Moreover, since $\Gamma_1$ is of degree zero in $\mathsf{GC}_2$, all of $\Gamma_1$, $(\Gamma_1)_1$ and $\delta_i (\Gamma_1)_1$ will have an even number of edges, and thus $X_2\wedge \delta_i (\Gamma_1)_1=\delta_i (\Gamma_1)_1 \wedge X_2$. These three terms together give the claim above. For a more schematic explanation, see Figures \ref{Fig:nonconn1} and \ref{Fig:nonconn2}. 
\begin {center} \begin{figure}
\caption{The non-internally connected part of $(\Gamma_1)_1\circ_1 X_2$ is given by $\delta_1 (\Gamma_1)_1\wedge X_2$.}
\label{Fig:nonconn1}
\end{figure} \end{center}
\begin {center} \begin{figure}
\caption{The non-internally connected part of $X_2\circ_1 (\Gamma_1)_1$ is given by $X_2\wedge\delta_0 (\Gamma_1)_1$.}
\label{Fig:nonconn2}
\end{figure} \end{center}
The non-internally connected part of $d(X_1\wedge X_2)$ is \begin{equation*} (dX_1)\wedge X_2 - X_1\wedge (dX_2). \end{equation*} As $dX_i=\delta (\Gamma_i)_1 \mod k+1\text{ internal loops}$ the non-internally connected part of \begin{equation*} X:=\Gamma_1\bullet X_2 - \Gamma_2 \bullet X_1+d (X_1\wedge X_2)\mod k+1 \text{ internal loops} \end{equation*} vanishes, i.e. \begin{equation} \underbrace{-(\delta (\Gamma_1)_1)\wedge X_2}_{\text{from }\Gamma_1\bullet X_2}+ \underbrace{(\delta (\Gamma_2)_1)\wedge X_1}_{\text{from }\Gamma_2\bullet X_1}+ \underbrace{(dX_1)\wedge X_2 - (-1)^{k_1(k_2+1)}(dX_2)\wedge X_1}_{\text{from } d(X_1\wedge X_2)}=0 \end{equation} } \item{$Y\in \mathsf{ICG}(1)$: The only non-internally connected part of \begin{equation*} \Gamma_1\bullet (\Gamma_2)_1=(\Gamma_1)_1\cdot (\Gamma_2)_1+\underbrace{\sum\limits_{v}{(\Gamma_2)_1\circ_v \Gamma_1}}_{\in \mathsf{ICG}(1)} \end{equation*} is given by $(\Gamma_1)_1\wedge (\Gamma_2)_1+(\Gamma_2)_1\wedge (\Gamma_1)_1$. Therefore, in $Y$, the only non-internally connected part will be $(\Gamma_1)_1\wedge (\Gamma_2)_1+ (\Gamma_2)_1\wedge (\Gamma_1)_1-((\Gamma_2)_1\wedge (\Gamma_1)_1+(\Gamma_1)_1\wedge (\Gamma_2)_1)=0$. } \end{enumerate}
Hence, the conditions for $[x_1,x_2]_{Ih}\in \widehat{\mathfrak{krv}}^{(k)}_2$ are satisfied.
\end{proof}
\begin{definition} The \emph{Grothendieck-Teichm\"uller Lie algebra} $\mathfrak{grt}_1$ is spanned by elements $(0,\psi)\in \mathfrak{tder}_2$, that satisfy the following relations: \begin{align*} \psi(x,y)&=-\psi(y,x)\\ \psi(x,y)+\psi(y,z)+\psi(z,x)&=0 \text{ for } x+y+z=0\\ \psi(t^{1,2},t^{2,3}+t^{2,4})+\psi(t^{1,3}+t^{2,3},t^{3,4})&=\psi(t^{2,3},t^{3,4})+\psi(t^{1,2}+t^{1,3},t^{2,4}+t^{3,4})+\psi(t^{1,2},t^{2,3}) \end{align*} where the last equation takes values in the Lie algebra $\mathfrak{t}_4$. \end{definition}
\begin{theorem}\label{Thm:inclusion} The Lie algebra $\mathfrak{grt}_1$ is contained in all of the $\widehat{\mathfrak{krv}}^{(k)}_2$. \end{theorem} \begin{proof} In \cite{Willwacher2014} it was proven that $H^0(\mathsf{GC}_2)\cong \mathfrak{grt}_1 $. The map \begin{align*} H^0(\mathsf{GC}_2)&\rightarrow \mathfrak{grt}_1\\ \gamma &\mapsto \phi_\gamma \end{align*} is given by the following algorithm \cite{Willwacher2014}. \begin{enumerate} \item{Let $\gamma$ be a closed element in $\mathsf{GC}_2$. We may assume it to be 1-vertex irreducible. Denote by $\gamma_1\in \mathsf{graphs}(1)$ the linear combination of graphs obtained by marking the vertex $1$ as ``external" in each graph appearing in $\gamma$. As $\gamma$ is 1-vertex irreducible, $\gamma_1 \in \mathsf{ICG}(1)$.} \item{Apply $\delta$ to $\gamma_1$, i.e. split the external vertex into two vertices, and sum over all ways to reconnect the loose edges so that both vertices are hit by at least one edge. Call this linear combination $\gamma_2'\in \mathsf{ICG}(2)$.} \item{It turns out that $\gamma_2'$ is the coboundary of some element $\gamma_2\in \mathsf{ICG}(2)$. We choose $\gamma_2$ to be symmetric under interchange of the external vertices $1$ and $2$.} \item{Forget the non-internal trivalent tree part of $\gamma_2$ to obtain $T_2\in \mathfrak{sder}_2$.} \item{For each tree $t$ appearing in $T_2$ construct a Lie word in formal variables $X$ and $Y$ as follows. For each edge incident to vertex $1$, cut it and make it the ``root" edge. The resulting tree is a binary tree with leafs labelled by 1 or 2. It can be seen as a Lie tree, and one gets a Lie word $\phi_1(X,Y)$ by replacing each 1 by $X$ and 2 by $Y$. Set $\phi(X,Y)=\phi_1(X,Y)-\phi_1(Y,X)$. Summing over all such Lie words one gets a linear combination $\phi_\gamma(X,Y)$ of Lie words corresponding to $\gamma$. 
It is an element of $\mathfrak{grt}_1$.} \end{enumerate} The algorithm and the fact that this map is an isomorphism imply that given $\phi_\gamma\in \mathfrak{grt}_1$, there exists a unique internal trivalent tree $T_2 \in \mathfrak{sder}_2$ which may be extended to $\gamma_2\in \mathsf{ICG}(2)$ satisfying that there is a $\gamma_1\in \mathsf{ICG}(1)$ with $d\gamma_2=\delta\gamma_1$. This is exactly the required relation for $T_2$ to be in $\widehat{\mathfrak{krv}}^{(k)}_2$ for all $k\geq 1$. Hence, $\mathfrak{grt}_1\subset \widehat{\mathfrak{krv}}^{(k)}_2$ for all $k\geq 1$. \end{proof}
\begin{theorem}\label{thm:intersection} The intersection of all $\widehat{\mathfrak{krv}}_2^{(k)}$ is $\mathfrak{grt}_1\oplus \mathfrak{t}_2$, i.e. in formulas \begin{equation*} \mathfrak{grt}_1\oplus \mathfrak{t}_2\cong \bigcap\limits_{k\geq 1}\widehat{\mathfrak{krv}}_2^{(k)}=:\widehat{\mathfrak{krv}}_2^{(\infty)}. \end{equation*} \end{theorem}
We will need two rather technical lemmas.
\begin{lemma}\label{lemma:technical} For each $x\in \widehat{\mathfrak{krv}}^{(\infty)}_2$, there exists a pair $(X,Y)\in \mathsf{ICG}(2)\times \mathsf{ICG}(1)$ with $\deg(X)=0$ such that the tree part of $X$ is $x$ and $dX=\delta Y$. \end{lemma}
\begin{proof} We define the following auxiliary grading on $\bigoplus\limits_{r\geq 1}\mathsf{ICG}(r)$. It is given by connecting the subsequent external vertices by an edge, and then counting the number of not necessarily internal loops in our graph. A brief graphical calculation shows that this degree is preserved by both $\delta$ and $d$. Let now $x\in \widehat{\mathfrak{krv}}_2^{(\infty)}$, and denote by $x^M$ its (auxiliary) degree $M$ component. Since $x\in \widehat{\mathfrak{krv}}_2^{(\infty)}$, in particular $x\in \widehat{\mathfrak{krv}}_2^{(M)}$, and there is a pair $(X^M,Y^M)$ of degree $M$ extending $x^M$ such that $dX^M=\delta Y^M \mod M+1$ internal loops. But then $(X^M,Y^M)$ is an extension for $x^M$ which satisfies $dX^M=\delta Y^M$ to infinite loop order, since the number of internal loops is bounded by the degree $M$. Applying this construction to each homogeneous component of $x$ gives a pair $(X=\sum\limits_M X^M,Y=\sum\limits_M Y^M)$ satisfying all the required properties. \end{proof}
\begin{lemma} Let $(X,Y)$ be a pair corresponding to $x\in \widehat{\mathfrak{krv}}_2^{(\infty)}$. The map \begin{align*} B:\widehat{\mathfrak{krv}}_2^{(\infty)}&\rightarrow H^2(C,d)\\ (X,Y)&\mapsto dY \end{align*} is well-defined. Here, $(C,d)$ is the complex defined in Remark \ref{remark:C}. \end{lemma}
\begin{proof} We define a map by \begin{align*} \widehat{\mathfrak{krv}}_2^{(\infty)}&\xrightarrow{E} H^0(\mathsf{ICG}(3),d)\cong \mathfrak{t}_3\\ (X,Y)&\mapsto \delta X. \end{align*} To show that $E$ is well-defined, first note that $\delta X$ is of degree 0 and that $d \delta X=-\delta dX=-\delta^2 Y=0$, that is, indeed $\delta X\in H^0(\mathsf{ICG}(3),d)$. Let $(X_1,Y_1)$ and $(X_2,Y_2)$ be two extensions of $x$. The difference $X:=X_1-X_2$ has no tree part. Therefore, $E(X,Y)=\delta X=0$ because elements of $\mathfrak{t}_3$ consist only of trees (and $\delta X$ contains none). Thus, $E(X_1,Y_1)=\delta X_1=\delta X_2=E(X_2,Y_2)$ and $E$ is well-defined.
To prove the same for $B$, note that since $dY$ is obviously closed, of degree $2$ (as $\deg(Y)=1$) and satisfies $0=d^2 X=d \delta Y=-\delta dY$, i.e. $dY\in \ker\delta=C$, the target space is indeed $H^2(C,d)$. Again, let $(X_1,Y_1)$ and $(X_2,Y_2)$ be two extensions of $x$ and consider $Y:=Y_1-Y_2$. It follows from (\cite{Willwacher2014}, Proposition 6.13.), that the inclusion $(C,d)\hookrightarrow (\bigoplus\limits_{r\geq 1}\mathsf{ICG}(r)[1],d+\delta)$ is a quasi-isomorphism, in particular, \begin{equation}\label{eq:totalcomplex} H^2(C,d)\cong H^3(\bigoplus\limits_{r\geq 1}\mathsf{ICG}(r),d+\delta). \end{equation} The degree in the total complex for some $\Gamma \in \mathsf{ICG}(r)$ is $\deg_{Tot}:=\deg(\Gamma)+r$ (where $\deg(\Gamma)$ is the degree in $\mathsf{ICG}(r)$). In the total complex $dY$ is cohomologous to $\delta X$ via \begin{equation*} dY=\delta X -(d+\delta)(X-Y). \end{equation*} Therefore, since $\delta X=0$, we have $d Y=0 \in H^3(\bigoplus\limits_{r\geq 1}\mathsf{ICG}(r),d+\delta)$. But the isomorphism \eqref{eq:totalcomplex} implies that therefore $d Y=0$ already in $H^2(C,d)$. This yields the result, as now $dY_1=dY_2$, that is, $B$ is well-defined. \end{proof}
\begin{remark}\label{rmk:alpha} As $H^0(\mathsf{GC}_2)\overset{F}{\cong} H^2(C,d)$, $dY=F(\gamma)$ for some $\gamma\in H^0(\mathsf{GC}_2)$. Also, by equation \eqref{eq:F}, $F(\gamma)=d\gamma_1$, where $\gamma_1$ is obtained by marking vertex $1$ as ``external'' (see equation \eqref{eq:onemap}). Therefore, $d(\gamma_1-Y)=0$, and since $H^1(\mathsf{ICG}(1),d)=0$, $\gamma_1-Y=d\alpha$ for some $\alpha\in \mathsf{ICG}(1)$ of degree $0$. We shall use this relation in the proof below. \end{remark}
\begin{proof}[Proof of Theorem \ref{thm:intersection}] The algorithm in the proof of Theorem \ref{Thm:inclusion} provides us with a map \begin{equation*} A:H^0(\mathsf{GC}_2)\rightarrow \widehat{\mathfrak{krv}}_2^{(\infty)}. \end{equation*} Let $\gamma\in H^0(\mathsf{GC}_2)$, and denote by $\phi_\gamma$ the corresponding $\mathfrak{grt}_1$ element. Keeping the notation from the algorithm, the assignment $\gamma\mapsto \phi_\gamma$ produces a pair $(\gamma_2,\gamma_1)$ satisfying $d\gamma_2=\delta\gamma_1$ and thus the tree part of $\gamma_2$, denoted $T_2$, will lie in $\widehat{\mathfrak{krv}}_2^{(\infty)}$. Abusing notation, set $A(\gamma):=(\gamma_2,\gamma_1)$. Consider the composition \begin{equation*} H^0(\mathsf{GC}_2)\overset{A}{\longrightarrow} \widehat{\mathfrak{krv}}_2^{(\infty)} \overset{B}{\longrightarrow} H^2(C,d)\overset{F^{-1}}{\longrightarrow} H^0(\mathsf{GC}_2). \end{equation*} It equals the identity as \begin{equation*} F^{-1} \circ B \circ A (\gamma)=F^{-1} \circ B (\gamma_2,\gamma_1)=F^{-1} (d\gamma_1)=\gamma, \end{equation*} implying that $B$ is surjective.
We now determine the kernel of $B$. For this, let $(X,Y)$ be a pair corresponding to $x\in \widehat{\mathfrak{krv}}_2^{(\infty)}$ with $B(X,Y)=dY=0\in H^2(C,d)$. Then, $F(\gamma)=dY=0$ and since $F$ is an isomorphism $\gamma=0\in H^0(\mathsf{GC}_2)$, i.e. $\gamma=d\tilde{\gamma}$ for some $\tilde{\gamma}\in \mathsf{GC}_2$ of degree $-1$. Remark that (by equation \eqref{eq:F}) \begin{equation}\label{eq:Ftgamma} F(\tilde{\gamma})=d\tilde{\gamma}_1-(d\tilde{\gamma})_1=d\tilde{\gamma}_1-\gamma_1. \end{equation} Define $\widehat{\gamma}:=\gamma_1+ F(\tilde{\gamma})\in \mathsf{ICG}(1)$. It satisfies, \begin{equation*} \delta \widehat{\gamma}=\delta \gamma_1+\delta F(\tilde{\gamma})=\delta \gamma_1 \end{equation*} as $F(\tilde{\gamma})\in C=\ker\delta$. Also, it follows directly from equation \eqref{eq:Ftgamma} that $\widehat{\gamma}=d\tilde{\gamma}_1$. Finally, set \begin{equation*} X':=X+\delta(\tilde{\gamma}_1-\alpha)\in \mathsf{ICG}(2), \end{equation*} where $\alpha\in \mathsf{ICG}(1)$ is as in Remark \ref{rmk:alpha}. The degree of $X'$ is $0$ and it satisfies, \begin{align*} dX'=&d X+d\delta(\tilde{\gamma}_1-\alpha)=d X-\delta d \tilde{\gamma}_1+ \delta d\alpha\\ =&dX-\delta\widehat{\gamma}+\delta(\gamma_1-Y)\\ =&dX-\delta\gamma_1+\delta\gamma_1-\delta Y=dX-\delta Y=0. \end{align*} Hence, $X'\in H^0(\mathsf{ICG}(2),d)\cong\mathfrak{t}_2$, i.e. $X'=\lambda \cdot (\underset{1}{\circ} \text{---} \underset{2}{\circ})$ for some $\lambda \in \mathbb{K}$. But then, \begin{equation*} X=X' - \delta(\tilde{\gamma}_1 - \alpha)=\lambda \cdot (\underset{1}{\circ} \text{---} \underset{2}{\circ}) - \delta(\tilde{\gamma}_1 - \alpha). \end{equation*} However, $\delta(\tilde{\gamma}_1-\alpha)$ does not contribute to the tree part $x$ of $X$, which therefore is of the form $\lambda \cdot (\underset{1}{\circ} \text{---} \underset{2}{\circ})$. This implies $x \in \mathfrak{t}_2$ and $\ker B \subset \mathfrak{t}_2$. In fact, $\ker B=\mathfrak{t}_2$. The other inclusion is clear. 
Since $t^{1,2}$ satisfies $d (t^{1,2})=0$, a pair corresponding to $t^{1,2}$ in $\widehat{\mathfrak{krv}}_2^{(\infty)}$ is $(t^{1,2},0)$, which lies in $\ker B$. And since $B$ is well-defined, any pair corresponding to $t^{1,2}$ will lie in $\ker B$.
Thus, we eventually have \begin{equation*} \widehat{\mathfrak{krv}}_2^{(\infty)}\Big/ \mathfrak{t}_2 \overset{\cong}{\longrightarrow}H^0(\mathsf{GC}_2)\cong\mathfrak{grt}_1 \end{equation*} and $\widehat{\mathfrak{krv}}_2^{(\infty)}\cong \mathfrak{grt}_1 \oplus \mathfrak{t}_2$. \end{proof}
Since it is conjectured that $\widehat{\mathfrak{krv}}_2\cong \mathfrak{t}_2 \oplus \mathfrak{grt}_1$, we expect all $\widehat{\mathfrak{krv}}^{(k)}_2$ to coincide. \begin{conjecture} For all $k\geq 1$ \begin{equation*} \widehat{\mathfrak{krv}}^{(k)}_2=\widehat{\mathfrak{krv}}_2^{(k+1)}. \end{equation*} \end{conjecture}
\appendix \section{The spaces $\mathfrak{tr}_n$, $\mathfrak{sder}_n$, $\mathfrak{tder}_n$} We follow \cite{Alekseev2012}. Fix $n\geq 1$. Let $\mathfrak{lie}_n$ denote the completed free Lie algebra over $\mathbb{K}$ on $n$ variables $x_1,\dots,x_n$ and let $\text{Ass}_n=U(\mathfrak{lie}_n)$ be the completed free associative algebra in $n$ generators. The graded vector space of \emph{cyclic words} in $n$ variables $\mathfrak{tr}_n$ is defined as \begin{equation*} \mathfrak{tr}_n:=\text{Ass}^+_n/\langle (ab-ba), a,b\in \text{Ass}_n\rangle \end{equation*} where $\text{Ass}_n^+$ is the augmentation ideal of $\text{Ass}_n$. The Lie algebra $\mathfrak{tder}_n$ of \emph{tangential derivations} on $\mathfrak{lie}_n$ is defined as follows. A derivation $u$ on $\mathfrak{lie}_n$ is tangential if there exist $a_1,\dots ,a_n\in \mathfrak{lie}_n$ such that $u(x_i)=[x_i,a_i]$ for all $i=1,\dots,n$. The action of $u$ on the generators completely determines the derivation. For $u=(a_1,\dots,a_n)$ and $v=(b_1,\dots,b_n)$ elements of $\mathfrak{tder}_n$, the Lie bracket is the tangential derivation $[u,v]=(c_1,\dots,c_n)$, where $c_k=u(b_k)-v(a_k)+[a_k,b_k]$ for all $k=1,\dots,n$. The Lie algebra of \emph{special derivations} $\mathfrak{sder}_n$ is \begin{equation*}
\mathfrak{sder}_n:=\{u\in \mathfrak{tder}_n| u(\sum\limits_{i=1}^{n}{x_i})=0\}. \end{equation*} It is a Lie subalgebra of $\mathfrak{tder}_n$. For every $a\in \text{Ass}_n$, we have a unique decomposition \begin{equation*} a=a_0+\sum\limits_{k=1}^{n}(\partial_k a)x_k, \end{equation*} where $a_0\in \mathbb{K}$ and $(\partial_k a)\in \text{Ass}_n$. The \emph{divergence map} \begin{align*} \text{div}:\mathfrak{tder}_n&\rightarrow \mathfrak{tr}_n\\ u=(a_1,\dots, a_n) &\mapsto \sum\limits_{k=1}^{n}tr(x_k(\partial_k a_k)) \end{align*} is a cocycle for $\mathfrak{tder}_n$ (\cite{Alekseev2012}, Proposition 3.6.).
The following algorithm describes the isomorphism between $H^0(\hat{gr}\mathsf{ICG}(n)^0,d_0)$, i.e. internally trivalent trees in $\mathsf{ICG}(n)$ modulo IHX, and $\mathfrak{sder}_n$. Let $\Gamma$ be a tree representing an element of $H^0(\hat{gr}\mathsf{ICG}(n)^0,d_0)$. Pick an edge incident to the external vertex $1$, cut it and make it the ``root'' edge. The resulting tree is a binary tree with leaves labeled by $1,\dots, n$. Repeat this procedure for every edge incident to vertex $1$, and take the sum of the trees obtained in this way. We want to interpret these binary trees as Lie words. The sign convention for this is as follows. The edges of the tree should be ordered such that its ``root'' edge comes first, then all edges of its left subtree, and then all edges of its right subtree. For each subtree, apply this convention recursively. The resulting linear combination of Lie words (these can be read off the trees by following the ordering of the edges) in the variables $x_1,\dots,x_n$ corresponds to the first component $a_1$ of a special derivation $a=(a_1,\dots,a_n)\in \mathfrak{sder}_n$. The $i$-th component $a_i$ is obtained by applying the same procedure to the $i$-th external vertex.
We now give the map $H^1(\hat{gr}\mathsf{ICG}(n)^1,d_0)\hookrightarrow \mathfrak{tr}_n$ as described in \cite{Severawillwacher2011}. Let $\overline{\Gamma}\in H^1(\hat{gr}\mathsf{ICG}(n)^1,d_0)$. We may assume that the representative $\Gamma$ is such that the loop passes through all internal vertices. Order the edges as in Figure \ref{trgraph}. In this case, we map \begin{equation*} \overline{\Gamma}\mapsto tr(x_{m_1}\cdots x_{m_k})-(-1)^k tr(x_{m_k}\cdots x_{m_1}). \end{equation*}
\begin{figure}
\caption{An example of the isomorphism $H^0(\hat{gr}\mathsf{ICG}(3)^0,d_0)\rightarrow \mathfrak{sder}_3$. The triple on the right corresponds to the element $([x_2,x_3],-[x_1,x_3],[x_1,x_2])$.}
\end{figure}
\begin{figure}
\caption{This graph will be sent to $tr(x_{m_1}x_{m_2}x_{m_3}x_{m_4}x_{m_5})-(-1)^5tr(x_{m_5}x_{m_4}x_{m_3}x_{m_2}x_{m_1})$ under the injective map $H^1(\hat{gr}\mathsf{ICG}(n)^1,d_0)\rightarrow \mathfrak{tr}_n$.}
\label{trgraph}
\end{figure}
\end{document} | arXiv | {
"id": "1612.03083.tex",
"language_detection_score": 0.6019958853721619,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{A variation of a congruence of Subbarao for $n=2^{\alpha}5^{\beta}$\thanks{The author is supported by Croatian Science Foundation grant number 6422.} }
\author{Sanda Buja\v{c}i\'{c} \footnote{Department of Mathematics
Radmile Matej\v{c}i\'{c} 2, 51000 Rijeka, Croatia
Tel.: +385-51-584654
Fax: +385-51-584699
sbujacic@math.uniri.hr } }
\date{}
\maketitle
\begin{abstract} There are many open problems concerning the characterization of the positive integers $n$ fulfilling certain congruences and involving the Euler totient function $\varphi$ and the sum of positive divisors function $\sigma$ of the positive integer $n$. In this work, we deal with the congruence of the form
$$\label{cong} n\varphi(n)\equiv2\pmod{\sigma(n)} $$ and we prove that the only positive integers of the form $2^{\alpha}5^{\beta}, \enspace \alpha, \beta\geq0,$ that satisfy the above congruence are $n=1, 2, 5, 8$.\\ \textbf{Keywords}\\ Euler's totient function, sum of positive divisors, Pellian equations, congruence of Subbarao\\ \textbf{Mathematics Subject Classification (2010)} 11A07, 11D09 \end{abstract}
\section{Introduction} \label{intro}
Wilson's theorem is a well known characterization of prime numbers. It states that a positive integer $p>1$ is a prime number if and only if the congruence of the form $$(p-1)!+1\equiv0\pmod{p}$$ is satisfied. There is probably no other so simple characterization of prime numbers in the form of a congruence, but there are many open problems concerning the characterization of the positive integers fulfilling certain congruences and involving functions $\varphi$ and $\sigma$, where $\varphi(n)$ and $\sigma(n)$ stand for the Euler totient function and the sum of positive divisors function of the positive integer $n$, respectively.
In 1932 D. H. Lehmer \cite{lehmer} was dealing with the congruence of the form \begin{equation}\label{eq:lehmer} n-1\equiv0\pmod{\varphi(n)}. \end{equation} This problem is known as Lehmer's totient problem. Despite the fact that the congruence $(\ref{eq:lehmer})$ is satisfied by every prime number, Lehmer's totient problem is an open problem because it is still not known whether there exists a composite number that satisfies it. Lehmer proved that, if there exists a composite number that satisfies the congruence (\ref{eq:lehmer}), then it must be odd, square free and it must have at least seven distinct prime factors. In 1944, F. Schuh \cite{schuh} improved Lehmer's result and showed that such composite number must have at least eleven distinct prime factors. The best current result shows that, if such composite number exists, it has to have at least fourteen distinct prime factors \cite{cohen}.
M. V. Subbarao was considering the congruence of the form \begin{equation}\label{eq:subbarao} n\sigma(n)\equiv 2\pmod{\varphi(n)}. \end{equation} He proved \cite{subbarao} that the only composite numbers that satisfy the congruence (\ref{eq:subbarao}) are numbers $4, 6$ and $22$.
A. Dujella and F. Luca were dealing with the congruence of the form \begin{equation}\label{eq:version} n\varphi(n)\equiv2\pmod{\sigma(n)}, \end{equation} which is a variation of the congruence (\ref{eq:subbarao}). They have proved \cite{luca} that there are only finitely many positive integers that satisfy the congruence (\ref{eq:version}) and whose prime factors belong to a fixed finite set. They have proved that when this finite set consists only of two primes $2$ and $3$, then the only positive integers of the form $n=2^a3^b, \ a, b \geq 0,$ that satisfy the congruence (\ref{eq:version}) are $n=1, 2, 3, 8, 9.$
We deal with the variation of the congruence of Subbarao for $n=2^{\alpha}5^{\beta}, \alpha, \beta\geq0$. The main result of this paper is the following theorem.
\begin{theorem}\label{main_theo}
The only positive integers $n$ of the form $n=2^{\alpha}5^{\beta}, \ \alpha, \beta \geq 0, $ that satisfy the congruence
$$n\varphi(n) \equiv 2 \pmod{\sigma(n)}$$ are numbers $n=1, 2, 5, 8.$ \end{theorem}
\section{The proof of Theorem \ref{main_theo}}
\subsection{The cases $\alpha=0$ and $\beta=0$}
It is easily seen that all the prime numbers satisfy the congruence
(\ref{eq:version}). Let $p$ be a prime. In this case, we have $\varphi(p)=p-1$
and $\sigma(p)=p+1$. The variation of the congruence of Subbarao (\ref{eq:version})
becomes
$$p(p-1)-2=(p+1)(p-2)\equiv0\pmod{(p+1)}.$$
The congruence (\ref{eq:version}) is
satisfied for all the prime numbers, or more precisely,
$$p(p-1)\equiv2\pmod{(p+1)}.$$ Hence, the prime numbers $2$ and $5$ satisfy the congruence (\ref{eq:version}).
The remaining part of the proof deals with the composite numbers of the form
$n=2^{\alpha}5^{\beta}, \ \alpha, \beta\geq0$.
To begin, let $\beta=0$, which means that we deal with
the positive integers of the form $n=2^{\alpha}, \enspace \alpha\geq2$. We define
$$D:=\sigma(2^{\alpha})=2^{\alpha+1}-1.$$ We may notice $2^{\alpha+1}\equiv1\pmod{D}$.
Because of $(\ref{eq:version})$, we obtain
$$2^{\alpha}\cdot2^{\alpha}\left(1-\frac{1}{2}\right)\equiv2\pmod{D},$$
$$2^{2(\alpha+1)}\equiv2^4\pmod{D},$$
$$(2^{\alpha+1}-1)(2^{\alpha+1}+1)-15\equiv0\pmod{D}.$$
The condition $D\mid ((2^{\alpha+1}-1)(2^{\alpha+1}+1)-15)$ is satisfied if and only if
$D\mid 15$, or more precisely, if and only if $$(2^{\alpha+1}-1)\mid 15.$$ For
$\alpha\geq2$, the condition $(2^{\alpha+1}-1)\mid 15$ is satisfied only when $\alpha=3$. So,
$n=2^3$ is the only positive integer of the form $n=2^\alpha, \ \alpha \geq2,$ that
satisfies the variation of the congruence of Subbarao $(\ref{eq:version})$.
Now, let $\alpha=0$. We deal with the positive integers of the form $n=5^{\beta}, \enspace
\beta\geq2$. We define $$D:=\sigma(5^{\beta})=\frac{5^{\beta+1}-1}{4}.$$ As in the previous
case, it is easy to notice that $5^{\beta+1}\equiv1\pmod{D}$. Because of $(\ref{eq:version})$, we obtain
$$5^{2\beta-1}\cdot2^2\equiv2\pmod{D},$$ $$5^{2(\beta+1)}\cdot2^2\equiv5^3\cdot
2\pmod{D}.$$
Using $5^{\beta+1}\equiv1\pmod{D}$, the previous congruence implies
$D\mid 246$, which is not possible for $\beta\geq2$. Consequently, there are no positive integers of
the form $n=5^{\beta}, \enspace \beta\geq2,$ that satisfy the congruence
$(\ref{eq:version})$.
\subsection{Useful congruences}
The remaining part of the proof deals with the most general case, or more
precisely, with the positive integers of the form $n=2^{\alpha}5^{\beta}, \ \alpha, \beta\in \mathbb{N}.$ We start by defining $M:=2^{\alpha+1}-1$ and $N:=\frac{5^{\beta+1}-1}{4}$. As in the
previous cases, we use congruences $$2^{\alpha+1}\equiv1\pmod{M}$$ and
$$5^{\beta+1}\equiv1\pmod{N}.$$
Supposing the congruence $(\ref{eq:version})$ is satisfied, we get
\begin{equation}\label{eq:t}
2^{2\alpha+1}\cdot5^{2\beta-1}\equiv2\pmod{MN}.
\end{equation}
\noindent Multiplying (\ref{eq:t}) by $2\cdot5^3,$ we obtain
$$2^{2(\alpha+1)}\cdot5^{2(\beta+1)}\equiv500\pmod{MN}.$$
\indent Since $2^{\alpha+1}\equiv1\pmod{M},$ we get
$5^{2(\beta+1)}\equiv500\pmod{M}$. Analogously, because of $5^{\beta+1}\equiv1\pmod{N}$, we conclude $2^{2(\alpha+1)}\equiv500\pmod{N}$.
For $M\mid (2^{\alpha+1}-1)$, we have $M\mid (2^{2(\alpha+1)}-1)$. Similarly, $N\mid (5^{2(\beta+1)}-1)$. Hence, we get \begin{equation}\label{eq:21}
M, N \mid (2^{2(\alpha+1)}+5^{2(\beta+1)}-501).
\end{equation}
Our next step is to show that $\alpha$ and $\beta$ are even and $M$ and $N$ are coprime. Let $G:=\textnormal{gcd}(M, N)$. Because $2^{\alpha+1}\equiv1\pmod{M}$ and $5^{\beta+1}\equiv1\pmod{N},$ we get $$2^{\alpha+1}\equiv
5^{\beta+1}\equiv1\pmod{G}.$$ Because of $(\ref{eq:21})$, we conclude that $G\mid 499$.
The number $499$ is prime, so $G=1$ or $G=499$. First, suppose that
$G=499$. In this case, we know that $499\mid M$, or, more precisely, $$499\mid
\left(2^{\alpha+1}-1\right).$$ The order of $2$ modulo $499$ is $166$, so $166\mid
(\alpha+1)$. Especially, $2\mid (\alpha+1)$. Hence, $\alpha$ is odd. We can notice
that $M$ can be expressed as $M=2^{\alpha+1}-1=2^{2k}-1,$ for $k\in\mathbb{N}$.
Obviously, $3\mid M$. Hence, $3\mid (n\varphi(n)-2)$, or, more precisely, $3\mid
(2^{2\alpha+1}\cdot5^{2\beta-1}-2)$, which is not possible. As a consequence, we conclude $499\nmid M$ which implies that $G=1$. We have proved that $M, N$ are coprime.
The next step is to determine the parity of $\alpha$ and $\beta$. For start, we assume that $\alpha$ is odd which implies that $\alpha+1$ is even. So,
$$M=2^{\alpha+1}-1=2^{2k}-1,$$ for $k\in\mathbb{N}$. Obviously, $3\mid M$ and according to our hypothesis, $3\mid
(2^{2\alpha+1}\cdot5^{2\beta-1}-2)$, which is not possible. We conclude that $\alpha$ is even.
Now, we assume that $\beta$ is odd. In that case, we write $$5^{\beta+1}-1=5^{2k}-1,$$ for
$k\in\mathbb{N}$. Obviously, $24\mid (5^{2k}-1)$ and because $6\mid N$ and
$N\mid (2^{2\alpha+1}\cdot5^{2\beta-1}-2)$, we get $6\mid
(2^{2\alpha+1}\cdot5^{2\beta-1}-2)$, which is not possible. Hence, $\beta$ is even, too.
Hence, we have proved
that $M$ and $N$ are odd and coprime numbers.
\indent As a consequence of (\ref{eq:21}), we may notice $$MN\mid
(2^{2(\alpha+1)}+5^{2(\beta+1)}-501).$$ On the other hand,
$$4MN=(2^{\alpha+1}-1)(5^{\beta+1}-1),$$
and obviously $2^{2(\alpha+1)}+5^{2(\beta+1)}-501\equiv0\pmod{4}$.\\
\indent Let $x:=2^{\alpha+1}$ and $y:=5^{\beta+1}$. The initial
problem is now represented by the equation of the form
\begin{equation}\label{eq:22}
x^2+y^2-501=c(x-1)(y-1),
\end{equation} for some $c\in\mathbb{N}$.
Since numbers $\alpha$ and $\beta$ are even, the following congruences hold $$x\equiv0\pmod{8}, \enspace x^2\equiv0\pmod{8},$$
$$y\equiv5\pmod{8}, \enspace y^2\equiv1\pmod{8}.$$ From $(\ref{eq:22})$, we get $4c\equiv4\pmod{8}$, which is satisfied for
\begin{equation}\label{eq:kongr1}
c\equiv1\pmod{2}.
\end{equation}
We also notice that congruences $$x\equiv2\pmod{3},\enspace x^2\equiv1\pmod{3},$$
$$y\equiv2\pmod{3},\enspace y^2\equiv1\pmod{3}$$ are satisfied. From
$(\ref{eq:22})$, we easily get
\begin{equation}\label{eq:kongr2}
c\equiv2\pmod{3}.
\end{equation}
We conclude $$x\equiv3\pmod{5},\enspace x^2\equiv4\pmod{5} \enspace \enspace
\textnormal{for} \enspace \alpha\equiv2\pmod{4}, $$
$$x\equiv2\pmod{5}, \enspace x^2\equiv4\pmod{5} \enspace \enspace \textnormal{for}
\enspace \alpha\equiv0\pmod{4}.$$
Obviously, $$y\equiv y^2\equiv0\pmod{5}.$$ Bringing everything together, we obtain that
$$c\equiv1\pmod{5},\enspace \textnormal{for} \enspace \alpha\equiv2\pmod{4},$$
or
\begin{equation}\label{eq:kongr3}
c\equiv2\pmod{5},\enspace \textnormal{for} \enspace \alpha\equiv0\pmod{4}.
\end{equation}
Now, we try to determine which of the above residue classes modulo $5$ is satisfied by the number $c$ that is introduced in our problem.
Let $t=2^{\alpha}\cdot5^{\beta-1}$. We get that
$$5t^2=2^{2\alpha}\cdot5^{2\beta-1}.$$ According to $(\ref{eq:t}),$ we conclude
$5t^2\equiv1\pmod{M}$, which implies
$\left(\frac{5}{M}\right)=\left(\frac{M}{5}\right)=1$. In this case
$M\equiv1, 4\pmod{5}$. Since $M=2^{\alpha+1}-1$, we get $2^{\alpha+1}-1\equiv
1\pmod{5}$ or $2^{\alpha+1}-1\equiv 4\pmod{5}$. The first congruence is satisfied for $\alpha\equiv0\pmod{4},$ while the second congruence, $2^{\alpha+1}-1\equiv 4\pmod{5}$, is never satisfied. Consequently, we consider only positive integers $c$ that satisfy the congruence $$c\equiv2\pmod{5}.$$
Taking into account congruences $(\ref{eq:kongr1}), (\ref{eq:kongr2})$ and
$(\ref{eq:kongr3})$ and using Chinese Remainder Theorem, we determine that required positive integers $c$ satisfy
\begin{equation}\label{eq:kongr}
c\equiv17\pmod{30}.
\end{equation}
\subsection{Pellian equations}
\indent We ``diagonalize'' the equation $(\ref{eq:22})$ as in
\cite{luca}. Let \begin{equation}\label{eq:23}
X:=cy-c-2x,
\end{equation}
\begin{equation}\label{eq:24}
Y:=cy-c-2y.
\end{equation}
Then
$$(c+2)Y^2-(c-2)X^2-(-1996c+4008)=-4(c-2)(x^2+y^2-501-c(x-1)(y-1))=0.$$
This method has resulted with the Pellian equation of the form
\begin{equation}\label{eq:25}
(c+2)Y^2-(c-2)X^2=-1996c+4008.
\end{equation}
\indent Let $X=0$. In this case, the Pellian equation
(\ref{eq:25})
becomes $$Y^2=\frac{-1996c+4008}{c+2}.$$ The only integer solution of the above equation is $Y=\pm2$ for $c=2$. Since $c=2$ does not satisfy the congruence (\ref{eq:kongr}), in our case $Y=\pm2$ is not the solution of the equation (\ref{eq:25}).\\
\indent Let $Y=0$. The initial Pellian equation (\ref{eq:25}) becomes
$$X^2=\frac{1996c-4008}{c-2}.$$
The right-hand side of the equation is an integer for $c=1, 3, 4, 6,
10, 18$. Those numbers do not satisfy the congruence (\ref{eq:kongr}).
The right-hand side of the above equation is not a perfect square for such positive integers $c$, so we conclude there does not exist a solution $X$ of the Pellian equation (\ref{eq:25}).
\indent Now we deal with the general case. Let $(X, Y)$ be a solution of the
equation (\ref{eq:25}) in positive integers. In this case, $\frac{X}{Y}$ is a
good rational approximation of the irrational number $\sqrt{\frac{c+2}{c-2}}$. More
precisely,
$$\left| \frac{X}{Y}-\sqrt{\frac{c+2}{c-2}}\right|
=\frac{1996c-4008}{(\sqrt{c+2}Y+\sqrt{c-2}X)\sqrt{c-2}Y}\leq\frac{1996(c-2)}{
\sqrt{c^2-4}Y^2}<\frac{1996}{Y^2}.$$
The rational approximation of the form
\begin{equation}\label{eq:26}
\left| \frac{X}{Y}-\sqrt{\frac{c+2}{c-2}}\right| <\frac{1996}{Y^2}
\end{equation}
is not good enough to conclude that $\frac{X}{Y}$ is a convergent of the continued fraction expansion of
$\sqrt{\frac{c+2}{c-2}}$. We use Worley and Dujella's theorem from \cite{worley} and
\cite{dujellarsa}.
\begin{theorem}[Worley, Dujella]
\textit{Let $\alpha$ be an irrational number and let $a, b\neq0$ be coprime nonzero integers satisfying the inequality $$\left|
\alpha-\frac{a}{b}\right| <\frac{H}{b^2},$$
where $H$ is a positive real number. Then $$(a, b)=(rp_{m+1}\pm sp_m,\enspace
rq_{m+1}\pm sq_m),$$ for $m, r, s\in\mathbb{N}_0$ such that
$rs<2H,$ where $\frac{p_m}{q_m}$ is the $m$-th convergent of the continued fraction expansion of the irrational number $\alpha$.}
\end{theorem}
According to Worley and Dujella's theorem, we get that every solution $(X, Y)$ of the Pellian
equation (\ref{eq:25}) is of the form
$$X=\pm d(rp_{k+1}+ up_k), \enspace Y=\pm d(rq_{k+1}+ uq_k)$$
for some $k\geq-1$, $u\in\mathbb{Z}$, $r$ a nonnegative
integer and $d=\textrm{gcd}(X,
Y)$ for which the inequality $$|ru|<2\cdot\frac{1996}{d^2}$$ holds.
In order to determine all the integer solutions of the Pellian equation (\ref{eq:25}), we also use a lemma from
\cite{jadri}.
\begin{lemma}[Dujella, Jadrijevi\'{c}]\label{jadrilema}
Let $\alpha\beta$ be a positive integer which is not a perfect square and let
$p_k/q_k$ be the $k$-th convergent of continued fraction expansion of
$\sqrt{\frac{\alpha}{\beta}}$. Let the sequences $(s_k)_{k\geq-1}$ and $(t_k)_{k\geq-1}$ be
the sequences of the integers appearing in the continued fraction expansion of $\frac{\sqrt{\alpha\beta}}{\beta}$. Then
\begin{equation}\label{worley}
\alpha(rq_{k+1}+uq_k)^2-\beta(rp_{k+1}+up_k)^2=(-1)^k(u^2t_{k+1}+2rus_{k+2}
-r^2t_{k+2}).
\end{equation}
\end{lemma}
Applying Lemma \ref{jadrilema}, we obtain
\begin{equation}\label{jadrilema1}
(c+2)Y^2-(c-2)X^2=d^2(-1)^k(u^2t_{k+1}+ 2rus_{k+2}-r^2t_{k+2}),
\end{equation} where ${(s_k)}_{k\geq-1}$ and ${(t_k)}_{k\geq-1}$ are sequences
of integers appearing in the continued fraction expansion of the quadratic irrationality
$\sqrt{\frac{c+2}{c-2}}$. Our next step is to determine the continued fraction expansion of
$\sqrt{\frac{c+2}{c-2}},$ where $c$ is an odd positive integer.
From the continued fraction expansion algorithm we get
$$s_0=0, \enspace t_0=c-2, \enspace a_0=1,$$
$$s_1=c-2, \enspace t_1=4, \enspace a_1=\frac{c-3}{2},$$
$$s_2=c-4, \enspace t_2=2c-5, \enspace a_2=1,$$
$$s_3=c-1, \enspace t_3=1, \enspace a_3=2c-2,$$
$$s_4=c-1, \enspace t_4=2c-5, \enspace a_4=1,$$
$$s_5=c-4, \enspace t_5=4, \enspace a_5=\frac{c-3}{2},$$
$$s_6=c-2, \enspace t_6=c-2, \enspace a_6=2,$$
hence $$\sqrt{\frac{c+2}{c-2}}=\Big[ 1;\overline{\frac{c-3}{2}, 1, 2c-2, 1, \frac{c-3}{2}, 2}\Big].$$
The length $l$ of the period of the continued fraction expansion of $\sqrt{\frac{c+2}{c-2}}$ is $l=6$, so we consider the equation $(\ref{jadrilema1})$ for $k=0, 1, 2, 3, 4, 5$ and determine all the positive integers $c$ that satisfy the congruence $(\ref{eq:kongr})$. From (\ref{eq:25}) and (\ref{jadrilema1}) we get
\begin{equation}\label{eq:dod}
d^2(-1)^k(u^2t_{k+1}+2rus_{k+2}-r^2t_{k+2})=-1996c+4008.
\end{equation}
Obviously, $d$ can be $d=1$ or $d=2,$ for all $k=0, 1, 2, 3, 4, 5$.
Let $k=0.$ From $(\ref{eq:dod}),$ we obtain
$$d^2(u^2t_1+2rus_2-r^2t_2)=-1996c+4008,$$
\begin{equation}\label{eq:opis}
d^2(4u^2+2(c-4)ru-r^2(2c-5))=-1996c+4008.
\end{equation}
First, we deal with the cases when $d=1$ and $d=2$ and check if it is possible that both sides in the above equation are identical for each such $d$.
For $d=1$ we get the system of two equations
\[
\begin{cases}
4u^2-8ru+5r^2=4008,\\
2ru-2r^2=-1996,
\end{cases}
\]
that does not have integer solutions. Analogously, for $d=2$ we get
the system
\[
\begin{cases}
4u^2-8ru+5r^2=1002,\\
2ru-2r^2=-499,
\end{cases}
\]
which also does not have any integer solution.
Generally, for $k=0$ and for all values of $d$, we obtain from (\ref{eq:opis}) that the positive integer $c$ is of the form
\begin{equation}\label{eq:prvic}
c=\frac{4008-4d^2u^2+8d^2ru-5d^2r^2}{1996+2d^2ru-2d^2r^2}.
\end{equation}
Our goal is to determine all positive integers $c$ that satisfy the congruence (\ref{eq:kongr}), that are of the form (\ref{eq:prvic}) and for which the triples $(d, r, u)$ satisfy the conditions $d\in\mathbb{N}, \enspace r\in\mathbb{N}, \enspace u\in\mathbb{Z}, \enspace u\neq0$ and the inequality
\begin{equation}\label{inequality}
d^2|ru|<3992.
\end{equation}
It is useful to mention that the latter condition implies $d\leq63$.\\
\indent Before dealing with the general case, we analyze the case when $ru=0$ from which we obtain
$$c=\frac{4008-4d^2u^2}{1996},$$ for $(r, u)=(0, u)$ and
$$c=\frac{4008-5d^2r^2}{1996-2d^2r^2},$$ for $(r, u)=(r, 0)$.
The equations do not hold for a positive integer $c$, except for $c=2$. Such $c$ does not satisfy the congruence $(\ref{eq:kongr})$ which allows us to conclude that in our case there are no integer solutions of the above equations that derive from these special cases.\\
\indent An algorithm for generating triples $(d, r, u)$ that satisfy the inequality (\ref{inequality}) is created. This algorithm plugs these triples $(d, r, u)$ into $(\ref{eq:prvic})$ and checks if positive integers $c$ satisfy the congruence $(\ref{eq:kongr})$.
For $k=0$ we get $$c\in\{17,227,497,647,857,2537,3107,4937\}.$$ For each such positive integer $c$ we obtain a Pellian equation of the form $(\ref{eq:25})$.
For $k=1$ the equation $(\ref{eq:dod})$ becomes
$$-d^2(u^2(2c-5)+2ru(c-1)-r^2)=-1996c+4008.$$
\indent If we consider the case when both sides in the above equation are identical, for $d=1$ we get $$5u^2+2ru+r^2=4008, \enspace 2u^2+2ru=1996,$$ while for $d=2,$ we obtain $$5u^2+2ru+r^2=1002, \enspace 2u^2+2ru=499.$$ Neither system has integer solutions.\\
\indent Generally, from $(\ref{eq:dod})$ we get that $c$ is represented by
$$c=\frac{5d^2u^2+2d^2ru+d^2r^2-4008}{2d^2u^2+2d^2ru-1996}.$$
The described algorithm is used to get the following values for $c$:
$$c\in\{17, 227, 497, 647, 857, 2537, 3107, 4937\}.$$
For $k=2$ we have
$$d^2(u^2+2ru(c-1)-r^2(2c-5))=-1996c+4008.$$
If both sides in the above equation are identical, for $d=1$ we obtain $$u^2-2ru+5r^2=4008, \enspace 2ru-2r^2=-1996$$ which is the system of two equations that does not have integer solutions. Analogously, for $d=2$ the system
$$u^2-2ru+5r^2=1002, \enspace 2ru-2r^2=-499$$ has no integer solutions.\\
Generally, the positive integer $c$ obtained from $(\ref{eq:dod})$ when $k=2$ is of the form
$$c=\frac{d^2u^2-2d^2ru+5d^2r^2-4008}{2d^2r^2-2d^2ru-1996}.$$
We get
$$c\in\{17,227,497,647,857,2537,3107,4937\}.$$
When $k=3$ from (\ref{eq:dod}) we get
$$-d^2(u^2(2c-5)+2ru(c-4)-4r^2)=-1996c+4008.$$
For $d=1$ and $d=2$ the following systems are obtained respectively
$$5u^2+8ru+4r^2=4008, \enspace 2u^2+2ru=1996$$ and $$5u^2+8ru+4r^2=1002, \enspace 2u^2+2ru=499.$$ Like in previous cases, these systems do not have integer solutions.
Generally, the positive integer $c$ is of the form
$$c=\frac{5d^2u^2+8d^2ru+4d^2r^2-4008}{2d^2u^2+2d^2ru-1996}.$$
We get the following values for $c$ in this case:
$$c\in\{17,227,497,647,857,2537,3107,4937\}.$$
Analogously, for $k=4$ we get
$$d^2(4u^2+2ru(c-2)-r^2(c-2))=-1996c+4008.$$
For $d=1$ we obtain $$4u^2-4ru+2r^2=4008, \enspace 2ru-r^2=1996,$$ while for $d=2$ we get $$4u^2-4ru+2r^2=1002, \enspace 2ru-r^2=499.$$ Neither system has integer solutions. Generally,
$$c=\frac{4d^2u^2-4d^2ru+2d^2r^2-4008}{d^2r^2-2d^2ru-1996}.$$
For $k=4$ we get $$c\in\{17,227,497,647,857,2537,3107,4937\}.$$
Finally, for $k=5$ from (\ref{eq:dod}) we get
$$-d^2(u^2(c-2)-r^2(c-2))=-1996c+4008.$$
If we take into account the case when both sides of the above equation are identical, for $d=1$ we obtain the following system of equations $$2u^2-2r^2=4008, \enspace r^2-u^2=-1996,$$ and for $d=2$ we get $$2u^2-2r^2=1002, \enspace r^2-u^2=499.$$ Neither system has integer solutions. Generally,
\begin{equation}\label{eq:c}c=\frac{2d^2u^2-2d^2r^2-4008}{d^2u^2-d^2r^2-1996}.\end{equation}
There is no $c$ of the form (\ref{eq:c}) which satisfies the congruence $(\ref{eq:kongr})$.
We gather all the possible positive integers $c\equiv17 \pmod{30}$ for $k=0, 1, 2, 3, 4, 5$ that we have determined using described algorithm and set a Pellian equation of the form $(\ref{eq:25})$ for every obtained $c$. The Pellian equations are
$$19Y^2-15X^2=-29924, \enspace \textrm{for} \enspace c=17,$$
$$229Y^2-225X^2=-449084, \enspace \textrm{for} \enspace c=227,$$
$$499Y^2-495X^2=-988004, \enspace \textrm{for} \enspace c=497,$$
$$649Y^2-645X^2=-1287404, \enspace \textrm{for} \enspace c=647,$$
$$859Y^2-855X^2=-1706564, \enspace \textrm{for} \enspace c=857,$$
$$2539Y^2-2535X^2=-5059844, \enspace \textrm{for} \enspace c=2537,$$
$$3109Y^2-3105X^2=-6197564, \enspace \textrm{for} \enspace c=3107,$$
$$4937Y^2-4935X^2=-9850244, \enspace \textrm{for} \enspace c=4937.$$
In order to determine whether these Pellian equations have solutions, we use \cite{alpern}.
\indent First of all, we assume that $X, Y$ are of the form $(\ref{eq:23})$, $(\ref{eq:24})$, respectively. We can easily determine that $X$ satisfies the following congruences
$$X\equiv0\pmod{4}, \enspace X\equiv1\pmod{3}, \enspace X\equiv4\pmod{5}, \enspace \textrm{hence} \enspace X\equiv4\pmod{60}.$$ We set $X=60i+4, \enspace i\in\mathbb{Z}$.
Analogously, $$Y\equiv2\pmod{4}, \enspace Y\equiv1\pmod{3}, \enspace Y\equiv3\pmod{5},$$ hence, $Y\equiv58\pmod{60}$. We set $Y=60j+58, \enspace j\in\mathbb{Z}$.
We deal with the first Pellian equation $$19Y^2-15X^2=-29924.$$ For $X=60i+4$ the above equation becomes
$$19Y^2-54000i^2-7200i+29684=0.$$
Using \cite{alpern} we determine that this Pellian equation does not have any integer solution.
The next Pellian equation is $$229Y^2-225X^2=-449084.$$
For $Y=60j+58,\enspace j\in\mathbb{Z}$, this equation becomes
$$824400j^2-225X^2+1593840j+1219440=0,$$
and it does not have any integer solution according to \cite{alpern}.
The next Pellian equation is \begin{equation}\label{eq:treca}499Y^2-495X^2=-988004.\end{equation}
We can notice that the equation (\ref{eq:treca}) has integer solutions for $X\equiv4\pmod{60}$ and $Y\equiv58\pmod{60}$. We need to get some additional conditions for $X, Y$ in order to reach the conclusion that the equation (\ref{eq:treca}) does not have any integer solution for such $X, Y$.\\
\indent Additionally, we know that $$Y=cy-c-2y=c(y-1)-2y\equiv-2\pmod{(c-2)}.$$ In this case, we have $c-2=495=3^2\cdot5\cdot11$, which implies $Y\equiv-2\pmod{3^2\cdot5\cdot11}$, or, more precisely, $$Y\equiv-2\pmod{9}, \enspace Y\equiv-2\pmod{5}, \enspace Y\equiv-2\pmod{11}.$$ We already know $Y\equiv2\pmod{4}$, so we can easily get $$Y=c(y-1)-2y=497(y-1)-2y\equiv-2y\equiv21, 28, 34, 61, 69\pmod{71}.$$ We set one Pellian equation of the form (\ref{eq:25}) for each residue that we get after dividing $Y$ by $71$ and we analyze each of these equations.
We have $$Y\equiv2\pmod{4}, \enspace Y\equiv 7\pmod{3^2}, \enspace Y\equiv9\pmod{11}, \enspace Y\equiv21\pmod{71}.$$ We get $Y\equiv11878\pmod{140580}$, hence $Y=140580j+11878, \enspace j\in\mathbb{Z}$. For such $Y$ the equation (\ref{eq:treca}) becomes $$9861605463600 j^2 - 495 X^2 + 1666469621520 j + 70403343120=0.$$
According to \cite{alpern} it does not have any integer solution.\\
\indent For $$Y\equiv2\pmod{4}, \enspace Y\equiv 7\pmod{3^2}, \enspace Y\equiv9\pmod{11}, \enspace Y\equiv28\pmod{71},$$ we conclude $Y\equiv27718\pmod{140580}$ and $Y=140580j+27718, \enspace j\in\mathbb{Z},$ so the equation (\ref{eq:treca}) becomes
$$9861605463600 j^2- 495 X^2 + 3888803247120 j+383376462480=0.$$
Using \cite{alpern} we conclude that the above equation does not have any integer solution.
In the case when
$$Y\equiv2\pmod{4}, \enspace Y\equiv 7\pmod{3^2}, \enspace Y\equiv9\pmod{11}, \enspace Y\equiv34\pmod{71},$$ we get that $Y\equiv61387\pmod{140580}$. For $Y=140580j+61387, \enspace j\in\mathbb{Z},$ we get
$$9861605463600j^2-495X^2+8612524891080 j+1880414508735 =0.$$
This equation does not have any integer solution according to \cite{alpern}.
For $$Y\equiv2\pmod{4}, \enspace Y\equiv 7\pmod{3^2}, \enspace Y\equiv9\pmod{11}, \enspace Y\equiv61\pmod{71},$$ we obtain $Y\equiv1978\pmod{140580}$, hence $Y=140580j+1978, \enspace j\in\mathbb{Z}$. We get the Pellian equation
$$9861605463600 j^2- 495 X^2 + 277511105520 j+1953317520 =0.$$
Using online calculator \cite{alpern} we determine that the above equation also does not have any integer solution.\\
\indent Finally, for $$Y\equiv2\pmod{4}, \enspace Y\equiv 7\pmod{3^2}, \enspace Y\equiv9\pmod{11}, \enspace Y\equiv69\pmod{71},$$ we get $Y\equiv140578\pmod{140580}$, which we can write as $Y=140580j+140578, j\in\mathbb{Z}.$ For such $Y$ we get the Pellian equation
$$9861605463600 j^2 - 495 X^2+ 19722930329520 j+ 9861325855920=0$$
which does not have any integer solution according to \cite{alpern}.\\
\indent We have proved that, in our case, the Pellian equation (\ref{eq:treca}) does not have any integer solution.\\
\indent The next Pellian equation is $$649Y^2-645X^2=-1287404.$$
For $Y=60j+58,\ j\in\mathbb{Z},$ we get
$$2336400j^2-645X^2+4517040j+3470640=0,$$
which is the Pellian equation that does not have any integer solution according to \cite{alpern}.\\
\indent The next Pellian equation is $$859Y^2-855X^2=-1706564.$$
For $X=60i+4,\ i\in\mathbb{Z},$ we get
$$859Y^2-3078000i^2-410400i+1692884=0.$$
By \cite{alpern}, the Pellian equation does not have any integer solution.\\
\indent The next Pellian equation is $$2539Y^2-2535X^2=-5059844.$$
It is known that $Y\equiv-2\pmod{(c-2)}$. In our case, we have $Y\equiv-2\pmod{2535}$, or precisely, $Y\equiv-2\pmod{3\cdot5\cdot11^2}$. We can conclude
$$Y\equiv1\pmod{3}, \enspace Y\equiv3\pmod{5}, \enspace Y\equiv167\pmod{169}.$$ It is already known from before that $Y\equiv2\pmod{4}$. We get that $Y\equiv10138\pmod{10140}$, or $Y=10140j+10138,\ j\in\mathbb{Z}$. The Pellian equation of the form
$$261058964400 j^2-2535 X^2 + 522014946960j + 260961052560 =0, $$
does not have any integer solution according to \cite{alpern}.\\
\indent The penultimate Pellian equation is $$3109Y^2-3105X^2=-6197564.$$
For $X=60i+4,\ i\in\mathbb{Z},$ we obtain $$3109Y^2-11178000i^2-1490400i+6147884=0.$$
By \cite{alpern}, this Pellian equation does not have any integer solution.\\
\indent The last Pellian equation
$$4937Y^2-4935X^2=-9850244$$
does not have any integer solution according to \cite{alpern}.
Since Pellian equations of the form (\ref{eq:25}) obtained for all the possible values of positive integers $c$ that satisfy the congruence (\ref{eq:kongr}) do not have solutions $X, Y$ in positive integers, we conclude that there do not exist positive integers of the form $n=2^{\alpha}5^{\beta}, \ \alpha, \beta \in\mathbb{N},$ that satisfy the variation of the congruence of Subbarao (\ref{eq:version}). Consequently, the only positive integers of the form $n=2^{\alpha}5^{\beta}, \ \alpha, \beta\geq0,$ that satisfy the congruence (\ref{eq:version}) are $n=1, 2, 5, 8.$\\
\noindent\textbf{\large{Acknowledgements}}\\ We would like to thank Professor Andrej Dujella for many valuable suggestions and his help with the preparation of this article and to Professor Andrzej Schinzel for valuable remarks.
\end{document} | arXiv | {
"id": "1607.01258.tex",
"language_detection_score": 0.6765549778938293,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Moment estimates for the exponential sum with higher divisor functions} \author{Mayank Pandey} \address{Department of Mathematics, California Institute of Technology, Pasadena, CA 91125} \email{mpandey@caltech.edu} \maketitle
\section{Introduction}
For a sequence $(a_n)_{n\ge 1}$ of arithmetic interest, it is often desirable to have estimates for the $L^p$ norms of the exponential sum $M(\alpha) = \sum_{n\le X}a_ne(n\alpha)$ as $X$ grows. Such estimates are useful in applications of the circle method. In addition, sufficiently strong estimates for them can yield estimates for the distribution function
$\{\alpha\in [0, 1] : |M(\alpha)|\ge \lambda\}$ for $\lambda$ in appropriate ranges.
In the case that $a_n$ is $1$ if $n$ is a $k$th power and $0$ otherwise, such estimates have connections to Waring's problem, and the consequences of conjectured estimates for $\int_0^1 |M(\alpha)|^sd\alpha$ for $s$ in various ranges have been studied by Vaughan and Wooley \cite{VW}.
This problem was also studied by Keil \cite{K} in the case of the indicator function of $k$-free numbers, and the size of $\int_0^1 |M(\alpha)|^sd\alpha$ was estimated up to a constant factor for all $s\ne 1 + \frac{1}{k}$, and in the case $s = 1 + \frac{1}{k}$, it was only determined up to a factor of $\log X$.
In general, when higher values of $s$ are considered, as long as the sequence in question has some structure in arithmetic progressions, the bulk of the contribution ends up coming from narrow regions near a
small number of points (typically rationals with small denominator). For this reason, one typically expects that $\int_0^1 |M(\alpha)|^sd\alpha$ is between $X^{-\eps}A_s(X)$ and $X^{\eps}A_s(X)$ with $A_s(X)$ equal to either $X^{\alpha_1s}$ or $X^{\alpha_1s} + X^{\alpha_2s - \sigma_1}$ for some $\alpha_1 < \alpha_2$, and some $\sigma_1 > 0$. The second case is what happens in the case of $k$-free numbers, as shown in \cite{K}, as well as what is conjectured in the case of $k$th powers (see \cite{VW} for more discussion of this).
In the case of the M\"obius function, the first case is conjectured (it is implied by the Mertens conjecture that $|M(\alpha)|\ll X^{1/2 + \eps}$).
In this paper, we study the case of divisor functions and high moments. In particular, let $k\ge 2$ be some integer, and $s > 2$ be real. Then, let \[\tau_k(n) = \sum_{d_1\dots d_k = n} 1\] and \[M(\alpha) = \sum_{n\le X} \tau_k(n)e(n\alpha).\] Our main result is the following. \begin{theorem} We have
\[\int_0^1 |M(\alpha)|^sd\alpha = X^{s - 1}(\log X)^{s(k - 1)}\sum_{\ell\ge 0}\frac{\gamma_{\ell, s, k}}{(\log X)^\ell} + O(X^{s - 1 - \delta_{s, k} + \eps})\] with \begin{align*} \delta_{s, k} = \frac{2(s - 2)}{(s + 7)(k + 1) + 2}. \end{align*}
for some coefficients $\gamma_{s, k,\ell}$ satisfying the bound $|\gamma_{s, k,\ell}|\ll \exp(O(\ell))$, with $\gamma_{s, k, 0} > 0$. \label{thm:main_result} \end{theorem}
We prove this with a straightforward application of the circle method. For such high moments, the contribution near rationals with small denominator (the major arcs) dominates. We therefore require bounds for the remaining points (the minor arcs).
The minor arc bounds we use follow from a decomposition of $\tau_k$ into type I and type II sums. Vaughan's identity in the proof of analogous bounds for the von Mangoldt function provides this decomposition, though the convolution structure of $\tau_k$ makes the decomposition somewhat more straightforward. The major arc estimates follow from standard estimates for partial sums of $\tau_k(n)\chi(n)$ coming from Voronoi summation (in particular, Theorem 4.16 in \cite{IK}).
In the course of dealing with the main term, we use a result on the order of magnitude of higher moments of Dirichlet kernels, which we state here. A proof of this will be given in a later section. Here, we write \[v(\beta) = \sum_{n\le X} e(n\beta).\] \begin{proposition} We have that for $s > 2$
\[\int_0^1 |v(\beta)|^s d\beta = A_sX^{s - 1} + O\left(X^{s - 2}\right).\] where
\[A_s = \frac{2}{\pi}\int_0^\infty \frac{|\sin t|^s}{t^s}dt.\] \label{prop:dir_ker_moment} \end{proposition}
Our methods likely generalize straightforwardly to the case of $\chi_1 * \dots *\chi_k$ for some fixed Dirichlet characters $\chi_1,\dots,\chi_k$, and yield a similar result. The case of Fourier coefficients of $\GL(k)$ cusp forms is quite distinct however, since it is expected, and was shown by Jutila \cite{J} for some of the $\GL(2)$ case, that the relevant exponential sum is small everywhere. Consequently, the bulk of the contribution should not be expected to come from the major arcs, so the method used here fails.
We have not taken much care to optimize the sizes of the error terms. In particular, the error terms in Proposition \ref{prop:maj_arc1} can likely be improved quite cheaply. However, an error term qualitatively superior to $\delta_{s,k}\ll_s\frac 1k$ is likely quite hard to breach.
\subsection{Notation and conventions}
$X$ is some sufficiently large real number that should be thought of as going to $\infty$, and $\eps > 0$ is some sufficiently small constant. $s > 2$ is a fixed real number, and $k\ge 2$ is some fixed integer. As usual, we use the notation $A\ll B\iff A\le O(B)\iff B\gg A$ to denote that $|A|\le CB$ for some absolute constant $C$. In any instance, this implied constant may depend on $s, k, \eps$, and any further parameters on which it may depend will be noted in a subscript. We write $a\sim A$ to denote that $A < a\le 2A$, and $a\asymp A$ to denote that $A\ll a\ll A$. \section{Setup}
Take $P = X^\eta$, with $\eta = \frac{2}{s - 2}\delta_{s, k}$. It is easy to see that we have the bound $\eta\le\frac{2}{5}$. $\eta$ also has the property that $\frac{2}{k + 1} - \bigg(\frac{9}{2} + \frac{1}{k + 1}\bigg)\eta = \frac{1}{2}\eta(s - 2)$. The significance of this will become clear later on when we are collecting various error terms. Also, let $\mf M$ be the union of
\[ \mf M(q, a) = \{\alpha\in [0, 1] : |\alpha - a/q|\le PX^{-1}\}\] for $q\le P, (a, q) = 1$, and $\mf m = [0, 1]\setminus\mf M$. Note that for large $X$, all the $\mf M(q, a)$ are disjoint. It is easy to see by Dirichlet's approximation theorem that for all $\alpha\in\mf m$, there exist $P < q\le X/P, (a, q) = 1$ so that
$|\alpha - a/q|\le q^{-2}$. Then, the main result follows if we can prove the following two estimates for the contribution of the major and minor arcs. \begin{proposition} We have that
\[\int_{\mf M} |M(\alpha)|^sd\alpha = X^{s - 1}(\log X)^{s(k - 1)}\sum_{\ell\ge 0}\frac{\gamma_{\ell, s, k}}{(\log X)^\ell} + O(X^{s - 1 - \delta_{s, k} + \eps})\] where $\gamma_{s, k, \ell},\delta_{s, k}$ are as in the statement of Theorem \ref{thm:main_result}. \label{prop:MT_est} \end{proposition} \begin{proposition} We have the bound
\[\int_{\mf m} |M(\alpha)|^sd\alpha\ll X^{s - 1 - \delta_{s, k}}(\log X)^{O(1)}.\] \label{prop:min_arc_bound} \end{proposition} \begin{proof} This follows immediately from Proposition \ref{prop:min_arc_est}, whose proof we defer to the last section, and Parseval. Indeed, note that since $\eta\le 2/5$, it follows from Proposition \ref{prop:min_arc_est}
that $\sup_{\alpha\in\mf m} |M(\alpha)|\ll X^{1 - \eta / 2}(\log X)^{O(1)}$, and therefore \begin{align*}
\int_{\mf m} |M(\alpha)|^sd\alpha&\ll \bigg(\sup_{\alpha\in\mf m} |M(\alpha)|\bigg)^{s - 2}\int_0^1 |M(\alpha)|^2d\alpha\\ &\ll (X^{1 - \eta/2})^{s - 2}X(\log X)^{O(1)}\ll X^{s - 1 - \frac{1}{2}\eta(s - 2)}(\log X)^{O(1)}. \end{align*} The proposition follows upon noting that $\frac{1}{2}\eta(s - 2) = \delta_{s, k}$. \end{proof}
In the next section, we shall prove Propositions \ref{prop:MT_est}, \ref{prop:min_arc_est}. The main theorem clearly follows from Propositions \ref{prop:MT_est} and \ref{prop:min_arc_bound}.
\section{Major arc estimates for higher divisor functions} Our main major arc estimate is the following. \begin{proposition} Suppose that $q\ge 1, (a, q) = 1$. Then, we have that \[\sum_{n\le X} \tau_k(n)e\left(\frac{an}{q}\right) = XP_{k, q}(\log X) + O(q^{\frac{1}{2} + \frac{k}{k + 1}}X^{\frac{k - 1}{k + 1}}(qX)^{\eps})\] where $P_{k, q}(\log X)$ is a polynomial of degree $k - 1$ in $\log X$ with coefficients of size $\ll \tau_2(q)^{O(1)}/q$. In addition, the coefficient of $(\log X)^{k - 1}$ is nonnegative and $\gg 1/q$. \label{prop:maj_arc1} \end{proposition} \begin{proof} This follows from the method in the proof of Proposition 4.2 in \cite{MRT}, though we may use Theorem 4.16 in \cite{IK} to achieve the above error terms. \end{proof} From partial summation, we then obtain the following. \begin{corollary}
Suppose that $q\ge 1, (a, q) = 1, |\beta|\le 1$. Then, we have that
\[\sum_{n\le X}\tau_k(n)e\left(\frac{an}{q} + n\beta\right) = Q_{k, q}(\log X)v(\beta) + O((1 + |\beta|X)q^{\frac{1}{2} + \frac{k}{k + 1}}X^{\frac{k - 1}{k + 1}}(qX)^{\eps})\] \label{cor:maj_arc_est} where $Q_{k, q}(\log X)$ is a polynomial of degree $k - 1$ in $\log X$ with coefficients of size $\ll \tau_2(q)^{O(1)}/q$. In addition, the leading coefficient is nonnegative and $\gg 1/q$. \end{corollary}
Before we start dealing with the main term, we shall prove Proposition \ref{prop:dir_ker_moment}. \begin{proof}[Proof of Proposition \ref{prop:dir_ker_moment}] Our proof here uses the method in a MathStackExchange post of daniel-fischer \cite{DF}, though we take some care here to track the error terms.
We shall suppose for simplicity that $X$ is an integer, as it can be easily checked that adjusting $X$ by $O(1)$ alters the main term on the right-hand side only by a quantity that can be absorbed into the error term.
It is well-known then that $v(\beta) = \frac{\sin(\pi (X + 1)\beta)}{\sin(\pi\beta)}$. Taylor expanding $\frac{\pi\beta}{\sin\pi\beta}$, we have that for $\beta\in [0, 1/2]$ \[\frac{\pi\beta}{\sin\pi\beta} = 1 + O(\beta^2)\]
and it can be easily checked that $|\pi\beta/\sin(\pi\beta) - 1|\le 3/4$. Therefore, we have that \[\bigg(1 + \bigg(\frac{\pi\beta}{\sin\pi\beta} - 1\bigg)\bigg)^p = 1 + O(\beta^2)\] so
\[\int_0^1 |v(\beta)|^sd\beta = 2\int_0^{1/2} |\sin((X + 1)\pi\beta)|^s(\pi\beta)^{-s} d\beta + O\bigg(\int_0^{1/2}|\sin((X + 1)\pi\beta)|^s\beta^{2 - s}d\beta\bigg).\]
By the bound $|\sin((X + 1)\pi\beta)|^s\ll \min(1, (\beta X)^s)$, the term inside the $O(\cdot)$ is \[\ll\int_0^{1/X} (\beta X)^s\beta^{2 - s}d\beta + \int_{1/X}^{1/2} \beta^{2 - s}d\beta\ll X^{s - 2}.\] Now, by a change of variables, the main term equals \begin{align*}
\frac{2}{\pi(X + 1)}\int_0^{(X + 1)\pi/2} |\sin t|^s(t/(X + 1))^{-s} dt &= \frac{2}{\pi}(X + 1)^{s - 1}\int_0^{(X + 1)\pi/2} \frac{|\sin t|^s}{t^s}dt\\
&= \frac{2}{\pi}X^{s - 1}\int_0^\infty \frac{|\sin t|^s}{t^s}dt + O(X^{s - 2}). \end{align*} as we have by a trivial bound that
\[\int_0^{(X + 1)\pi/2} \frac{|\sin t|^s}{t^s}dt = \int_0^\infty \frac{|\sin t|^s}{t^s}dt + O(X^{1 - s}).\] The desired result follows. \end{proof} We will now prove Proposition \ref{prop:MT_est} using Proposition \ref{prop:dir_ker_moment}. \begin{proof}[Proof of Proposition \ref{prop:MT_est}] From the definition of $\mf M$, we have that \begin{align*}
\int_{\mf M} |M(\alpha)|^sd\alpha &= \sum_{q\le P}\sumCp_{a(q)}\int_{-P/X}^{P/X} \bigg|M\left(\frac{a}{q} + \beta\right)\bigg|^s d\beta\\
&= \sum_{q\le P}\varphi(q)Q_{k, q}(\log X)^s\int_{-P/X}^{P/X} |v(\beta)|^s d\beta + O(X^{s - 1} P^{\frac{9}{2} + \frac{1}{k + 1}}X^{-\frac{2}{k + 1} + \eps}). \end{align*} We may extend the range of integration to $[-1/2, 1/2]$ at a total loss of $\ll P(X/P)^{s - 1} (\log X)^{O(1)}\ll X^{s - 1 - (s - 2)\eta}(\log X)^{O(1)}$ by the bound $v(\beta)\ll \min(X, \norm\beta^{-1})$. Applying Proposition \ref{prop:dir_ker_moment} then yields that the above equals \begin{align*} A_sX^{s - 1}\sum_{q\le P}\varphi(q)Q_{k, q}(\log X)^s + O(X^{s - 1} P^{\frac{9}{2} + \frac{1}{k + 1}}X^{-\frac{2}{k + 1} + \eps} + X^{s - 1}X^{-(s - 2)\eta + \eps}). \end{align*} Now, writing $Q_{k, q}(\log X) = \alpha_0(q) + \dots + \alpha_{k - 1}(q)(\log X)^{k - 1}$, we obtain that \begin{align*} Q_{k, q}(\log X)^s &= (\log X)^{s(k - 1)}\alpha_0(q)^s\left(1 + \frac{\alpha_1(q)\alpha_0(q)^{-1}}{\log X} + \dots + \frac{\alpha_{k - 1}(q)\alpha_0(q)^{-1}}{(\log X)^{k - 1}}\right)^s \\
&= \alpha_0(q)^{s}(\log X)^{s(k - 1)}\sum_{\ell\ge 0}\frac{(s)\dots (s - \ell + 1)}{\ell!}\cdot\frac{\beta_\ell(q)}{(\log X)^\ell} \end{align*}
for some coefficients $\beta_{\ell}(q)$ with $\beta_0(q) = 1$, and $|\beta_{\ell}(q)|\ll\tau_2(q)^{O(\ell)}$ for $\ell\ge 1$. Here, we have used the fact that $\alpha_0(q)$ is nonnegative and $\gg 1/q$. Executing the summation over $q$, we thus obtain that
\[\int_{\mf M} |M(\alpha)|^sd\alpha = X^{s - 1}(\log X)^{s(k - 1)}\sum_{\ell\ge 0}\frac{\gamma_{s, k,\ell}}{(\log X)^\ell} + O(X^{s - 1 + \left(\frac{9}{2} + \frac{1}{k + 1}\right)\eta - \frac{2}{k + 1} + \eps} + X^{s - 1 - (s - 2)\eta + \eps})\]
for some coefficients $\gamma_{s, k, \ell}$ satisfying the bound $|\gamma_{s, k,\ell}|\ll \exp(O(\ell))$. The desired result follows from our choice of $\eta$, as $(s - 2)\eta = 2\delta_{s, k}$, and as we noted previously \[\left(\frac{9}{2} + \frac{1}{k + 1}\right)\eta - \frac{2}{k + 1} = -\frac{s - 2}{2}\eta = -\delta_{s, k}.\] \end{proof}
\section{The minor arcs} To bound $M(\alpha)$ on the minor arcs, we shall use the following bound. This is essentially the same bound one obtains in the case of the von Mangoldt function. Our proof proceeds in the same manner as this case, through a decomposition of $\tau_k(n)$ into type I and type II sums.
\begin{proposition}
Suppose that $\alpha, a, q$ are such that $(a, q) = 1, |\alpha - \frac{a}{q}|\le 1/q^2$. Then, we have that
\[\bigg|\sum_{n\le X} \tau_k(n)e(n\alpha)\bigg|\ll \left(\sqrt{qX} + \frac{X}{\sqrt{q}} + X^{4/5}\right)(\log X)^{O(1)}\] \label{prop:min_arc_est} \end{proposition} \begin{proof} First, it is easy to see by splitting into dyadic intervals that it suffices to show the result with a sum over $n\sim X$, so we shall assume this from now on.
Our proof follows similarly to the proof of minor arc bounds for the exponential sum with the von Mangoldt function, though our decomposition into type I and II sums will follow straightforwardly from the structure of $\tau_k$ as a Dirichlet convolution. Note that \[\tau_k\charf{[1, 2X]} = \bigg(\substack{\underbrace{\charf{[1, 2X]}*\dots*\charf{[1, 2X]}}\\\text{k times}}\bigg)\charf{[X, 2X]}. \] Decomposing $[1, 2X]$ into dyadic intervals then yields that this is a linear combination (with coefficients of size $O(1)$) of $O((\log X)^{O(1)})$ summands of the form \[(\charf{I_1}*\dots*\charf{I_k})\charf{[X, 2X]}\] where $I_j$ is of the form either $[N_j, 2N_j]$ or $[N_j, 2X]$ (with $N_j\ge X$ in the second case) for all $j$, for some $1\le N_1\le\dots\le N_k$ satisfying $N_1\dots N_k\asymp X$. It suffices then to show the bound in the proposition for sums of the form \[\sum_{n\sim X} (\charf{I_1}*\dots*\charf{I_k})(n)e(n\alpha).\] We have two cases. If all the $N_k\le X^{1/5}$, then there exists a $j$ so that $X^{2/5}\ll N_1\dots N_j, N_{j + 1}\dots N_k\ll X^{3/5}$ so it follows that the sum equals \[\sum_{\substack{m\asymp N_1\dots N_j\\ n\asymp N_{j + 1}\dots N_k}} a(m)b(n)e(\alpha mn)\]
for some coefficients $a(m), b(n)$ so that $|a(m)|\ll \tau_j(m), |b(n)|\ll\tau_{k - j}(n)$. The bound then follows from a standard bound for type II sums (Lemma 13.8 in \cite{IK}, for example, suffices).
Otherwise, we have that $N_k > X^{1/5}$, so the sum equals \[\sum_{n\asymp N_k, m\asymp N_1\dots N_{k - 1}} a(m)e(\alpha mn)\] for some coefficients $a(m)$ bounded by $\tau_{k - 1}(n)$, so the desired result then follows from standard bounds on type I sums, such as Lemma 13.7 in \cite{IK}.
\end{proof}
\emph{Acknowledgements.} The author would like to thank the anonymous referee for various corrections as well as for pointing out the simplified decomposition into type I and II sums used in the proof of Proposition \ref{prop:min_arc_est}.
\end{document} | arXiv | {
"id": "1908.04286.tex",
"language_detection_score": 0.7444153428077698,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Revealing hidden standard tripartite nonlocality by local filtering}
\author{Qiao-Qiao Lv$^1$ \and Jin-Min Liang $^1$ \and Zhi-Xi Wang$^1$ \and Shao-Ming Fei$^{1,2}$ } \institute{ Zhi-Xi Wang\at
\email{wangzhx@cnu.edu.cn}\\
\and Shao-Ming Fei \at
\email{feishm@cnu.edu.cn}\\
\at 1 School of Mathematical Sciences, Capital Normal University, Beijing 100048, China\\
\at 2 Max Planck Institute for Mathematics in the Sciences, Leipzig 04103, Germany }
\maketitle
\begin{abstract} \label{intro} Quantum nonlocality is a kind of significant quantum correlation that is stronger than quantum entanglement and EPR steering. The standard tripartite nonlocality can be detected by the violation of the Mermin inequality. By using local filtering operations, we give a tight upper bound on the maximal expected value of the Mermin operators. By detailed examples we show that the hidden standard nonlocality can be revealed by local filtering which can enhance the robustness of the noised entangled states.
\keywords{Mermin inequality, Standard tripartite nonlocality, Local filtering} \end{abstract}
\section{Introduction} Nonlocal quantum correlations are an essential feature of quantum physics \cite{EPRnonlocal}. For bipartite systems the Bell nonlocality \cite{BellNonlocal,Wiseman,StrongNonlocal} gives rise to stronger quantum correlations than quantum entanglement \cite{HHHH2009entangle,LS2019,CG2005} and EPR steering \cite{steer2020}. As a fascinating counterintuitive phenomenon related to the foundation of quantum mechanics \cite{Bell1966}, nonlocality is understood as a resource in quantum information processing, ranging from quantum computation \cite{Liang2020,Liang2022} and quantum key distribution \cite{AK1991} to random number certification \cite{Nature2010}. It is therefore significant to detect the nonlocality of given quantum systems.
A bipartite quantum state $\rho_{AB}$ is said to be locally correlated if the joint probability distribution satisfies \cite{Wiseman},
\begin{eqnarray}\nonumber P(a, b|A_x, B_y)=\sum_{\lambda}p_{\lambda}P(a|A_x,\lambda)P(b|B_y,\lambda), \end{eqnarray}
where $p_{\lambda}$ is the probability distribution over the hidden variables $\lambda$, $p_{\lambda}\geq 0$, $\sum_{\lambda}p_{\lambda}=1$, $P(a,b|A_x, B_y)$ denotes the joint probability distribution that Alice performs measurement $A_x$ on subsystem $A$ with outcome $a$, and Bob performs measurement $B_y$ on subsystem $B$ with outcome $b$, $P(a|A_x, \lambda)$ denotes the conditional probability of getting outcome $a$ when Alice performs the measurement $A_x$ on her subsystem, $P(b|B_y, \lambda)$ is similarly defined. The bipartite nonlocality can be detected by the violation of a kind of Bell inequality, the CHSH inequality \cite{CHSH1969}.
The maximal violation of a Bell inequality can be enhanced by local filtering operations \cite{CHSHviolate}. In \cite{filterCHSH} the authors presented a class of two-qubit entangled states admitting local hidden variable models, and showed that these states violate a Bell inequality after the local filtering. Namely, there exist entangled states whose so-called hidden non-locality can be revealed by using a sequence of measurements. In fact, local filtering operations can reveal not only hidden quantum nonlocality, but also hidden quantum steerability \cite{FilterSteer}.
For tripartite systems, the Mermin inequality \cite{Mermin1990} is a natural generalization of the CHSH inequality, which can be violated by not only genuine tripartite nonlocal states but also by standard tripartite nonlocal states. The upper bound on the maximal expectation value of the Mermin operator has been nicely derived in \cite{QIP2019}, though not always tight. Similar to the enhanced maximal violation of Bell inequalities, it is interesting to study the enhancement of the maximal violation of the Mermin inequality under local filtering.
In this work, we investigate the maximal violation of the Mermin inequality under local filtering. We first analyze and obtain the tight upper bounds on the maximal expected value of the Mermin operator under local filtering. Then applying our results to some special quantum states, the isotropic states and the noisy GHZ states, we show that local filtering can reveal the hidden standard nonlocality. Moreover, it is shown that the local filtering can transform the initial noisy state to a state with stronger tripartite nonlocality.
\section{Tight upper bound on Mermin operator under local filtering} \label{sec:1} A tripartite state $\rho_{ABC}$ is fully locally correlated if the joint probability distribution admits a local hidden variable (LHV) model \cite{Wiseman}, namely, \begin{eqnarray}\nonumber \begin{aligned}
P(a,b,c|A_x, B_y, C_z)=\sum_{\lambda}p_{\lambda}P(a|A_x, \lambda)P(b|B_y, \lambda)P(c|C_z, \lambda) \end{aligned} \end{eqnarray}
for all $x, y, z, a, b$ and $c$, where $P(a,b,c|A_x, B_y, C_z)$ is the joint probability when Alice, Bob and Charlie perform local measurements $A_x$, $B_y$ and $C_z$ with outcomes $a$, $b$ and $c$, respectively, $p_{\lambda}\geq 0$ is the probability distribution over the hidden variable $\lambda$, $\sum_{\lambda}p_{\lambda}=1$, $P(a|A_x, \lambda)$ denotes the conditional probability of obtaining outcome $a$ when Alice performs the measurement $A_x$ on her subsystem, $P(b|B_y, \lambda)$ and $P(c|C_z, \lambda)$ are similarly defined.
The tripartite non-locality of arbitrary 3-qubit states $\rho$ can be detected by the violation of the Mermin inequality $|\langle\mathcal{M}\rangle_{\rho}|\equiv |\textrm{Tr}[\mathcal{M} \rho]|\leq 2$. The Mermin operator $\mathcal{M}$ has the form \cite{Mermin1990}, $$\mathcal{M}=A_0\otimes (B_0C_1+B_1C_0)+A_1\otimes (B_0C_0-B_1C_1),\nonumber$$ where $A_0$, $A_1$, $B_0$, $B_1$, $C_0$ and $C_1$ are quantum mechanical observables of the form $K=\vec{k}\cdot\vec{\sigma}=\sum\limits ^{3}_{i=1}k_i\sigma_i$, with a unit vector $\vec{k}\in\{\vec{a}, \vec{a}', \vec{b}, \vec{b}', \vec{c}, \vec{c}'\}$, the standard Pauli matrices $\sigma_i$, $K\in\{A_0, A_1, B_0, B_1, C_0, C_1\}$.
In a recent work \cite{QIP2019} it has been shown that the maximal expectation value of the Mermin operator for arbitrary 3-qubit state $\rho$ is given by $\mathcal{Q}_{\mathcal{M}}:= \max|\langle\mathcal{M}\rangle_{\rho}|\leq 2\sqrt{2}\lambda_{\max}$, where $\lambda_{\max}$ is the largest singular value of matrix $C$, $C=(C_{j,ik})$ is the correlation matrix of $\rho$ with entries given by $C_{ijk}=\textrm{Tr}[\rho(\sigma_i\otimes\sigma_j\otimes\sigma_k)]$, $i, j, k=1, 2, 3$. This upper bound is tight if the degeneracy with respect to the largest singular value $\lambda_{\max}$ is more than 1, and the two degenerate nine-dimensional singular vectors corresponding to $\lambda_{\max}$ take the forms of $\vec{a}\otimes\vec{c}-\vec{a}'\otimes\vec{c}'$ and $\vec{a}\otimes\vec{c}'+\vec{a}'\otimes\vec{c}$.
Next, we investigate the violations of the Mermin inequality under local filtering, by computing the maximal expectation values of the Mermin operators with respect to the locally filtered 3-qubit states. For any 3-qubit state $\rho$, after local filtering one gets \cite{filterCHSH}, \begin{eqnarray}\label{rhop} \begin{aligned} \rho'=\frac{1}{F}(F_A\otimes F_B\otimes F_C)\rho(F_A\otimes F_B\otimes F_C)^{\dag}, \end{aligned} \end{eqnarray} where $F_A$, $F_B$ and $F_C$ are positive operators acted locally on the three subsystems respectively, $F=\textrm{Tr}[(F_A\otimes F_B\otimes F_C)\rho(F_A\otimes F_B\otimes F_C)^{\dag}]$ is the normalization constant. Suppose the filter operators $F_A$, $F_B$ and $F_C$ have the following spectral decompositions, \begin{align}\label{filter} F_A=U\Sigma_A U^{\dag},~~ F_B=V\Sigma_B V^{\dag},~~ F_C=W\Sigma_C W^{\dag}, \end{align} where $U$, $V$ and $W$ are unitary operators. Set \begin{eqnarray} \begin{aligned} \alpha_i=\Sigma_A\sigma_i\Sigma_A,~~ \beta_j=\Sigma_B\sigma_j\Sigma_B,~~ \gamma_k=\Sigma_C\sigma_k\Sigma_C, \end{aligned} \end{eqnarray} for $i, j, k=1, 2, 3$. Without loss of generality, we assume that the singular matrices have the forms, \begin{eqnarray} \begin{aligned} \Sigma_A=\left(\begin{array}{cc}
l&0\\
0&1 \end{array}\right),~~ \Sigma_B=\left(\begin{array}{cc}
m&0\\
0&1 \end{array}\right),~~ \Sigma_C=\left(\begin{array}{cc}
n&0\\
0&1 \end{array}\right), \end{aligned} \end{eqnarray} with $l,m,n\geq 0$.
We have the following theorem which provides a tight upper bound on the maximal violation value of the Mermin inequality.
\begin{theorem} For an arbitrary 3-qubit quantum state $\rho$, the maximal expectation value of the Mermin operator for the filtered state $\rho'$ satisfies \begin{eqnarray}\label{QM} \begin{aligned}
\max|\langle\mathcal{M}\rangle_{\rho'}|\leq 2\sqrt{2}\lambda{'}_{\max}, \end{aligned} \end{eqnarray} where $\lambda{'}_{\max}$ is the maximal singular value of $\frac{\tilde{D}}{F}$, where $\tilde{D}=(\tilde{D}_{j,ik})$ with $\tilde{D}_{ijk}=\textrm{Tr}[\tilde{\rho}(\alpha_i\otimes \beta_j\otimes \gamma_k)]$, $\tilde{\rho}$ is a state that is locally unitary equivalent to $\rho$. \end{theorem}
\begin{proof} Based on the dual relation between $SU(2)$ and $SO(3)$ \cite{SO1995}, we have $U\sigma_i U^{\dag}=\sum_{i{'}}O_{ii{'}}\sigma_{i{'}}$, where $U$ is a unitary operator and $O=(O_{ij})$ belongs to $SO(3)$. Therefore, we have \begin{eqnarray} \begin{aligned} C'_{ijk}&=\textrm{Tr}[\rho'(\sigma_i\otimes\sigma_j\otimes\sigma_k)]\\ &=\textrm{Tr}[\frac{(F_A\otimes F_B\otimes F_C)\rho(F_A\otimes F_B\otimes F_C)^{\dag}}{F}(\sigma_i\otimes\sigma_j\otimes\sigma_k)]\\ &=\frac{1}{F}\textrm{Tr}[\rho(U\Sigma_A U^{\dag}\otimes V\Sigma_B V^{\dag}\otimes W\Sigma_C W^{\dag})(\sigma_i\otimes\sigma_j\otimes\sigma_k)(U\Sigma_A U^{\dag}\otimes V\Sigma_B V^{\dag}\otimes W\Sigma_C W^{\dag})]\\ &=\frac{1}{F}\textrm{Tr}[(U^{\dag}\otimes V^{\dag}\otimes W^{\dag})\rho(U\otimes V\otimes W)(\Sigma_AU^{\dag}\sigma_i U\Sigma_A\otimes\Sigma_BV^{\dag}\sigma_j V\Sigma_B\otimes\Sigma_CW^{\dag}\sigma_k W\Sigma_C)]\\ &=\frac{1}{F}\textrm{Tr}[\tilde{\rho}(\Sigma_A\sum\limits_{i'}O_{ii'}\sigma_{i'}\Sigma_A\otimes \Sigma_B\sum\limits_{j'}O_{jj'}\sigma_{j'}\Sigma_B\otimes\Sigma_C\sum\limits_{k'}O_{kk'}\sigma_{k'}\Sigma_C)]\\ &=\frac{1}{F}\sum\limits_{i'j'k'}O_{ii'}O_{jj'}O_{kk'}\textrm{Tr}[\tilde{\rho}(\Sigma_A\sigma_{i'}\Sigma_A\otimes \Sigma_B\sigma_{j'}\Sigma_B\otimes\Sigma_C\sigma_{k'}\Sigma_C)]\\ &=\frac{1}{F}\sum\limits_{i'j'k'}O_{ii'}O_{jj'}O_{kk'}\textrm{Tr}[\tilde{\rho}(\alpha_{i'}\otimes \beta_{j'}\otimes \gamma_{k'})]\\ &=\frac{1}{F}[O_A \tilde{D} (O^{T}_{B}\otimes O^{T}_{C})]_{ijk}. \end{aligned} \end{eqnarray} Hence, $C'=\frac{O_A \tilde{D} (O^{T}_{B}\otimes O^{T}_{C})}{F}$, and $(C')^{\dag}C'=\frac{1}{F^2}(O_B\otimes O_C)\tilde{D}^{\dag}O_A^{\dag}O_A \tilde{D}(O_B\otimes O_C)^{\dag}=\frac{1}{F^2}(O_B\otimes O_C)\tilde{D}^{\dag} \tilde{D}(O_B\otimes O_C)^{\dag}$. As $O_B$ and $O_C$ belong to $SO(3)$, $(C')^{\dag}C'$ has the same eigenvalues as $\frac{\tilde{D}^{\dag}\tilde{D}}{F^2}$. That is to say, $\lambda{'}_{\max}$ is also the maximal singular value of $\frac{\tilde{D}}{F}$. 
\end{proof}
\textit{\textbf{Remark }} The normalization factor $F$ has the following form \begin{eqnarray}\nonumber \begin{aligned} F&=\textrm{Tr}[(F_A\otimes F_B\otimes F_C)\rho(F_A\otimes F_B\otimes F_C)^{\dag}]\\ &=\textrm{Tr}[(U\Sigma_A U^{\dag}\otimes V\Sigma_B V^{\dag}\otimes W\Sigma_C W^{\dag})\rho(U\Sigma_A U^{\dag}\otimes V\Sigma_B V^{\dag}\otimes W\Sigma_C W^{\dag})]\\ &=\textrm{Tr}[\rho(U\Sigma^2_A U^{\dag}\otimes V\Sigma^2_B V^{\dag}\otimes W\Sigma^2_C W^{\dag})]\\ &=\textrm{Tr}[(U^{\dag}\otimes V^{\dag}\otimes W^{\dag})\rho(U\otimes V\otimes W)(\Sigma^2_A\otimes\Sigma^2_B\otimes\Sigma^2_C)]\\ &=\textrm{Tr}[\tilde{\rho}(\Sigma^2_A\otimes\Sigma^2_B\otimes\Sigma^2_C)], \end{aligned} \end{eqnarray} where $\tilde{\rho}$ and $\rho$ are local unitary equivalent. They have the same maximal violation value of the Mermin inequality.
Note that the inequality (\ref{QM}) saturates if the degeneracy of $\lambda'_{\max}$ is more than 1, and the two nine-dimensional singular vectors corresponding to $\lambda_{\max}$ take the forms of $\vec{a}\otimes\vec{c}-\vec{a}'\otimes\vec{c}'$ and $\vec{a}\otimes\vec{c}{'}+\vec{a}{'}\otimes\vec{c}$, respectively.
To illustrate the theorem let us consider the following examples.
\textit{\textbf{Example 1.}} Consider the 3-qubit mixed Greenberger-Horne-Zeilinger (GHZ) state \cite{EX12015},
$$\rho_{GHZ}=p|GHZ\rangle\langle GHZ|+\frac{1-p}{4} I_2\otimes\tilde{I},$$
where $0\leq p\leq 1$, $|GHZ\rangle=\frac{|000\rangle+|111\rangle}{\sqrt{2}}$, $I_2$ is the $2\times 2$ identity matrix and $\tilde{I}= $diag$(1, 0, 0, 1)$. The state $\rho_{GHZ}$ is shown to be genuine multipartite entangled for $\frac{1}{3}< p\leq 1$, and it admits bilocal hidden model for $0\leq p\leq 0.41667$ \cite{EX12015}. Later, Li \emph{et al.} pointed out that $\rho_{GHZ}$ is genuine multipartite nonlocal \cite{GN2013,LM2017} for $0.707107<p\leq 1$, namely, it violates the Svetlichny inequality (SI) \cite{Svetlichny1987} when $0.707107<p\leq 1$. The maximal violation of the Svetlichny inequality under local filtering has been also calculated \cite{FOP}. Recently, the upper bound of the Mermin operator has been studied in \cite{QIP2019}, which shows that the state violates the Mermin inequality if $\frac{1}{2}<p\leq 1$, i.e., the state is standard nonlocal for $\frac{1}{2}<p\leq 1$.
By direct calculation, we have the correlation matrix of $\rho_{GHZ}$, \begin{eqnarray} \begin{aligned} C=\left(\begin{array}{ccccccccc}
p&0&0&0&-p&0&0&0&0\\
0&-p&0&-p&0&0&0&0&0\\
0&0&0&0&0&0&0&0&0 \end{array}\right) \end{aligned} \end{eqnarray} and \begin{eqnarray} \begin{aligned} D=\left(\begin{array}{ccccccccc}
plmn&0&0&0&-plmn&0&0&0&0\\
0&-plmn&0&-plmn&0&0&0&0&0\\
0&0&0&0&0&0&0&0&T \end{array}\right), \end{aligned} \end{eqnarray} where $D=(D_{j,ik})$, $D_{ijk}=\textrm{Tr}[\rho_{GHZ}(\alpha_i\otimes \beta_j\otimes \gamma_k)]$. The singular values of $D$ are $\sqrt{2}plmn$, $\sqrt{2}plmn$ and $T=\frac{(l^2-1)(m^2n^2+1)}{4}+\frac{(l^2+1)(m^2n^2-1)}{4}p$. $\tilde{\rho}_{GHZ}$ is locally unitary equivalent to $\rho_{GHZ}$. Then we have that $\frac{\sqrt{2}plmn}{F}$, $\frac{\sqrt{2}plmn}{F}$ and $\frac{T}{F}$ are the singular values of the matrix $\frac{\tilde{D}}{F}$, where \begin{eqnarray}\nonumber F=\textrm{Tr}[\rho_{GHZ}(\Sigma^2_A\otimes\Sigma^2_B\otimes\Sigma^2_C)]=\frac{(l^2+1)(m^2n^2+1)}{4}+\frac{(l^2-1)(m^2n^2-1)}{4}p. \end{eqnarray} The maximal singular value is $\lambda^{'}_{\max}=\frac{\sqrt{2}plmn}{F}$ for given $p$ with $\frac{\sqrt{2}plmn}{F}>\frac{T}{F}$. Then the upper bound of the maximal value of the Mermin operator is $2\sqrt{2}\lambda{'}_{\max}=\frac{4plmn}{F}$. Two singular vectors corresponding to the singular value $\lambda{'}_{\max}$ with degeneracy 2 can be chosen as $\vec{v}_1=(-1,0,0,0,1,0,0,0,0)^T$ and $\vec{v}_2=(0,1,0,1,0,0,0,0,0)^T$, which can be further written as \begin{eqnarray} &\vec{v}_1=(1,0,0)^T\otimes(-1,0,0)^T-(0,-1,0)^T\otimes(0,1,0)^T\nonumber,\\ &\vec{v}_2=(1,0,0)^T\otimes(0,1,0)^T+(0,-1,0)^T\otimes(-1,0,0)^T\nonumber. \end{eqnarray} By taking $\vec{a}=(1,0,0)^T$, $\vec{a}'=(0,-1,0)^T$, $\vec{c}=(-1,0,0)^T$, $\vec{c}'=(0,1,0)^T$, $\vec{b}$ and $\vec{b}'$ some suitable unit vectors, the upper bound $\frac{4plmn}{F}$ of $\rho_{GHZ}$ is attained. Therefore, the state violates the Mermin inequality if $\frac{4plmn}{F}>2$ under the restriction $\frac{\sqrt{2}plmn}{F}>\frac{T}{F}$. As a result, the standard nonlocality of the state $\rho'_{GHZ}$ can be detected by the Mermin inequality for $0.471428<p\leq 1$. However, the state violates the Mermin inequality if $\frac{1}{2}<p\leq 1$ \cite{QIP2019}. 
Hence, the hidden standard nonlocality of $\rho_{GHZ}$ is revealed by local filtering operation for $0.471428\leq p\leq 0.5$, see FIG. 1. \begin{figure}
\caption{The state $\rho_{GHZ}$ violates Mermin inequality (MI) for $0.5<p\leq 1$. The locally filtered state shows standard nonlocality for $0.471428<p\leq 1$. The hidden standard tripartite nonlocality is revealed for $0.471428\leq p\leq 0.5$.}
\end{figure}
\textit{\textbf{Example 2.}} Consider the following state given in \cite{EX22007}, \begin{eqnarray} \begin{aligned}
\rho =p |\Psi\rangle\langle\Psi|+(1-p)|00\rangle\langle00|\otimes\frac{I_2}{2}, \end{aligned} \end{eqnarray}
where $0\leq p\leq 1$, $|\Psi\rangle=\cos\frac{\pi}{8}|000\rangle+\sin\frac{\pi}{8}|111\rangle$. Under local filtering we have \begin{eqnarray} \begin{aligned} D=\left(\begin{array}{ccccccccc}
\frac{plmn}{\sqrt{2}}&0&0&0&-\frac{plmn}{\sqrt{2}}&0&0&0&0\\
0&-\frac{plmn}{\sqrt{2}}&0&-\frac{plmn}{\sqrt{2}}&0&0&0&0&0\\
0&0&0&0&0&0&0&0&T \end{array}\right). \end{aligned} \end{eqnarray} The singular values of $D$ are $plmn$, $plmn$ and $T=\frac{l^2m^2(n^2-1)}{2}+\frac{-2+\sqrt{2}+2l^2m^2+\sqrt{2}l^2m^2n^2}{4}p$. Since $\tilde{\rho}$ is locally unitary equivalent to $\rho$, the singular value of the matrix $\frac{\tilde{D}}{F}$ are $\frac{plmn}{F}$, $\frac{plmn}{F}$ and $\frac{T}{F}$, where \begin{eqnarray}\nonumber F=\textrm{Tr}[\rho(\Sigma^2_A\otimes\Sigma^2_B\otimes\Sigma^2_C)]=\frac{l^2m^2(n^2+1)}{2}+\frac{2-\sqrt{2}-2l^2m^2+\sqrt{2}l^2m^2n^2}{4}p. \end{eqnarray} The maximal singular value is $\lambda'_{\max}=\frac{plmn}{F}$ for given $p$ with $\frac{plmn}{F}>\frac{T}{F}$. Then the upper bound of the maximal value of the Mermin operator is $2\sqrt{2}\lambda'_{\max}=\frac{2\sqrt{2}plmn}{F}$. This bound can be attained by selecting the two singular vectors, corresponding to the singular value $\lambda'_{\max}$ with degeneracy 2, to be $\vec{v}_1=(-1,0,0,0,1,0,0,0,0)^T$ and $\vec{v}_2=(0,1,0,1,0,0,0,0,0)^T$, which can be decomposed to
\begin{eqnarray} &\vec{v}_1=(1,0,0)^T\otimes(-1,0,0)^T-(0,-1,0)^T\otimes(0,1,0)^T\nonumber,\\ &\vec{v}_2=(1,0,0)^T\otimes(0,1,0)^T+(0,-1,0)^T\otimes(-1,0,0)^T\nonumber. \end{eqnarray} Let $\vec{a}=(1,0,0)^T$, $\vec{a}'=(0,-1,0)^T$, $\vec{c}=(-1,0,0)^T$, $\vec{c}'=(0,1,0)^T$. Together with some suitable unit vectors $\vec{b}$ and $\vec{b}'$, the upper bound $\frac{2\sqrt{2}plmn}{F}$ is attained. Therefore, the state violates the Mermin inequality if $\frac{2\sqrt{2}plmn}{F}>2$ under the restriction $\frac{plmn}{F}>\frac{T}{F}$, namely, the state $\rho$ violates the Mermin inequality if $0.318675 < p\leq 1$, for which the standard nonlocality of the state $\rho'$ is detected. The maximal violation of the Mermin inequality is shown in FIG. 2. \begin{figure}
\caption{$f(p)$ is the maximal value of $\mathcal{Q}(M)$. The red line represents the maximal violation of the locally filtered state $\rho'$. The blue line represents the maximal violation value of the initial state $\rho$.}
\end{figure}
Based on the protocol introduced in \cite{QIP2019}, $\rho$ is standard tripartite nonlocal for $0.707107<p\leq 1$. Therefore, the state $\rho$ shows hidden standard tripartite nonlocality for $0.318675\leq p\leq 0.707107$, see FIG. 3. \begin{figure}
\caption{The state $\rho$ does not violate SI for $0\leq p\leq 1$. It violates the Mermin inequality for $0.707107< p\leq 1$. After locally filtering $\rho'$ is standard nonlocal for $0.318675< p\leq 1$. The hidden standard nonlocality is revealed for $0.318675\leq p\leq 0.707107$.}
\end{figure}
\textit{\textbf{Example 3.}} The interaction between a quantum system and its environment may reduce the entanglement and nonlocality of the system. The GHZ state is a genuine tripartite nonlocal state as violates the Svetlichny inequality. Let us consider that the GHZ state goes through the amplitude damping(AD) noise channel which maps a qubit state $\rho$ to $\mathcal{E}_{AD}(\rho)=E_0\rho E^{\dag}_0+E_1\rho E^{\dag}_1$, where the Kraus operators are given by \cite{Nielsen},
$$E_0=\left(\begin{array}{cc}
1&0\\
0&\sqrt{1-\gamma} \end{array}\right),~~~ E_1=\left(\begin{array}{cc}
0&\sqrt{\gamma}\\
0&0 \end{array}\right),$$ $\gamma\in (0,1)$ is the damping rate.
When each qubit of the GHZ state $\rho_G$ undergoes the amplitude damping noise channel, one gets \begin{eqnarray}\label{GHZ1} \begin{aligned} \rho_G^{AD}&\equiv\mathcal{E}_{AD}(\rho_G)\\
&=\frac{1}{2}\Big((1+\gamma^3)|000\rangle\langle000|+(1-\gamma )^3|111\rangle\langle111|\\
&\quad+(1-\gamma )^{3/2}(|000\rangle\langle111|+|111\rangle\langle000|)\\
&\quad+(1-\gamma )\gamma ^2(|001\rangle\langle001|+|010\rangle\langle010|+|100\rangle\langle100|)\\
&\quad+(1-\gamma )^2 \gamma(|011\rangle\langle011|+|101\rangle\langle101|+|110\rangle\langle110|)\Big). \end{aligned} \end{eqnarray} The corresponding correlation matrix is \begin{eqnarray} \begin{aligned} C=\left(\begin{array}{ccccccccc}
s&0&0&0&-s&0&0&0&0\\
0&-s&0&-s&0&0&0&0&0\\
0&0&0&0&0&0&0&0&t \end{array}\right), \end{aligned} \end{eqnarray} where $s=(1-\gamma)^{\frac{3}{2}}$ and $t=\gamma(3-6\gamma+4\gamma^2)$. The singular values of $C$ are $\sqrt{2}\,(1-\gamma)^{\frac{3}{2}}$, $\sqrt{2}\,(1-\gamma)^{\frac{3}{2}}$ and $\gamma(4\gamma^2-6\gamma+3)$. We choose the two nine-dimensional vectors to be $\vec{v}_1=(-1,0,0,0,1,0,0,0,0)^T$ and $\vec{v}_2=(0,1,0,1,0,0,0,0,0)^T$, which can be decomposed into \begin{eqnarray} &\vec{v}_1=(1,0,0)^T\otimes(-1,0,0)^T-(0,-1,0)^T\otimes(0,1,0)^T\nonumber,\\ &\vec{v}_2=(1,0,0)^T\otimes(0,1,0)^T+(0,-1,0)^T\otimes(-1,0,0)^T\nonumber. \end{eqnarray} Let $\vec{a}=(1,0,0)^T$, $\vec{a}'=(0,-1,0)^T$, $\vec{c}=(-1,0,0)^T$, $\vec{c}'=(0,1,0)^T$. Together with suitable unit vectors $\vec{b}$ and $\vec{b}'$, the upper bound of the maximal expectation of the Mermin operator is $4(1-\gamma)^{\frac{3}{2}}$ based on \cite{Mermin1990}. Hence, the state is standard tripartite nonlocal for $\gamma\in(0,0.370039)$.
Now consider the filtering. The correlation matrix of the filtered state is \begin{eqnarray} \begin{aligned} D=\left(\begin{array}{ccccccccc}
S&0&0&0&-S&0&0&0&0\\
0&-S&0&-S&0&0&0&0&0\\
0&0&0&0&0&0&0&0&T \end{array}\right), \end{aligned} \end{eqnarray} where $S=lmn(1-\gamma)^{\frac{3}{2}}$. The singular values of $D$ are $\sqrt{2(1-\gamma)^3l^2m^2n^2}$, $\sqrt{2(1-\gamma)^3l^2m^2n^2}$ and \begin{eqnarray*} T&=&\frac{1}{2}(-1+l^2m^2n^2+\gamma^3(l^2+1)(m^2+1)(n^2+1)+\gamma(3+l^2+m^2+n^2)\\ &&-\gamma^2(3+2n^2+m^2(2+n^2)+l^2(2+m^2+n^2))). \end{eqnarray*} As $\tilde{\rho}^{AD}_G$ is locally unitary equivalent to $\rho^{AD}_G$, the singular values of the matrix $\frac{\tilde{D}}{F}$ are $\frac{\sqrt{2(1-\gamma)^3l^2m^2n^2}}{F}$, $\frac{\sqrt{2(1-\gamma)^3l^2m^2n^2}}{F}$ and $\frac{T}{F}$, where \begin{eqnarray*} F&=&\frac{1}{2}(1+l^2m^2n^2+\gamma^3(l^2-1)(m^2-1)(n^2-1)\\ &&+\gamma(-3+l^2+m^2+n^2)+\gamma^2(3-2n^2+m^2(-2+n^2)+l^2(-2+m^2+n^2))). \end{eqnarray*} The maximal violation value of the Mermin operator is $\frac{4\sqrt{(1-\gamma)^3l^2m^2n^2}}{F}$, with the restriction $\frac{\sqrt{2(1-\gamma)^3l^2m^2n^2}}{F}\geq\frac{T}{F}$. Therefore, the filtered state violates the Mermin inequality for $\gamma\in(0, 0.394752)$. That is to say, for $0.370039\leq\gamma\leq0.394752$, the standard nonlocality of the state $\rho^{AD}_G$ is not detected by the Mermin inequality before filtering. FIG. 4 shows that for $\gamma\in(0, 0.394752)$, the filtered state violates the Mermin inequality, i.e., it is a standard tripartite nonlocal state. \begin{figure}
\caption{$f(\gamma)$ is the maximal value of $\mathcal{Q}(M)$. The red line represents the maximal violation value of the state after local filtering. The filtered state is standard nonlocal for $0\leq \gamma<0.394752$.}
\end{figure}
\section{Conclusion} In summary, we have investigated the maximal violation of the Mermin inequality under local filtering for arbitrary 3-qubit states. We have presented a tight upper bound for the maximal expectation value of the Mermin operator after local filtering. Furthermore, for the 3-qubit GHZ state, the hidden standard tripartite nonlocality can be revealed for $0.471428\leq p\leq 0.5$ by local filtering. Similarly, although the amplitude damping GHZ state does not violate the Mermin inequality for $0.370039\leq\gamma\leq0.394752$, the filtered one is standard nonlocal. The local filtering process may reveal certain hidden quantum correlations including nonlocality \cite{filterCHSH,FOP} and steerability \cite{FilterSteer}. In order to improve the efficiency of quantum information processing, a number of schemes have been put forward \cite{purify,ErrorCorrection,EntanglementConcentration,QuantumRepeaters}. The filter operations can also be used to improve the fidelity between quantum states and the efficiency of information processing with noisy entangled states \cite{FilterNoise}. Our approach presented in this article can also be used to deal with other Bell-type inequalities for tripartite or multipartite systems.
\end{document} | arXiv | {
"id": "2205.10562.tex",
"language_detection_score": 0.6341610550880432,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{CHROMATIC NUMBER OF GRAPHS\\AND EDGE FOLKMAN NUMBERS \thanks{This work was supported by the Scientific Research Fund of the St. Kliment Ohridski Sofia University under contract No~75, 2009.}} \author{Nedyalko Dimov Nenov}
\maketitle
\begin{abstract}
We consider only simple graphs. The graph $G_1+G_2$ consists of vertex disjoint copies of $G_1$ and $G_2$ and all possible edges between the vertices of $G_1$ and $G_2$. The chromatic number of the graph $G$ will be denoted by $\chi(G)$ and the clique number of $G$ by $\cli(G)$. The graphs $G$ for which $\chi(G)-\cli(G)\ge 3$ are considered. For these graphs the inequality $|V(G)|\ge\chi(G)+6$ was proved in~\cite{12}, where $V(G)$ is the vertex set of $G$. In this paper we prove that equality $|V(G)|=\chi(G)+6$ can be achieved only for the graphs $K_{\chi(G)-7}+Q$, $\chi(G)\ge 7$ and $K_{\chi(G)-9}+C_5+C_5+C_5$, $\chi(G)\ge 9$, where graph $Q$ is given on Fig.~1 and $K_n$ and $C_5$ are complete graph on $n$ vertices and simple 5-cycle, respectively. With the help of this result we prove some new facts for some edge Folkman numbers (Theorem~4.2).
\textbf{Key words:} chromatic number, edge Folkman numbers
\textbf{2000 Mathematics Subject Classification:} 05C55 \end{abstract}
\section{Introduction.} We consider only finite, non-oriented graphs without loops and multiple edges. We call a $p$-clique of the graph $G$ a set of $p$ vertices each two of which are adjacent. The largest positive integer $p$ such that $G$ contains a $p$-clique is denoted by $\cli(G)$ (clique number of $G$). We shall use also the following notations: \begin{itemize} \item $V(G)$ is the vertex set of $G$; \item $E(G)$ is the edge set of $G$; \item $\overline G$ is the complement of $G$; \item $G-V$, $V\subseteq V(G)$ is the subgraph of $G$ induced by $V(G)\setminus V$; \item $\alpha(G)$ is the vertex independence number of $G$; \item $\chi(G)$ is the chromatic number of $G$; \item $f(G)=\chi(G)-\cli(G)$; \item $K_n$ is the complete graph on $n$ vertices; \item $C_n$ is the simple cycle on $n$ vertices; \item $N_G(v)$ is the set of neighbours of a vertex $v$ in $G$. \end{itemize}
Let $G_1$ and $G_2$ be two graphs. We denote by $G_1+G_2$ the graph $G$ for which $V(G)=V(G_1)\cup V(G_2)$, $E(G)=E(G_1)\cup E(G_2)\cup E'$, where $E'=\{[x,y], x\in V(G_1),y\in V(G_2)\}$.
We will use the following theorem by \textsc{Dirac}~\cite{2}:
\begin{thm}\label{th:1.1}
Let $G$ be a graph such that $f(G)\ge 1$. Then $|V(G)|\ge\chi(G)+2$ and
$|V(G)|=\chi(G)+2$ only when $G=K_{\chi(G)-3}+C_5$. \end{thm}
If $f(G)\ge 2$, then we have~\cite{12} (see also~\cite{16})
\begin{thm}\label{th:1.2} Let $f(G)\ge 2$. Then \begin{enumerate}[\indent\rm(a)] \item
$|V(G)|\ge\chi(G)+4$; \item
$|V(G)|=\chi(G)+4$ only when $\chi(G)\ge 6$ and $G=K_{\chi(G)-6}+C_5+C_5$. \end{enumerate} \end{thm}
In the cases $\chi(G)=4$ and $\chi(G)=5$ we have the following stronger inequalities: \begin{gather}\label{1.1}
\text{if $f(G)\ge 2$ and $\chi(G)=4$ then $|V(G)|\ge 11$, \cite{1};}\\
\text{if $f(G)\ge 2$ and $\chi(G)=5$ then $|V(G)|\ge 11$, \cite{13} (see also~\cite{14}).} \label{1.2} \end{gather}
For the case $f(G)\ge 3$ it was known that~\cite{12} (see also~\cite{17,18})
\begin{thm}\label{th:1.3}
Let $G$ be a graph such that $f(G)\ge 3$. Then $|V(G)|\ge\chi(G)+6$. \end{thm}
In this paper we consider the case $|V(G)|=\chi(G)+6$. We prove the following main theorem.
\begin{thm}\label{th:1.4}
Let $G$ be a graph such that $f(G)\ge 3$ and $|V(G)|=\chi(G)+6$. Then $\chi(G)\ge 7$ and $G=K_{\chi(G)-7}+Q$ or $\chi(G)\ge 9$ and $G=K_{\chi(G)-9}+C_5+C_5+C_5$, where $Q$ is the graph, whose complementary graph $\overline Q$ is given in Fig.~1. \end{thm}
Obviously, if $f(G)\ge 3$ then $\chi(G)\ge 5$. Therefore we will consider only the cases $\chi(G)\ge 5$. If $\chi(G)=5$ or $\chi(G)=6$ then by Theorem~\ref{th:1.3} and Theorem~\ref{th:1.4} we see that $|V(G)|\ge\chi(G)+7$. In these two cases we can state the following stronger results: \begin{gather}\label{1.3}
\text{if $f(G)\ge 3$ and $\chi(G)=5$ then $|V(G)|\ge 22$, \cite{6};}\\
\text{if $f(G)\ge 3$ and $\chi(G)=6$ then $|V(G)|\ge 16$, \cite{9}.} \label{1.4} \end{gather}
The inequalities~\eqref{1.3} and~\eqref{1.4} are exact. \textsc{Lathrop} and \textsc{Radziszowski}~\cite{9} proved that there are only two 16-vertex graphs for which~\eqref{1.4} holds.
At the end of this paper we obtain by Theorem~\ref{th:1.4} new results about some edge-Folkman numbers (Theorem~\ref{th:4.2}).
\section{Auxiliary results.} A graph $G$ is defined to be vertex-critical chromatic if $\chi(G-v)<\chi(G)$ for all $v\in V(G)$. We shall use the following results of \textsc{Gallai}~\cite{4} (see also~\cite{5}).
\begin{thm}\label{th:2.1}
Let $G$ be a vertex-critical chromatic graph and $\chi(G)\ge 2$. If $|V(G)|<2\chi(G)-1$ then $G=G_1+G_2$, where $V(G_i)\ne\emptyset$, $i=1,2$. \end{thm}
\begin{thm}\label{th:2.2}
Let $G$ be a vertex-critical $k$-chromatic graph, $|V(G)|=n$ and $k\ge 3$. Then there exist at least $\left\lceil\dfrac32\left(\dfrac53k-n\right)\right\rceil$ vertices with the property that each of them is adjacent to all the other $n-1$ vertices. \end{thm}
\begin{rem}\label{rem:2.1} The formulations of Theorem~\ref{th:2.1} and Theorem~\ref{th:2.2} given above are obviously equivalent to the original ones in~\cite{4} (see Remark~1 and Remark~2 in~\cite{16}). \end{rem}
\begin{pro}\label{pro:2.1}
Let $G$ be a graph such that $f(G)\ge 3$ and $|V(G)|=\chi(G)+6$. Then $G$ is a vertex-critical chromatic graph. \end{pro}
\begin{proof} Assume the opposite. Then $\chi(G-v)=\chi(G)$ for some $v\in V(G)$. Let $G'=G-v$. Since $\cli(G')\le\cli(G)$ we have $f(G')\ge f(G)\ge 3$. By Theorem~\ref{th:1.3} \[
|V(G')|\ge\chi(G')+6=\chi(G)+6=|V(G)|, \] which is a contradiction. \end{proof}
The following result by \textsc{K\'{e}ry}~\cite{7} will be used later.
\begin{thm}\label{th:2.3} Let $G$ be a $13$-vertex graph such that $\alpha(G)\le 2$ and $\cli(G)\le 4$. Then $G$ is isomorphic to the graph $Q$, whose complementary graph $\overline Q$ is given in Fig.~1. \end{thm}
\begin{definition}\label{d:2.1} The graph $G$ is called a Sperner graph if $N_G(u)\subseteq N_G(v)$ for some $u,v\in V(G)$. \end{definition}
Obviously if $N_G(u)\subseteq N_G(v)$ then $\chi(G-u)=\chi(G)$. Thus we have
\begin{pro}\label{pro:2.2} No vertex-critical chromatic graph is a Sperner graph. \end{pro}
The following lemmas are used in the proof of Theorem~\ref{th:1.4}.
\begin{lemma}\label{lem:2.1} Let $G$ be a graph and $f(G)\ge 2$. Then \begin{enumerate}[\indent\rm(a)] \item
$|V(G)|\ge 10$; \item
$|V(G)|=10$ only when $G=C_5+C_5$. \end{enumerate} \end{lemma}
\begin{proof}
The inequality~(a) follows from~\eqref{1.1}, \eqref{1.2} and Theorem~\ref{th:1.2}(a). Let $|V(G)|=10$. Then by~\eqref{1.1}, \eqref{1.2} and Theorem~\ref{th:1.2}(a) we see that $\chi(G)=6$. From Theorem~\ref{th:1.2}(b) we obtain $G=C_5+C_5$. \end{proof}
\begin{lemma}\label{lem:2.2} Let $G$ be a graph such that $f(G)\ge 3$ and $G$ is not a Sperner graph. Then \[
|V(G)|\ge 11+\alpha(G). \] \end{lemma}
\begin{proof} Assume the opposite, i.e. \beq{2.1}
|V(G)|\le 10+\alpha(G). \end{equation} Let $A\subseteq V(G)$ be an independent set of vertices of $G$ such that
$|A|=\alpha(G)$. Consider the subgraph $G'=G-A$. From~\eqref{2.1} we see that
$|V(G')|\le 10$. Since $A$ is independent, from $f(G)\ge 3$ it follows that $f(G')\ge 2$. According to Lemma~\ref{lem:2.1}(b), $G'=C_5^{(1)}+C_5^{(2)}$, where $C_5^{(i)}$, $i=1,2$, are 5-cycles. Hence $\cli(G')=4$ and $\cli(G)\le 5$. Thus if $a\in A$, then $N_G(a)\cap V(C_5^{(1)})$ or $N_G(a)\cap V(C_5^{(2)})$ is an independent set. Let $N_G(a)\cap V(C_5^{(1)})$ be an independent set and $C_5^{(1)}=v_1v_2v_3v_4v_5v_1$. Then we may assume that $N_G(a)\cap V(C_5^{(1)})\subseteq\{v_1,v_3\}$. We obtain that $N_G(a)\subseteq N_G(v_2)$, which contradicts the assumption of Lemma~\ref{lem:2.2}. \end{proof}
\begin{lemma}\label{lem:2.3}
Let $G$ be a graph such that $f(G)\ge 3$ and $|V(G)|=\chi(G)+6$. Then $\chi(G)\ge 7$ and: \begin{enumerate}[\indent\rm(a)] \item $G=Q$ if $\chi(G)=7$; \item $G=K_1+Q$ if $\chi(G)=8$; \item $G=K_2+Q$ or $G=C_5+C_5+C_5$ if $\chi(G)=9$. \end{enumerate} \end{lemma}
\begin{proof} Since $\chi(G)\ne\cli(G)$ we have $\cli(G)\ge 2$. Thus, from $f(G)\ge 3$ it follows $\chi(G)\ge 5$. By~\eqref{1.3} and~\eqref{1.4} we see that $\chi(G)\ne 5$ and $\chi(G)\ne 6$. Hence $\chi(G)\ge 7$.
\textsc{Case~1.}
$\chi(G)=7$. In this case $|V(G)|=13$. From $\chi(G)=7$ and $f(G)\ge 3$ we see that $\cli(G)\le 4$. According to Proposition~\ref{pro:2.1} and Proposition~\ref{pro:2.2}, $G$ is not a Sperner graph. It follows from Lemma~\ref{lem:2.2} that $\alpha(G)\le 2$. Thus, by Theorem~\ref{th:2.3}, $G=Q$.
\textsc{Case~2.}
$\chi(G)=8$. In this situation we have $|V(G)|=14$. By Proposition~\ref{pro:2.1},
$G$ is a vertex-critical chromatic graph. Since $|V(G)|<2\chi(G)-1$, from Theorem~\ref{th:2.1} we obtain that $G=G_1+G_2$. Clearly,
\begin{align}
|V(G)|&=|V(G_1)|+|V(G_2)|;\label{2.2}\\ \chi(G)&=\chi(G_1)+\chi(G_2);\label{2.3}\\ f(G)&=f(G_1)+f(G_2);\label{2.4}\\[-7.25ex]\notag \end{align} \beq{2.5} \text{$G_1$ and $G_2$ are vertex-critical chromatic graphs.} \end{equation}
\textsc{Subcase 2.a.} $G=K_1+G'$. Since $\chi(G')=7$ and $f(G')=f(G)\ge 3$, by the Case~1 we obtain $G'=Q$ and $G=K_1+Q$.
\textsc{Subcase 2.b.} $G_1$ and $G_2$ are not complete graphs. In this subcase, by~\eqref{2.5}, we have $\chi(G_i)\ge 3$ and $\chi(G_i)\ne\cli(G_i)$, $i=1,2$. Thus $f(G_i)\ge 1$,
$i=1,2$. According to Theorem~\ref{th:1.1}, $|V(G_i)|\ge 5$, $i=1,2$. From these inequalities and~\eqref{2.2} it follows \beq{2.6}
|V(G_i)|\le 9, \quad i=1,2.
\end{equation} Let $f(G_1)\le f(G_2)$. Then, by~\eqref{2.4}, $f(G_2)\ge 2$. From Lemma~\ref{lem:2.1} we obtain $|V(G_2)|\ge 10$. This contradicts~\eqref{2.6}.
\textsc{Case 3.}
$\chi(G)=9$. In this case $|V(G)|=15$. By Proposition~\ref{pro:2.1}, $G$ is a vertex-critical chromatic graph. Since $|V(G)|<2\chi(G)-1$, from Theorem~\ref{th:2.1} it follows that $G=G_1+G_2$.
\textsc{Subcase 3.a.}
$G=K_1+G'$. Since $|V(G')|=14$, $\chi(G')=8$ and $f(G')=f(G)\ge 3$, by Case~2 we have $G'=K_1+Q$. Hence $G=K_2+Q$.
\textsc{Subcase 3.b.} $G_1$ and $G_2$ are not complete graphs. By~\eqref{2.5} it follows
$|V(G_i)|\ge 5$, $i=1,2$. From these inequalities and~\eqref{2.2} we obtain \beq{2.7}
|V(G_i)|\le 10, \quad i=1,2. \end{equation} Let $f(G_1)\le f(G_2)$. Then according to~\eqref{2.4} we have $f(G_2)\ge 2$. From~\eqref{2.7} and Lemma~\ref{lem:2.1} we obtain $G_2=C_5+C_5$. Since
$|V(G_2)|=10$ and $\chi(G_2)=6$ we see from~\eqref{2.2} and~\eqref{2.3} that
$|V(G_1)|=5$ and $\chi(G_1)=3$. Thus, by~\eqref{2.5}, we conclude that $G_1=C_5$. Hence $G=C_5+C_5+C_5$. \end{proof}
\section{Proof of Theorem~\ref{th:1.4}.} By Lemma~\ref{lem:2.3} we have that $\chi(G)\ge 7$. If $\chi(G)=7$ or
$\chi(G)=8$, Theorem~\ref{th:1.4} follows from Lemma~\ref{lem:2.3}. Let $\chi(G)\ge 9$. We prove Theorem~\ref{th:1.4} by induction on $\chi(G)$. The inductive base $\chi(G)=9$ follows from Lemma~\ref{lem:2.3}(c). Let $\chi(G)\ge 10$. Then $\frac53\chi(G)-|V(G)|>0$. By Proposition~\ref{pro:2.1}, $G$ is a vertex-critical chromatic graph. Thus, according to Theorem~\ref{th:2.2}, we have $G=K_1+G'$. As $\chi(G')=\chi(G)-1$, $f(G')=f(G)\ge 3$ and $|V(G')|=\chi(G')+6$, we can now use the inductive assumption and obtain \[ G'=K_{\chi(G')-7}+Q \quad\text{or}\quad G'=K_{\chi(G')-9}+C_5+C_5+C_5. \] Hence $G=K_{\chi(G)-7}+Q$ or $G=K_{\chi(G)-9}+C_5+C_5+C_5$.
\section{Edge Folkman numbers $F_e(a_1,\dots,a_r;R(a_1,\dots,a_r)-2)$.} Let $a_1$, \dots, $a_r$ be integers, $a_i\ge 2$, $i=1,\dots,r$. The symbol $G\toe(a_1,\dots,a_r)$ means that in every $r$-coloring \[ E(G)=E_1\cup\dots\cup E_r, \quad E_i\cap E_j=\emptyset, \quad i\ne j, \] of the edge set $E(G)$ there exists a monochromatic $a_i$-clique $Q$ of colour $i$ for some $i\in\{1,\dots,r\}$, that is $E(Q)\subseteq E_i$. The Ramsey number $R(a_1,\dots,a_r)$ is defined as $\min\{n:K_n\toe(a_1,\dots,a_r)\}$. Define \begin{align*} H_e(a_1,\dots,a_r;q)&=\{G:G\toe(a_1,\dots,a_r)\text{ and }\cli(G)<q\};\\
F_e(a_1,\dots,a_r;q)&=\min\{|V(G)|:G\in H_e(a_1,\dots,a_r;q)\}. \end{align*} It is well known that \beq{4.1} F_e(a_1,\dots,a_r;q)\text{ exists }\iff q>\max\{a_1,\dots,a_r\}. \end{equation} In the case $r=2$ this was proved in~\cite{3} and the general case in~\cite{19}. The numbers $F_e(a_1,\dots,a_r;q)$ are called edge Folkman numbers. An exposition of the known edge Folkman numbers is given in~\cite{8}. In this section we consider the numbers $F_e(a_1,\dots,a_r;R(a_1,\dots,a_r)-2)$, where $a_i\ge 3$, $i=1,\dots,r$. We know only one Folkman number of this kind, namely $F_e(3,3,3;15)=23$ (see~\cite{11}).
In~\cite{12} we prove the following statement.
\begin{thm}\label{th:4.1} Let $a_1,\dots,a_r$ be integers and $a_i\ge 3$, $i=1,\dots,r$, $r\ge 2$. Then \beq{4.2} F_e(a_1,\dots,a_r;R(a_1,\dots,a_r)-2)\ge R(a_1,\dots,a_r)+6. \end{equation} \end{thm}
\begin{rem}\label{rem:4.1} It follows from $a_i\ge 3$ and $r\ge 2$ that $R(a_1,\dots,a_r)>2+ \max\{a_1,\dots,a_r\}$. Thus, by~\eqref{4.1}, the numbers $F_e(a_1,\dots,a_r;R(a_1,\dots,a_r)-2)$ exist. \end{rem}
The aim of this section is to prove the following result.
\begin{thm}\label{th:4.2} Let $a_1,\dots,a_r$ be integers and $a_i\ge 3$, $i=1,\dots,r$, $r\ge 2$. Then \[ F_e(a_1,\dots,a_r;R(a_1,\dots,a_r)-2)=R(a_1,\dots,a_r)+6 \] if and only if $K_{R-7}+Q\toe(a_1,\dots,a_r)$ or $K_{R-9}+C_5+C_5+C_5\toe (a_1,\dots,a_r)$, where $R=R(a_1,\dots,a_r)$. \end{thm}
We shall use the following result obtained by \textsc{Lin}~\cite{10}: \beq{4.3} G\toe(a_1,\dots,a_r) \Rightarrow \chi(G)\ge R(a_1,\dots,a_r). \end{equation}
\begin{proof}[Proof of Theorem~\ref{th:4.2}] I. Let $F_e(a_1,\dots,a_r;R-2)=R+6$. Let $G\in H_e(a_1,\dots,a_r;R-2)$ and \beq{4.4}
|V(G)|=R+6. \end{equation} Since $\cli(G)\le R-3$, from~\eqref{4.3} it follows $f(G)\ge 3$. By Theorem~\ref{th:1.3}, we have \beq{4.5}
|V(G)|\ge\chi(G)+6. \end{equation} From~\eqref{4.3}, \eqref{4.4} and~\eqref{4.5} we see that $\chi(G)=R$ and
$|V(G)|=\chi(G)+6$. Thus, according to Theorem~\ref{th:1.4}, $G=K_{\chi(G)-7}+Q= K_{R-7}+Q$ or $G=K_{\chi(G)-9}+C_5+C_5+C_5=K_{R-9}+C_5+C_5+C_5$. This implies $K_{R-7}+Q\toe(a_1,\dots,a_r)$ or $K_{R-9}+C_5+C_5+C_5\toe(a_1,\dots,a_r)$ because $G\in H_e(a_1,\dots,a_r;R-2)$.
II. Let $K_{R-7}+Q\toe(a_1,\dots,a_r)$. Then $K_{R-7}+Q\in H_e(a_1,\dots,a_r;R-2)$ because $\cli(K_{R-7}+Q)=R-3$. Hence \[
F_e(a_1,\dots,a_r;R-2)\le|V(K_{R-7}+Q)|=R+6. \] This inequality and~\eqref{4.2} imply that $F_e(a_1,\dots,a_r;R-2)=R+6$.
In the same way we see that from $K_{R-9}+C_5+C_5+C_5\toe(a_1,\dots,a_r)$ it follows that $F_e(a_1,\dots,a_r;R-2)=R+6$. \end{proof}
\begin{rem}\label{rem:4.2} We obtain the equality $F_e(3,3,3;15)=23$ proving that $K_8+C_5+C_5+C_5\toe(3,3,3)$. We do not know whether $K_{10}+Q\toe(3,3,3)$. \end{rem}
\begin{rem}\label{rem:4.3} By Theorem~\ref{th:4.1} we have $F_e(3,5;12)\ge 20$ and $F_e(4,4;16)\ge 24$. The exact values of these numbers are not known. Therefore, having in mind Theorem~\ref{th:4.2}, it will be interesting to know whether the following statements are true: \begin{align*} &K_7+Q\toe(3,5),&&K_5+C_5+C_5+C_5\toe(3,5);\\ &K_{11}+Q\toe(4,4),&&K_9+C_5+C_5+C_5\toe(4,4).\\ \end{align*} \end{rem}
\begin{rem}\label{rem:4.4} By Theorem~\ref{th:4.1}, $F_e(3,4;7)\ge 15$. It was proved in~\cite{8} that $F_e(3,4;8)=16$. Thus $F_e(3,4;7)\ge 17$. \end{rem}
\oneaddr{ Faculty of Mathematics and Informatics\\ St Kliment Ohridski University of Sofia\\ 5, James Bourchier Blvd\\ 1164 Sofia, Bulgaria\\ \email{nenov@fmi.uni-sofia.bg}}
\end{document} | arXiv | {
"id": "1002.4332.tex",
"language_detection_score": 0.5821520686149597,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Notes on Thermodynamic Principle for Quantum Metrology} \author{Yaoming Chu} \affiliation{School of Physics, Hubei Key Laboratory of Gravitation and Quantum Physics, Institute for Quantum Science and Engineering, International Joint Laboratory on Quantum Sensing and Quantum Metrology, Huazhong University of Science and Technology, Wuhan 430074, China} \affiliation{Wuhan National High Magnetic Field Center, Wuhan National Laboratory for Optoelectronics, Huazhong University of Science and Technology, Wuhan 430074, China} \author{Jianming Cai} \email{jianmingcai@hust.edu.cn} \affiliation{School of Physics, Hubei Key Laboratory of Gravitation and Quantum Physics, Institute for Quantum Science and Engineering, International Joint Laboratory on Quantum Sensing and Quantum Metrology, Huazhong University of Science and Technology, Wuhan 430074, China} \affiliation{Wuhan National High Magnetic Field Center, Wuhan National Laboratory for Optoelectronics, Huazhong University of Science and Technology, Wuhan 430074, China}
\begin{abstract} Recently, we find a physical limit on energy consumption of quantum metrology, and demonstrate that it essentially arises from the erasure of quantum Fisher information (QFI) which determines the best achievable measurement precision. Here, we provide more details in order to further elaborate the essence of this principle. \end{abstract}
\maketitle
\subsection{The main result: Thermodynamic principle for quantum metrology}
We reiterate the main result of our Letter by quoting: {\it ``As our main result, we find the physical limit on energy consumption of quantum metrology, and demonstrate that it essentially arises from the erasure of quantum Fisher information (QFI) which determines the best achievable measurement precision''} \cite{Chu2022}.
\begin{figure}
\caption{Schematic representation of a quantum metrology machine and its overall thermodynamic performance in terms of the average heat dissipation $\Delta Q_E=\langle \Delta Q_E^\lambda\rangle_{\lambda}$, which originates from recovering the memory's final state after the measurement (i.e. characterized by the ensemble $\rho_M$) to its standard state.}
\label{fig:protocol}
\end{figure}
The principle is explicitly presented in Eq.\,(8) of our Letter \cite{Chu2022}, where we consider the traditional pure-state quantum metrological scenario (see Fig.\ref{fig:protocol}), namely the interrogation Hamiltonian is in the form of $H_\lambda=\lambda h$ and the $\lambda$- parametrized state of the system is prepared via $|\psi_\lambda\rangle=e^{-i\lambda h t}|\psi\rangle$ with $t$ the interrogation time. After a general measurement in the basis of $\{|m\rangle\langle m|\}_{m=1}^d$ ($d$ is the system dimension) to extract the information on the parameter $\lambda$, the following recovery of the memory to its original state would result in heat dissipation into the environment. Given an environment of temperature $T$, we find that the average heat dissipation $\Delta Q_E$ over the unknown parameter $\lambda$ is lower bounded by the QFI $F_Q$ of the state $|\psi_\lambda\rangle$ about the parameter (assuming $\norm{h}=1$) as \begin{equation} \label{eq:Q-Fq-average}
\Delta Q_E \geq \log (2) k_B T t^{-2} F_Q. \end{equation} The validity of the above inequality is proved in the section of “{\it Average heat dissipation in quantum metrology}” (cf. Supplementary Materials \cite{Chu2022}). Moreover, the principle is illustrated using the example of quantum Rabi model and is extended to the scenario of multiqubit quantum metrology by introducing the weighted QFI, which highlights an efficient way to investigate energy efficiency of multiqubit quantum states. Below we clarify the three main ingredients that can help to understand the principle in Eq.\eqref{eq:Q-Fq-average}: (1) The definition of average heat dissipation; (2) The bound of average heat dissipation given by the Shannon entropy; (3) The bound of the Shannon entropy by the QFI.
\subsubsection{The definition of average heat dissipation}
In Landauer's principle, the energy consumption for erasing a bit (namely $k_B T$) is quantified by the average heat dissipation of {\it a universal protocol erasing a bit state randomly chosen from both state 0 and 1} \cite{Bennett2003,Reeb2014}. Similarly, the established thermodynamic principle for quantum metrology is formulated for a quantum metrology machine (with {\it a fixed measurement apparatus}, see Fig.\ref{fig:protocol}) in terms of the average heat dissipation over metrological runs for all possible values of the unknown parameter $\lambda$, namely \begin{equation} \label{Eq:Average_Q} \Delta Q_E=\langle \Delta Q_E^{\lambda}\rangle_{\lambda}, \end{equation}
where $\Delta Q_E^{\lambda}$ represents the heat dissipation for the parametrized state $\vert\psi_\lambda\rangle$. For a given measurement protocol $\{|m\rangle\langle m|\}_{m=1}^d$ ($d$ is the dimension of the system), i.e. \begin{equation}
\label{eq:M}
\mathcal{M}:|\psi_\lambda\rangle\langle\psi_\lambda| \otimes |x\rangle\langle x| \to \sum_m p_m^\lambda |m\rangle\langle m|\otimes |x_m\rangle\langle x_m|, \end{equation}
where $p_m^\lambda=|\langle m|\psi_\lambda\rangle|^2$, and $\{|x_m\rangle\}$ represents the internal structure of a memory, interacting with the system and storing the measurement outcomes. A unitary operation conditional on the memory's state brings the system back to the initial state without heat dissipation into environment because of its reversibility \cite{Peres1985}. In order to realize a closed metrological cycle, the memory in the mixed state $\rho_M^\lambda=\sum_m p_m^\lambda|x_m\rangle\langle x_m|$ must be recovered to its original standard state $|x\rangle$, resulting in a heat dissipation $\Delta Q_E^\lambda$ into the environment.
\subsubsection{The bound of average heat dissipation given by the Shannon entropy}
Similar to Landauer's principle \cite{Bennett2003,Reeb2014}, we consider the average heat dissipation (i.e. $\Delta Q_E$) over all possible values of the unknown parameter $\lambda$ for the given measurement basis $\{|m\rangle\langle m|\}_{m=1}^d$, and characterize the overall thermodynamic performance of a quantum metrology machine. In such a framework, the memory's state after the measurement protocol can be expressed as \begin{equation}
\rho_M=\langle \rho_M^\lambda\rangle=\sum_{m=1}^dp_m |x_m\rangle\langle x_m|.
\label{Eq:rho_M} \end{equation} The probability distribution $\{p_m\}_{m=1}^d$ is given by \begin{equation}
p_m = \frac{1}{\mathcal{N}}\int p_m^\lambda d\lambda = \frac{1}{\mathcal{N}}\int \langle m|\psi_\lambda\rangle\langle\psi_\lambda|m\rangle d\lambda = \langle m|\rho_s|m\rangle, \end{equation} where $\mathcal{N}$ is the normalization factor to keep $\sum_{m}p_m=1$, and \begin{equation}
\rho_s=\frac{1}{\mathcal{N}}\int|\psi_\lambda\rangle\langle\psi_\lambda|d\lambda, \label{eq:rho_s} \end{equation}
related to the parametrized-state ensemble $\{|\psi_\lambda\rangle\langle\psi_\lambda|\}$. According to Landauer's principle, recovering the memory to its standard state inevitably dissipates a certain amount of heat lower bounded by the entropy $\mathcal{S}=-\operatorname{tr}(\rho_M\log\rho_M)$ \cite{Bennett2003,Reeb2014}, namely \begin{equation} \label{Eq:Average_Q_2}
\Delta Q_E \geq k_B T\mathcal{S}= -k_B T\sum_{m=1}^d p_m \log(p_m). \end{equation} We emphasize that the above averaging procedure to characterize the quantum metrology machine is physically reasonable and meaningful, which results from the lacking of a priori information about the unknown parameter to be estimated in quantum metrology.
\subsubsection{The bound of the Shannon entropy by the QFI}
For the traditional quantum metrological scenario [i.e. Eq.\,(5) in our Letter \cite{Chu2022} where the interrogation Hamiltonian $H_\lambda=\lambda h$], we prove that (cf. Supplementary Materials \cite{Chu2022}) \begin{equation} \mathcal{S}\geq S(\rho_s)\geq \log (2) t^{-2} F_Q. \label{Eq:averageS} \end{equation}
Here, $\mathcal{S}=-\operatorname{tr}(\rho_M\log\rho_M)=-\sum_{m=1}^d p_m \log(p_m)$ denotes the entropy of the memory after the measurement, $S(\rho_s)=-\operatorname{tr}(\rho_s\log\rho_s)$ [see Eq.\eqref{eq:rho_s} for the definition of $\rho_s$] and $F_Q$ represents the QFI of the parametrized quantum state $|\psi_\lambda\rangle=e^{-i\lambda h t}|\psi\rangle$. Note that $F_Q$ is independent of the value of the parameter $\lambda$ and can be interpreted as the QFI associated with the state preparation, \begin{equation}
\mathcal{P}:\,\,|\psi\rangle \to |\psi_\lambda\rangle=e^{-i\lambda h t}|\psi\rangle. \end{equation} Based on Eq.\,\eqref{Eq:Average_Q_2} [which is essentially Landauer's principle] and Eq.\,\eqref{Eq:averageS}, we can establish the following thermodynamic principle [i.e. Eq.\,(8) in our Letter], \begin{equation} \Delta Q_E\geq k_B T \log (2) t^{-2} F_Q, \label{Eq:averageQ} \end{equation} which implies that the thermodynamic cost of a quantum metrology machine is lower bounded by the QFI.
\subsubsection{Another single-qubit example}
\begin{figure}
\caption{Illustration of Eq.\,\eqref{Eq:averageS} $\mathcal{S}\geq S(\rho_s)\geq \log(2)F_Q/t^2$ (cf. Eq.(7) in our Letter \cite{Chu2022}) with a single-qubit example. The result demonstrates that the Shannon entropy of the measurement outcomes $\mathcal{S}$ is well bounded by the von Neumann entropy $S(\rho_s)$ and the quantum Fisher information as $\log(2)t^{-2}F_Q$.}
\label{fig2}
\end{figure}
Note that Eq.\,\eqref{Eq:averageS} is proved for a general measurement protocol, including optimal and non-optimal measurements. In our Letter \cite{Chu2022}, we have demonstrated the application of the established thermodynamic principle in quantum Rabi model. Here, we further illustrate the principle by considering another example, namely a non-optimal measurement protocol in a single-qubit system. The qubit is prepared into the following parametrized state \begin{equation}
|\psi_\lambda\rangle=\exp [-i(\lambda \sigma_z/2) t](\sqrt{p}|0\rangle+\sqrt{1-p}|1\rangle), \label{eq:psi_lambda} \end{equation} with the interrogation Hamiltonian \begin{equation} H_{\lambda}=\lambda h=\lambda (\sigma_z/2). \end{equation}
The parametrized state is measured in the basis of $\{|\pm_p\rangle\langle \pm_p|\}$ with \begin{eqnarray}
|+_p\rangle&=&\sqrt{p}|0\rangle+\sqrt{1-p}|1\rangle,\\
|-_p\rangle&=&\sqrt{1-p}|0\rangle-\sqrt{p}|1\rangle. \end{eqnarray} The corresponding probabilities can be obtained as \begin{equation} \begin{aligned} p_{+}&=2p^2-2p+1+2p(1-p)\cos(\lambda t),\\
p_{-}&=2p(1-p)-2p(1-p)\cos(\lambda t).
\end{aligned} \end{equation}
Hence, the average probabilities in the state $|+_p\rangle$ and $|-_p\rangle$ over all the possible values of $\lambda$ are given by \begin{equation}
\tilde{p}_{+}=\langle p_+\rangle_\lambda=2p^2-2p+1, \quad \tilde{p}_-=\langle p_-\rangle_\lambda=2p(1-p), \end{equation} resulting in the Shannon entropy of the measurement outcomes as \begin{equation} \mathcal{S}=-\tilde{p}_+\log(\tilde{p}_+)-\tilde{p}_{-}\log(\tilde{p}_{-}). \end{equation} The von Neumann entropy $\rho_s$ [see Eq.\,\eqref{eq:rho_s}] is written as \begin{equation} S(\rho_s)=-p\log(p)-(1-p)\log(1-p). \end{equation} And the QFI corresponding to the parametrized state [see Eq.\,\eqref{eq:psi_lambda}] is \begin{equation} F_Q=4p(1-p). \end{equation} Thus, it is easy to verify that Eq.\,\eqref{Eq:averageS} holds. Furthermore, it can be seen from Fig.\ref{fig2} that the Shannon entropy (thereby the heat dissipation according to Landauer's principle) and the QFI shows strongly correlated behavior in this example.
\subsection{Relation between the Shannon entropy and the QFI for SLD measurement}
\subsubsection{A generalized inequality}
Apart from the main result of thermodynamic principle for quantum metrology based on the {\it average heat dissipation over the unknown parameter} (as we elaborate in the above section), in Ref.\,\cite{Chu2022}, we also present a relation between the Shannon entropy associated with the SLD measurement and the QFI for {\it a fixed value of the unknown parameter} in a pure-state metrology, cf. Eq.(3) in Ref.\,\cite{Chu2022}. We would like to point out that such a relation can be replaced by the following more generalized inequality with $2 \norm{h_\lambda} \rightarrow \norm{L_\lambda}$, namely \begin{equation} \label{Eq:entropy_QFI} \mathcal{S}\geq 4 \log(2) \norm{L_\lambda}^{-2} F_Q[\rho_\lambda], \end{equation} which holds for both pure and mixed states. Here, $\mathcal{S}$ is the Shannon entropy associated with the SLD measurement, $L_\lambda$ is the SLD operator, $F_Q[\rho_\lambda]$ is the QFI of a general mixed state dependent on the parameter $\lambda$. In more detail, the Shannon entropy of the measurement outcomes can be expressed as \begin{equation} \mathcal{S}=-\sum_\ell p_\ell^\lambda \log (p_\ell^\lambda), \end{equation}
where $p_\ell^\lambda=\operatorname{tr}(|\ell_\lambda\rangle\langle\ell_\lambda|\rho_\lambda)$ with $|\ell_\lambda\rangle$ the eigenvector of $L_\lambda$ associated with the eigenvalue $\ell_\lambda$. The QFI of $\rho_\lambda$ with respect to the unknown parameter $\lambda$ can be related to the variance of $L_\lambda$ [Note that $\operatorname{tr}(L_\lambda \rho_\lambda)=0$] as \begin{equation}
\label{eq:pure-QFI}
F_Q[\rho_\lambda]=\operatorname{tr}(L_\lambda^2 \rho_\lambda)=\text{Var}[L_\lambda,\rho_\lambda]=\frac{1}{2}\sum_{\ell,\ell^\prime}(\ell_\lambda-\ell^\prime_\lambda)^2 p_\ell^\lambda p_{\ell^\prime}^\lambda. \end{equation} Hence, we have \begin{equation} 2 F_Q[\rho_\lambda]/\norm{L_\lambda}^2\leq \sum_{\ell\neq \ell^\prime}p_\ell^\lambda p_{\ell^\prime}^\lambda=1-\sum_\ell (p_\ell^\lambda)^2. \end{equation} Based on Lemma 1 in Supplementary Material for our Letter \cite{Chu2022}, namely \begin{equation}
\label{eq:subsidiary}
-\sum_\ell p_\ell^\lambda \log (p_\ell^\lambda) +2 \log 2\sum_\ell (p_\ell^\lambda)^2\geq 2\log 2, \end{equation} we obtain the generalized inequality in Eq.\,\eqref{Eq:entropy_QFI}, which represents a general version of Eq.(3) in our Letter \cite{Chu2022}.
As an example, we consider the following specific example as \begin{equation} \rho_\lambda=\frac{1}{2}\begin{pmatrix} \lambda^2 & \lambda\\ \lambda & 2-\lambda^2 \end{pmatrix}. \label{Eq:example} \end{equation}
\begin{figure}
\caption{The Shannon entropy $\mathcal{S}$ (blue dot) and its bound given by the QFI (red curve), cf. Eq.\,\eqref{Eq:entropy_QFI}, for the example of Eq.\eqref{Eq:example}.}
\label{fig}
\end{figure}
We numerically calculate the Shannon entropy $\mathcal{S}$, the semi-norm of the SLD operator $\norm{L_\lambda}$, and the QFI $F_Q[\rho_{\lambda}]$. The result in Fig.\ref{fig} shows the relation between the Shannon entropy $\mathcal{S}$ and the bound given by the QFI [cf. Eq.\,\eqref{Eq:entropy_QFI}].
We remark that the generalized inequality in Eq.\,\eqref{Eq:entropy_QFI} can achieve the equality for two special cases: (i) $\rho_\lambda$ is pure (where $\norm{L_\lambda}=2 \sqrt{F_Q}$) \cite{Dooley2022}; (ii) $\rho_\lambda=p_1 |\phi_1(\lambda)\rangle\langle \phi_1(\lambda)|+p_2 |\phi_2(\lambda)\rangle\langle \phi_2(\lambda)|$ of a two-level system with $p_{1,2}$ independent of $\lambda$.
In these cases, both the Shannon entropy and the QFI bound in Eq.\,\eqref{Eq:entropy_QFI} equal $\log(2)$. Note that $\norm{L_\lambda}\leq 2\norm{h_\lambda}$ for pure state (by using $L_\lambda=2i[\rho_\lambda,h_\lambda]$) in Eq.\,\eqref{Eq:entropy_QFI} and gives rise to Eq.(3) in our Letter \cite{Chu2022}, which shall be replaced by Eq.\,\eqref{Eq:entropy_QFI} in order to provide meaningful connections between the Shannon entropy associated with the SLD measurement and the QFI, see an explicit example in the following section. We would like to stress that the main result of thermodynamic principle for quantum metrology is based on the {\it average heat dissipation over the unknown parameter}, while the relation between the Shannon entropy associated with the SLD measurement and the QFI is for {\it a fixed value of the unknown parameter}. The validity and applicability of the thermodynamic principle for quantum metrology established in our Letter does not rely on this relation.
\subsubsection{An example}
We remark that the generalized inequality in Eq.\,\eqref{Eq:entropy_QFI} may provide us interesting insights into the connections between the Shannon entropy and the QFI. Below we present an explicit example to illustrate this point. We consider a mixed state in the form of $\rho_\lambda=p_1(\lambda) |\phi_1(\lambda)\rangle\langle \phi_1(\lambda)|+p_2(\lambda) |\phi_2(\lambda)\rangle\langle \phi_2(\lambda)|$ in single-qubit system. In such a case, the SLD is given by \cite{Braunstein1994}
\begin{equation}
L_\lambda=\sum_{i}\frac{\partial_\lambda p_i}{p_i} |\phi_i\rangle\langle\phi_i|+(\Omega |\phi_1\rangle\langle \phi_2|+h.c.), \end{equation} with \begin{equation}
\Omega=2p_1\langle\partial_\lambda\phi_1|\phi_2\rangle+2p_2\langle\phi_1|\partial_\lambda\phi_2\rangle. \end{equation}
We note that the QFI can be divided as $F_Q=F_c+F_{nc}$ with the classical part $F_c=\sum_{i}(\partial_\lambda p_i)^2/p_i$ and the non-classical part $F_{nc}=|\Omega|^2$ \cite{Zanardi2007}. Thus, according to Eq.\,\eqref{Eq:entropy_QFI} we can further obtain \begin{equation} \mathcal{S}\geq \log(2) \frac{F_Q}{F_Q+\left(\displaystyle \frac{1}{4p_1p_2}-1\right)F_{c}}, \label{Eq:S_QFI_TLS} \end{equation} the right-hand side of which is related to the ratio $\alpha \equiv F_{c}/F_Q$. Eq.\,\eqref{Eq:S_QFI_TLS} demonstrates that the Shannon entropy associated with the SLD measurement for quantum metrology with mixed states is connected not only to the total QFI but also with its classical and non-classical components.
\subsection{Some remarks: The role of a priori information}
Firstly, we would like to remark that the generalized inequality in Eq.\,\eqref{Eq:entropy_QFI} pertains to the SLD measurement. For non-SLD measurement, the inequality may not be satisfied. In fact, based on the quantum version of Landauer's principle \cite{Reeb2014}, one can obtain that the heat dissipation of $|\psi_\lambda\rangle$ for a general measurement basis $\{|m\rangle\langle m|\}_{m=1}^d$ is lower bounded by \begin{equation} \Delta Q_E^\lambda\geq k_B T \mathcal{S}^\lambda\equiv -k_B T\operatorname{tr}(\rho_M^\lambda\log\rho_M^\lambda)=-k_B T\sum_{m=1}^d p_m^\lambda \log(p_m^\lambda). \end{equation}
The right-hand side of the above inequality for pure-state metrology can be made arbitrarily small (i.e. $\mathcal{S}^\lambda\to 0$) by choosing a non-SLD optimal measurement basis as $\{\Pi_q=|q\rangle\langle q|,\Pi_{\bar{q}}=|\bar{q}\rangle\langle \bar{q}|\}_{q\to 1}$, where $|q\rangle=\sqrt{q}|\psi_{\lambda_0}\rangle+\sqrt{1-q}|\psi_{\lambda_0}^\perp\rangle$, $|\bar{q}\rangle=\sqrt{1-q}|\psi_{\lambda_0}\rangle-\sqrt{q}|\psi_{\lambda_0}^\perp\rangle$ and $\lambda_0\to\lambda$ \cite{Dooley2022}. Note that the corresponding Fisher information of the measurement outcomes about the unknown parameter is maximized (i.e. equal to the QFI, which is non-zero) in this scenario. However, the entropy calculated in this example assumes a priori information about the parameter, such that the specific measurement basis can be chosen. We do not adopt this assumption for the formulation of a thermodynamic principle for quantum metrology in our Letter \cite{Chu2022}, as the role of a priori information about the unknown parameter needs to be properly taken into account. In general, $\Delta Q_E^\lambda$ would depend on how much a priori information about the unknown parameter is available.
Secondly, we circumvent this difficulty by assuming no a priori information on the unknown parameter to be estimated. In our Letter \cite{Chu2022}, we establish the thermodynamic principle for quantum metrology based on the average heat dissipation over metrological runs for all possible values of the unknown parameter $\lambda$, namely $\Delta Q_E=\langle \Delta Q_E^{\lambda}\rangle_{\lambda}$, where $\Delta Q_E^{\lambda}$ represents the heat dissipation for the parametrized state $\vert\psi_\lambda\rangle$. We stress that such a formulation, deeply connected to the erasure of the QFI (cf. Ref.\,\cite{Chu2022}, a ``many-to-one'' map $\mathcal{E}: |\psi_\lambda\rangle \to |\psi\rangle$ for all $\lambda$), is physically reasonable and justified, similar to Landauer's principle where the erasure of information represents $\mathcal{E}_L: 0/1 \to 0$ \cite{Bennett1982,Reeb2014}.
Thirdly, we shall note that the principle [Eq.\eqref{eq:Q-Fq-average}, i.e. Eq.(8) in our Letter \cite{Chu2022}] is proved for a general measurement protocol (not limited to SLD measurement), and is applicable not only for optimal measurement but also for non-optimal measurement. As an example, we consider the single-qubit example \cite{Dooley2022}, i.e.\ \begin{equation}
|\psi_\lambda\rangle=e^{-i\lambda\sigma_z/2}(|0\rangle+|1\rangle)/\sqrt{2}. \end{equation}
The probability distribution under a fixed optimal measurement protocol (which achieves the maximal Fisher information) $\{|\pm\rangle\langle \pm|\}$ with $|\pm\rangle=(|0\rangle\pm e^{i\phi}|1\rangle)/\sqrt{2}$ is given by \begin{equation}
p_\pm^\lambda=|\langle \pm|\psi_\lambda\rangle|^2=\frac{1}{2}\left[1\pm\cos(\phi-\lambda)\right]. \end{equation} In this case, $\langle p_\pm^\lambda\rangle=1/2$ and thus the Shannon entropy related to the measurement outcomes of such a quantum metrology machine is given by $\mathcal{S}=\log(2)$, which equals the right-hand side of Eq.\,\eqref{Eq:averageS}, i.e. $\log(2) F_Q=\log(2)$, and thus does not lead to any violation of the principle.
\subsection{Summary $\&$ Outlook}
To summarize, the main results about the thermodynamic principle of quantum metrology [i.e.\,Eqs.\,(7-11) in our Letter \cite{Chu2022}], which are formulated based on the average heat dissipation over the unknown parameter assuming no a priori information, characterize the overall thermodynamic performance of a quantum metrology machine and hold for general measurement protocols. The generalization of the established principle to involve a certain amount of a priori information about the unknown parameter and further development of this new field of thermodynamics and quantum metrology would be definitely interesting and valuable in more realistic metrological scenarios. Besides, the generalized inequality [i.e. Eq.\,\eqref{Eq:entropy_QFI}] between the Shannon entropy under the SLD measurement and the QFI for both pure and mixed states replaces Eq.\,(3) in our Letter, and demonstrates a more clear relationship between the two information-theoretic quantities and deserves further investigation in order to gain more insights into these connections.
We thank Shane Dooley and Martin B. Plenio for very helpful discussions.
\end{document} | arXiv | {
"id": "2208.05167.tex",
"language_detection_score": 0.720581591129303,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
% NOTE: the original preamble here contained self-referential shortcut
% definitions (e.g. \newcommand{\alpha}{\alpha},
% \renewcommand{\langle}{\langle}) produced by macro inlining in the source
% dump. They are invalid or infinitely recursive LaTeX, so they have been
% removed; only the well-formed macros actually used in the body are kept.
\newtheorem{Theorem}{Theorem}
\newcommand{\beql}[1]{\begin{equation}\label{eq:#1}}
\newcommand{\eq}[1]{(\ref{eq:#1})}
\newcommand{\bra}[1]{\langle#1|}
\newcommand{\ket}[1]{|#1\rangle}
\newcommand{\braket}[1]{\langle#1\rangle}
\chapter{QUANTUM TURING MACHINES: LOCAL TRANSITION, PREPARATION, MEASUREMENT, AND HALTING}
\author{Masanao Ozawa}
\affiliation{School of Informatics and Sciences\\ Nagoya University\\ Chikusa-ku, Nagoya 4648601, Japan}
\abstract{Foundations of the theory of quantum Turing machines are investigated. The protocol for the preparation and the measurement of quantum Turing machines is discussed. The local transition functions are characterized for fully general quantum Turing machines. A new halting protocol is proposed without augmenting the halting qubit and is shown to work without spoiling the computation.}
\section{INTRODUCTION}
The Church-Turing thesis\refnote{\cite{Chu36,Tur36}} states that to be computable is to be computable by a Turing machine and the modern discipline in computational complexity theory\refnote{\cite{Pap94}} states that to be efficiently computable is to be computable by a Turing machine within polynomial steps in the length of the input data. However, Feynman\refnote{\cite{Fey82}} pointed out that a Turing machine cannot simulate a quantum mechanical process efficiently and suggested that a computing machine based on quantum mechanics might be more powerful than Turing machines. Deutsch introduced quantum Turing machines\refnote{\cite{Deu85}} and quantum circuits\refnote{\cite{Deu89}} for establishing the notion of quantum algorithm exploiting ``quantum parallelism''. A different approach to quantum Turing machines was investigated earlier by Benioff\refnote{\cite{Beni80}}. Bernstein and Vazirani\refnote{\cite{BV97}} instituted quantum complexity theory based on quantum Turing machines and showed constructions of universal quantum Turing machines. Yao\refnote{\cite{Yao93}} showed that a computation by a quantum Turing machine can be simulated efficiently by a quantum circuit. Deutsch's idea of quantum parallelism was realized strikingly by Shor\refnote{\cite{Sho94}}, who found efficient quantum algorithms for the factoring problem and the discrete logarithm problem, for which no efficient algorithms have been found for classical computing machines. The purpose of this paper is to discuss foundations of quantum Turing machines and to propose a computational protocol for quantum Turing machines.
A precise formulation of quantum Turing machines is given along with Deutsch's formulation\refnote{\cite{Deu85}} and the computational protocol is discussed for the preparation and the measurement of quantum Turing machines.
The characterization of the transition functions of quantum Turing machines is also discussed. Deutsch\refnote{\cite{Deu85}} required that the transition function should be determined by local configurations. Bernstein and Vazirani\refnote{\cite{BV97}} found a simple characterization of the transition functions for the restricted class of quantum Turing machines in which the head must move either to the right or to the left at each step but a general characterization remains open. This problem is discussed and a solution is given.
The computational protocol for the halting of quantum Turing machines is discussed. In order to signal the completion of computation, Deutsch\refnote{\cite{Deu85}} introduced the halt flag by augmenting the halt qubit. Myers\refnote{\cite{Mye97}} pointed out a difficulty in this halting of quantum Turing machines. Improving on the preceding work\refnote{\cite{98QU}}, a halting protocol is proposed without augmenting the halting qubit, and it is shown that the monitoring of the halt flag does not spoil the computation.
\section{QUANTUM TURING MACHINES}
A {\em quantum Turing machine (QTM)} ${\cal Q}$ is a quantum system consisting of a {\em processor}, a bilateral infinite {\em tape}, and a {\em head} to read and write a symbol on the tape. Its configuration is determined by the {\em processor configuration} $q$ from a finite set $Q$ of symbols, the {\em tape configuration} $T$ represented by an infinite string from a finite set $\Sigma$ of symbols, and the discretized {\em head position} $\xi$, taking values in the set ${\bf Z}$ of integers. The tape consists of {\em cells} numbered by the integers and the head position $\xi$ is the place of the cell numbered by $\xi$. We assume that $Q$ contains two specific symbols $q_{0}$ and $q_{f}$ representing the initial configuration and the final configuration of the processor and that $\Sigma$ contains the symbol $B$ representing the blank cell in the tape. For any integer $m$ the symbol at the cell $m$ on the tape is denoted by $T(m)$. We assume that the possible tape configurations are such that $T(m)=B$ except for finitely many cells $m$. The set of all the possible tape configurations is denoted by $\Sigma^{\#}$. The set $\Sigma^{\#}$ is a countable set. Thus, any configuration $C$ of ${\cal Q}$ is represented by a triple $C=(q,T,\xi)$ in the {\em configuration space} $Q\times\Sigma^{\#}\times{\bf Z}$. The state of ${\cal Q}$ is represented by a unit vector in the Hilbert space ${\cal H}$ generated by the configuration space $Q\times\Sigma^{\#}\times{\bf Z}$. The complete orthonormal basis canonically in one-to-one correspondence with the configuration space is called the {\em computational basis}. Thus, the computational basis is represented by $\ket{C}=\ket{q}\ket{T}\ket{\xi}$ for any configuration $C=(q,T,\xi)\in Q\times\Sigma^{\#}\times{\bf Z}$; we shall write also $\ket{q,T,\xi}=\ket{q}\ket{T}\ket{\xi}$.
We shall denote by $|X|$ the number of the elements of a set $X$; for an indexed set the number of elements is understood as the number of indices. In order to define the observables quantizing the configurations, we assume the numbering of the sets $Q$ and $\Sigma$ such that
$Q=\{q_{0},\ldots,q_{|Q|-1}\}$ and
$\Sigma=\{\sigma_{0},\ldots,\sigma_{|\Sigma|-1}\}$. We define observables $\hat{q}$, $\hat{T}(m)$ for $m\in{\bf Z}$, and $\hat{\xi}$ as follows. $$
\hat{q}=\sum_{n=0}^{|Q|-1}n\ket{q_{n}}\bra{q_{n}},\quad
\hat{T}(m)=\sum_{n=0}^{|\Sigma|-1}n\ket{\sigma_{n}}\bra{\sigma_{n}},\quad \hat{\xi}=\sum_{\xi\in{\bf Z}}\xi\ket{\xi}\bra{\xi}. $$
We assume that we have a device to prepare the quantum Turing machine in the state $\ket{q,T,\xi}$ for any configuration $C=(q,T,\xi)$ and that we have a measuring device to measure sufficiently many $\hat{T}(m)$s simultaneously.
Let $\Gamma$ be a finite set of symbols and $\Gamma^{*}$ the set of finite strings from $\Gamma$. In this paper, we shall consider computations which are probabilistic transformations on $\Gamma^{*}$, or precisely functions
from $\Gamma^{*}$ to the set of probability distributions on $\Gamma^{*}$. The set $\Gamma$ is called the {\em alphabet} of the computation. A finite string from the set $\Gamma$ is called a {\em $\Gamma$-string}. The length of a $\Gamma$-string $x$ is denoted by $|x|$. When $|x|=0$, $x$ is called the empty string. We shall identify any $\Gamma$-string $x=(x_{0},\ldots,x_{|x|-1})$
with a function $x$ from $\{0,\ldots,|x|-1\}$ to $\Gamma$
such that $x(m)=x_{m}$ for all $m$ with $0\le m\le|x|-1$.
The computation by a QTM consists of encoding, preparation, time evolution, measurement, and decoding. The encoding transforms the {\em input $\Gamma$-string} to the {\em input tape string}. The preparation prepares the {\em initial state} of the quantum Turing machine with the input tape string, and the time evolution transforms the initial state to the {\em final state}. The measurement of the tape string in the final state gives a probability distribution of the {\em output tape string}. The decoding transforms the output tape string to the output $\Gamma$-string and hence transforms the probability distribution of the output tape string to the probability distribution of the output $\Gamma$-string. Therefore, the initial $\Gamma$-string is transformed to the {\em output probability distribution} of the $\Gamma$-string.
The {\em encoding} $e$ of the QTM ${\cal Q}$ is a polynomial time computable function from $\Gamma^{*}$ to $\Sigma^{\#}$. Thus, the encoding $e$ transforms any $\Gamma$-string $x$ to a tape configuration denoted by $e(x)$; if $T=e(x)$ we shall write $T\sim x$ and $T$ is said to {\em represent} the $\Gamma$-string $x$. Inversely, the {\em decoding} $d$ of ${\cal Q}$ is a polynomial time computable function from $\Sigma^{\#}$ to $\Gamma^{*}$ satisfying $d(e(x))=x$ for all $x\in\Gamma^{*}$.
In this paper, we assume that $B\not\in\Gamma$ and $\Sigma=\Gamma\cup\{B\}$. We assume that there is an infinite subset $S\subset {\bf N}$ of the set of tape cells, called the {\em data slot}, with polynomial time numbering $S=\{m_{1},m_{2},\ldots\}$ and that the encoding is such that \begin{equation} e(x)(m)=\left\{\begin{array}{rl}
x(n)&\quad\mbox{if $m=m_{n}\in S$ and $0\le n<|x|,$}\\
B&\quad\mbox{otherwise,}\\ \end{array}\right. \end{equation} for any $x\in\Sigma^{*}$, and the decoding is given by \begin{eqnarray}
|d(T)|&=&\min\{m_{n}\in S|\ T(m_{n})=B\},\\ d(T)(n)&=&T(m_{n})
\end{eqnarray} for $0\le n< |d(T)|$, where $T\in\Sigma^{\#}$.
The computation begins at $t=0$. At this time ${\cal Q}$ is prepared in an {\em initial state} $\ket{C_{0}}$ such that \begin{equation} \ket{C_{0}}=\ket{q_{0}}\ket{T_{in}}\ket{0}, \end{equation} where $T_{in}$ represents some $\Gamma$-string $x$. In this case, $T_{in}$ is called the {\em input tape},
$x$ is called the {\em input}, and $|x|$ is called the {\em input length}.
The computation proceeds in steps of a fixed unit duration $\tau$. Since the position of the head is discretized, the wave function $\ket{\psi(t)}$ may not stay within ${\cal H}$ at any time $t$ other than integer multiples of $\tau$. We assume therefore that the time $t$ is discretized to be an integer multiple of $\tau$. We also take the normalized unit of time in which the time $t$ is assumed to take values in ${\bf Z}$. The dynamics of ${\cal Q}$ are described by a unitary operator $U$ on ${\cal H}$ which specifies the evolution of any state $\ket{\psi(t)}$ during a single {\em computational step} so that we have \begin{eqnarray} &U^{\dagger}U=UU^{\dagger}=I,&\label{eq:1019a}\\ &\ket{\psi(t)}=U^{t}\ket{\psi(0)}& \end{eqnarray} for all positive integer $t$.
Since the number of all the possible tape strings in the data slot is countable, we assume them to be indexed as $\{T_{1},T_{2},\ldots\}$. Thus, the observable ${\hat T}(S)$ describing the tape string in the data slot can be represented by $$ {\hat T}(S)=\sum_{j=1}^{\infty}\lambda_{j}\,I_{1} \otimes\ket{T_{j}}\bra{T_{j}}\otimes I_{2}\otimes I_{3} $$ where $\{\lambda_{1},\lambda_{2},\ldots\}$ is a countable set of positive numbers in one-to-one correspondence with $\{T_{1},T_{2},\ldots\}$ by a polynomial time function and where $I_{1}$ is the identity on the state space spanned by the processor configurations $Q$, $I_{2}$ is the identity on the state space spanned by the tape strings outside the data slot, and $I_{3}$ is the identity on the state space spanned by the head positions ${\bf Z}$.
We assume that the measurement to obtain the output is allowed only for the computational basis or more specifically the observable $\hat{T}(S)$ describing directly the output symbol string on the tape, while in Deutsch's formulation\refnote{\cite{Deu85,Deu89}} and in later work no such restriction has been imposed. However, it is an unavoidable assumption in the definition of a quantum Turing machine. In fact, if this assumption is dropped, any function would be computable
without any computational time. To see this, suppose that the tape strings are encoded by the natural numbers. Let $|T_{n}\rangle$ be the computational basis state, ignoring the inessential degeneracy, in which the output tape string is the one encoded by $n$ and
let $\hat{T}$ be the observable such that $\hat{T}|n\rangle
=n|T_{n}\rangle$. Only such $\hat{T}$ is allowed to measure for obtaining the output. Otherwise, given any function $f$ of the natural numbers and a natural number $n$, if one prepares the tape
in the state $|T_{n}\rangle$ and measures the observable $f(\hat{T})$, one gets $f(n)$ surely without any computation. This contradicts the Church-Turing thesis. Thus, we cannot allow even the measurement of $f(\hat{T})$ unless $f$ is a polynomial time computable function.
\section{LOCAL TRANSITION FUNCTIONS}
Deutsch\refnote{\cite{Deu85}} requires that the QTM operate finitely, i.e., (i) only a finite system is in motion during any one step, (ii) the motion depends only on the state of a finite subsystem, and (iii) the rule that specifies the motion can be given finitely in the mathematical sense. To satisfy the above requirement, the matrix elements of $U$ takes the following form\footnotemark: \footnotetext{This condition is a natural extension of Deutsch's condition\refnote{\cite{Deu85}} to the case where the head is not required to move. } \begin{eqnarray} \bra{q',T',\xi'}U\ket{q,T,\xi} &=&[\delta_{\xi'}^{\xi+1}D(q,T(\xi),q',T'(\xi),1) +\delta_{\xi'}^{\xi}D(q,T(\xi),q',T'(\xi),0)\nonumber\\ & &\mbox{}+\delta_{\xi'}^{\xi-1}D(q,T(\xi),q',T'(\xi),-1)] \prod_{m\not=\xi}\delta_{T(m)}^{T'(m)} \label{eq:1019b} \end{eqnarray} for any configurations $(q,T,\xi)$ and $(q',T',\xi')$. The continued product on the right ensures that the tape is changed only at the head position $\xi$ at the beginning of each computational step. The terms $\delta_{\xi'}^{\xi\pm1}$, $\delta_{\xi'}^{\xi}$ ensure that during each step the head position cannot change by more than one unit. The function $D(q,T(\xi),q',T'(\xi),d)$, where $q,q'\in Q$, $T(\xi),T'(\xi)\in \Sigma$, and $d\in\{-1,0,1\}$, represents a dynamical motion depending only on the local observables $\hat{q}$ and $\hat{T}(\xi)$. We call $D$ the {\em local transition function} of the QTM ${\cal Q}$.
The function $D$ can be arbitrarily given except for the requirement \eq{1019a} that $U$ be unitary. Each choice defines a different QTM ${\cal Q}[D]$. Thus, if we have an intrinsic characterization of the local transition function $D$, QTMs can be defined formally without referring to the unitary operator $U$ as a primitive notion.
From \eq{1019b}, the time evolution operator $U$ is determined conversely from the local transition function $D$ \beql{1024a} U\ket{q,T,\xi} =\sum_{p,\tau,d}D(q,T(\xi),p,\tau,d) \ket{p,T^{\tau}_{\xi},\xi+d}. \end{equation} for any configuration $(q,T,\xi)$, where $T^{\tau}_{\xi}$ is the tape string defined by \begin{equation} T^{\tau}_{\xi}(m)=\left\{ \begin{array}{ll} \tau& \mbox{if $m=\xi$},\\ T(m)& \mbox{if $m\not=\xi$}. \end{array} \right. \end{equation} It follows that the relation $D(q,\sigma,q',\tau,d)=c$ can be interpreted as the following instruction of the operation of ${\cal Q}$: if the processor is in the configuration $q$ and if the head reads the symbol $\sigma$, then it follows with amplitudes $c$ that the processor's state turns to $q'$, the head writes the symbol $\tau$, and that the head moves one cell to the right if $d=1$, to the left if $d=-1$, or does not move if $d=0$.
Now we can formulate the characterization problem of local transition functions of QTMs: {\em Let $D$ be a complex-valued function on $Q\times\Sigma\times Q\times\Sigma\times\{-1,0,1\}$ and let $U$ be the operator on ${\cal H}$ defined by \eq{1024a}. Then, what conditions ensure that the operator $U$ is unitary?}
This problem is solved by the following theorem.\refnote{\cite{98eqc}}
\begin{Theorem} The operator $U$ is unitary if and only if $D$ satisfies the following conditions.
{\rm (a)} For any $(q,\sigma)\in Q\times\Sigma$, $$
\sum_{p,\tau,d}|D(q,\sigma,p,\tau,d)|^{2}=1. $$
{\rm (b)} For any $(q,\sigma), (q',\sigma')\in Q\times\Sigma$ with $(q,\sigma)\ne (q',\sigma')$, $$ \sum_{p,\tau,d} D(q',\sigma',p,\tau,d)^{*}D(q,\sigma,p,\tau,d)=0. $$
{\rm (c)} For any $(q,\sigma,\tau),(q',\sigma',\tau')\in Q\times\Sigma^{2}$, we have $$ \sum_{p\in Q}D(q',\sigma',p,\tau',1)^{*}D(q,\sigma,p,\tau,-1)=0. $$
{\rm (d)} For any $(q,\sigma,\tau),(q',\sigma',\tau')\in Q\times\Sigma^{2}$, we have $$ \sum_{p\in Q,d=0,1}D(q',\sigma',p,\tau',d-1)^{*}D(q,\sigma,p,\tau,d)=0. $$ \end{Theorem}
If it is assumed that the head must move either to the right or to the left at each step, the condition (d) is automatically satisfied. In this case, the above statement is reduced to the result due to Bernstein and Vazirani\refnote{\cite{BV97}}.
In order to maintain the Church-Turing thesis, we need to require that the unitary operator $U$ is constructive, or that the matrix elements of $U$ in the computational basis are computable complex numbers; otherwise, we cannot show the existence of the algorithm by the constructive language. From the complexity theoretical point of view, we need also to require that matrix elements are polynomially computable complex numbers. Thus, we require that the range of the local transition function $D$ is in the polynomially computable complex numbers.
\section{HALTING PROTOCOL}
The result of a computation is obtained by measuring the tape string after the computation has been completed. Unlike the classical case, the machine configuration cannot be monitored throughout the computation because of the inevitable disturbance caused by measurement. Thus, the machine needs a specific halt scheme to signal actively when the computation has been completed.
Deutsch\refnote{\cite{Deu85}} introduced an additional single qubit, called the halt qubit, together with an observable $\hat{n}_{0}$, called
the halt flag, with the eigenstates $|0\rangle$ and $|1\rangle$, so that the processor configuration $q$ is represented by the state vector
$|q\rangle|1\rangle$ if $q$ is the final state in the classical picture or by
$|q\rangle|0\rangle$ otherwise. The halt qubit is initialized to $|0\rangle$ before starting the computation,
and every valid quantum algorithm sets the halt qubit to $|1\rangle$ when the computation has been completed but does not interact with the halt qubit otherwise. Deutsch claimed that {\em the observable $\hat{n}_{0}$ can then be periodically observed from the outside without affecting the operation of the machine}.
Myers\refnote{\cite{Mye97}} argued that the state entangles the non-halt qubits with the halt qubits so that the measurement of the halt flag changes the state and concluded that the halt scheme spoils the computation.
In the preceding work\refnote{\cite{98QU}}, Deutsch's halt scheme is reformulated precisely and it is shown that, even though it changes the state of the quantum Turing machine, the measurement of the halt flag does not change the probability distribution of the outcome of the computation so that it does not spoil the computation. It is also shown\refnote{\cite{98QU}} that the halt scheme is equivalent to the quantum nondemolition monitoring of the output observable.
In what follows, we shall give a new formulation of the halt scheme in which the additional halt qubit is not augmented.
The {\em halt flag} $\hat{n}_{0}$ is defined to be the observable corresponding to the projection on the final configuration of the processor, i.e. \begin{equation} \hat{n}_{0}=\ket{q_{f}}\bra{q_{f}}. \end{equation} We assume that we have a measuring apparatus to measure $\hat{n}_{0}$ precisely after each step instantaneously in the manner satisfying the projection postulate. Thus the $\hat{n}_{0}$-measurement gives surely the outcome 1 if and only if the processor is in $\ket{q_{f}}$. We shall denote by $[\![\hat{A}=a]\!]$ the spectral projection onto the eigenspace of an observable $\hat{A}$, considered as an operator on ${\cal H}$, corresponding to the eigenvalue $a$. The product $[\![\hat{A}=a]\!][\![\hat{B}=b]\!]$, if commutable, will be denoted by $[\![\hat{A}=a,\hat{B}=b]\!]$.
The precise formulation of the halting protocol is given as follows.
(I) The halt flag $\hat{n}_{0}$ is measured instantaneously after every step. This measurement is a precise measurement of the observable $\hat{n}_{0}$ satisfying the projection postulate. (Note that the above measurement is different from the procedure that one measures $\hat{q}$ and checks if the outcome is $q_{f}$ because this does not satisfy the projection postulate.)
(II) Once the halt flag is set to $\hat{n}_{0}=1$, the QTM no longer changes the halt flag or the result of the computation. Thus, we require \begin{equation}\label{eq:820a} U[\![\hat{n}_{0}=1,\hat{T}(S)=T_{j}]\!]U^{t}\ket{C} =[\![\hat{n}_{0}=1,\hat{T}(S)=T_{j}]\!]U
[\![\hat{n}_{0}=1,\hat{T}(S)=T_{j}]\!]U^{t}\ket{C} \end{equation} for any initial configuration $C$, time $t\ge0$, and tape string $T_{j}$ over the data slot $S$.
(III) After the measurement of the halt flag $\hat{n}_{0}$ gives the outcome $1$, the tape string $\hat{T}(S)$ in the data slot is measured and the outcome of this measurement is defined to be the {\em output} of the computation.
Now we shall show that the halting protocol does not affect the result of the computation. For that purpose, it suffices to prove that the probability distribution of the output is not affected by monitoring of the halt flag.
Let $\Pr\{\mbox{output}=T_{j}|\mbox{monitored}\}$ be the probability of
finding the output $T_{j}$ up to $N$ steps by the halting protocol. Let $\Pr\{\mbox{output}=T_{j}|\mbox{not-monitored}\}$ be the probability of finding the output $T_{j}$ by the single measurement after $N$ steps. We shall prove \begin{equation}
\Pr\{\mbox{output}=T_{j}|\mbox{monitored}\} =
\Pr\{{\mbox{output}=T_{j}}|\mbox{not-monitored}\}\label{eq:328l} \end{equation}
Let $P=[\![\hat{n}_{0}=1]\!]$ and $Q_{j}=[\![\hat{T}(S)=T_{j}]\!]$. Let $\ket{C}$ be an arbitrary initial state. If $\ket{C}$ is the state of the machine before the computation, we have \begin{equation}
\Pr\{\mbox{output}=T_{j}|\mbox{not-monitored}\}
=\|PQ_{j}U^{N}\ket{C}\|^{2}. \end{equation} By the projection postulate, the joint probability of obtaining the outcome $\hat{n}_{0}=0$ at the times $1,\ldots,K-1$ and obtaining the outcomes $\hat{n}_{0}=1$ and $\hat{T}=\lambda_{j}$ at the time $K$ is given by \begin{equation}
\|PQ_{j}(UP^{\perp})^{K}\ket{C}\|^{2}, \end{equation} and hence we have \begin{eqnarray}
\lefteqn{\Pr\{\mbox{output}=T_{j}|\mbox{monitored}\}}\quad\nonumber\\
&=&\|PQ_{j}\ket{C}\|^{2}+\|PQ_{j}UP^{\perp}\ket{C}\|^{2}
+\cdots+\|PQ_{j}(UP^{\perp})^{N}\ket{C}\|^{2}. \end{eqnarray} Thus, it suffices to prove the relation \begin{equation}
\|PQ_{j}U^{N}\ket{C}\|^{2}
=\|PQ_{j}\ket{C}\|^{2}+\|PQ_{j}UP^{\perp}\ket{C}\|^{2}+\cdots
+\|PQ_{j}(UP^{\perp})^{N}\ket{C}\|^{2}\label{eq:325e} \end{equation} for any $N$ and any initial state $\ket{C}$.
Let $\psi=U^{t}\ket{C}$ where $t\ge0$. We first consider the relation \begin{equation}\label{eq:325f}
\|PQ_{j}U\psi\|^{2}=\|PQ_{j}\psi\|^{2}+\|PQ_{j}UP^{\perp}\psi\|^{2}. \end{equation}
From (\ref{eq:820a}), we have \begin{equation}\label{eq:325b} PQ_{j}UPQ_{j}\psi=UPQ_{j}\psi. \end{equation} It follows that \begin{equation}\label{eq:325d} PQ_{j}UPQ_{j}^{\perp}\psi=\sum_{k\not=j}PQ_{j}UPQ_{k}\psi=0. \end{equation}
From (\ref{eq:325b}) and (\ref{eq:325d}), we have \begin{eqnarray} PQ_{j}U\psi &=&PQ_{j}UPQ_{j}\psi+PQ_{j}UPQ_{j}^{\perp}\psi+PQ_{j}UP^{\perp}\psi\nonumber\\ &=&UPQ_{j}\psi+PQ_{j}UP^{\perp}\psi.\label{eq:326a} \end{eqnarray}
From (\ref{eq:325b}), we have \begin{eqnarray}
\langle UPQ_{j}\psi|PQ_{j}UP^{\perp}\psi\rangle &=&
\langle PQ_{j}UPQ_{j}\psi|UP^{\perp}\psi\rangle\nonumber\\ &=&
\langle UPQ_{j}\psi|UP^{\perp}\psi\rangle\nonumber\\ &=&0.\label{eq:326b} \end{eqnarray}
From (\ref{eq:326a}) and (\ref{eq:326b}), we have \begin{eqnarray}
\|PQ_{j}U\psi\|^{2} &=&
\|UPQ_{j}\psi+PQ_{j}UP^{\perp}\psi\|^{2}\nonumber\\ &=&
\|UPQ_{j}\psi\|^{2}+\|PQ_{j}UP^{\perp}\psi\|^{2}\nonumber\\ &=&
\|PQ_{j}\psi\|^{2}+\|PQ_{j}UP^{\perp}\psi\|^{2} \end{eqnarray} Thus, we have proved (\ref{eq:325f}).
The proof for general $N$ runs as follows. We use mathematical induction and assume that (\ref{eq:325e}) holds for $N-1$. By replacing $\psi$ by $U^{N-1}\ket{C}$ in (\ref{eq:325f}), we have \begin{equation}
\|PQ_{j}U^{N}\ket{C}\|^{2}
=\|PQ_{j}U^{N-1}\ket{C}\|^{2}+\|PQ_{j}UP^{\perp}U^{N-1}\ket{C}\|^{2}. \label{eq:325g} \end{equation}
From (\ref{eq:325b}), we have $P^{\perp}UP\psi=\sum_{j}P^{\perp}UPQ_{j}\psi=0$, and hence $P^{\perp}U\psi=P^{\perp}UP^{\perp}\psi$ so that $P^{\perp}U^{N-1}\ket{C}=P^{\perp}(UP^{\perp})^{N-1}\ket{C}$. It follows that \begin{equation}\label{eq:326c}
\|PQ_{j}UP^{\perp}U^{N-1}\ket{C}\|^{2}
=\|PQ_{j}(UP^{\perp})^{N}\ket{C}\|^{2}. \end{equation} By induction hypothesis, we have \begin{equation}
\|PQ_{j}U^{N-1}\ket{C}\|^{2}
=\|PQ_{j}\ket{C}\|^{2}+\|PQ_{j}UP^{\perp}\ket{C}\|^{2}+\cdots
+\|PQ_{j}(UP^{\perp})^{N-1}\ket{C}\|^{2}.\label{eq:326d} \end{equation} Therefore, from (\ref{eq:325g}), (\ref{eq:326c}), and (\ref{eq:326d}), we obtain (\ref{eq:325e}).
It is concluded that the probability of finding the output $T_{j}$ up to $N$ steps by the halt protocol is equal to the probability of finding the output $T_{j}$ by the single measurement of $\hat{T}(S)$ after $N$ steps. It follows that the halting protocol does not affect the result of the computation.
Recently, Linden and Popescu\refnote{\cite{LP98}} claimed that the halt scheme given previously\refnote{\cite{98QU}} is not consistent with unitarity of the evolution operator. However, their argument applies only to the special case in which the whole tape is required not to change after the halt. As suggested in a footnote, the conclusion in the previous work\refnote{\cite{98QU}} can be obtained from the weaker condition for the general case where the tape is allowed to change except for the data slot. Linden and Popescu\refnote{\cite{LP98}} disregarded this case and hence their conclusion is not generally true. In this paper, the halting protocol with such a general formulation is treated explicitly and it is proved that even in this case the computation is not affected by the measurement of the halt flag. Moreover, contrary to Linden and Popescu\refnote{\cite{LP98}}, this general formulation is consistent with the unitarity. In fact, it can be shown that any unidirectional QTMs and stationary QTMs\refnote{\cite{BV97}} can be simulated by QTMs obeying this halting protocol with constant slowdown\refnote{\cite{98eqc}}. Thus, there is a universal QTM obeying the halting protocol.
\begin{numbibliography}
\bibitem{Chu36} A. Church,
{Am.\ J. Math.} {\bf 58}, 345 (1936).
\bibitem{Tur36} A.~M. Turing,
{Proc.\ Lond.\ Math.\ Soc.\ Ser.~2} {\bf 42}, 230 (1936).
\bibitem{Pap94} C.~H. Papadimitriou,
{\it Computational Complexity}
(Addison-Wesley, Reading, MA, 1994).
\bibitem{Fey82} R.~P. Feynman,
{Inter. J. Theor. Phys.} {\bf 21},
467 (1982).
\bibitem{Deu85} D. Deutsch,
{Proc.\ R. Soc.\ Lond.} A {\bf 400}, 97 (1985).
\bibitem{Deu89} D. Deutsch,
{Proc.\ R. Soc.\ Lond.} A {\bf 425}, 73 (1989).
\bibitem{Beni80} P. Benioff,
{J. Stat. Phys.} {\bf 22}, 563 (1980).
\bibitem{BV97} E. Bernstein and U. Vazirani,
{SIAM J. Comput.} {\bf 26}, 1411 (1997).
\bibitem{Yao93} A. Yao,
in {\it Proceedings of the 34th Annual Symposium on Foundations of Computer Science}, edited by S. Goldwasser, p.~352 (IEEE Computer Society Press, Los Alamitos, CA, 1993).
\bibitem{Sho94} P.~W. Shor,
in {\it Proceedings of the 35th Annual Symposium on Foundations of Computer Science}, edited by S. Goldwasser, p.~124 (IEEE Computer Society Press, Los Alamitos, CA, 1994).
\bibitem{Mye97} J.~M. Myers,
{Phys.\ Rev.\ Lett.} {\bf 78}, 1823 (1997).
\bibitem{98QU} M. Ozawa,
{Phys.\ Rev.\ Lett.} {\bf 80}, 631 (1998).
\bibitem{98eqc} H. Nishimura and M. Ozawa, Computational complexity of uniform quantum circuit families and quantum Turing machines, (in preparation).
\bibitem{LP98} N. Linden and S. Popescu,
The halting problem for quantum computers,
(Eprint: quant-ph/9806054).
\end{numbibliography}
\end{document} | arXiv | {
"id": "9809038.tex",
"language_detection_score": 0.736354649066925,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\author{Klaus Hulek} \address{Leibniz Universit\"at Hannover, Institut f\"ur Algebraische Geometrie, Wel\-fen\-gar\-ten 1, D-30167 Hannover, Germany} \email{hulek@math.uni-hannover.de} \author{Orsola Tommasi} \address{Leibniz Universit\"at Hannover, Institut f\"ur Algebraische Geometrie, Wel\-fen\-gar\-ten 1, D-30167 Hannover, Germany} \email{tommasi@math.uni-hannover.de} \subjclass[2000]{Primary 14K10; Secondary 14C15, 14F25, 14D22} \keywords{Abelian varieties, Voronoi compactification, Chow ring, cohomology ring}
\title[Cohomology of the toroidal compactification of $\mathcal A_3$]{Cohomology of the toroidal compactification\\ of $\mathcal A_3$} \date{March 29, 2009}
\begin{abstract} We prove that the cohomology groups with rational coefficients of the Voronoi compactification $\AVOR 3$ of the moduli space of abelian threefolds coincide with the Chow groups of that space, as determined by Van der Geer. \end{abstract} \maketitle
\section{Introduction}
The moduli space $\ab g$ of principally polarized abelian varieties has several compactifications, notably the Satake compactification $\ASAT g$ and various toroidal compactifications. Among the toroidal compactifications the so called Voronoi compactification $\AVOR g$ is distinguished by the fact that it represents a geometrically meaningful functor, as was shown by Alexeev \cite{A} and Olsson \cite{O}. Toroidal compactifications are defined by suitable fans in the cone of semi-positive symmetric real $(g \times g)$ matrices and in the case of $\AVOR g$ the fan is given by the second Voronoi decomposition. For a definition of the second Voronoi fan we refer the reader to \cite{V2b} or for a more modern reference to \cite{AN}.
A general discussion of toroidal compactifications of $\ab g$ can be found in the survey article \cite{HuSa}.
In genus $3$ all known toroidal compactifications of the moduli space $\ab 3$ of principally polarized abelian varieties coincide with the Voronoi compactification $\AVOR 3$. We recall the explicit description of the second Voronoi decomposition in the case $g=3$ in Section \ref{rank3}. A detailed description of the geometry of the space $\AVOR 3$ can be found in \cite{Tsushima}, and
the Chow ring of this space has been computed by Van der Geer \cite{vdG}.
In this note we compute the cohomology groups with rational coefficients and prove that they coincide with the Chow groups of this space. \begin{thm}\label{thm} The Betti numbers of $\AVOR 3$ are $b_0=b_{12}=1$, $b_2=b_{10}=2$, $b_4=b_8=4$ and $b_6=6$. \end{thm}
Our approach is similar to that of \cite{vdG}, and is based on a study of the stratification of $\AVOR 3$ defined by the torus rank, which we introduce in Section \ref{strat}.
We shall give the proof of the main result in Section \ref{proofmaintheo} modulo the computation of the cohomology of the various strata, which will be done in the subsequent sections.
As a corollary we obtain \begin{cor}\label{cor} The cycle map defines an isomorphism $$ CH^\bullet(\AVOR 3)\otimes \bb{Q} \cong H^\bullet(\AVOR 3;\bb{Q}) $$ between the Chow ring and the cohomology ring of $\AVOR 3$ with rational coefficients. \end{cor} \begin{proof} The Betti numbers coincide with the rank of the Chow groups as determined by Van der Geer (\cite{vdG}). Since the intersection pairing is non-degenerate, the cycle map gives an isomorphism. \end{proof}
Although this result is not particularly surprising, we could not find a reference to it in the literature, so we decided to fill the gap with this note.
We would like to remark that there are other possible approaches that yield the same result. For instance, one can consider the Torelli map $\Mb 3\rightarrow \AVOR 3$ from the moduli space of Deligne--Mumford stable curves of genus $3$ to the toroidal compactification of $\ab 3$. The moduli space $\Mb 3$ has a stratification by topological type. Since the Torelli map for genus $3$ is surjective, we can stratify $\AVOR 3$ by taking the images of the strata of $\Mb 3$. It is easy to show that all strata of $\AVOR 3$ obtained in this way are isomorphic to finite quotients of products of moduli spaces $\Mm gn$ with $g\leq 3$ and $0\leq n\leq 2(3-g)$. Then one can use the known results about the cohomology of these spaces $\Mm gn$ to calculate the cohomology of $\AVOR 3$.
In this note, we will work with the stack $\AVOR 3$ rather than the associated coarse moduli space. We recall that $\AVOR 3$ is a smooth Deligne--Mumford stack. Hence the rational cohomology of the stack and the associated coarse moduli space coincide.
Finally we remark that the same techniques also apply to the (easier) case of genus $2$.
\begin{rem} There is an isomorphism $$CH^\bullet(\AVOR 2)\otimes\bb{Q} \cong H^\bullet(\AVOR 2;\bb{Q}).$$ \end{rem}
\subsection*{Notation}\ \\ \begin{longtable}{lp{10cm}} $\ab g$ & moduli stack of principally polarized abelian varieties of genus~$g$\\[1pt] $\mathcal X_g$ & universal family over $\ab g$\\[1pt] $\ASAT g$ & Satake compactification of $\ab g$\\[1pt] $\AVOR g$ & Voronoi compactification of $\ab g$ \\[1pt] $\XVOR g$& universal family over $\AVOR g$\\[1pt] $\Mm gn$ & moduli stack of non-singular curves of genus $g$ with $n$ marked points\\[1pt] \multicolumn{2}{l}{$\M g:=\Mm g0$}\\[1pt] $\mathfrak S_d$ & symmetric group in $d$ letters\\[1pt] \end{longtable}
For every $g$, we denote by $\varphi_g\colon\thinspace \AVOR g \rightarrow \ASAT g$ the natural map from the Voronoi to the Satake compactification. Let $\pi_g: \XVOR g \to \AVOR g$ be the universal family, $q_g: \XVOR g \to \XVOR g/\pm 1$ the quotient map from the universal family to the universal Kummer family and $k_g: \XVOR g/\pm 1 \to \AVOR g$ the universal Kummer morphism.
Throughout the paper, we work over the field $\bb{C}$ of complex numbers.
\section{Stratification and outline of the proof}\label{stratification}
\subsection{A stratification}\label{strat} The object of this note is the rational cohomology of the toroidal compactification $\AVOR 3$ of the moduli space of abelian varieties of dimension $3$. We shall make use of a natural stratification of $\AVOR 3$ which was also used by Van der Geer \cite{vdG}, whose notation we adopt.
Recall that there is a natural map $\varphi_3\colon\thinspace \AVOR 3 \rightarrow \ASAT 3$ to the Satake compactification. The moduli space $\ASAT 3$ admits a stratification $\ASAT 3 = \ab 3\sqcup \ab2\sqcup \ab1\sqcup\ab0$. This defines a filtration $\{\beta_t\}_{0\leq t\leq 3}$ on $\AVOR 3$, by setting $$\beta_t:=\varphi_3^{-1}\left(\bigsqcup_{0\leq j\leq g-t}\ab j\right).$$ In other words, $\beta_t\subset\AVOR 3$ is the locus of semi-abelian varieties with torus rank at least $t$.
\subsection{Cohomology of the strata}\label{cohostrat} We shall now state the results about the cohomology with compact support of the various strata. Proofs will be given in the subsequent Sections \ref{rank1} -- \ref{rank3}.
The stratum $\beta_0\setminus\beta_1$ of the filtration $\{\beta_t\}$ is $\ab 3$. Its cohomology was computed by Hain in \cite{Hain}.
\begin{thm}\label{hain} The rational cohomology groups with compact support of $\ab 3$ are given by $$ H_c^k(\ab 3;\bb{Q})=\left\{\begin{array}{ll} \bb{Q}(-6) & k=12,\\ \bb{Q}(-5) & k=10,\\ \bb{Q}(-4) & k=8,\\ F & k=6,\\ 0 & \text{otherwise},\\ \end{array}\right. $$ where $F$ is a two-dimensional mixed Hodge structure which is an extension $$0\rightarrow \bb{Q} \rightarrow F\rightarrow \bb{Q}(-3)\rightarrow 0.$$ \end{thm}
\begin{proof} This is a rephrasing of \cite[Thm~1]{Hain}, by using the isomorphism $$H_c^k(\ab 3;\bb{Q})\cong H^{12-k}(\ab 3;\bb{Q})^*\otimes \bb{Q}(-6)$$ given by Poincar\'e duality on the $6$-dimensional space $\ab 3$. \end{proof}
The cohomology with compact support of the other strata is as follows.
\begin{prop}\label{propcohorank1} The cohomology with compact support of $\beta_1\setminus\beta_2$ is given by $$ \begin{array}{l} H_c^{10}({\beta_1\setminus\beta_2};\bb{Q})=\bb{Q}(-5)\\ H_c^{8}({\beta_1\setminus\beta_2};\bb{Q})=\bb{Q}(-4)^2\\ H_c^{6}({\beta_1\setminus\beta_2};\bb{Q})=\bb{Q}(-3)^2\\ \end{array} \ \begin{array}{l} H_c^5({\beta_1\setminus\beta_2};\bb{Q})=\bb{Q} \\ H_c^{4}({\beta_1\setminus\beta_2};\bb{Q})=\bb{Q}(-2)\\ H_c^k({\beta_1\setminus\beta_2};\bb{Q})=0 \text{ for }k\notin\{4,5,6,8,10\}.\\ \end{array} $$ \end{prop} For torus rank $2$ we obtain \begin{prop}\label{propcohorank2} The cohomology with compact support of the stratum $\beta_2\setminus\beta_3$ is given by $$ \begin{array}{l} H_c^{8}({\beta_2\setminus\beta_3};\bb{Q})=\bb{Q}(-4)\\ H_c^{6}({\beta_2\setminus\beta_3};\bb{Q})=\bb{Q}(-3)^2\\ H_c^{4}({\beta_2\setminus\beta_3};\bb{Q})=\bb{Q}(-2)\\ \end{array} \ \ \ \ \begin{array}{l} H_c^2({\beta_2\setminus\beta_3};\bb{Q})=\bb{Q}(-1) \\ H_c^k({\beta_2\setminus\beta_3};\bb{Q})=0 \text{\ \ \ for }k\notin\{2,4,6,8\}.\\
\ \\ \end{array} $$ \end{prop}
In the proof of the two propositions above, we make use of the following fact (see \cite{Tsushima}): the natural map $\beta_1\rightarrow \ASAT 2$ factors through $\AVOR 2$, giving rise to the commutative diagram
\begin{equation*} \xymatrix{ {\beta_1} \ar[r]^{k_2} \ar[rd]^{\pi_2}& {\AVOR 2}\\ &{\ASAT {2}}\ar@{<-}[u]_{\varphi_2} } \end{equation*} where $k_2\colon\thinspace \beta_1\cong(\XVOR2/\pm1)\rightarrow \AVOR2$ is the universal Kummer variety over $\AVOR 2$.
Finally, we use the toroidal description of $\AVOR 3$ to compute the cohomology of the stratum with torus rank $3$. The corresponding result is \begin{prop}\label{propcohorank3} The cohomology groups $H_c^k(\beta_3;\bb{Q})$ are trivial for degree $k$ different from $0,2,4,6$, and are given in the other cases by $$ \begin{array}{llll} H_c^6(\beta_3;\bb{Q})=\bb{Q}(-3)&&& H_c^2(\beta_3;\bb{Q})=\bb{Q}(-1) \\ H_c^4(\beta_3;\bb{Q})=\bb{Q}(-2)^2&&& H_c^0(\beta_3;\bb{Q})=\bb{Q}. \end{array} $$ Moreover, the generators of these cohomology groups with compact support can be identified with the fundamental classes of the strata of $\AVOR 3$ corresponding to the cones $\sigma^{(3)}_{\text{local}}$, $\sigma^{(4)}_{I}$, $\sigma^{(4)}_{II}$, $\sigma^{(5)}$ and $\sigma^{(6)}$ (to be defined in Section~\ref{rank3}). \end{prop}
\subsection{Spectral sequences in cohomology}\label{spectral}
Our proofs of results on the cohomology of $\AVOR 3$ and its strata are based on an intensive use of long exact sequences and spectral sequences in cohomology with compact support. We shall recall the definition of the sequences we use most often in the proofs.
Since the cohomology with rational coefficients of a Deligne--Mumford stack coincides with that of its coarse moduli space, in this section we will work with quasi-projective varieties. A more stack-theoretical approach can be obtained by recalling that $\AVOR g$ is the finite quotient of the fine moduli scheme $\AVOR g(n)$ of abelian varieties with level-$n$ structure for $n\geq3$. Then the same constructions can be obtained by working on $\AVOR g(n)$ equivariantly.
Recall that if $X$ is a quasi-projective variety and $Y$ a closed subvariety of $X$, then the inclusion $Y\hookrightarrow X$ induces a Gysin long exact sequence in cohomology with compact support: \begin{equation*} \cdots\rightarrow H_c^{k-1}(Y;\bb{Q})\rightarrow H_c^{k}(X\setminus Y;\bb{Q})\rightarrow H_c^k(X;\bb{Q})\rightarrow H_c^{k}(Y;\bb{Q})\rightarrow\cdots \end{equation*}
By functoriality of mixed Hodge structures (\cite[Prop.~5.54]{PS}), this exact sequence respects mixed Hodge structures.
Next, assume we have a filtration $\emptyset=Y_0\subset Y_1\subset Y_2\subset \cdots\subset Y_N = X$ by closed subvarieties of $X$. In this case, there is a spectral sequence $E_r^{p,q}\Rightarrow H_c^{p+q}(X;\bb{Q})$ associated to the filtration $\{Y_i\}$. The $E_1$ term is given by $E_1^{p,q}=H_c^{p+q}(Y_p\setminus Y_{p-1};\bb{Q})$. This spectral sequence can be constructed by taking a compactification $\overline X$ of $X$ with border $S:=\overline X\setminus X$. Let us denote by $\overline{Y_i}$ the closure of $Y_i$ in $\overline X$, and consider the filtration $\{Y'_i:=\overline{Y_i}\cup S\}_{0\leq i\leq N}$ of the pair $(\overline X,S)$. In particular, one has $H^\bullet(Y'_j,Y'_{j-1};\bb{Q})=H^\bullet_c(Y_j\setminus Y_{j-1};\bb{Q})$ for all $j\geq 1$. One can describe the spectral sequence associated to $\{Y_i\}$ as the spectral sequence associated to the bigraded exact couple $(D,E)$ with $D^{\alpha,\beta}=H^{\alpha+\beta}(\overline X,Y'_{\alpha-1};\bb{Q})$ and $E^{\alpha,\beta}=H^{\alpha+\beta}(Y'_{\alpha},Y'_{\alpha-1};\bb{Q})$, which converges to $H^{\alpha+\beta}(\overline X,S;\bb{Q})=H_c^{\alpha+\beta}(X;\bb{Q})$. Arguing as in \cite[Lemma~3.8]{arapura}, this ensures the compatibility with mixed Hodge structures by functoriality. For the definition of exact couples, see \cite[\S A.3.2]{PS}.
Note that the $d_1$ differentials of the spectral sequence in cohomology with compact support associated to $\{Y_i\}$ coincide with the differentials of the Gysin long exact sequences associated to the closed inclusions $Y_i\setminus Y_{i-1}\hookrightarrow Y_{i+1}\setminus Y_{i-1}$.
Leray spectral sequences play an intensive role in our computation of the cohomology of the strata $\beta_1$ and $\beta_2\setminus \beta_1$. Typically, we will be in the following situation: let $X$ and $Y$ be quasi-projective varieties, and $f:\;X\rightarrow Y$ a fibration with fibres which are homotopy equivalent under proper maps to a fixed quasi-projective variety $B$. Let us denote by $\mathcal H^{(p)}$ the local system on $X$ induced by the $p$th cohomology group with compact support of the fibre of $f$.
In this situation, one can consider the Leray spectral sequence of cohomology with compact support associated to $f$. This is the spectral sequence $E_r^{p,q}\Rightarrow H_c^{p+q}(X;\bb{Q})$ with $E_2^{p,q}\cong H^p(Y;\mathcal{H}^{(q)})$. Note that the Leray spectral sequence associated to $f$ respects mixed Hodge structures (see, e.g., \cite[Cor.~6.7]{PS}).
\subsection{Proof of the main theorem}\label{proofmaintheo} The results on the cohomology with compact support stated in Section~\ref{cohostrat} enable us to compute the cohomology of $\AVOR 3$ using the spectral sequence $E_\bullet^{p,q}\Rightarrow H_c^{p+q}(\AVOR 3;\bb{Q})$, $E_1^{p,q}=H_c^{p+q}(\beta_{3-p}\setminus\beta_{4-p};\bb{Q})$ associated to the filtration $\beta_3\subset\beta_2\subset\beta_1\subset\beta_0=\AVOR 3$.
\begin{lem}\label{pfthm1} The $E_1$ term of the spectral sequence in cohomology with compact support associated to the filtration $\beta_3\subset\beta_2\subset\beta_1\subset\beta_0=\AVOR 3$ is as given in Table~\ref{main_spseq}. The only non-trivial differential of this spectral sequence is $d_1^{2,3}\colon\thinspace E_1^{2,3}\rightarrow E_1^{3,3}$, which is injective. In particular, the spectral sequence degenerates at $E_2$. \end{lem}
\begin{table} \caption{\label{main_spseq} $E_1$ term of the spectral sequence converging to $H_c^\bullet(\AVOR 3;\bb{Q})=H^\bullet(\AVOR 3;\bb{Q})$} $$
\begin{array}{r|ccccc} q&&&&\\[6pt] 9&0&0&0&\bb{Q}(-6)\\ 8&0&0&\bb{Q}(-5)&0\\ 7&0&\bb{Q}(-4)&0&\bb{Q}(-5)\\ 6&\bb{Q}(-3)&0&\bb{Q}(-4)^2&0\\ 5&0&\bb{Q}(-3)^2&0&\bb{Q}(-4)\\ 4&\bb{Q}(-2)^2&0&\bb{Q}(-3)^2&0\\ 3&0&\bb{Q}(-2)&\bb{Q}&F\\ 2&\bb{Q}(-1)&0&\bb{Q}(-2)&0\\ 1&0&\bb{Q}(-1)&0&0\\ 0&\bb{Q}&0&0&0\\ \hline
&0&1&2&3&p\\ \left(\begin{smallmatrix}\text{torus}\\\text{rank}\end{smallmatrix}\right) & (3)&(2)&(1)&(0) \end{array} $$ \end{table} \proof The description of the $E_1$ term of the spectral sequence follows from the description of the compactly supported cohomology of the strata given in Section~\ref{cohostrat} and from the definition of the spectral sequence in Section~\ref{spectral}.
An inspection of the spectral sequence in Table~\ref{main_spseq} yields that $E_1^{p,q}$ (and hence $E_r^{p,q}$) is always trivial if $p+q$ is odd, with the exception of $E_1^{2,3}$ (hence possibly also $E_r^{2,3}$ for $r\geq 2$). Therefore, all differentials not involving $E_r^{2,3}$ terms are necessarily trivial, since they are maps either from or to $0$.
This leaves us with only three possibly non-trivial differentials to investigate. The first two are the differentials $d_r^{2-r,2+r}\colon\thinspace E_r^{2-r,2+r}\rightarrow E_r^{2,3}$ for $r=1,2$. Note that in both cases, the Hodge structure on $E_r^{2,3}$ is pure of weight $0$, whereas the Hodge structure on $E_r^{2-r,2+r}$ is pure of weight $4$. Since the weights are different, the differential $d_r^{2-r,2+r}$ can only be the $0$ morphism.
Next, we investigate the differential $d_1^{2,3}\colon\thinspace E_1^{2,3}\rightarrow E_1^{3,3}$, which can have rank either $0$ or $1$. Assume for the moment that $d_1^{2,3}$ is the $0$ morphism. Then the spectral sequence degenerates at $E_1$, so that $H^{5}(\AVOR 3;\bb{Q})\cong H_c^{5}(\AVOR 3;\bb{Q})=E_1^{2,3}=\bb{Q}$ holds. This means that the cohomology of $\AVOR 3$ in degree $5$ is pure of Hodge weight~$0$. But $\AVOR 3$ is a smooth proper stack, being the quotient by a finite group of the stack $\AVOR 3(n)$ of principally polarized abelian varieties with a level-$n$ structure, which is represented by a smooth projective scheme for $n\geq 3$. In particular, the Hodge structure on $H^k(\AVOR3;\bb{Q})$ is pure of weight $k$. Hence, the rank of $d_1^{2,3}$ must be $1$. Therefore, this differential is injective with cokernel isomorphic to $\bb{Q}(-3)$. This ensures $E_2^{2,3}=0$ and $E_2^{3,3}=\bb{Q}(-3)$. \qed
Note that Lemma~\ref{pfthm1} directly implies that the cohomology of $\AVOR3$ is all algebraic, with Betti numbers as stated in Theorem~\ref{thm}.
In the remainder of this paper we will discuss the various strata defined by the torus rank and compute their cohomology.
\section{Torus rank $1$}\label{rank1}
To compute the cohomology with compact support of $\beta_1\setminus\beta_2$ we will use the map $k_2\colon\thinspace \beta_1\setminus\beta_2\rightarrow \ab 2$ realizing $\beta_1\setminus\beta_2$ as the universal Kummer variety over $\ab 2$. The fibre of $\beta_1\setminus\beta_2$ over a point parametrizing an abelian surface $S$ is $K:=S/\pm 1$. The cohomology of $K$ is one-dimensional in degree $0$ and $4$. The only other non-trivial cohomology group is $H^2(K;\bb{Q})\cong \bigwedge^2H^1(S;\bb{Q})$.
To compute $H_c^\bullet(\beta_1\setminus\beta_2;\bb{Q})$, we consider the Leray spectral sequence associated to $k_2$. Note that the $0$th and the fourth cohomology group of the fibre induce trivial local systems on $\ab 2$. Moreover, the second cohomology group of the fibre induces the rank $6$ local system $\mathbb V_{(1,1)}\oplus \bb{Q}(-1)$ on $\ab 2$. Here we denote by $\mathbb V_{(1,1)}$ the symplectic local system on $\ab 2$ determined by the irreducible representation of $\mathop{\mathrm {Sp}}\nolimits(4,\bb{Q})$ associated to the partition $(1,1)$.
We start by determining the cohomology with compact support of $\ab2$ with values in the local system $\mathbb V_{(1,1)}$.
\begin{lem}\label{a2v11} The rational cohomology groups with compact support of the moduli spaces $\M2$ and $\ab2$ with coefficients in $\mathbb V_{(1,1)}$ vanish in degree $k\neq 3$. In degree $3$, one has $$H_c^3(\ab 2;\mathbb V_{(1,1)})=H_c^3(\M2;\mathbb V_{(1,1)})=\bb{Q}.$$ \end{lem}
\begin{proof} We prove the claim about the cohomology of $\M2$ first.
Following the approach of \cite{G-2}, we use the forgetful map $p_2\colon\thinspace \Mm 22\rightarrow \M2$ to obtain information. Note that the fibre of $p_2$ is the configuration space of $2$ distinct points on a genus $2$ curve. The cohomology of $\Mm 22$, with the action of the symmetric group, was computed in \cite[Cor.~III.2.2]{OT-thesis}. This result allows us to conclude $H_c^3(\M2;\mathbb V_{(1,1)})=\bb{Q}$, $H_c^k(\M2;\mathbb V_{(1,1)})=0$ for $k\neq 3$. (Note that this is in agreement with the Hodge Euler characteristic of $\M2$ in the local system $\mathbb V_{(1,1)}$ computed in \cite[\S8.2]{G-2}.)
Next, we determine $H_c^\bullet(\ab2;\mathbb V_{(1,1)})$. To this end, we write $\ab 2$ as the disjoint union of the locus $A_{1,1}$ of decomposable abelian surfaces, and the image of the Torelli map $t\colon\thinspace\M2\rightarrow \ab2$.
Since the Torelli map is injective on the associated coarse moduli spaces, it induces an isomorphism between the cohomology of $\M2$ and that of $t(\M2)$ in every system of coefficients that is locally isomorphic to a $\bb{Q}$-vector bundle. Therefore, the Gysin long exact sequence with $\mathbb V_{(1,1)}$-coefficients associated to $A_{1,1}\hookrightarrow\ab 2$ yields $$ H_c^{k-1}(A_{1,1};\mathbb V_{(1,1)})\rightarrow H_c^{k}(\M2;\mathbb V_{(1,1)})\rightarrow H_c^k(\ab2;\mathbb V_{(1,1)})\rightarrow H_c^{k}(A_{1,1};\mathbb V_{(1,1)}). $$ In Lemma~\ref{a11v11} below, we will show that $H_c^\bullet(A_{1,1};\mathbb V_{(1,1)})$ is trivial. In view of the Gysin exact sequence above, this implies that $H_c^k(\ab2;\mathbb V_{(1,1)})$ is isomorphic to $H_c^{k}(\M2;\mathbb V_{(1,1)})$. This implies the claim. \end{proof}
\begin{rem} Getzler's result would have been sufficient for the purposes of this note. This follows again from the fact that $\AVOR 3$ is a finite quotient of $\AVOR 3(n)$, so in particular its Hodge Euler characteristic determines the cohomology of the space as graded vector space with $\bb{Q}$-Hodge structures. \end{rem}
\begin{lem}\label{a11v11} The cohomology with compact support of $A_{1,1}$ in the local system of coefficients given by the restriction of $\mathbb V_{(1,1)}$ is trivial. \end{lem} \begin{proof} We consider the restriction of $k_2$ to $A_{1,1}$. Let $S=E_1\times E_2$ be an element of $A_{1,1}$, and let $K:=k_2^{-1}(S)$. Recall that $\mathbb V_{(1,1)}\oplus\bb{Q}(-1)$ is the local system $\mathcal H^{(2)}$ on $A_{1,1}$ induced by $H^2(K;\bb{Q})$. Therefore, the cohomology of $A_{1,1}$ with values in $\mathbb V_{(1,1)}\oplus \bb{Q}(-1)$ coincides with the cohomology of $A_{1,1}$ with values in the local system induced by the part of $\bigwedge^2H^1(S;\bb{Q})$ which is invariant under the symmetries of $E_1\times E_2$ and under the interchange of the two factors $E_1,E_2$ (which can be done topologically albeit not algebraically). Using the K\"unneth formula one sees that the latter local system is one-dimensional and induces the local system $\bb{Q}(-1)$. From this one obtains $H_c^\bullet(A_{1,1};\mathbb V_{(1,1)})=0$. \end{proof}
This allows us to show the following result, which directly implies that the cohomology with compact support of $\beta_1\setminus\beta_2$ is as stated in Proposition \ref{propcohorank1}.
\begin{proof}[Proof of Proposition~\ref{propcohorank1}.] We compute the cohomology with compact support of $\beta_1\setminus\beta_2$ by using the Leray spectral sequence associated to the Kummer fibration $k_2\colon\thinspace\beta_1\setminus\beta_2\rightarrow\ab2$.
By the description of the fibre of $k_2$ given at the beginning of this section, the local systems $\mathcal H^{(0)}$ and $\mathcal H^{(4)}$ are the constant one, whereas $\mathcal H^{(2)}$ is the direct sum of the constant local system $\bb{Q}$ and $\mathbb V_{(1,1)}$.
The cohomology with compact support of $\ab 2$ is well known: it is one-dimen\-sion\-al in degree $4$ and $6$, and trivial elsewhere. This can be easily deduced from the results in \cite{Mu-curves} on the Chow ring of $\M2$. The cohomology of $\ab2$ in the local system $\mathbb V_{(1,1)}$ was computed in Lemma~\ref{a2v11}. From this, one obtains that the $E_2$ term of the Leray spectral sequence in cohomology with compact support associated to $k_2$ is as in Table~\ref{specM2}.
From an inspection of the spectral sequence, one finds that all $E_2^{p,q}$ have pure Hodge structures, which have the same Hodge weight if and only if the sums $p+q$ coincide. Therefore, all differentials $d_r$ ($r\geq 2$) of the spectral sequence are morphisms between Hodge structures of different weight. Hence all differentials are trivial for this reason. This means that the spectral sequence degenerates at $E_2$, thus implying Proposition~\ref{propcohorank1}. \begin{table} \caption{\label{specM2} $E_2$ term of the Leray spectral sequence converging to the cohomology with compact support of $\beta_1\setminus\beta_2$} $$
\begin{array}{r|ccccc} q&&&&\\[6pt] 4&0&\bb{Q}(-4)&0&\bb{Q}(-5)\\ 3&0&0&0&0\\ 2&\bb{Q}&\bb{Q}(-3)&0&\bb{Q}(-4)\\ 1&0&0&0&0\\ 0&0&\bb{Q}(-2)&0&\bb{Q}(-3) \\\hline & 3&4&5&6&p \end{array} $$ \end{table} \end{proof}
\section{Torus rank $2$}\label{rank2}
Recall that $k_2\colon\thinspace\beta_1\rightarrow \AVOR 2$ is the universal family of Kummer varieties over $\AVOR 2$. Under this map, the elements of $\AVOR 3$ with torus rank $2$ are mapped to elements of $\AVOR 2$ of torus rank $1$. If we denote by $\beta'_t$ the stratum of $\AVOR 2$ of semi-abelian varieties of torus rank $\geq t$, we get a commutative diagram $$\xymatrix@R=10pt@C=18pt{ {\AVOR 3}\ar@{<-^{)}}[d] &{\AVOR 2}\ar@{<-^{)}}[d] &{\AVOR 1}\ar@{<-^{)}}[d] \\ {\beta_2\setminus\beta_3} \ar[r]^{k_2} & {\beta'_1\setminus\beta'_2}\ar[r]^{k_1}& {\ab 1}\\ {}&{}&{}\\ {\pi_2^{-1}(\beta'_1\setminus\beta'_2)}\ar[uu]^{q_2}\ar[uur]_{\pi_2}&{{\mathcal X}_1}\ar[uu]^{q_1}\ar[uur]_{\pi_1}&{} }$$
The map $\pi_2$ is the restriction of the universal family over $\AVOR 2$. In particular, the fibres of $\pi_2$ over points of $\beta'_1\setminus\beta'_2$ are rank $1$ degenerations of abelian surfaces, i.e. compactified $\bb{C}^*$-bundles over elliptic curves. A geometric description of these $\bb{C}^*$-bundles is given in \cite{Mu}.
We want to describe this situation in more detail. For this consider the universal Poincar\'e bundle $\mathcal P\rightarrow \mathcal X_1\times_{\ab 1}\hat{\mathcal X_1}$ and let ${\overline U}=\mathbb P(\mathcal P\oplus \mathcal{O}_{\mathcal X_1\times_{\ab1}\mathcal X_1})$ be the associated ${\mathbb P}^1$-bundle. Using the principal polarization we can naturally identify $\hat{\mathcal X_1}$ and ${\mathcal X_1}$, which we will do from now on. We denote by $\Delta$ the union of the $0$-section and the $\infty$-section of this bundle. Set $U= {\overline U} \setminus \Delta$, which is simply the $\bb{C}^*$-bundle given by the universal Poincar\'e bundle $\mathcal P$ with the $0$-section removed and denote the bundle map by $f:U\rightarrow \mathcal X_1\times_{\ab 1}\mathcal X_1$. Then there is a map $\rho: \overline U \to \beta_2\setminus\beta_3$ with finite fibres. Note that the two components of $\Delta$ are identified under the map $\rho$. The restriction of $\rho$ to both $U$ and to $\Delta$ is given by a finite group action, although the group is not the same in the two cases (see the discussion below).
We now consider the situation over a fixed point $[E] \in \ab1$. For a fixed degree $0$ line bundle ${\mathcal L}_0$ on $E$ the preimage $f^{-1}(E \times \{{\mathcal L}_0 \})$ is a semi-abelian surface, namely the $\bb{C}^*$-bundle given by the extension corresponding to ${\mathcal L}_0 \in \hat{E}$. This semi-abelian surface admits a Kummer involution $\iota$ which acts as $x \mapsto -x$ on the base $E$ and by $t \mapsto 1/t$ on the fibre over the origin. The Kummer involution $\iota$ is defined universally on $U$.
Consider the two involutions $i_1, i_2$ on $\mathcal X_1\times_{\ab1}\mathcal X_1$ defined by $i_1(E,p,q)=(E,-p,-q)$ and $i_2(E,p,q)=(E,q,p)$ for every elliptic curve $E$ and every $p,q\in E$. These two involutions lift to involutions $j_1$ and $j_2$ on $U$ that act trivially on the fibre of $f\colon\thinspace U\rightarrow \mathcal X_1\times_{\ab1}\mathcal X_1$ over the origin.
\begin{lem} The diagram \begin{equation}\label{poincare} \xymatrix{
U \ar[r]\ar[d]^{\rho|_U} & {\mathcal X_1\times_{\ab1}\mathcal X_1}\ar[d]^{\rho'}\\ {(\beta_2\setminus\beta_3) \setminus \rho(\Delta)} \ar[r] & {\Sym^2_{\ab1}(\mathcal X_1/\pm 1)},\\ } \end{equation} where $\rho':{\mathcal X_1\times_{\ab1}\mathcal X_1}\to{\Sym^2_{\ab1}(\mathcal X_1/\pm 1)}$ is the natural map, is commutative. Moreover
$\rho|_U\colon\thinspace U\rightarrow \rho(U)\subset\beta_2\setminus\beta_3$ is the quotient of $U$ by the subgroup of the automorphism group of $U$ generated by $\iota, j_1$ and $j_2$. \end{lem}
\begin{proof}
Since the map $\rho'$ in the diagram~\eqref{poincare} has degree $8$ and $\iota, j_1,j_2$ generate a subgroup of order $8$ of the automorphism group of $U$, it suffices to show that the map $\rho|_U$ factors through each of the involutions $\iota$ and $j_1, j_2$.
Recall that the elements of $\beta_2\setminus\beta_3$ correspond to rank $2$ degenerations of abelian threefolds. More precisely, every point of $\rho(U)$ corresponds to a degenerate abelian threefold $X$ whose normalization is a ${\mathbb P}^1 \times {\mathbb P}^1$-bundle, namely the compactification of a product of two $\bb{C}^*$-bundles on the elliptic curve $E$ given by $k_1\circ k_2([X])$. The degenerate abelian threefold itself is given by identifying the $0$-sections and the $\infty$-sections of the ${\mathbb P}^1 \times {\mathbb P}^1$-bundle. This identification is determined by a complex parameter, namely the point on a fibre of $U\rightarrow \mathcal X_1\times_{\ab 1}\mathcal X_1$.
Since a degree $0$ line bundle ${\mathcal L}_0$ and its inverse define isomorphic semi-abelian surfaces and since the role of the two line bundles is symmetric, the map $\rho|_U$ factors through $\iota$ and $j_2$.
Since $j_1$ is the commutator of $\iota$ and $j_2$ the map $\rho|_U$ also factors through $j_1$. \end{proof}
A consequence of the lemma above is that the cohomology with compact support of $\rho(U)$ can be computed by taking the invariant part of the cohomology of the total space of the $\bb{C}^*$-bundle $f\colon\thinspace U\rightarrow \mathcal X_1\times_{\ab1}\mathcal X_1$. Hence, the invariant part of the Leray spectral sequence associated to $f$ gives a Leray spectral sequence converging to $H_c^\bullet(\rho(U);\bb{Q})$. Thus, we have to consider the part of $E_2^{p,q}(f)=H_c^q(\bb{C}^*;\bb{Q})\otimes H_c^p(\mathcal X_1\times_{\ab1}\mathcal X_1;\bb{Q})$ that is invariant under the action of $\iota, j_1$ and $j_2$.
Since $j_1$ and $j_2$ both fix the fibre of $f$ over the origin, they act trivially on the cohomology of $\bb{C}^*$. Instead, the Kummer involution $\iota$ acts as the identity on $H_c^2(\bb{C}^*;\bb{Q})$ and as the alternating representation on $H_c^1(\bb{C}^*;\bb{Q})$.
The action of $\iota$, $j_1$ and $j_2$ can be determined by considering the induced actions on $\mathcal X_1\times_{\ab1}\mathcal X_1$. Here one uses that all three involutions respect the map $\mathcal X_1\times_{\ab1}\mathcal X_1\rightarrow\ab1$, whose fibre over $[E]\in\ab1$ is isomorphic to $E\times E$. Note in particular that the involution $(E,p,q)\leftrightarrow (E,-p,q)$ induced by $\iota$ acts as the alternating representation on the linear subspace $\bigwedge^2H_c^1(E;\bb{Q})\subset H_c^2(E\times E;\bb{Q})$, on which $i_1$ and $i_2$ both act trivially.
This discussion yields that the invariant part of the spectral sequence $E_2$ term is as shown in Table~\ref{t:rank2}.
\begin{table}\caption{\label{t:rank2} $E_2$ term of the spectral sequence converging to the cohomology with compact support of $\rho(U)$} $$
\begin{array}{r|cccccc} q&&&&\\[6pt] 2&\bb{Q}(-2)&0&\bb{Q}(-3)&0&\bb{Q}(-4)\\ 1&0&0&\bb{Q}(-2)&0&0 \\\hline & 2&3&4&5&6&p \end{array} $$ \end{table}
\begin{lem} The cohomology groups with compact support of $\rho(U)$ are $1$-dimen\-sion\-al in degree $6$ and $8$ and trivial otherwise. \end{lem}
\begin{proof} It suffices to show that the differential $d_2^{2,2}\colon\thinspace E_2^{2,2}\rightarrow E_2^{4,1}$ in Table~\ref{t:rank2} is an isomorphism.
To describe the differential $d_2^{2,2}$ geometrically, it is useful to consider the restriction of the Torelli map $t\colon\thinspace\Mb 3\rightarrow \AVOR 3$ to the preimage of $\rho(U)$. Moreover, one can use the stratification of $\Mb3$ by topological type to describe $\beta_2$ and $\rho(U)$. In particular, this allows one to find a geometric generator for $H_c^4(\rho(U);\bb{Q})$.
Consider stable curves $C_1\cup C_2\cup C_3$, where the component $C_1$ is smooth of genus $1$, the component $C_2$ is a smooth rational curve and the component $C_3$ is a rational curve with exactly one node, satisfying $\#(C_1\cap C_2)=1$, $\#(C_1\cap C_3)=0$ and $\#(C_2\cap C_3)=2$.
Denote by $G$ the closure in $t^{-1}(\rho(U))$ of the locus of such curves, and denote by $t_*[G]$ the push-forward to $\rho(U)$ of the cycle class of $G$. Then the fundamental class of $t_*[G]$ generates $H_c^4(\rho(U);\bb{Q})$.
Recall that the locus in $\Mb 3$ of irreducible curves with two nodes maps surjectively to $\beta_2$ under the Torelli map. Moreover, all curves in $\Mb 3$ that have two nodes and map to $\beta_2$
can be constructed by taking a stable curve of genus $1$ with $4$ marked points and identifying the marked points pairwise. There is a well known relation between cycle classes of dimension $2$ in $\Mmb 14$, called Getzler's relation (see \cite{G-rela}). This relation is $\mathfrak S_4$-invariant and it induces a relation between dimension $2$ cycles in $t^{-1}(\beta_2)$, which if pushed forward under $t$ induces a relation in $H_c^4(\beta_2;\bb{Q})$. The latter relation involves non-trivially the push-forward of the fundamental class of $\overline G\subset t^{-1}(\beta_2)$. In particular, restricting to $\rho(U)\subset\beta_2$ yields that $t_*[G]$ vanishes in $H_c^4(\rho(U);\bb{Q})$. Hence, the differential $d_2^{2,2}$ must be an isomorphism. \end{proof}
\begin{rem} There is also another way to see that the differential $d_2^{2,2}\colon\thinspace E_2^{2,2}\rightarrow E_2^{4,1}$ in Table~\ref{t:rank2} is an isomorphism. Namely, one can compactify the $\bb{C}^*$-bundle $U$ to the ${\mathbb P^1}$-bundle ${\overline U}=\mathbb P(\mathcal P\oplus \mathcal{O}_{\mathcal X_1\times_{\ab1}\mathcal X_1})$ and compute the invariant part of the exact sequence in rational cohomology of the pair $({\overline U},\Delta)$. This then shows that the invariant part of $H^4_c(U;\bb{Q})$ vanishes as claimed. We decided to include the above proof involving Getzler's relation since the relation to $\Mb3$ is of independent interest. \end{rem}
\begin{proof}[Proof of Proposition~\ref{propcohorank2}] We compute the cohomology with compact support of $\beta_2\setminus\beta_3$ by exploiting the Gysin long exact sequence associated to the inclusion $\rho(\Delta)\hookrightarrow(\beta_2\setminus\beta_3)$: \begin{equation}\label{gysin2} \cdots\rightarrow H_c^{k-1}(\rho(\Delta);\bb{Q})\rightarrow H_c^{k}(\rho(U);\bb{Q})\rightarrow H_c^k(\beta_2\setminus\beta_3;\bb{Q})\rightarrow H_c^{k}(\rho(\Delta);\bb{Q})\rightarrow\cdots \end{equation}
The map $\rho$ identifies the two components of $\Delta$, each of which is isomorphic to $\mathcal X_1 \times_{\ab1} \mathcal X_1$. Moreover, it factors through the finite group $G$ generated by the following three involutions: the involution which interchanges the two factors of $\mathcal X_1 \times_{\ab1} \mathcal X_1$, the involution which acts by $(x,y)\mapsto (-x,-y)$ on each fibre $E \times E$ and finally the involution which acts by $(x,y) \mapsto (x+y,-y)$. This can be read off from the construction of the toroidal compactification (see \cite[Section I]{HuSa} for an outline of this construction. Also note that the stratum $\Delta$ corresponds to the stratum in the partial compactification in the direction of the $1$-dimensional cusp associated to a maximal-dimensional cone in the second Voronoi decomposition for $g=2$. A detailed description can be found in \cite[Part I, Chapter 3]{HKW}).
Hence $$H^\bullet_c(\rho(\Delta);\bb{Q})\cong H^\bullet_c(E \times E/G;\bb{Q}) \otimes H^\bullet_c(\bb A_\C^1;\bb{Q}).$$ A straightforward calculation shows that the $G$-invariant cohomology of $E\times E$ has rank $1$ in even dimension and vanishes otherwise. In particular this quotient behaves cohomologically like ${\mathbb P}^2$.
Since $H_c^k(\rho(U);\bb{Q})$ and $H_c^k(\rho(\Delta);\bb{Q})$ both vanish if $k$ is odd, the exact sequence \eqref{gysin2} splits into short exact sequences $$0\rightarrow H_c^{k}(\rho(U);\bb{Q})\rightarrow H_c^k(\beta_2\setminus\beta_3;\bb{Q})\rightarrow H_c^{k}(\rho(\Delta);\bb{Q})\rightarrow 0.$$ This implies the claim. \end{proof}
\begin{rem} We would like to take this opportunity to correct a slight error in \cite[3.8]{vdG} where it was claimed that the map $\rho$ factors through $\Sym^2_{\ab1}(\mathcal X_1/\pm 1)$ rather than through the quotient by $G$. This, however, does not affect the results of \cite{vdG}. \end{rem}
\section{Torus rank $3$}\label{rank3}
The stratum $\beta_3\subset \AVOR 3$ lying over $\ab 0\subset\ASAT 3$ is entirely determined by the fan of the toroidal compactification. For this we first have to describe the Voronoi fan $\Sigma$ in genus $3$.
Consider the free abelian group $\mathbb L_3\cong \bb{Z}^3$ with generators $x_1,x_2,x_3$ and let $\mathbb M_3=\Sym_2(\mathbb L_3)$. Then $\mathbb M_3$ is isomorphic to the space of $3\times 3$ integer symmetric matrices with respect to the basis $x_i$ via the map which assigns to a matrix $A$ the quadratic form ${}^{t}xAx$. We shall use the basis of $\mathbb M_3$ given by the forms $U_{i,j}^*$, $1\leq i\leq j\leq 3$ given by $$U_{i,j}^*=2^{\delta_{i,j}}x_ix_j.$$ Let $\Sym_2^{\geq 0}(\mathbb L_3\otimes\bb{R})$ be the cone of positive semidefinite forms in $\mathbb M_3\otimes\bb{R}$. The group $\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})$ acts on $\Sym_2^{\geq 0}(\mathbb L_3\otimes\bb{R})$ by $$\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})\ni g:\ M\longmapsto {}^tg^{-1}Mg^{-1}.$$ Let $$ \sigma^{(6)}:=\bb{R}_{\geq 0}\alpha_1+\bb{R}_{\geq 0}\alpha_2+\bb{R}_{\geq 0}\alpha_3+\bb{R}_{\geq 0}\gamma_1+\bb{R}_{\geq 0}\gamma_2+\bb{R}_{\geq 0}\gamma_3, $$ where $\alpha_i=x_i^2$ for all $i=1,2,3$ and $\gamma_i=(x_j-x_k)^2$ for $\{i,j,k\}=\{1,2,3\}$. Since the forms $\alpha_j,\gamma_i$ form a basis of $\mathbb M_3$, this is a basic $6$-dimensional cone in $\Sym_2^{\geq 0}(\mathbb L_3\otimes\bb{R})$.
The Voronoi fan in genus $3$ is the fan $\Sigma$ in $\Sym_2^{\geq 0}(\mathbb L_3\otimes\bb{R})$ given by $\sigma^{(6)}$ and all its faces, together with their $\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})$-translates. We use the notation $$\sigma^{(6)}=\alpha_1*\alpha_2*\alpha_3*\gamma_1*\gamma_2*\gamma_3,$$ and similarly for the faces of $\sigma^{(6)}$.
To describe $\AVOR 3$, we have to know all possible $\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})$-orbits of $\sigma^{(6)}$ and its faces. An $i$-dimensional cone corresponds to a $(6-i)$-dimensional stratum in $\AVOR 3$. Since strata of dimension at least $4$ necessarily lie over $\ab l$ with $l\geq 1$, we only need to know the orbits of cones of dimension $\geq 3$.
The following lemma can be proved using the methods of \cite{Tsushima} (see \cite[Chapter~3]{Erdenberger}).
\begin{lem}\label{class} There are two $\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})$-orbits of $3$-dimensional cones, represented by the cones $$ \sigma^{(3)}_{\text{local}}=\alpha_1*\alpha_2*\alpha_3,\ \ \ \sigma^{(3)}_{\text{global}}=\alpha_1*\alpha_2*\gamma_3. $$ The stratum associated to $\sigma^{(3)}_{\text{local}}$ lies over $\ab0$, that associated to $\sigma^{(3)}_{\text{global}}$ lies over $\ab 1$.
There are two $\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})$-orbits of $4$-dimensional cones, given by $$ \sigma^{(4)}_{I}=\alpha_1*\alpha_2*\alpha_3*\gamma_1,\ \ \ \sigma^{(4)}_{II}=\alpha_1*\alpha_2*\gamma_1*\gamma_2. $$ In dimension $5$ and $6$ there is only one $\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})$-orbit. The strata of all cones of dimension at least $4$ lie over $\ab0$. \end{lem}
Let $$\mathbb H_3 = \{\tau=(\tau_{i,j})_{1\leq i,j\leq 3}: \tau={}^t\tau, \im\tau>0\}$$ be the Siegel upper half plane of genus $3$. We consider the rank $6$ torus $T=T^6$ with coordinates $$t_{i,j}=e^{2\pi \sqrt{-1} \tau_{i,j}}\ (1\leq i,j\leq 3).$$
These coordinates correspond to the dual basis of the basis $U_{i,j}^*$. If $\sigma^{(l)}$ is an $l$-dimensional cone in $\Sigma$ then, since the fan $\Sigma$ is basic, it follows that the associated affine variety $T_{\sigma^{(l)}} \cong \bb{C}^l\times(\bb{C}^*)^{6-l}$. The corresponding stratum in $\AVOR 3$ is then a quotient of $\{(0,0,0)\}\times(\bb{C}^*)^{6-l}$ by a finite group. We consider the torus embedding $T\hookrightarrow T_{\sigma^{(6)}}\cong \bb{C}^6$, where the latter isomorphism holds since $\sigma^{(6)}$ is a basic cone of dimension $6$. Let $T_1,\dots,T_6$ be the coordinates of $\bb{C}^6$ corresponding to the basis $\alpha_1,\dots,\gamma_3$. If one computes the dual basis of $\alpha_1,\dots,\gamma_3$ in terms of the dual basis of $U_{i,j}^*$, one obtains that the torus embedding $T\hookrightarrow \bb{C}^6$ is given by $$ \begin{array}{lllll} T_1=t_{1,1}t_{1,3}t_{1,2},&& T_2=t_{2,2}t_{2,3}t_{1,2},&& T_3=t_{3,3}t_{1,3}t_{2,3},\\ T_4=t_{2,3}^{-1},&& T_5=t_{1,3}^{-1},&& T_6=t_{1,2}^{-1}. \end{array} $$
Let us start by considering the stratum associated to $$\sigma^{(3)}_{\text{local}}=\alpha_1*\alpha_2*\alpha_3.$$
Let $S_1,S_2$ and $S_3$ be coordinates corresponding to $\alpha_1,\alpha_2$ and $\alpha_3$, and let $t_{2,3}, t_{1,3},$ $t_{1,2}$ be as above. Then $$ T_{\sigma^{(3)}_{\text{local}}}\cong \bb{C}^3\times(\bb{C}^*)^3\subset \bb{C}^6=T_{\sigma^{(6)}}$$ with coordinates $S_1,S_2,S_3,t_{2,3}^{-1}, t_{1,3}^{-1}, t_{1,2}^{-1}$, where the inclusion is defined by considering $\sigma^{(3)}_{\text{local}}$ as a face of $\sigma^{(6)}$.
The stratum which we add is $\{(0,0,0)\}\times(\bb{C}^*)^3$ modulo a finite group $G=G_{\sigma^{(3)}_{\text{local}}}$, namely the stabilizer of the cone $\sigma^{(3)}_{\text{local}}$ in $\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})$. In order to understand the action of the group $G$ explicitly, we recall that it is naturally a subgroup of the parabolic subgroup which belongs to the standard $0$-dimensional cusp
$$P=\left\{\left(\begin{array}{c|c}\;g&0\\[6pt]\hline\\[-6pt] 0&{}^tg^{-1}\end{array}\right): g\in\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})\right\} \cong \mathop{\mathrm {GL}}\nolimits(3,\bb{Z}) \subset \mathop{\mathrm {Sp}}\nolimits(6,\bb{Z}).$$
\begin{lem}\label{s3loc} The stratum associated to $\sigma^{(3)}_{\text{local}}$ is an affine variety $Y_{\text{local}}^{(3)}=(\bb{C}^*)^3/G$ whose only non-trivial cohomology with compact support is in degree $6$. \end{lem}
\begin{proof} Since the stratum associated to $\sigma^{(3)}_{\text{local}}$ is the quotient of the smooth variety $(\bb{C}^*)^3$ by a finite group, its cohomology and cohomology with compact support are related by Poincar\'e duality. Hence, it suffices to show that the rational cohomology of the stratum is concentrated in degree $0$.
Denote by $T^3$ the rank $3$ torus with coordinates $(v_1,v_2,v_3)=(t_{2,3}^{-1}, t_{1,3}^{-1}, t_{1,2}^{-1})$. The stratum which we add for $\sigma^{(3)}_{\text{local}}$ is then isomorphic to $T^3/G$. Since $\sigma^{(3)}_{\text{local}}=\alpha_1*\alpha_2*\alpha_3$ with $\alpha_i=x_i^2$, we see that the group $G$ is the group generated by the permutations of the $x_i$ and the involutions $(x_1,x_2,x_3)\mapsto(\epsilon_1x_1,\epsilon_2x_2,\epsilon_3x_3)$ with $\epsilon_i=\pm1$. Note that the element $-id$ acts trivially both on $\mathbb H_3$ and on $\mathbb M_3$. Hence the group $G$ is an extension $$1\rightarrow (\bb{Z}/2\bb{Z})^2\rightarrow G \rightarrow \mathfrak S_3 \rightarrow 1,$$ where $\mathfrak S_3$ denotes the symmetric group in $3$ letters. Next, we have to analyze how this group acts on $\mathbb H_3$ and on the torus $T^3$. The permutation of $x_i$ and $x_j$ interchanges $\tau_{i,k}$ and $\tau_{j,k}$ but fixes $\tau_{i,j}$. Hence $\mathfrak S_3$ also acts as group of permutations on the coordinates of $T^3$. The action of the involutions generating $(\bb{Z}/2\bb{Z})^2$ can be seen for example from $$ \begin{pmatrix} -1&0&0\\ 0&1&0\\ 0&0&1 \end{pmatrix} \begin{pmatrix} \tau_{1,1}&\tau_{1,2}&\tau_{1,3}\\ \tau_{1,2}&\tau_{2,2}&\tau_{2,3}\\ \tau_{1,3}&\tau_{2,3}&\tau_{3,3} \end{pmatrix} \begin{pmatrix} -1&0&0\\ 0&1&0\\ 0&0&1 \end{pmatrix} = \begin{pmatrix} \tau_{1,1}&-\tau_{1,2}&-\tau_{1,3}\\ -\tau_{1,2}&\tau_{2,2}&\tau_{2,3}\\ -\tau_{1,3}&\tau_{2,3}&\tau_{3,3} \end{pmatrix}. $$
Hence, the involution $(x_1,x_2,x_3)\leftrightarrow (x_1,x_2,-x_3)$ induces the involution given by $(v_1,v_2,v_3)\leftrightarrow (v_1^{-1},v_2^{-1},v_3)$ and similarly for the other involutions. This allows us to describe the quotient $T^3/G$ explicitly, as given by the image of the map $$ \begin{array}{c@{\;\;}c@{\;\;}l}T ^3 \cong (\bb{C}^*)^3&\longrightarrow&\bb{C}^4\\ (v_1,v_2,v_3)&\longmapsto & (u_1+u_2+u_3,u_1u_2+u_1u_3+u_2u_3,u_1u_2u_3,u_4)=(s_1,s_2,s_3,t), \end{array}$$ where $$u_1=v_1+\frac1{v_1},\ \ u_2=v_2+\frac1{v_2},\ \ u_3=v_3+\frac1{v_3},\ \ u_4=\left(v_1-\frac1{v_1}\right)\left(v_2-\frac1{v_2}\right)\left(v_3-\frac1{v_3}\right). $$
Then the image is the hypersurface $W\subset\bb{C}^4$ given by $$\frac{t^2}{4}-\Bigl(\frac{s_3}{2}+2s_1\Bigr)^2+(s_2+4)^2=0. $$
Note that $W$ is a cone with vertex the line $t=\frac{s_3}{2}+2s_1=s_2+4=0$ in $\bb{C}^4$ over a plane projective conic. Then the claim follows from the contractibility of $W$.
Alternatively, one can also show that the cohomology $H^\bullet(T^3/G;\bb{Q})$ is concentrated in degree $0$, by proving that the only cohomology in $H^\bullet(T^3;\bb{Q})$ which is fixed under the group $G$ is in degree $0$. \end{proof}
The situation with the lower-dimensional strata is similar:
\begin{lem}\label{onlyh0} Let $\sigma^{(l)}$ be an $l$-dimensional subcone of $\alpha_1*\alpha_2*\alpha_3*\gamma_1*\gamma_2*\gamma_3$, with $l\geq 4$. Then the stratum of $\AVOR 3$ associated to $\sigma^{(l)}$ has non-trivial cohomology with compact support only in the maximal degree $2(6-l)$. \end{lem}
\begin{proof} Recall that all $\mathop{\mathrm {GL}}\nolimits(3,\bb{Z})$-orbits of $\sigma^{(l)}$ were described in Lemma~\ref{class}. Hence it suffices to consider the cases in which $\sigma^{(l)}$ is one of the following cones: $\sigma^{(4)}_{I}$, $\sigma^{(4)}_{II}$, $\sigma^{(5)}:=\alpha_1*\alpha_2*\alpha_3*\gamma_1*\gamma_2$ and $\sigma^{(6)}$.
As mentioned above, if $\sigma^{(l)}$ is an $l$-dimensional cone in $\Sigma$ then we have $T_{\sigma^{(l)}}=\bb{C}^l\times(\bb{C}^*)^{6-l}$, because the fan $\Sigma$ is basic. The corresponding stratum in $\AVOR 3$ is then a quotient of $\{(0,0,0)\}\times(\bb{C}^*)^{6-l}\cong (\bb{C}^*)^{6-l}$ by a finite group $G_{\sigma^{(l)}}$. To prove the claim, it suffices to show that the part of the cohomology of $(\bb{C}^*)^{6-l}$ which is invariant for the action of $G$ coincides with $H^0((\bb{C}^*)^{6-l};\bb{Q})$. Since $(\bb{C}^*)^{6-l}$ is smooth, the result about cohomology with compact support will follow from Poincar\'e duality.
For instance, consider the case of $\sigma^{(4)}_{II}$. Using toric coordinates, one finds that the corresponding stratum is given by a quotient of $(\bb{C}^*)^2$ by the action of the finite group $\bb{Z}/2\bb{Z}\times\mathfrak S_3$. The factor $\mathfrak S_3$ acts on $\gamma_1*\alpha_2*\alpha_3$ by permuting $\gamma_1$, $\alpha_2$ and $\alpha_3$, whereas the action of the factor $\bb{Z}/2\bb{Z}$ is generated by the involution $x_1\leftrightarrow -x_1$. One can compute explicitly the action of $\bb{Z}/2\bb{Z}\times\mathfrak S_3$ and prove $(H^\bullet((\bb{C}^*)^2;\bb{Q}))^{\bb{Z}/2\bb{Z}\times\mathfrak S_3}=H^0((\bb{C}^*)^2;\bb{Q})$.
Analogous considerations yield the claim in the case of the other strata. \end{proof}
In conclusion, the proof of Proposition~\ref{propcohorank3} now follows from Lemmas~\ref{s3loc} and~\ref{onlyh0}.
\end{document} | arXiv | {
"id": "0807.4099.tex",
"language_detection_score": 0.7309949398040771,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Subconvexity for additive equations]{Subconvexity for additive equations:\\ pairs of undenary cubic forms} \author[J\"org Br\"udern]{J\"org Br\"udern} \address{JB: Mathematisches Institut, Bunsenstrasse 3--5, D-37073 G\"ottingen, Germany} \email{bruedern@uni-math.gwdg.de} \author[Trevor D. Wooley]{Trevor D. Wooley$^*$} \address{TDW: School of Mathematics, University of Bristol, University Walk, Clifton, Bristol BS8 1TW, United Kingdom} \email{matdw@bristol.ac.uk} \thanks{$^*$Supported by a Royal Society Wolfson Research Merit Award.} \subjclass[2010]{11D72, 11P55} \keywords{Diophantine equations, Hardy-Littlewood method} \date{} \begin{abstract} We investigate pairs of diagonal cubic equations with integral coefficients. For a class of such Diophantine systems with $11$ or more variables, we are able to establish that the number of integral solutions in a large box is at least as large as the expected order of magnitude.\end{abstract} \maketitle
\section{Introduction} The convexity barrier in the Hardy-Littlewood method presents an apparently insurmountable obstacle to the analysis of Diophantine systems in which the underlying number of variables is smaller than twice the total degree of the system. As is well-known, this obstruction arises from the relative sizes of the product of local densities associated with the system, and the square-root of the available reservoir of variables that is a limiting feature of associated exponential sum estimates. In this paper, we establish a lower bound of the anticipated magnitude for the number of integral zeros of certain pairs of diagonal cubic forms in $11$ variables, thereby breaking this convexity barrier.\par
In order to introduce the Diophantine systems central to our discussion, take $l$, $m$, $n$ to be non-negative integers with $m\ge n$, and fix non-zero integers $a_i$, $b_i$, $c_j$, $d_k$, where $1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec l$, $1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec j\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec m$ and $1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec k\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec n$. We then define $N(B)$ to be the number of integral solutions to the system \begin{equation}\label{1.1} \left.\begin{aligned} &a_1x_1^3+\ldots +a_lx_l^3+c_1y_1^3+\ldots +c_my_m^3&&=0,\\ &b_1x_1^3+\ldots +b_lx_l^3&+d_1z_1^3+\ldots +d_nz_n^3&=0, \end{aligned}\, \right\} \end{equation} with $x_i,y_j,z_k\in [-B,B]$. Associated to the system (\ref{1.1}) are the total number of variables $s=l+m+n$, and a measure of the minimal number of variables across equations $q_0^*=\min\{s-l,s-m,s-n\}$. Before announcing the principal conclusion of this paper, we direct the reader to \S6 for a description of Hooley's Riemann Hypothesis (which we call HRH).
\begin{theorem}\label{theorem1.1} Let $s\ge 11$ and $q_0^*\ge 7$, and suppose that the system $(\ref{1.1})$ admits non-singular $p$-adic solutions for each prime $p$. Then, with the possible exception of the case $(l,m,n)=(5,5,2)$, one has $N(B)\gg B^{s-6}$. In the latter exceptional case one recovers the same conclusion by appealing to HRH. \end{theorem}
Examples allied to the system \begin{equation*} \left.\begin{aligned} &49(x_1^3+2x_2^3+3x_3^3)+y_1^3+2y_2^3+7y_3^3+14y_4^3&&=0,\\ &49(x_1^3+4x_2^3+4x_3^3)&+z_1^3+2z_2^3+7z_3^3+14z_4^3&=0, \end{aligned}\, \right\} \end{equation*} demonstrate that the $p$-adic solubility hypothesis in the theorem is required, as the reader may easily verify.\par
The conclusion of Theorem \ref{theorem1.1} establishes the Hasse Principle for those systems (\ref{1.1}) with $s\ge 11$ and $q_0^*\ge 7$, and indeed an appropriate modification of our methods would confirm the weak approximation property for the same systems. In view of the convexity barrier, the cases in which $s=11$ are of particular interest. We note that when $s\ge 13$, the conclusion of Theorem \ref{theorem1.1} follows from our previous work \cite{BW2007a, BW2007b} concerning pairs of diagonal cubic forms (in particular, see \cite[Theorem 2]{BW2007b}). When $s\ge 11$, moreover, the special case in which ${\mathbf a}$ and ${\mathbf b}$ are in rational ratio is covered by \cite[Theorem 10]{BW2007b}. Previous results on pairs of diagonal cubic equations, meanwhile, apply only to systems having $14$ or more variables (see, in chronological order, the references \cite{DL1966, Coo1972, Vau1977, BB1988, Bru1990}).\par
When $s\ge 12$, which is the threshold of the convexity barrier, we are able to refine the asymptotic lower bound of Theorem \ref{theorem1.1}. In this context, it is useful to introduce the product of local densities associated with the system (\ref{1.1}). The latter we define by $\mathscr{C}} \def\calCbar{{\overline \calC}=v_\infty \prod_p v_p$, in which $v_\infty$ is the area of the manifold defined by (\ref{1.1}) in the box $[-1, 1]^s$, and for each prime number $p$ one takes $$v_p=\lim_{h\rightarrow \infty}p^{h(2-s)}M(p^h),$$ where $M(q)$ denotes the number of solutions of (\ref{1.1}) with ${\mathbf x}\in ({\mathbb Z}}\def\dbQ{{\mathbb Q}/q{\mathbb Z}}\def\dbQ{{\mathbb Q})^s$.
\begin{theorem}\label{theorem1.2} When $s\ge 12$ and $q_0^*\ge 8$, one has $N(B)\ge (\mathscr{C}} \def\calCbar{{\overline \calC}+o(1))B^{s-6}$. \end{theorem}
For comparison, our earlier work \cite{BW2011} establishes a conclusion which implies Theorem \ref{theorem1.2} when $s\ge 14$ (see \cite[Theorem 1.1]{BW2011}).\par
Thus far we have discussed only problems involving simultaneous cubic equations, but for problems of very low degree alternative approaches may be applicable. Thus, for systems of linear equations, one has recent work of Green and Tao \cite{GT2010} for prime numbers, and work of the first author \cite{Bru2009} for limit periodic sequences. It is worth remarking also that the Kloosterman method provides conclusions on the edge of subconvexity for problems of quadratic type (for some of the relevant literature, see \cite{Est1962, HB1996, Klo1927}). Problems within the orbit of our methods are not limited to diagonal cubic examples alone, and in \S11 we outline some of what may be said concerning problems of higher degree.\par
In this paper we employ the well-known symbols of Landau and Vinogradov. The constants implicit in the use of these symbols depend at most on $s$, ${\mathbf a}$, ${\mathbf b}$, ${\mathbf c}$, ${\mathbf d}} \def\bfe{{\mathbf e}$ and $\varepsilon$, unless otherwise indicated. In an effort to simplify our analysis, we adopt the following convention concerning the number $\varepsilon$. Whenever $\varepsilon$ appears in a statement, either implicitly or explicitly, we assert that the statement holds for each $\varepsilon>0$. Note that the ``value'' of $\varepsilon$ may consequently change from statement to statement. Throughout, we take $B$ to be a positive real number with $B\gg 1$, in the sense indicated.
\section{Preliminary considerations} We initiate our discussion by introducing the notation and technical infrastructure necessary for our application of the circle method. Consider a system of the shape (\ref{1.1}) subject to the hypotheses of the statement of Theorem \ref{theorem1.1}. Since the conclusion of this theorem is already supplied by \cite[Theorem 2]{BW2007b}\footnote{We correct an oversight here in the statement of \cite[Theorem 9]{BW2007b}, an ingredient in the proof of \cite[Theorem 2]{BW2007b} relevant to our discussion when the system (\ref{1.1}) has the shape $(1,m,n)$ with $m\ge n\ge 6$. The statement of the former theorem should read as follows. \begin{theorem*} Suppose that $t$ is a natural number with $t\ge 6$, and let $c_1,\ldots ,c_t$ be natural numbers satisfying $(c_1,\ldots ,c_t)=1$. Then for each natural number $d$ there is a positive number $\Del$, depending at most on ${\mathbf c}$ and $d$, with the property that the set ${\mathcal E}_t(P)$, defined by $${\mathcal E}_t(P)=\{n\in \dbN : \text{$\nu Pd^{-1/3}<n\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec Pd^{-1/3}$, $(n,c_1\ldots c_t)=1$ and $R_t(dn^3;{\mathbf c})<\Del P^{t-3}$}\},$$ has at most $P^{1-\tau}$ elements. \end{theorem*} \noindent The inserted condition $(n,c_1\ldots c_t)=1$ should also be imposed in the subsequent application of this theorem in \cite[\S6]{BW2007b}. Our forthcoming work \cite{BW2012} discusses the case $(l,m,n)=(1,6,6)$ as a particular instance of more general investigations of diagonal senary cubic forms. }
when $s\ge 13$, there is no loss of generality in restricting to the situations wherein $s=11$ or $s=12$. A modicum of computation reveals that the triple $(l,m,n)$ associated with the system (\ref{1.1}) must take one of four shapes, namely: \begin{enumerate} \item[(A)] $(3,4,4)$ or $(3,5,4)$, \item[(B)] $(4,4,3)$, $(4,4,4)$, $(4,5,3)$ or $(5,4,3)$, \item[(C)] $(2,5,5)$, \item[(D)] $(5,5,2)$. \end{enumerate}
\par Systems of type A and B we analyse by very similar methods in \S\S3 and 4, respectively. The reader will find that the ideas developed in these sections serve as a model for the treatment of the remaining cases relevant to the proof of Theorem \ref{theorem1.1}, as well as the cases required to establish Theorem \ref{theorem1.2}. In order to handle systems of type C, we `borrow' a variable from each of the long blocks of $5$, adding them to the short block of $2$. In this way we obtain a system superficially resembling those of type B, though sharing characteristics with those of type A. In this way, we are able in \S5 to offer an economical treatment of systems of type C that rests heavily on the work of \S\S3 and 4. Readers may care to challenge themselves with the task of developing an alternative treatment based on our work \cite{ARTS1} joint with Kawada, in which the system (\ref{1.1}) is understood in terms of an exceptional set problem involving the representation of values of a binary diagonal form by a diagonal form in five variables. Finally, in order to accommodate systems of type D, we first develop mean value estimates for exponential sums conditional on HRH, and then adapt the methods used for our analysis of systems of type A and B. We offer an abbreviated account of this work in \S6.\par
It is apparent that the system (\ref{1.1}) possesses a real solution $({\mathbf x},{\mathbf y},{\mathbf z})=({\boldsymbol \xi}} \def\Xitil{\widetilde \Xi,{\boldsymbol \eta},\bfzet)\in (-1,1)^s$ in which $\xi_i$, $\eta_i$ and ${\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet}_i$ are each positive for $i\ne 1,2$. We put $$\nu=\tfrac{1}{2}\min_{i\ne 1,2}\{ \xi_i,\eta_i,{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet}_i\}.$$ Let $\eta$ be a small positive number to be fixed in due course, and write
$$\mathscr{A}} \def\calAbar{{\overline \calA}} \def\calAtil{{\widetilde \calA}_\eta (B)=\{ n\in {\mathbb Z}}\def\dbQ{{\mathbb Q}\cap [1,B]: \text{$p$ prime and $p|n$}\Rightarrow p\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^\eta\}.$$ We then put
$$\mathscr{A}} \def\calAbar{{\overline \calA}} \def\calAtil{{\widetilde \calA}_\eta^*(B)=\{n\in [-B,B]: \text{$|n|\in \mathscr{A}} \def\calAbar{{\overline \calA}} \def\calAtil{{\widetilde \calA}_\eta(B)$ or $n=0$}\}.$$ Define the exponential sums
$$f({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=\sum_{|x|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B}e({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} x^3),\quad g({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=\sum_{\nu B<x\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B}e({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} x^3),\quad h({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=\sum_{x\in \mathscr{A}} \def\calAbar{{\overline \calA}} \def\calAtil{{\widetilde \calA}_\eta^*(B)}e({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} x^3),$$ where, as usual, we write $e(z)$ for $e^{2\pi iz}$. Let $\tau_0$ be the positive number defined via the relation $\tau_0^{-1}=852+16\sqrt{2833}=1703.6\ldots $. Then, when $a$ and $b$ are fixed non-zero integers and $\tau_1$ is any real number with $\tau_1<\tau_0$, the methods of \cite{Woo2000} may be applied to confirm that whenever $\eta$ is a sufficiently small positive number, one has \begin{equation}\label{2.1}
\int_0^1|g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^2h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^4|{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{13/4-\tau_1}. \end{equation} We direct the reader to \cite[\S5]{Woo1995b} and \cite[\S2]{Woo2000} for the necessary ideas, the presence of the coefficients $a$ and $b$ leading to superficial complications only. We put $\tau=\frac{1}{10}\tau_0$, and for the remainder of this paper we fix our choice of $\eta>0$ to be sufficiently small in the context of the upper bound (\ref{2.1}) with $\tau_1=9\tau$.\par
Having introduced the cast of exponential sums to appear in our application of the circle method, we next introduce the generating functions \begin{equation}\label{2.2} F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=h(a_1{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(a_2{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_2{\beta}} \def\bfbet{{\boldsymbol \beta})\prod_{i=3}^lg(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta}), \end{equation} \begin{equation}\label{2.3} G({\alpha}} \def\bfalp{{\boldsymbol \alpha})=h(c_1{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(c_2{\alpha}} \def\bfalp{{\boldsymbol \alpha})\prod_{j=3}^mg(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha}),\quad H({\beta}} \def\bfbet{{\boldsymbol \beta})=h(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_2{\beta}} \def\bfbet{{\boldsymbol \beta})\prod_{k=3}^ng(d_k{\beta}} \def\bfbet{{\boldsymbol \beta}). \end{equation} Here we adopt the convention that an empty product is equal to unity. When $\grB\subseteq [0,1)^2$ is measurable, we define \begin{equation}\label{2.4} N(B;\grB)=\iint_\grB F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})G({\alpha}} \def\bfalp{{\boldsymbol \alpha})H({\beta}} \def\bfbet{{\boldsymbol \beta}){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}. \end{equation} Then, by orthogonality, one finds that \begin{equation}\label{2.5} N(B)\ge N(B;[0,1)^2). \end{equation}
At this stage we introduce the primary Hardy-Littlewood dissection. We take the major arcs $\grM$ to be the union of the intervals
$$\grM(q,a)=\{{\alpha}} \def\bfalp{{\boldsymbol \alpha}\in [0,1):|q{\alpha}} \def\bfalp{{\boldsymbol \alpha}-a|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{-9/4}\},$$ with $0\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec a\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec q\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{3/4}$ and $(a,q)=1$. The corresponding set of minor arcs ${\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}$ is defined by putting ${\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}=[0,1)\setminus \grM$. In addition, we define a two-dimensional Hardy-Littlewood dissection as follows. With an eye towards concision in future sections, we put $$L=\log B,\quad \mathscr{L}=\log L\quad \text{and}\quad Q=L^{1/100}.$$ We then define the narrow major arcs $\grN$ to be the union of the boxes
$$\grN(q,a,b)=\{ ({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in [0,1)^2:\text{$|{\alpha}} \def\bfalp{{\boldsymbol \alpha}-a/q|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec QB^{-3}$ and $|{\beta}} \def\bfbet{{\boldsymbol \beta}-b/q|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec QB^{-3}$}\},$$ with $0\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec a,b\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec q\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec Q$ and $(a,b,q)=1$. The complementary set of minor arcs is ${\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}=[0,1)^2\setminus \grN$. Finally, we write \begin{equation}\label{2.6} \grK=(\grM\times \grM)\setminus \grN. \end{equation} We regard sets of major and minor arcs throughout as subsets of ${\mathbb R}}\def\dbT{{\mathbb T}/{\mathbb Z}}\def\dbQ{{\mathbb Q}$, or the appropriate higher dimensional analogue of the latter. Thus, for example, when we write ${\gamma}} \def\Gam{{\Gamma}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}$, then we are implicitly asserting that ${\gamma}} \def\Gam{{\Gamma}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}+{\mathbb Z}}\def\dbQ{{\mathbb Q}$.\par
Our strategy for obtaining a lower bound for $N(B;[0,1)^2)$ employs the Hardy-Littlewood method, of course, though in a somewhat unconventional manner. We begin by analysing the contribution of the narrow set of major arcs $\grN$.
\begin{lemma}\label{lemma2.1} Suppose that the system (\ref{1.1}) admits non-singular $p$-adic solutions for each prime number $p$. Then for systems of type A, B, C and D, one has $N(B;\grN)\gg B^{s-6}$. \end{lemma}
\begin{proof} We begin by defining the Gauss sum $$S(q,a)=\sum_{r=1}^qe(ar^3/q),$$ and then introduce the expression \begin{equation}\label{2.7} A(q)=\underset{(u,v,q)=1}{\sum_{u=1}^q\sum_{v=1}^q}\,T(q,u,v), \end{equation} where $$T(q,u,v)=\prod_{i=1}^lS(q,a_iu+b_iv)\prod_{j=1}^mS(q,c_ju)\prod_{k=1}^nS(q,d_kv).$$ In addition, we write $$v({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=\int_{\nu B}^Be({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} {\gamma}} \def\Gam{{\Gamma}^3){\,{\rm d}}{\gamma}} \def\Gam{{\Gamma} \quad \text{and}\quad w({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=\int_{-B}^Be({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} {\gamma}} \def\Gam{{\Gamma}^3){\,{\rm d}}{\gamma}} \def\Gam{{\Gamma} ,$$ and then put $$V(\xi,{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})=V_F(\xi,{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})V_G(\xi)V_H({\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet}),$$ where $$V_F(\xi,{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})=w(a_1\xi+b_1{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})w(a_2\xi+b_2{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})\prod_{i=3}^lv(a_i\xi+b_i{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet}),$$ $$V_G(\xi)=w(c_1\xi)w(c_2\xi)\prod_{j=3}^mv(c_j\xi),\quad V_H({\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})=w(d_1{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})w(d_2{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})\prod_{k=3}^nv(d_k{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet}).$$ Finally, we define 
$${\mathfrak J} (X)=\iint_{\mathscr{B}} \def\calBtil{{\widetilde \calB}(X)}V(\xi,{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet}){\,{\rm d}}\xi{\,{\rm d}}{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet} \quad \text{and}\quad \grS(X)=\sum_{1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec q\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec X}A(q),$$ in which we have written $\mathscr{B}} \def\calBtil{{\widetilde \calB}(X)$ for the box $[-XB^{-3},XB^{-3}]^2$. Then by following the argument of \cite[\S7]{BW2007a} leading to \cite[equation (7.8)]{BW2007a}, one finds that there exists a positive constant $C$ with the property that \begin{equation}\label{2.8} N(B;\grN)-C\grS(Q){\mathfrak J}(Q)\ll B^{s-6}L^{-1/4}. \end{equation}
\par In order to estimate the truncated singular series $\grS(Q)$, we begin by following the argument of the proof of the estimate \cite[(7.14)]{BW2007a} presented on page 890 of the latter paper. In the present context we find that there is an integer $t\ge 3$, and a non-zero integer $\Del$ depending on ${\mathbf a}$, ${\mathbf b}$, ${\mathbf c}$, ${\mathbf d}} \def\bfe{{\mathbf e}$, such that
$$A(q)\ll q^{-s/3}\sum_{\substack{v_1,\ldots,v_t\\ v_1\ldots v_t|\Del q}}\frac{q^2}{v_1\ldots v_t}(v_1^{r_1}\ldots v_t^{r_t})^{1/3},$$ in which $r_1,\ldots,r_t$ are positive integers satisfying $r_1+\ldots +r_t=s$, and further $\max_ir_i=4$ when $s=11$, and $\max_ir_i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec 5$ when $s=12$. Consequently, one has $A(q)\ll q^{\varepsilon-4/3}$. An inspection of the proof of \cite[Lemma 12]{BW2007a}, noting \cite[equation (7.14)]{BW2007a}, reveals that $\grS=\underset{X\rightarrow \infty}{\lim}\grS(X)$ exists, that $\grS-\grS(X)\ll X^{-1/4}$, and thus $\grS(Q)\gg 1$. A pedestrian modification of the proof of \cite[Lemma 13]{BW2007a}, on the other hand, reveals that in the present context one has ${\mathfrak J}(Q)\gg B^{s-6}$. In combination with the lower bound $\grS(Q)\gg 1$ just obtained, the conclusion of the lemma is evident from the relation (\ref{2.8}). \end{proof}
A detailed account of the next step in our analysis, a comparison of $N(B;\grN)$ with $N(B;\grM\times \grM)$, depends on the particular case at hand, and this we defer to the following four sections. However, we take the opportunity now to record several auxiliary estimates of significance in the discussion to come. We begin by considering certain major arc integrals.
\begin{lemma}\label{lemma2.2} Let $a$ be a fixed non-zero integer, and let $b$ be a non-zero rational number. Then one has \begin{equation}\label{2.9}
\sup_{{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}\in {\mathbb R}}\def\dbT{{\mathbb T}}\int_\grM |g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}+{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{1+\varepsilon}, \end{equation} and when ${\delta}} \def\Del{{\Delta}>0$, \begin{equation}\label{2.10}
\sup_{{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}\in {\mathbb R}}\def\dbT{{\mathbb T}}\int_\grM |g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^{2+{\delta}} \def\Del{{\Delta}}|h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}+{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{1+{\delta}} \def\Del{{\Delta}}. \end{equation} \end{lemma}
\begin{proof} The upper bound (\ref{2.10}) is immediate from \cite[Lemma 9]{BW2007a}. By taking ${\delta}} \def\Del{{\Delta}=0$ in the argument of the proof of the latter, meanwhile, one obtains (\ref{2.9}) (see also \cite[Lemma 3.4]{ARTS1}). \end{proof}
As an immediate consequence of this lemma, we obtain major arc estimates for $G({\alpha}} \def\bfalp{{\boldsymbol \alpha})$ and $H({\beta}} \def\bfbet{{\boldsymbol \beta})$.
\begin{lemma}\label{lemma2.3} For systems of type A and C, one has
$$\int_\grM |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{m-3+\varepsilon} \quad \text{and}\quad \int_\grM |H({\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{n-3+\varepsilon}.$$ The former estimate holds also for systems of type B and D. \end{lemma}
\begin{proof} For systems of type A, B, C and D, one has $m\ge 4$. Thus, by applying a trivial estimate for $g({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})$ in combination with Schwarz's inequality and Lemma \ref{lemma2.2}, one obtains
$$\int_\grM|G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec g(0)^{m-4}\prod_{i=1}^2\Bigl( \int_\grM |g(c_{2+i}{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr)^{1/2}\ll B^{m-4}(B^{1+\varepsilon}).$$ For systems of type A and C, meanwhile, one has $n\ge 4$. Hence, by following a similar argument to that just described, with $H({\beta}} \def\bfbet{{\boldsymbol \beta})$ in place of $G({\alpha}} \def\bfalp{{\boldsymbol \alpha})$, the second assertion of the lemma is confirmed in like manner. \end{proof}
\par We finish by recording two mean value estimates of some generality. In this context, when $r\ge t\ge 3$ and ${\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_1,\ldots,{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_r$ are fixed non-zero integers, we write $$E_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=h({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_1{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})h({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_2{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})\prod_{i=3}^tg({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_i{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}).$$
\begin{lemma}\label{lemma2.4} One has
$$\int_0^1|E_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll_\bflam B^{2t-11/4-9\tau}\quad (3\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec t\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec r),$$ and
$$\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |E_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll_\bflam B^{2t-13/4-8\tau}\quad (4\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec t\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec r).$$ \end{lemma}
\begin{proof} An application of Schwarz's inequality, combined with the trivial estimate $|g({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_i{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B$, reveals that
$$\int_0^1|E_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{2t-6}\prod_{i=1}^2\Bigl( \int_0^1|g({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_3{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^2h({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_i{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^4|{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \Bigr)^{1/2}.$$ From here, the first estimate of the lemma follows from (\ref{2.1}). In order to confirm the second estimate, we begin by noting that a modified version of Weyl's inequality (see \cite[Lemma 1]{Vau1986}) supplies the bound \begin{equation}\label{2.11}
\sup_{{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|g({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_t{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\ll B^{3/4+\varepsilon}. \end{equation} Thus, on utilising the mean value estimate just obtained, we deduce that
\begin{align*}\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |E_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} &\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \sup_{{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\in{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|g({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_t{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\Bigr)^2\int_0^1|E_{t-1}({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\\ &\ll (B^{3/4+\varepsilon})^2B^{2(t-1)-11/4-9\tau}, \end{align*} and the second estimate of the lemma follows. \end{proof}
\section{Systems of type A} The strategy employed in our proof of Theorem \ref{theorem1.1} is somewhat circuitous, and we illustrate ideas in this section by concentrating on systems of type A. We may assume for the present, therefore, that $l=3$ and $m\ge n\ge 4$. Our strategy for obtaining a lower bound for the number of solutions of the system (\ref{1.1}) counted by $N(B)$ involves constraining the first block of $l$ variables. Restricting in such a manner that the corresponding diagonal forms $a_1x_1^3+\ldots +a_lx_l^3$ and $b_1x_1^3+\ldots +b_lx_l^3$ behave essentially as expected so far as multiplicity of representations is concerned, we obtain a modified counting function $N_0(B)$ whose behaviour is mollified by this arithmetic smoothing. In Lemma \ref{lemma3.3} we show that the major arc contribution within $N_0(B)$ is close to the corresponding contribution within $N(B)$. By means of a pruning operation discussed in Lemma \ref{lemma3.1}, this contribution is seen via Lemma \ref{lemma2.1} to have order of growth $B^{s-6}$. Meanwhile, the minor arc contribution within $N_0(B)$ exploits the available smoothing by means of Bessel's inequality, and is described in mixed form in Lemma \ref{lemma3.4}, and pure minor arc form in Lemma \ref{lemma3.5}. In this way, we aim to show that $$N(B)\ge N_0(B)\ge N(B;\grN)+o(B^{s-6})\gg B^{s-6},$$ and thereby establish Theorem \ref{theorem1.1}.\par
Our first step in the above plan is to prune from the set of arcs $\grM\times \grM$ to the narrow major arcs $\grN$. We apply Lemmata \ref{lemma2.2} and \ref{lemma2.3} in order to estimate the contribution of the set of arcs $\grK$ defined in (\ref{2.6}), a set we divide into the two subsets $$\grK_0=\{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in \grK:a_l{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_l{\beta}} \def\bfbet{{\boldsymbol \beta}\in \grM\}$$ and $$\grK_1=\{ ({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in \grK:a_l{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_l{\beta}} \def\bfbet{{\boldsymbol \beta}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}\}.$$
\begin{lemma}\label{lemma3.1} For systems of type A, one has $N(B;\grK)\ll B^{s-6}\mathscr{L}^{-1}$. \end{lemma}
\begin{proof} We first consider the contribution of the set $\grK_1$. Observe that as a consequence of the modified version of Weyl's inequality (\ref{2.11}), one has
$$\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in \grK_1}|F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})|\ll B^{l-1}\sup_{a_l{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_l{\beta}} \def\bfbet{{\boldsymbol \beta}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|g(a_l{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_l{\beta}} \def\bfbet{{\boldsymbol \beta})|\ll B^{l-1/4+\varepsilon}.$$ Then we deduce from Lemma \ref{lemma2.3} that \begin{align}
N(B;\grK_1)&\ll \Bigl(\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in \grK_1}|F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})|\Bigr)\int_\grM |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\int_\grM|H({\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \notag \\ &\ll B^{l-1/4+\varepsilon}(B^{m-3+\varepsilon})(B^{n-3+\varepsilon})\ll B^{s-49/8}.\label{3.1} \end{align}
\par Turning our attention next to the contribution from the set $\grK_0$, we begin by considering the functions \begin{align}
\Phi_G({\alpha}} \def\bfalp{{\boldsymbol \alpha})&=|g(c_3{\alpha}} \def\bfalp{{\boldsymbol \alpha})g(c_4{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{5/4}|h(c_1{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(c_2{\alpha}} \def\bfalp{{\boldsymbol \alpha})|,\label{3.2}\\
\Phi_H({\beta}} \def\bfbet{{\boldsymbol \beta})&=|g(d_3{\beta}} \def\bfbet{{\boldsymbol \beta})g(d_4{\beta}} \def\bfbet{{\boldsymbol \beta})|^{5/4}|h(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_2{\beta}} \def\bfbet{{\boldsymbol \beta})|,\notag \end{align} and their mean values $$I_G=\int_\grM \Phi_G({\alpha}} \def\bfalp{{\boldsymbol \alpha}){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \quad \text{and}\quad I_H=\int_\grM\Phi_H({\beta}} \def\bfbet{{\boldsymbol \beta}){\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} .$$ With an application of Schwarz's inequality mirroring that employed in the proof of Lemma \ref{lemma2.3}, followed by recourse to Lemma \ref{lemma2.2}, one obtains \begin{equation}\label{3.3}
I_G\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \prod_{i=1}^2\Bigl( \int_\grM |g(c_{2+i}{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{5/2}|h(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr)^{1/2}\ll B^{3/2}, \end{equation} and a symmetric argument yields $I_H\ll B^{3/2}$. Writing \begin{equation}\label{3.4} J_0=\int_\grM\int_\grM\Phi_G({\alpha}} \def\bfalp{{\boldsymbol \alpha})\Phi_H({\beta}} \def\bfbet{{\boldsymbol \beta}){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} , \end{equation} therefore, we deduce that $J_0=I_GI_H\ll B^3$.\par
Next, when $i\in \{1,2\}$, $k\in \{l-1,l\}$ and $E$ is either $G$ or $H$, define \begin{equation}\label{3.5}
J_{i,k}^E=\iint_{\grK_0}|g(a_k{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_k{\beta}} \def\bfbet{{\boldsymbol \beta})|^{5/2}|\Phi_E({\alpha}} \def\bfalp{{\boldsymbol \alpha})h(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})^2|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} . \end{equation} We note that at present we require these integrals only when $k=l$, though in \S4 we make use of them also when $k=l-1$. By means of a change of variable, one discerns from (\ref{3.3}) and Lemma \ref{lemma2.2} that \begin{align}
J_{i,l}^G&\ll \int_\grM \Phi_G({\alpha}} \def\bfalp{{\boldsymbol \alpha})\sup_{{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}\in {\mathbb R}}\def\dbT{{\mathbb T}}\int_\grM|g({\gamma}} \def\Gam{{\Gamma})|^{5/2}|h(b_ib_l^{-1}{\gamma}} \def\Gam{{\Gamma}+{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda})|^2{\,{\rm d}}{\gamma}} \def\Gam{{\Gamma}{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \notag \\ &\ll B^{3/2}I_G\ll B^3,\label{3.6} \end{align} and in an analogous manner one obtains $J_{i,l}^H\ll B^3$. Finally, we put
$$\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=\prod_{i=1}^2|h(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(d_i{\beta}} \def\bfbet{{\boldsymbol \beta})h(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})^3|.$$ Then, as a consequence of the argument of the proof of \cite[Lemma 10]{BW2007a} (see in particular the display preceding \cite[equation (6.14)]{BW2007a}), one has \begin{equation}\label{3.7} \sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\ll B^{10}Q^{-1/10}. \end{equation}
\par By applying H\"older's inequality in combination with the estimates assembled above, and applying a trivial bound for $g({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})$, we conclude that \begin{align*} N(B;\grK_0)&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec g(0)^{s-11}\Bigl(\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\Bigr)^{1/5}(J_{1,l}^GJ_{2,l}^GJ_{1,l}^HJ_{2,l}^H)^{1/10}J_0^{3/5}\\ &\ll B^{s-11}(B^{10}Q^{-1/10})^{1/5}(B^{12})^{1/10}(B^3)^{3/5}=B^{s-6}Q^{-1/50}.\end{align*} On recalling (\ref{3.1}), therefore, we obtain the upper bound $$N(B;\grK)=N(B;\grK_0)+N(B;\grK_1)\ll B^{s-6}Q^{-1/50},$$ and this completes the proof of the lemma. \end{proof}
Thus far our argument presents the appearance of a conventional application of the Hardy-Littlewood method. It is at this point that unconventional elements are introduced. When $u,v\in {\mathbb Z}}\def\dbQ{{\mathbb Q}$, denote by $\rho(u,v)$ the number of integral solutions of the system \begin{align} a_1y_1^3+\ldots +a_ly_l^3&=u,\label{3.8}\\ b_1y_1^3+\ldots +b_ly_l^3&=v,\label{3.9} \end{align} with $y_1,y_2\in \mathscr{A}} \def\calAbar{{\overline \calA}} \def\calAtil{{\widetilde \calA}_\eta^*(B)$ and $\nu B<y_i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B$ $(3\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec l)$. In addition, write $\rho_1(u)$ for the number of integral solutions of (\ref{3.8}), and $\rho_2(v)$ for the number of integral solutions of (\ref{3.9}), subject to the same conditions on ${\mathbf y}$. Then if we put
$$\Ome=\sum_{i=1}^l(|a_i|+|b_i|)\quad \text{and}\quad \grX=[-\Ome B^3,\Ome B^3]\cap {\mathbb Z}}\def\dbQ{{\mathbb Q},$$ one finds that \begin{equation}\label{3.10} \rho_1(u)=\sum_{v\in \grX}\rho(u,v)\quad \text{and}\quad \rho_2(v)=\sum_{u\in \grX}\rho(u,v). \end{equation} The arithmetic smoothing to which we alluded in the introduction of this section is achieved by dividing the set $\grX^2$ into three subsets, calibrated by a truncation parameter $T$. We define the sets $\grX_i=\grX_i(T)$ for $i=0,1,2$ by taking \begin{align} \grX_0(T)&=\{ (u,v)\in \grX^2:\text{$\rho_1(u)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec T$ and $\rho_2(v)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec T$}\},\notag \\ \grX_1(T)&=\{ (u,v)\in \grX^2:\text{$\rho_1(u)>T$ and $\rho_2(v)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec T$}\},\label{3.11}\\ \grX_2(T)&=\{ (u,v)\in \grX^2:\rho_2(v)>T\},\notag \end{align} so that \begin{equation}\label{3.12} \grX_0(T)=\grX^2\setminus (\grX_1(T)\cup \grX_2(T)). \end{equation} For systems of type A, we fix the truncation parameter to be $T=B^{l-11/4}$.\par
At this point we pause to establish an auxiliary estimate for the quantity $$\Xi_i=\sum_{(u,v)\in \grX_i}\rho(u,v)\quad (i=1,2).$$
\begin{lemma}\label{lemma3.2} For systems of type A, one has $\Xi_i\ll B^{l-9\tau}$ $(i=1,2)$. \end{lemma}
\begin{proof} Observe first that in view of (\ref{3.10}) we have $$\Xi_1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \sum_{\substack{u\in \grX\\ \rho_1(u)>B^{l-11/4}}}\sum_{v\in \grX}\rho(u,v)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{11/4-l}\sum_{u\in \grX}\rho_1(u)^2.$$ On considering the underlying Diophantine equation and applying Lemma \ref{lemma2.4}, one sees that \begin{equation}\label{3.13}
\sum_{u\in \grX}\rho_1(u)^2=\int_0^1|F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\ll B^{2l-11/4-9\tau}. \end{equation} We therefore conclude that when $i=1$, one has $\Xi_i\ll B^{l-9\tau}$, and a symmetrical variant of this argument delivers the same bound when $i=2$. \end{proof}
Next, when $\grC$ and $\grD$ are measurable subsets of $[0,1)$, we put $$R_1(u;\grC)=\int_\grC G({\alpha}} \def\bfalp{{\boldsymbol \alpha})e({\alpha}} \def\bfalp{{\boldsymbol \alpha} u){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \quad \text{and}\quad R_2(v;\grD)=\int_\grD H({\beta}} \def\bfbet{{\boldsymbol \beta})e({\beta}} \def\bfbet{{\boldsymbol \beta} v){\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} .$$ One then obtains \begin{equation}\label{3.14} N(B;\grC\times \grD)=\sum_{(u,v)\in \grX^2}\rho(u,v)R_1(u;\grC)R_2(v;\grD). \end{equation} Writing \begin{equation}\label{3.15} N_0(B;\grC,\grD)=\sum_{(u,v)\in \grX_0}\rho(u,v)R_1(u;\grC)R_2(v;\grD), \end{equation} the starting point for our analysis is the lower bound \begin{equation}\label{3.16} N(B)\ge N_0(B;[0,1),[0,1)). \end{equation} Our Hardy-Littlewood dissection is now executed by disassembling the set $[0,1)\times [0,1)$ into the four pieces $$\grM\times \grM,\quad \grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},\quad {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}\times \grM\quad \text{and}\quad {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}.$$ We examine each of these subsets in turn.
\begin{lemma}\label{lemma3.3} For systems of type A, one has $$N_0(B;\grM,\grM)-N(B;\grM\times \grM)\ll B^{s-6-\tau}.$$ \end{lemma}
\begin{proof} By applying the triangle inequality in combination with Lemma \ref{lemma2.3}, we find that \begin{equation}\label{3.17} R_1(u;\grM)\ll B^{m-3+\varepsilon}\quad \text{and}\quad R_2(v;\grM)\ll B^{n-3+\varepsilon}. \end{equation} Consequently, on recalling (\ref{3.12}), (\ref{3.14}) and (\ref{3.15}), and then applying Lemma \ref{lemma3.2}, we deduce that \begin{align*} N(B;\grM\times \grM)-N_0(B;\grM,\grM)&=\sum_{(u,v)\in \grX_1\cup \grX_2}\rho(u,v)R_1(u;\grM)R_2(v;\grM)\notag \\ &\ll B^{s-l-6+\varepsilon}(\Xi_1+\Xi_2)\ll B^{s-6-9\tau+\varepsilon}. \end{align*} This completes the proof of the lemma. \end{proof}
\begin{lemma}\label{lemma3.4} For systems of type A, one has $$N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{s-6-\tau}\quad \text{and}\quad N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},\grM)\ll B^{s-6-\tau}.$$ \end{lemma}
\begin{proof} Applying the first upper bound of (\ref{3.17}) in concert with (\ref{3.10}), one sees that \begin{align}
N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})&\ll B^{m-3+\varepsilon}\sum_{(u,v)\in \grX_0}\rho(u,v)|R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|\notag \\
&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{m-3+\varepsilon}\sum_{v\in \grX}\rho_2(v)|R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|.\label{3.18} \end{align} On the one hand, by applying Bessel's inequality together with the first estimate of Lemma \ref{lemma2.4}, one has
$$\sum_{v\in \grX}\rho_2(v)^2=\int_0^1|F(0,{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{2l-11/4-9\tau}.$$ On the other hand, Bessel's inequality in combination with the second estimate of Lemma \ref{lemma2.4} yields the bound
$$\sum_{v\in \grX}|R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\ll B^{2n-13/4-8\tau}.$$ Consequently, by applying Cauchy's inequality to (\ref{3.18}), we obtain
$$N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{m-3+\varepsilon}\Bigl( \sum_{v\in \grX}\rho_2(v)^2\Bigr)^{1/2}\Bigl( \sum_{v\in \grX}|R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\Bigr)^{1/2}\ll B^{s-6-8\tau}.$$ A symmetrical argument shows similarly that $N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},\grM)\ll B^{s-6-8\tau}$, and thus the proof of the lemma is complete. \end{proof}
\begin{lemma}\label{lemma3.5} For systems of type A, one has $N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{s-6-\tau}$. \end{lemma}
\begin{proof} Recall that for systems of type A, we take $T=B^{l-11/4}$ for the truncation parameter. First applying Cauchy's inequality, and then applying (\ref{3.10}) and (\ref{3.11}), therefore, in our first step we deduce that \begin{align*}
N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \sum_{(u,v)\in \grX_0}\rho(u,v)|R_1(u;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\Bigr)^{1/2}\Bigl( \sum_{(u,v)\in \grX_0}\rho(u,v)|R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\Bigr)^{1/2}\\
&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \sum_{\substack{u\in \grX\\ \rho_1(u)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-11/4}}}\!\!\!\!\!\!\rho_1(u)|R_1(u;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\Bigr)^{1/2}\Bigl( \sum_{\substack{v\in \grX\\ \rho_2(v)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-11/4}}}\!\!\!\!\!\!\rho_2(v)|R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\Bigr)^{1/2}\\
&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-11/4}\Bigl( \sum_{u\in \grX}|R_1(u;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\Bigr)^{1/2}\Bigl( \sum_{v\in \grX}|R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\Bigr)^{1/2}. \end{align*} Next, applying Bessel's inequality together with Lemma \ref{lemma2.4}, we conclude that \begin{align*}
N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-11/4}\Bigl( \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr)^{1/2}\Bigl( \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)^{1/2}\\ &\ll B^{l-11/4}(B^{2m-13/4-8\tau})^{1/2}(B^{2n-13/4-8\tau})^{1/2}\ll B^{s-6-8\tau}. \end{align*} This completes the proof of the lemma. \end{proof}
We now come to the crescendo of our argument for systems of type A. Combining the upper bounds provided by Lemmata \ref{lemma3.3}, \ref{lemma3.4} and \ref{lemma3.5}, we deduce from (\ref{3.16}) that \begin{align*} N(B)&\ge N_0(B;\grM,\grM)+N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})+N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},\grM)+N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\\ &=N(B;\grM\times \grM)+O(B^{s-6-\tau}). \end{align*} Hence, in view of (\ref{2.6}), we conclude from Lemmata \ref{lemma2.1} and \ref{lemma3.1} that $$N(B)\ge N(B;\grN)+O(B^{s-6}\mathscr{L}^{-1})\gg B^{s-6}.$$ This completes the proof of Theorem \ref{theorem1.1} for systems of type A.
\section{Systems of type B} The proof of Theorem \ref{theorem1.1} in situations wherein $n=3$ is complicated by the relative inferiority of the minor arc bounds available for $H({\beta}} \def\bfbet{{\boldsymbol \beta})$ in mean square. Our argument for systems of type B, in which $l\ge 4$, $m\ge 4$ and $n\ge 3$, though modelled on that of the previous section, must therefore be modified in order to exploit better the exceptional nature of elements in the sets $\grX_1$ and $\grX_2$. Since Lemma \ref{lemma2.1} remains valid, and shows that $N(B;\grN)\gg B^{s-6}$, our first goal is to show that the conclusion of Lemma \ref{lemma3.1} remains valid in the present circumstances.\par
\begin{lemma}\label{lemma4.1} For systems of type B, one has $N(B;\grK)\ll B^{s-6}\mathscr{L}^{-1}$. \end{lemma}
\begin{proof} We begin by deriving an auxiliary mean value estimate. When $j\in \{l-1,l\}$, define $$F_j({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=h(a_1{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(a_2{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_2{\beta}} \def\bfbet{{\boldsymbol \beta})\prod_{\substack{3\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec l\\ i\ne j}}g(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta}).$$ Then as a consequence of Schwarz's inequality, one has
$$\int_0^1|F_j({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_0^1|F_j({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/2}.$$ By orthogonality, the first integral on the right hand side here is bounded above by the number of solutions of a Diophantine equation, and so by applying Lemma \ref{lemma2.4} we obtain
$$\int_0^1|F_j({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \int_0^1|F_j(0,{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{2(l-1)-11/4-9\tau}.$$ The second integral on the right hand side may also be estimated via Lemma \ref{lemma2.4}, so that
$$\int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{2n-11/4-9\tau}.$$ We therefore deduce that
$$\int_0^1|F_j({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{l+n-15/4-9\tau}.$$
\par Our next step is to prune the set $\grK$, the better to exploit available major arc estimates. Define \begin{align*} \grK_0&=\{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in\grK:\text{$a_{l-1}{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_{l-1}{\beta}} \def\bfbet{{\boldsymbol \beta}\in \grM$ and $a_l{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_l{\beta}} \def\bfbet{{\boldsymbol \beta}\in \grM$}\},\\ {\mathfrak k}}\def\grK{{\mathfrak K}}\def\grL{{\mathfrak L}}\def\grp{{\mathfrak p}_i({\alpha}} \def\bfalp{{\boldsymbol \alpha})&=\{ {\beta}} \def\bfbet{{\boldsymbol \beta}\in [0,1):a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}\}\quad (i=l-1,l). \end{align*} Then from the modified version of Weyl's inequality (\ref{2.11}), when $i\in \{l-1,l\}$ one sees that
$$\sup_{{\beta}} \def\bfbet{{\boldsymbol \beta}\in {\mathfrak k}}\def\grK{{\mathfrak K}}\def\grL{{\mathfrak L}}\def\grp{{\mathfrak p}_i({\alpha}} \def\bfalp{{\boldsymbol \alpha})}|g(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \sup_{{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|g({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\ll B^{3/4+\varepsilon}.$$ Uniformly in ${\alpha}} \def\bfalp{{\boldsymbol \alpha}$, therefore, one has the estimate \begin{align*}
\int_{{\mathfrak k}}\def\grK{{\mathfrak K}}\def\grL{{\mathfrak L}}\def\grp{{\mathfrak p}_i({\alpha}} \def\bfalp{{\boldsymbol \alpha})}|F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl(\sup_{{\beta}} \def\bfbet{{\boldsymbol \beta}\in{\mathfrak k}}\def\grK{{\mathfrak K}}\def\grL{{\mathfrak L}}\def\grp{{\mathfrak p}_i({\alpha}} \def\bfalp{{\boldsymbol \alpha})}|g(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|\Bigr)\int_0^1|F_i({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\\ &\ll B^{3/4+\varepsilon}(B^{l+n-15/4-9\tau})\ll B^{s-m-3-8\tau}. \end{align*} Consequently, on recalling Lemma \ref{lemma2.3}, one discerns the upper bound \begin{align*}
N(B;\grK\setminus \grK_0)&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \sum_{i=l-1}^l\int_\grM |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|\int_{{\mathfrak k}}\def\grK{{\mathfrak K}}\def\grL{{\mathfrak L}}\def\grp{{\mathfrak p}_i({\alpha}} \def\bfalp{{\boldsymbol \alpha})}|F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \\
&\ll B^{s-m-3-8\tau}\int_\grM |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{s-6-\tau}. \end{align*} In this way, we deliver the interim conclusion \begin{equation}\label{4.1} N(B;\grK)=N(B;\grK_0)+O(B^{s-6-\tau}). \end{equation}
\par We now imitate the argument of the proof of Lemma \ref{lemma3.1}, employing notation from the latter proof with some minor modifications. We define $\Phi_G({\alpha}} \def\bfalp{{\boldsymbol \alpha})$ as in (\ref{3.2}), and modify the definition of $\Phi_H({\beta}} \def\bfbet{{\boldsymbol \beta})$ by putting
$$\Phi_H({\beta}} \def\bfbet{{\boldsymbol \beta})=|g(d_n{\beta}} \def\bfbet{{\boldsymbol \beta})|^{5/2}|h(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_2{\beta}} \def\bfbet{{\boldsymbol \beta})|.$$ One finds with little effort that the estimates $I_G\ll B^{3/2}$ and $I_H\ll B^{3/2}$ remain valid in present circumstances. Defining $J_{i,k}^G$ and $J_{i,k}^H$ as in (\ref{3.5}), though noting our revised definition of the set $\grK_0$, one finds just as in the argument leading to (\ref{3.6}) that when $i\in\{1,2\}$ and $k\in \{l-1,l\}$, one has $J_{i,k}^G\ll B^3$ and $J_{i,k}^H\ll B^3$. In the current situation, we modify the definition of $\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})$ by putting
$$\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=\prod_{i=1}^2|h(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(d_i{\beta}} \def\bfbet{{\boldsymbol \beta})^3h(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|.$$ The reader will have no difficulty in confirming that the upper bound (\ref{3.7}) remains valid. Consequently, an application of H\"older's inequality reveals that $$N(B;\grK_0)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec g(0)^{s-11}\Bigl(\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in{\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\Bigr)^{1/5} (J_{1,l}^GJ_{2,l-1}^G)^{3/10}(J_{1,l}^HJ_{2,l-1}^H)^{1/10}J_0^{1/5},$$ in which $J_0$ is defined by (\ref{3.4}). The upper bound $J_0\ll B^3$, combined with our earlier estimates, therefore leads to the asymptotic relation $$N(B;\grK_0)\ll B^{s-11}(B^{10}Q^{-1/10})^{1/5}(B^6)^{3/10}(B^6)^{1/10}(B^3)^{1/5}=B^{s-6}Q^{-1/50}.$$ The conclusion of the lemma is now confirmed by recalling (\ref{4.1}). \end{proof}
At this stage of our discussion we introduce unconventional elements paralleling those introduced in the preambles to Lemmata \ref{lemma3.2} and \ref{lemma3.3}, employing the same notation throughout. We have only to record that for systems of type B, the truncation parameter is fixed to be $T=B^{l-3+\tau}$. Before launching the Hardy-Littlewood dissection proper, we pause to establish an auxiliary estimate for the quantity $$\Zet_i=\sum_{v\in \grX}\Biggl( \sum_{\substack{u\in \grX\\ (u,v)\in \grX_i}}\rho(u,v)\Biggr)^2\quad (i=1,2).$$
\begin{lemma}\label{lemma4.2} For systems of type B, one has $\Zet_i\ll B^{2l-13/4-8\tau}$ $(i=1,2)$. \end{lemma}
\begin{proof} We first seek to establish the lemma in the case $i=1$. Suppose that $u\in \grX$ is an integer for which $\rho_1(u)>B^{l-3+\tau}$. Then one has $$\int_0^1F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)e(-u{\alpha}} \def\bfalp{{\boldsymbol \alpha}){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} =\rho_1(u)>B^{l-3+\tau}.$$ For systems of type B one has $l\ge 4$. As in the argument of the proof of Lemma \ref{lemma2.3}, one therefore finds from Lemma \ref{lemma2.2} that
$$\Bigl|\int_\grM F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)e(-u{\alpha}} \def\bfalp{{\boldsymbol \alpha}){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \int_\grM |F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{l-3+\varepsilon},$$ whence
$$\Bigl|\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)e(-u{\alpha}} \def\bfalp{{\boldsymbol \alpha}){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr|\ge \tfrac{1}{2}\rho_1(u).$$ In this way, one obtains the upper bound \begin{align*} \sum_{(u,v)\in \grX_1}\rho(u,v)&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \sum_{\substack{u\in \grX\\ \rho_1(u)>B^{l-3+\tau}}}\sum_{v\in \grX}\rho(u,v)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{3-l-\tau}\sum_{\substack{u\in\grX\\ \rho_1(u)>B^{l-3+\tau}}}\rho_1(u)^2\\
&\ll B^{3-l-\tau}\sum_{u\in\grX}\Bigl| \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)e(-u{\alpha}} \def\bfalp{{\boldsymbol \alpha}){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr|^2. \end{align*} From here, an application of Bessel's inequality in combination with the second bound of Lemma \ref{lemma2.4} yields
$$\sum_{(u,v)\in \grX_1}\rho(u,v)\ll B^{3-l-\tau}\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{3-l-\tau}(B^{2l-13/4-8\tau}).$$ However, when $(u,v)\in \grX_1$ one has $$\sum_{\substack{u\in \grX\\ (u,v)\in \grX_1}}\rho(u,v)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \rho_2(v)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3+\tau},$$ and so one arrives at the upper bound $$\sum_{v\in \grX}\Bigl( \sum_{\substack{u\in \grX\\ (u,v)\in \grX_1}}\rho(u,v)\Bigr)^2\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3+\tau}\sum_{(u,v)\in \grX_1}\rho(u,v)\ll B^{l-3+\tau}(B^{l-1/4-9\tau}).$$ The conclusion of the lemma has therefore been established when $i=1$.\par
When $i=2$, we follow a similar though simpler path. Thus, one obtains \begin{align*}
\sum_{v\in \grX}\Bigl( \sum_{\substack{u\in \grX\\ (u,v)\in \grX_2}}\rho(u,v)\Bigr)^2&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \sum_{\substack{v\in \grX\\ \rho_2(v)>B^{l-3+\tau}}}\rho_2(v)^2\ll \sum_{v\in \grX}\Bigl| \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} F(0,{\beta}} \def\bfbet{{\boldsymbol \beta})e(-{\beta}} \def\bfbet{{\boldsymbol \beta} v){\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr|^2\\
&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |F(0,{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{2l-13/4-8\tau}. \end{align*} This completes the proof of the lemma in the case $i=2$. \end{proof}
In the present circumstances, our Hardy-Littlewood dissection proceeds by disassembling the set $[0,1)\times [0,1)$ into the three pieces $$\grM\times \grM, \quad {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}\times [0,1)\quad \text{and}\quad \grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}.$$ We analyse these subsets in turn by means of three lemmata.
\begin{lemma}\label{lemma4.3} For systems of type B, one has $$N_0(B;\grM,\grM)-N(B;\grM\times \grM)\ll B^{s-6-\tau}.$$ \end{lemma}
\begin{proof} We begin by deriving an auxiliary estimate for the quantity
$${\Upsilon}} \def\Upshat{{\widehat \Ups}_i=\sum_{(u,v)\in \grX_i}\rho(u,v)|R_2(v;\grM)|\quad (i=1,2).$$ Observe that by applying Bessel's inequality in combination with the first estimate of Lemma \ref{lemma2.4}, one discerns that
$$\sum_{v\in \grX}|R_2(v;\grM)|^2\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{2n-11/4-9\tau}.$$ When $i\in \{1,2\}$, therefore, we deduce from Cauchy's inequality together with Lemma \ref{lemma4.2} that
$${\Upsilon}} \def\Upshat{{\widehat \Ups}_i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Zet_i^{1/2}\Bigl( \sum_{v\in \grX}|R_2(v;\grM)|^2\Bigr)^{1/2}\ll B^{l+n-3-8\tau}.$$ For systems of type B one has $m\ge 4$, and so it follows from Lemma \ref{lemma2.3} that $R_1(u;\grM)\ll B^{m-3+\varepsilon}$. Hence, we obtain \begin{align*} N(B;\grM\times \grM)-N_0(B;\grM,\grM)&=\sum_{(u,v)\in \grX_1\cup \grX_2}\rho(u,v)R_1(u;\grM)R_2(v;\grM)\\ &\ll B^{m-3+\varepsilon}({\Upsilon}} \def\Upshat{{\widehat \Ups}_1+{\Upsilon}} \def\Upshat{{\widehat \Ups}_2)\ll B^{s-6-7\tau }. \end{align*} This completes the proof of the lemma. \end{proof}
\begin{lemma}\label{lemma4.4} For systems of type B, one has $N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{s-6-\tau}$. \end{lemma}
\begin{proof} An application of Cauchy's inequality reveals that $$N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec V_1^{1/2}V_2^{1/2},$$ where
$$V_1=\sum_{(u,v)\in \grX_0}\rho(u,v)|R_1(u;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2$$ and
$$V_2=\sum_{(u,v)\in\grX_0}\rho(u,v)|R_2(v;[0,1))|^2.$$ On recalling the definitions of the sets $\grX_i$ from (\ref{3.11}), noting that at present $T=B^{l-3+\tau}$, it follows from Bessel's inequality and Lemma \ref{lemma2.4} that
\begin{align*}V_1&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \sum_{\substack{u\in \grX\\ \rho_1(u)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3+\tau}}}\rho_1(u)|R_1(u;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3+\tau}\sum_{u\in \grX}|R_1(u;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})|^2\\
&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3+\tau}\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{l-3+\tau}(B^{2m-13/4-8\tau}). \end{align*} Similarly, one finds that \begin{align*}
V_2&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \sum_{\substack{v\in \grX\\ \rho_2(v)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3+\tau}}}\rho_2(v)|R_2(v;[0,1))|^2\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3+\tau}\sum_{v\in \grX}|R_2(v;[0,1))|^2\\
&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3+\tau}\int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{l-3+\tau}(B^{2n-11/4-9\tau}). \end{align*} Thus we deduce that $$N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{l-3+\tau}(B^{n+m-3-8\tau})\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{s-6-7\tau},$$ and the proof of the lemma is complete. \end{proof}
\begin{lemma}\label{lemma4.5} For systems of type B, one has $N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{s-6-\tau}$. \end{lemma}
\begin{proof} Following the argument of the proof of Lemma \ref{lemma4.3}, one finds that \begin{align*} N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})-N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})&=\sum_{(u,v)\in \grX_1\cup \grX_2}\rho(u,v)R_1(u;\grM)R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\notag \\
&\ll B^{m-3+\varepsilon}(\Zet_1+\Zet_2)^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)^{1/2}. \end{align*} Thus we deduce that \begin{equation}\label{4.2} N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})-N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{s-6-7\tau}. \end{equation}
\par We next observe that \begin{equation}\label{4.3} N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})=\int_\grM\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})G({\alpha}} \def\bfalp{{\boldsymbol \alpha})H({\beta}} \def\bfbet{{\boldsymbol \beta}){\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} . \end{equation} As a consequence of Schwarz's inequality, one has
$$\int_0^1|F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_2{\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec g(0)^{l-4}U_1^{1/2}U_2^{1/2},$$ where for $i\in \{1,2\}$ we write
$$U_i=\int_0^1|g(a_{2+i}{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_{2+i}{\beta}} \def\bfbet{{\boldsymbol \beta})h(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} .$$ On considering the underlying Diophantine equations and then appealing to Lemma \ref{lemma2.4}, one discerns that
$$U_i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \int_0^1|g(b_{2+i}{\beta}} \def\bfbet{{\boldsymbol \beta})h(b_i{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{13/4-9\tau}.$$ We therefore deduce from the modified version of Weyl's inequality (\ref{2.11}) that \begin{align*}
\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} &\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec g(0)^{n-3}\Bigl(\sup_{{\beta}} \def\bfbet{{\boldsymbol \beta}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|g(d_n{\beta}} \def\bfbet{{\boldsymbol \beta})|\Bigr) \int_0^1|F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_2{\beta}} \def\bfbet{{\boldsymbol \beta})|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \\ &\ll B^{n-3}(B^{3/4+\varepsilon})(B^{l-3/4-9\tau})\ll B^{s-m-3-8\tau}. \end{align*} Substituting this upper bound into (\ref{4.3}) and applying Lemma \ref{lemma2.3}, we obtain
$$N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{s-m-3-8\tau}\int_\grM |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{s-6-7\tau}.$$ The conclusion of the lemma follows by reference to (\ref{4.2}). \end{proof}
We are now equipped to finish off the discussion of systems of type B. Combining the estimates supplied by Lemmata \ref{lemma4.3}, \ref{lemma4.4} and \ref{lemma4.5}, we see that \begin{align*} N(B)&\ge N_0(B;\grM,\grM)+N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})+N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\\ &=N(B;\grM\times\grM)+O(B^{s-6-\tau}). \end{align*} Hence, in view of (\ref{2.6}), we conclude from Lemmata \ref{lemma2.1} and \ref{lemma4.1} that $$N(B)\ge N(B;\grN)+O(B^{s-6}\mathscr{L}^{-1})\gg B^{s-6}.$$ This completes the proof of Theorem \ref{theorem1.1} for systems of type B.
\section{Systems of type C} Our analysis of systems of type C, wherein $s=12$ and $(l,m,n)=(2,5,5)$, may be abbreviated by adjusting the argument of \S3 through modification of the generating functions $F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})$, $G({\alpha}} \def\bfalp{{\boldsymbol \alpha})$ and $H({\beta}} \def\bfbet{{\boldsymbol \beta})$. We begin with a discussion of the pruning operation implicit in the estimation of $N(B;\grK)$.\par
\begin{lemma}\label{lemma5.1} For systems of type C, one has $N(B;\grK)\ll B^6\mathscr{L}^{-1}$. \end{lemma}
\begin{proof} Define the mean values \begin{align*}
U_{ij}&=\iint_\grK |g(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{5/2}|g(d_j{\beta}} \def\bfbet{{\boldsymbol \beta})|^{9/2}|h(a_1{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_1{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} ,\\
V_{ij}&=\iint_\grK |g(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{9/2}|g(d_j{\beta}} \def\bfbet{{\boldsymbol \beta})|^{5/2}|h(a_2{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_2{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} ,\\
W_k&=\int_0^1\int_0^1|h(c_k{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(d_k{\beta}} \def\bfbet{{\boldsymbol \beta})|^8{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} , \end{align*} and put
$$\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=|h(c_1{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(c_2{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(d_2{\beta}} \def\bfbet{{\boldsymbol \beta})|^3|h(a_1{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_1{\beta}} \def\bfbet{{\boldsymbol \beta})h(a_2{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_2{\beta}} \def\bfbet{{\boldsymbol \beta})|.$$ Then an application of H\"older's inequality reveals that \begin{equation}\label{5.1} N(B;\grK)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\Bigr)^{1/7}(W_1W_2)^{1/14}\prod_{i=3}^5\prod_{j=3}^5(U_{ij}V_{ij})^{1/21}. \end{equation}
\par The argument of the proof of \cite[Lemma 10]{BW2007a} shows that $$\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\ll B^{14}Q^{-1/10}.$$ As a consequence of Lemma \ref{lemma2.2}, meanwhile, one has
$$U_{ij}\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_\grM|g(d_j{\beta}} \def\bfbet{{\boldsymbol \beta})|^{9/2}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr) \Bigl( \sup_{{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}\in{\mathbb R}}\def\dbT{{\mathbb T}}\int_\grM|g(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{5/2}|h(a_1{\alpha}} \def\bfalp{{\boldsymbol \alpha}+{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr)\ll B^3,$$ and a symmetric argument yields the estimate $V_{ij}\ll B^3$. Finally, one finds from \cite[Theorem 2]{Vau1986} that
$$W_k\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_0^1|h({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^8{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\Bigr)^2\ll (B^5)^2=B^{10}.$$ Combining these estimates within (\ref{5.1}), we conclude that $$N(B;\grK)\ll (B^{14}Q^{-1/10})^{1/7}(B^{20})^{1/14}(B^6)^{9/21}\ll B^6Q^{-1/70}.$$ This completes the proof of the lemma. \end{proof}
Our next step is to relabel the coefficients of the system (\ref{1.1}) so that $\mtil=m-1$, $\widetilde n} \def\Ntil{\widetilde N=n-1$, $\widetilde l} \def\mtil{\widetilde m=l+2$, which is to say that $(\widetilde l} \def\mtil{\widetilde m,\mtil,\widetilde n} \def\Ntil{\widetilde N)=(4,4,4)$, and to put $$\ctil_j=c_j\quad \text{and}\quad \widetilde{d}} \def\etil{\widetilde e_j=d_j\quad (1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec j\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec 4),$$ and $$(\widetilde a} \def\btil{\widetilde b} \def\ctil{\widetilde c_i,\btil_i)=(a_i,b_i)\quad (i=1,2),\quad (\widetilde a} \def\btil{\widetilde b} \def\ctil{\widetilde c_3,\btil_3)=(0,d_5)\quad (\widetilde a} \def\btil{\widetilde b} \def\ctil{\widetilde c_4,\btil_4)=(c_5,0).$$ We then define the generating functions $\widetilde F} \def\Gtil{\widetilde G} \def\Htil{\widetilde H({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})$, $\Gtil({\alpha}} \def\bfalp{{\boldsymbol \alpha})$ and $\Htil({\beta}} \def\bfbet{{\boldsymbol \beta})$ as in the respective definitions of $F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})$, $G({\alpha}} \def\bfalp{{\boldsymbol \alpha})$ and $H({\beta}} \def\bfbet{{\boldsymbol \beta})$ in (\ref{2.2}) and (\ref{2.3}), save that in the present context the integers $l$, $m$, $n$, and the coefficients $a_i$, $b_i$, $c_j$ and $d_k$, are to be decorated by tildes. Further notation from \S\S2 and 3 is understood to have the meaning naturally inferred in like manner when decorated by a tilde. An examination of the argument of \S3, leading from the discussion preceding Lemma \ref{lemma3.2} to the conclusion of the section, now reveals that no adjustment is necessary in order to accommodate the change of circumstances implicit in our present analysis. 
Here it is worth noting that, despite the fact that we now have $\widetilde l} \def\mtil{\widetilde m=4$ and $\widetilde a} \def\btil{\widetilde b} \def\ctil{\widetilde c_3=0$, the presence of three non-zero coefficients in the equation (\ref{3.8}) ensures that the analogue of the upper bound (\ref{3.13}) remains valid. Thus one obtains $\Xitil_1\ll B^{4-9\tau}$, and by means of a symmetric argument also $\Xitil_2\ll B^{4-9\tau}$. The analogue of Lemma \ref{lemma3.3} delivers the bound $$\Ntil_0(B;\grM,\grM)-\Ntil(B;\grM\times \grM)\ll B^{6-\tau},$$ and analogues of Lemmata \ref{lemma3.4} and \ref{lemma3.5} yield the estimates $$\Ntil_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{6-\tau},\quad \Ntil_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},\grM)\ll B^{6-\tau},\quad \Ntil_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{6-\tau}.$$ We therefore conclude that \begin{align*} \Ntil(B)&\ge \Ntil_0(B;\grM,\grM)+\Ntil_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})+\Ntil_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},\grM)+\Ntil_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\\ &=\Ntil(B;\grM\times \grM)+O(B^{6-\tau})=N(B;\grM\times \grM)+O(B^{6-\tau}). \end{align*} Then, in view of (\ref{2.6}), we conclude from Lemmata \ref{lemma2.1} and \ref{lemma5.1} that $$N(B)=\Ntil(B)\gg N(B;\grN)+O(B^6\mathscr{L}^{-1})\gg B^6.$$ This completes the proof of Theorem \ref{theorem1.1} for systems of type C.
\section{Systems of type D} At present, we have been unable to devise an unconditional treatment of systems of the shape (\ref{1.1}) in which $(l,m,n)=(5,5,2)$. A conditional treatment is available by appealing to HRH. In order to describe the nature of this particular Riemann Hypothesis, we must indulge in some discussion. Although a lengthy affair in full, for the sake of concision we conduct a rather sketchy account here of the treatment of systems of type D. Following Hooley \cite[\S\S5 and 6]{Hoo1986}, we consider the cubic form $\mathfrak g} \def\grG{{\mathfrak G}({\mathbf x})=x_1^3+\ldots +x_6^3$ and the associated discriminant $$\Del({\mathbf m})=3\prod (m_1^{3/2}\pm m_2^{3/2}\pm \ldots \pm m_6^{3/2}),$$ in which the product is taken over all possible choices of the signs. Let $\rho({\mathbf m};p^r)$ denote the number of points of the projective variety defined by $\mathfrak g} \def\grG{{\mathfrak G}({\mathbf x})={\mathbf m}\cdot {\mathbf x}=0$, having coordinates in the finite field $\dbF_{p^r}$, and put \begin{equation}\label{6.1} E({\mathbf m};p^r)=\rho({\mathbf m};p^r)-(p^{4r}-1)/(p^r-1). \end{equation} The Euler factors $L_p({\mathbf m};s)$ are then defined for $p\nmid \Del({\mathbf m})$ by putting $$L_p({\mathbf m};s)=\exp \Bigl( -\sum_{r=1}^\infty E({\mathbf m};p^r)p^{-rs}/r\Bigr).$$
When $p|\Del({\mathbf m})$, one must modify the definition of $L_p({\mathbf m};s)$, as described by Serre \cite{Ser1986}, so that for suitable coefficients ${\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_{j,p}={\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_{j,p}({\mathbf m})$ with $1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec |{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_{j,p}|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec p^{3/2}$, one has $$L_p({\mathbf m};s)=\prod_j (1-{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_{j,p}p^{-s})^{-1}.$$ The number of factors here is at most $10$, the precise definition of which need not detain us. Associated to the modified Hasse-Weil $L$-function $$L({\mathbf m};s)=\prod_pL_p({\mathbf m};s)$$ is the conductor $B({\mathbf m})$, given by
$$B({\mathbf m})=\prod_{p|\Del({\mathbf m})}p^{a_p},$$ in which the exponents $a_p$ are certain non-negative integers with $0\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec a_p\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec 200$. Finally, we put $$\xi({\mathbf m};s)=(2\pi)^{-5s}\Gam(s)^5B({\mathbf m})^{s/2}L({\mathbf m};s).$$
\begin{conjecture}[HRH]\label{conjecture6.1} Suppose that $\Del({\mathbf m})\ne 0$. Then: \begin{itemize} \item[(i)] the function $\xi({\mathbf m};s)$ has a meromorphic continuation to ${\mathbb C}}\def\dbF{{\mathbb F}}\def\dbN{{\mathbb N}}\def\dbP{{\mathbb P}$ of finite order, its only possible poles being at $s=\frac{3}{2}$ and $s=\frac{5}{2}$; \item[(ii)] with $w({\mathbf m})=\pm 1$, one has the functional equation $$\xi({\mathbf m};s)=w({\mathbf m})\xi({\mathbf m};4-s);$$ \item[(iii)] when $\text{Re}(s)\ne 2$, one has $\xi({\mathbf m};s)\ne 0$. \end{itemize} \end{conjecture}
It is the assertion (iii) of this conjecture that constitutes the Riemann Hypothesis within HRH. The relevance of Conjecture \ref{conjecture6.1} for our work here is made visible by the following lemma.
\begin{lemma}\label{lemma6.2} Provided that {\rm HRH} be valid, one has
$$\int_0^1|f({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^6{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{3+\varepsilon}.$$ \end{lemma}
\begin{proof} When $n$ is a non-negative integer, write $r(n)$ for the number of representations of $n$ as the sum of three non-negative integral cubes. Then, subject to the validity of HRH, Hooley \cite[\S5]{Hoo1996} has shewn that \begin{equation}\label{6.2} \sum_{1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec n\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec x}r(n)^2\ll x^{1+\varepsilon}, \end{equation} a conclusion that yields the bound claimed in the lemma as an immediate corollary. We note that Heath-Brown \cite[Theorem 1.1]{HB1998} has also shown that the upper bound (\ref{6.2}) holds conditional on the truth of HRH\footnote{In order to avoid possible confusion, we note that in the display preceding \cite[equation (4.4)]{HB1998}, there is a typographic error which is corrected in (\ref{6.1}) above.}. \end{proof}
Henceforth in this section, we assume the truth of HRH. We now put $Y=B^{10\tau}$, and introduce the generating functions $$k_p({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=\sum_{B/p<w\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec 2B/p}e({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} w^3)\quad \text{and}\quad K({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} ;Y)=\sum_{Y<p\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec 2Y}k_p(p^3{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}),$$ in which the letter $p$ is reserved to indicate a prime number in the congruence class $2$ modulo $3$. Finally, we change the definition of the generating function $h({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})$ applied hitherto by setting \begin{equation}\label{6.3} h({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=\sum_{j=1}^JK({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta};2^{-j}Y), \end{equation} where $J=[\frac{1}{2}\tau \log B]$.
\begin{lemma}\label{lemma6.3} When $a$ and $b$ are non-zero integers, one has
$$\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^4h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^6|{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{6-3\tau}.$$ \end{lemma}
\begin{proof} We apply the argument of the proof of Theorem 3.1 of the authors' recent work \cite{BW2010} concerning sums of cubes and minicubes, substituting the conditional bound supplied by Lemma \ref{lemma6.2} in place of the bound tantamount to (\ref{2.1}) employed in \cite{BW2010}. In the first instance, the relevance of this new bound is seen on considering the underlying Diophantine equations. One finds that
$$\int_0^1|K({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta};Y)|^6{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{3+\varepsilon},$$ and likewise
$$\int_0^1\Bigl( \max_{Y<p\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec 2Y}|k_p({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\Bigr)^6{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll (B/Y)^{3+\varepsilon}.$$
\par Next, when $X$ is a real parameter with $1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec X\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{3/2}$, define
$$\grM(q,a;X)=\{ {\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \in [0,1):|q{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}-a|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec XB^{-3}\},$$ and then take $\grM(X)$ to be the union of the arcs $\grM(q,a;X)$ with $0\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec a\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec q\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec X$ and $(a,q)=1$. We put ${\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}(X)=[0,1)\setminus\grM(X)$. In view of our choice for $Y$, it follows by an application of H\"older's inequality paralleling that employed in the proof of \cite[Corollary 3.2]{BW2010}, that
$$\int_{{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}(BY^3)}|g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^2h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^6|{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{9/2+\varepsilon}(Y/B^\tau)^{-1/2}\ll B^{9/2-4\tau}.$$ As a consequence of Weyl's inequality (see \cite[Lemma 2.4]{Vau1997}), moreover, one has
$$\sup_{{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}(BY^3)}|g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\ll B^{3/4+\varepsilon}.$$ Hence we deduce that \begin{equation}\label{6.4}
\int_{{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}(BY^3)}|g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^4h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^6|{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll (B^{3/4+\varepsilon})^2B^{9/2-4\tau}\ll B^{6-3\tau}. \end{equation}
\par We next prune from ${\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}$ to ${\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}(BY^3)$. By a modified version of Weyl's inequality akin to that embodied in (\ref{2.11}), one finds that whenever $Y<p\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec 2Y$, then
$$\sup_{{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|k_p(bp^3{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\ll \sup_{{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} (B^{3/4}Y^{-4})}|k_p({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\ll B^{3/4+\varepsilon}Y^2,$$ whence
$$\sup_{{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\in{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\ll B^{3/4+\varepsilon}Y^3L\ll B^{4/5}.$$ In addition, the methods of \cite[\S\S4.3 and 4.4]{Vau1997} permit one to establish the estimate
$$\int_{\grM(BY^3)}|g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^4{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{1+\varepsilon}Y^{12}.$$ We are consequently led to the upper bound \begin{align*}
\int_{{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}\setminus {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}(BY^3)}|g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^4h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^6|{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} &\ll \Bigl( \sup_{{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\in{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|h(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|\Bigr)^6\int_{\grM(BY^3)}|g(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^4{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \\ &\ll (B^{4/5})^6B^{1+\varepsilon}Y^{12}\ll B^{6-3\tau}. \end{align*} The conclusion of the lemma follows by reference to (\ref{6.4}). \end{proof}
We now aim to follow the argument of \S4, making adjustments as necessary. We first revise the definitions of the generating functions in (\ref{2.2}) and (\ref{2.3}) by putting $$F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=g(a_4{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_4{\beta}} \def\bfbet{{\boldsymbol \beta})g(a_5{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_5{\beta}} \def\bfbet{{\boldsymbol \beta})\prod_{i=1}^3h(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta}),$$ $$G({\alpha}} \def\bfalp{{\boldsymbol \alpha})=g(c_4{\alpha}} \def\bfalp{{\boldsymbol \alpha})g(c_5{\alpha}} \def\bfalp{{\boldsymbol \alpha})\prod_{j=1}^3h(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha}),\quad H({\beta}} \def\bfbet{{\boldsymbol \beta})=g(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})g(d_2{\beta}} \def\bfbet{{\boldsymbol \beta}).$$ Defining $N(B;\grB)$ as in (\ref{2.4}), we again obtain the lower bound (\ref{2.5}) for $N(B)$. All other definitions remain unchanged in the discussion to follow, unless explicitly noted. Notice that the new definition (\ref{6.3}) of the generating function $h({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})$ ensures that its behaviour on major arcs is very nearly as congenial as that of $g({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})$, since it differs from a classical Weyl sum only by the presence of a small prime factor. Indeed, one may sum trivially over this factor whenever necessary, treating the remaining part as a classical Weyl sum. In this way, one may verify that the conclusions of Lemmata \ref{lemma2.1}, \ref{lemma2.2} and \ref{lemma2.3} remain valid in the current situation, despite the novel identity of the exponential sum $h({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})$.\par
Our next step is to derive an analogue of the pruning lemma of \S4.
\begin{lemma}\label{lemma6.4} For systems of type D, one has $N(B;\grK)\ll B^6\mathscr{L}^{-1}$. \end{lemma}
\begin{proof} Define the mean values \begin{align*}
U_{ij}&=\iint_\grK |g(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})g(d_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^{19/9}|h(a_j{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_j{\beta}} \def\bfbet{{\boldsymbol \beta})h(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} ,\\
W_j&=\int_0^1\int_0^1|h(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(a_j{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_j{\beta}} \def\bfbet{{\boldsymbol \beta})|^8{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} , \end{align*} and put
$$\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=\prod_{j=1}^3|h(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})h(a_j{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_j{\beta}} \def\bfbet{{\boldsymbol \beta})|.$$ Then an application of H\"older's inequality reveals that \begin{equation}\label{6.5} N(B;\grK)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec g(0)^2\Bigl( \sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\Bigr)^{13/57}\prod_{j=1}^3(U_{4j}^9U_{5j}^9W_j)^{1/57}. \end{equation}
\par The argument of the proof of \cite[Lemma 10]{BW2007a} shows that $$\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\ll B^6Q^{-1/10}.$$ As a consequence of Lemma \ref{lemma2.2}, meanwhile, one has \begin{align*}
U_{ij}&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_\grM |g(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{19/9}|h(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr) \Bigl( \sup_{{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}\in {\mathbb R}}\def\dbT{{\mathbb T}}\int_\grM |g(d_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^{19/9}|h(b_j{\beta}} \def\bfbet{{\boldsymbol \beta}+{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)\\ &\ll (B^{10/9})^2=B^{20/9}. \end{align*} Also, by considering the underlying Diophantine equations, making a change of variables, and applying \cite[Theorem 2]{Vau1986}, one sees that
$$W_j\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_0^1|h({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^8{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}\Bigr)^2\ll (B^5)^2=B^{10}.$$ Combining these estimates within (\ref{6.5}), we conclude that $$N(B;\grK)\ll B^2(B^6Q^{-1/10})^{13/57}((B^{20})^2B^{10})^{3/57}\ll B^6Q^{-13/570}.$$ The conclusion of the lemma now follows. \end{proof}
We now proceed as in \S4, adopting the notation introduced in the discussion prior to Lemmata \ref{lemma3.2} and \ref{lemma3.3}. In present circumstances we have $(l,m,n)=(5,5,2)$, though comparison with \S4 will be assisted in what follows by explicit mention of $l$, $m$ and $n$. Thus, for systems of type D, the truncation parameter is fixed to be $T=B^{l-3+\tau}$, just as in \S4. Our next task is to derive a bound for the quantity $\Zet_i$ introduced in the preamble to Lemma \ref{lemma4.2}.
\begin{lemma}\label{lemma6.5} For systems of type D, one has $\Zet_i\ll B^{2l-4-3\tau}$ $(i=1,2)$. \end{lemma}
\begin{proof} The reader will experience no difficulty in adapting the argument of the proof of Lemma \ref{lemma4.2} to establish the claimed bounds, substituting when needed the estimates
$$\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}|F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\ll B^{2l-4-3\tau}\quad \text{and}\quad \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}|F(0,{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\ll B^{2l-4-3\tau},$$ made available from Lemma \ref{lemma6.3} via H\"older's inequality. \end{proof}
\begin{lemma}\label{lemma6.6} For systems of type D, one has $$N_0(B;\grM,\grM)-N(B;\grM\times \grM)\ll B^{6-\tau}.$$ \end{lemma}
\begin{proof} We adapt the argument of the proof of Lemma \ref{lemma4.3} to the present context. First, as a consequence of Hua's lemma (see \cite[Lemma 2.5]{Vau1997}) and Schwarz's inequality, one has \begin{align}
\int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} &\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_0^1|g(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})|^4{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/2}\Bigl( \int_0^1|g(d_2{\beta}} \def\bfbet{{\boldsymbol \beta})|^4{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/2}\notag \\ &\ll B^{2+\varepsilon}=B^{2n-2+\varepsilon}.\label{6.6} \end{align} Then, as in the proof of Lemma \ref{lemma4.3}, we deduce from Lemma \ref{lemma6.5} that
$${\Upsilon}} \def\Upshat{{\widehat \Ups}_i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Zet_i^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)^{1/2}\ll B^{l+n-3-3\tau/2+\varepsilon},$$ and hence that $$N_0(B;\grM,\grM)-N(B;\grM\times \grM)\ll B^{m-3+\varepsilon}({\Upsilon}} \def\Upshat{{\widehat \Ups}_1+{\Upsilon}} \def\Upshat{{\widehat \Ups}_2)\ll B^{s-6-\tau}.$$ The desired conclusion follows on recalling that $s=12$. \end{proof}
\begin{lemma}\label{lemma6.7} For systems of type D, one has $N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{6-\tau/3}$. \end{lemma}
\begin{proof} By adapting the argument of the proof of Lemma \ref{lemma4.4}, one finds that
$$N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{l-3+\tau}\Bigl(\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr)^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)^{1/2}.$$ The first integral on the right hand side may be estimated by means of Lemma \ref{lemma6.3} via H\"older's inequality, and the second from (\ref{6.6}). Thus one obtains $$N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{l-3+\tau}(B^{2m-4-3\tau})^{1/2}(B^{2n-2+\varepsilon})^{1/2}\ll B^{s-6-\tau/3}.$$ The desired conclusion again follows on noting that $s=12$. \end{proof}
\begin{lemma}\label{lemma6.8} For systems of type D, one has $N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{6-\tau}$. \end{lemma}
\begin{proof} In the first step, by adapting the argument of the proof of Lemma \ref{lemma4.5}, we deduce from (\ref{6.6}) and Lemma \ref{lemma6.5} that \begin{align}
N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})-N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})&\ll B^{m-3+\varepsilon}(\Zet_1+\Zet_2)^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/2}\notag \\ &\ll B^{m-3+\varepsilon}(B^{2l-4-3\tau})^{1/2}(B^{2n-2+\varepsilon})^{1/2}.\label{6.7} \end{align} We next estimate $N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})$, observing that a consideration of the underlying Diophantine equations in combination with H\"older's inequality delivers the bound \begin{align*} \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} &F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta}){\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\\
&\ll \Bigl( \sup_{{\beta}} \def\bfbet{{\boldsymbol \beta} \in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|g(d_1{\beta}} \def\bfbet{{\boldsymbol \beta})|\Bigr) \Bigl( \int_0^1|f(d_2{\beta}} \def\bfbet{{\boldsymbol \beta})|^6{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/6}\prod_{i=1}^5\Bigl( \int_0^1|f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^6{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/6}. \end{align*} We therefore deduce from Lemma \ref{lemma6.2} together with Weyl's inequality (see (\ref{2.11}) above) that $$\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta}){\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{3/4+\varepsilon}(B^{3+\varepsilon}).$$ Consequently, in view of Lemma \ref{lemma2.3}, we derive the upper bound \begin{align*} N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})&=\int_\grM G({\alpha}} \def\bfalp{{\boldsymbol \alpha})\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta}){\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \\
&\ll B^{15/4+\varepsilon}\int_\grM |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{15/4+\varepsilon}(B^{2+\varepsilon}). \end{align*} On recalling (\ref{6.7}), we conclude that $$N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{s-6-\tau}+B^{6-\tau}\ll B^{6-\tau},$$ and the proof of the lemma is complete. \end{proof}
The treatment of systems of type D is now completed just as in the analogous argument for systems of type B in \S4. By combining the conclusions of Lemmata \ref{lemma6.4}, \ref{lemma6.6}, \ref{lemma6.7} and \ref{lemma6.8}, we confirm by means of Lemma \ref{lemma2.1} that $$N(B)\ge N(B;\grN)+O(B^6\mathscr{L}^{-1})\gg B^6.$$ This completes the proof of Theorem \ref{theorem1.1} for systems of type D, subject to the validity of HRH.
\section{The anticipated asymptotic formula: preliminaries} In this section, we turn to the proof that $N(B)$ is asymptotically at least as large as anticipated. Consider a system of the shape (\ref{1.1}) subject to the hypotheses of the statement of Theorem \ref{theorem1.2}. The conclusion of this theorem is already supplied by \cite[Theorem 1.1]{BW2011} when $s\ge 14$, so there is no loss of generality in restricting to the situations with $s=12$ and $13$. A moment of thought reveals that the triple $(l,m,n)$ associated with the system (\ref{1.1}) must take one of three shapes, namely: \begin{enumerate} \item[(E)] $(4,4,4)$, $(4,5,4)$ or $(5,4,4)$, \item[(F)] $(3,5,5)$, \item[(G)] $(5,5,3)$. \end{enumerate} We now modify the argument of \S\S2--6 in order to accommodate the modest changes involved in obtaining an asymptotic formula for $N(B)$. We take the expedient approach of adopting all notation from those sections without further comment, unless noted otherwise, and thereby economise on space.\par
We begin by modifying the definitions of the generating functions defined in (\ref{2.2}) and (\ref{2.3}) by putting $$F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=\prod_{i=1}^lf(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta}),\quad G({\alpha}} \def\bfalp{{\boldsymbol \alpha})=\prod_{j=1}^mf(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})\quad \text{and}\quad H({\beta}} \def\bfbet{{\boldsymbol \beta})=\prod_{k=1}^nf(d_k{\beta}} \def\bfbet{{\boldsymbol \beta}).$$ With the definition (\ref{2.4}) unchanged, one finds by orthogonality that \begin{equation}\label{7.1} N(B)=N(B;[0,1)^2). \end{equation}
\begin{lemma}\label{lemma7.1} For systems of type E, F and G, one has $$N(B;\grN) =\mathscr{C}} \def\calCbar{{\overline \calC} B^{s-6}+O(B^{s-6}\mathscr{L}^{-1}).$$ \end{lemma}
\begin{proof} We may apply the argument of the proof of Lemma \ref{lemma2.1}. The presence of additional classical Weyl sums, rather than their smooth brethren, ensures that the analysis underlying the proof of the latter lemma not only remains valid, but proceeds in a manner more pedestrian than in \S2 (see also the proof of \cite[Lemma 3.1]{BW2011}). Thus one obtains the asymptotic formula $$N(B;\grN)=\grS{\mathfrak J}(B)+O(B^{s-6}\mathscr{L}^{-1}),$$ where $\grS=\sum\limits_{q=1}^\infty A(q)$, with $A(q)$ defined as in (\ref{2.7}), and $${\mathfrak J}(B)=\iint_{{\mathbb R}}\def\dbT{{\mathbb T}^2} \prod_{i=1}^lw(a_i\xi+b_i{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet})\prod_{j=1}^mw(c_j\xi)\prod_{k=1}^n w(d_k{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet}){\,{\rm d}}\xi {\,{\rm d}}{\zeta}} \def\bfzet{{\boldsymbol \zeta}} \def\Zet{{\rm Z}} \def\Zettil{{\widetilde \Zet} .$$ A conventional analysis akin to that described in \S2 reveals that, provided the system (\ref{1.1}) admits non-singular $p$-adic solutions for each prime number $p$, then $1\ll \grS\ll 1$. Moreover, for a suitable positive constant ${\mathfrak J}$, one finds that ${\mathfrak J}(B)={\mathfrak J} B^{s-6}$. Here, in the notation introduced in the preamble to the statement of Theorem \ref{theorem1.2}, one has ${\mathfrak J}=v_\infty$ and $\grS=\prod_pv_p$ (compare the treatment of \cite{BW2011}). This confirms the asymptotic formula claimed in the statement of the lemma, with $\mathscr{C}} \def\calCbar{{\overline \calC}=\grS {\mathfrak J}$. \end{proof}
The exponential sum $g({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})$, avoiding as it does summands with arguments close to $0$, has slightly better behaviour on major arcs than does $f({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})$. We therefore record surrogates for Lemmata \ref{lemma2.2} and \ref{lemma2.3} of use in later sections.
\begin{lemma}\label{lemma7.2} Let $a$ be a fixed non-zero integer, and let $b$ be a non-zero rational number. Then when ${\delta}} \def\Del{{\Delta} >0$, one has
$$\sup_{{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda} \in {\mathbb R}}\def\dbT{{\mathbb T}}\int_\grM |f(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})^{3+{\delta}} \def\Del{{\Delta}}f(b{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}+{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda})^2|{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{2+{\delta}} \def\Del{{\Delta}}$$ and
$$\int_\grM |f(a{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^{4+{\delta}} \def\Del{{\Delta}}{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{1+{\delta}} \def\Del{{\Delta}}.$$ \end{lemma}
\begin{proof} The first estimate follows via the argument of the proof of \cite[Lemma 9]{BW2007a}, and the second from the methods of \cite[\S\S4.3 and 4.4]{Vau1997}. \end{proof}
\begin{lemma}\label{lemma7.3} Suppose that the integer $w$ is non-zero. Then for $m\ge 4$, one has $$R_1(w;\grM)\ll B^{m-3}L^\varepsilon\quad \text{and}\quad R_1(0;\grM)\ll B^{m-3+\varepsilon}.$$ Similarly, when $n\ge 4$, one has $$R_2(w;\grM)\ll B^{n-3}L^\varepsilon\quad \text{and}\quad R_2(0;\grM)\ll B^{n-3+\varepsilon}.$$ Finally, when $m\ge 5$ one has $R_1(w;\grM)\ll B^{m-3}$ for all integers $w$. \end{lemma}
\begin{proof} On recalling the definitions of $R_1(w;\grM)$ and $R_2(w;\grM)$ from the preamble to Lemma \ref{lemma3.3}, these estimates follow from the methods of \cite[\S\S4.3 and 4.4]{Vau1997}. We note that a precise form of these upper bounds may be derived from \cite[equations (1.3) and (1.4)]{Kaw1996}. \end{proof}
We finish by recording some mean value estimates for Weyl sums. In this context, when $r\ge t\ge 3$ and ${\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_1,\ldots,{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_r$ are fixed non-zero integers, we write $$D_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})=\prod_{i=1}^tf({\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}_i{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta}).$$
\begin{lemma}\label{lemma7.4} One has \begin{align*}
\int_0^1|D_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} &\ll_\bflam B^{2t-5/2}L^{\varepsilon-3/2}\quad (3\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec t\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec r),\\
\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |D_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} &\ll_\bflam B^{2t-3}L^{\varepsilon-3}\quad (4\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec t\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec r),\\
\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |D_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} &\ll_\bflam B^{2t-7/2}L^{\varepsilon-5/2}\quad (5\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec t\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec r). \end{align*} \end{lemma}
\begin{proof} The upper bounds \begin{equation}\label{7.2}
\int_0^1|f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^4{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^2\quad \text{and}\quad \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^8{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^5L^{\varepsilon-3}, \end{equation} that follow, respectively, from Hooley \cite[Theorem 1]{Hoo1980} and Boklan \cite{Bok1993}, combine through the medium of Schwarz's inequality to give
$$\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^6{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_0^1|f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^4{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr)^{1/2}\Bigl( \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^8{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr)^{1/2}\ll B^{7/2}L^{\varepsilon-3/2}.$$ In view of Lemma \ref{lemma7.2}, therefore, one finds that
$$\int_0^1|f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^6{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} =\int_\grM|f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^6{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} +\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}|f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^6{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^3+B^{7/2}L^{\varepsilon -3/2}.$$ By applying H\"older's inequality, when $3\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec t\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec r$ one obtains
$$\int_0^1|D_t({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^2{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec f(0)^{2t-6}\int_0^1|f({\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta})|^6{\,{\rm d}}{\theta}} \def\bftet{{\boldsymbol \theta}} \def\Tet{{\Theta} \ll B^{2t-6}(B^{7/2}L^{\varepsilon-3/2}).$$ This establishes the first estimate of the lemma.\par
The second estimate of the lemma follows from the second bound of (\ref{7.2}), following an application of H\"older's inequality in a manner similar to that above. For the third estimate of the lemma, we begin by recalling the sharpened version of Weyl's inequality
$$\sup_{{\alpha}} \def\bfalp{{\boldsymbol \alpha}\in {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|\ll B^{3/4}L^{1/4+\varepsilon},$$ available from \cite{Vau1986} via the work of Hall and Tenenbaum \cite{HT1988}. This leads from the second estimate of (\ref{7.2}) to the upper bound
$$\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}|f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{10}{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll \Bigl( \sup_{{\alpha}} \def\bfalp{{\boldsymbol \alpha}\in{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}}|f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|\Bigr)^2\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |f({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^8{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll (B^{3/4}L^{1/4+\varepsilon})^2B^5L^{\varepsilon-3}.$$ The final estimate of the lemma therefore follows once again by employing H\"older's inequality. \end{proof}
\section{Systems of type E} The treatment of systems of type E is similar to that of systems of type B in \S4, and we imitate the latter throughout this section.
\begin{lemma}\label{lemma8.1} For systems of type E, one has $N(B;\grK)\ll B^{s-6}\mathscr{L}^{-1}$. \end{lemma}
\begin{proof} Define the mean values \begin{align*}
U_{ijk}&=\iint_\grK |f(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{10/3}|f(d_k{\beta}} \def\bfbet{{\boldsymbol \beta})|^{13/3}|f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta},\\
V_{ijk}&=\iint_\grK |f(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{13/3}|f(d_k{\beta}} \def\bfbet{{\boldsymbol \beta})|^{10/3}|f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \end{align*} and put
$$\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=\prod_{i=1}^l|f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^{24(l-2)m}\prod_{j=1}^m|f(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{4l(6m-23)}\prod_{k=1}^4|f(d_k{\beta}} \def\bfbet{{\boldsymbol \beta})|^{lm}.$$ Then an application of H\"older's inequality reveals that \begin{equation}\label{8.1} N(B;\grK)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\Bigr)^{1/(24lm)}\prod_{i=1}^l\prod_{j=1}^m\prod_{k=1}^4(U_{ijk}V_{ijk})^{1/(8lm)}. \end{equation}
\par The argument of the proof of \cite[Lemma 10]{BW2007a} shows that $$\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\ll (B^{l+m-17/3})^{24lm}Q^{-1/10}.$$ As a consequence of Lemma \ref{lemma7.2}, meanwhile, one has \begin{align*}
U_{ijk}&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_\grM|f(d_k{\beta}} \def\bfbet{{\boldsymbol \beta})|^{13/3}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)\Bigl( \sup_{{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}\in{\mathbb R}}\def\dbT{{\mathbb T}}\int_\grM |f(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{10/3}|f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr)\\ &\ll (B^{4/3})(B^{7/3})=B^{11/3}, \end{align*} and a symmetric argument yields the estimate $V_{ijk}\ll B^{11/3}$. Combining these estimates within the framework of (\ref{8.1}), we conclude that $$N(B;\grK)\ll (B^{l+m-17/3}Q^{-1/(240lm)})(B^{11/3})\ll B^{s-6}Q^{-1/4800},$$ and the bound asserted in the lemma follows immediately. \end{proof}
We proceed now as in \S4, adopting the notation introduced in the discussion associated with Lemmata \ref{lemma3.2} and \ref{lemma3.3}. For systems of type E, the truncation parameter is fixed to be $T=B^{l-3}L$. As in \S4, we pause at this point to establish an estimate for the auxiliary quantity $\Zet_i$.
\begin{lemma}\label{lemma8.2} For systems of type E, one has $\Zet_i\ll B^{2l-3}L^{\varepsilon-3}$ $(i=1,2)$. \end{lemma}
\begin{proof} Since $l\ge 4$, the estimate $$\int_\grM F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)e(-{\alpha}} \def\bfalp{{\boldsymbol \alpha} u){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{l-3}L^\varepsilon\ (u\ne 0)$$ may be established just as in the proof of Lemma \ref{lemma7.3}. Meanwhile, H\"older's inequality combines with the first estimate of (\ref{7.2}) to supply the bound \begin{equation}\label{8.2} \int_0^1 F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0){\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{l-2}, \end{equation} and Lemma \ref{lemma7.4} delivers the estimate \begin{equation}\label{8.2a}
\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{2l-3}L^{\varepsilon-3}. \end{equation} The argument of the proof of Lemma \ref{lemma4.2} therefore demonstrates that \begin{align*} \sum_{(u,v)\in \grX_1}\rho(u,v)&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{3-l}L^{-1}\sum_{\substack{u\in\grX\\ \rho_1(u)>B^{l-3}L}}\rho_1(u)^2\\
&\ll B^{3-l}L^{-1}\Bigl( \rho_1(0)^2+\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr) \\ &\ll B^{3-l}L^{-1}((B^{l-2})^2+B^{2l-3}L^{\varepsilon-3}), \end{align*} and hence $$\Zet_1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B^{l-3}L\sum_{(u,v)\in \grX_1}\rho(u,v)\ll B^{2l-3}L^{\varepsilon-3}.$$ Similarly, and again following the argument of the proof of Lemma \ref{lemma4.2}, one finds that
$$\Zet_2\ll \Bigl( \rho_2(0)^2+\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |F(0,{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)\ll (B^{l-2})^2+B^{2l-3}L^{\varepsilon-3}.$$ The conclusion of the lemma therefore follows both for $i=1$ and $i=2$. \end{proof}
\begin{lemma}\label{lemma8.3} For systems of type E, one has $$N_0(B;\grM,\grM)-N(B;\grM\times \grM)\ll B^{s-6}L^{\varepsilon-3/2}.$$ \end{lemma}
\begin{proof} Since in present circumstances one has $n\ge 4$, it follows from Lemmata \ref{lemma7.2} and \ref{lemma7.4} via H\"older's inequality that \begin{equation}\label{8.3}
\int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \int_\grM|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} +\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{2n-3}. \end{equation} Following the argument of the proof of Lemma \ref{lemma4.3}, therefore, one obtains
$${\Upsilon}} \def\Upshat{{\widehat \Ups}_i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Zet_i^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/2}\ll B^{l+n-3}L^{\varepsilon-3/2}.$$ For systems of type E one has $m\ge 4$, and so it follows from Lemma \ref{lemma7.3} that \begin{align*}N(B;\grM\times \grM)&-N_0(B;\grM,\grM)\\ &=\sum_{(u,v)\in \grX_1\cup \grX_2}\rho(u,v)R_1(u;\grM)R_2(v;\grM)\\
&\ll B^{m-3}\Bigl( B^\varepsilon \sum_{v\in \grX}\rho(0,v)|R_2(v;\grM)|+L^\varepsilon ({\Upsilon}} \def\Upshat{{\widehat \Ups}_1+{\Upsilon}} \def\Upshat{{\widehat \Ups}_2)\Bigr). \end{align*} But since $l\ge 4$ and $n\ge 4$, one finds from (\ref{8.2}) and Lemma \ref{lemma7.3} that $$\sum_{v\in \grX}\rho(0,v)=\rho_1(0)\ll B^{l-2}\quad \text{and}\quad R_2(v;\grM)\ll B^{n-3+\varepsilon}.$$ Consequently, \begin{align*} N(B;\grM\times \grM)-N_0(B;\grM,\grM)&\ll B^{m-3}(B^{l+n-5+\varepsilon}+B^{l+n-3}L^{\varepsilon-3/2})\\ &\ll B^{s-6}L^{\varepsilon-3/2}, \end{align*} and the proof of the lemma is complete. \end{proof}
\begin{lemma}\label{lemma8.4} For systems of type E, one has $N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{s-6}L^{\varepsilon-1/2}$. \end{lemma}
\begin{proof} By adapting the argument of the proof of Lemma \ref{lemma4.4} to the present context, one obtains
$$N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{l-3}L\Bigl(\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr)^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)^{1/2}.$$ The first integral on the right hand side may be estimated by means of Lemma \ref{lemma7.4}, and the second from (\ref{8.3}). Thus one finds that $$N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{l-3}L(B^{2m-3}L^{\varepsilon-3})^{1/2}(B^{2n-3})^{1/2}\ll B^{s-6}L^{\varepsilon-1/2}.$$ This completes the proof of the lemma. \end{proof}
\begin{lemma}\label{lemma8.5} For systems of type E, one has $N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{s-6}L^{\varepsilon-1/2}$. \end{lemma}
\begin{proof} By adapting the argument of the proof of Lemma \ref{lemma4.4} to the present context, one obtains
$$N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{l-3}L\Bigl(\int_0^1 |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr)^{1/2}\Bigl( \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N}|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)^{1/2}.$$ The first integral on the right hand side may be estimated by means of a variant of (\ref{8.3}), and the second by means of Lemma \ref{lemma7.4}. Thus one finds that $$N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^{l-3}L(B^{2m-3})^{1/2}(B^{2n-3}L^{\varepsilon-3})^{1/2}\ll B^{s-6}L^{\varepsilon-1/2}.$$ This completes the proof of the lemma. \end{proof}
We may now complete the proof of Theorem \ref{theorem1.2} for systems of type E. We simply combine (\ref{7.1}) with Lemmata \ref{lemma8.1}, \ref{lemma8.3}, \ref{lemma8.4} and \ref{lemma8.5} as in the analogous argument completing the discussion of \S4, obtaining $$N(B)\ge N(B;\grN)+O(B^{s-6}\mathscr{L}^{-1})=\mathscr{C}} \def\calCbar{{\overline \calC} B^{s-6}+O(B^{s-6}\mathscr{L}^{-1}).$$
\section{Systems of type F} In common with the treatment of systems of type C in \S5, our argument for systems of type F, wherein $s=13$ and $(l,m,n)=(3,5,5)$, may be substantially abbreviated by adjusting the argument of \S8 through modification of the generating functions $F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})$, $G({\alpha}} \def\bfalp{{\boldsymbol \alpha})$ and $H({\beta}} \def\bfbet{{\boldsymbol \beta})$. We begin with a discussion of the pruning operation implicit in the estimation of $N(B;\grK)$.\par
\begin{lemma}\label{lemma9.1} For systems of type F, one has $N(B;\grK)\ll B^7\mathscr{L}^{-1}$. \end{lemma}
\begin{proof} Define the mean values
$$U_j=\iint_\grK |f(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})f(d_j{\beta}} \def\bfbet{{\boldsymbol \beta})|^{9/2}{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} ,$$ and put
$$\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=\prod_{i=1}^3|f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^{10}\prod_{j=1}^5|f(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})f(d_j{\beta}} \def\bfbet{{\boldsymbol \beta})|.$$ Then an application of H\"older's inequality reveals that \begin{equation}\label{9.1} N(B;\grK)\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\Bigr)^{1/10}\prod_{j=1}^5U_j^{1/5}. \end{equation} The argument of the proof of \cite[Lemma 10]{BW2007a} shows that $$\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in {\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\ll B^{40}Q^{-1/10}.$$ As a consequence of Lemma \ref{lemma7.2}, meanwhile, one has \begin{align*}
U_j&\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Bigl( \int_\grM |f(c_j{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{9/2}{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr)\Bigl( \int_\grM|f(d_j{\beta}} \def\bfbet{{\boldsymbol \beta})|^{9/2}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)\\ &\ll (B^{3/2})^2=B^3. \end{align*} Combining these estimates with (\ref{9.1}), we conclude that $$N(B;\grK)\ll (B^{40}Q^{-1/10})^{1/10}B^3\ll B^7Q^{-1/100}.$$ This completes the proof of the lemma. \end{proof}
Our next step is to relabel the coefficients of the system (\ref{1.1}) so that $\mtil=m-1$, $\widetilde n} \def\Ntil{\widetilde N=n-1$, $\widetilde l} \def\mtil{\widetilde m=l+2$, which is to say that $(\widetilde l} \def\mtil{\widetilde m,\mtil,\widetilde n} \def\Ntil{\widetilde N)=(5,4,4)$, and to put $$\ctil_j=c_j\quad \text{and}\quad \widetilde{d}} \def\etil{\widetilde e_j=d_j\quad (1\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec j\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec 4),$$ and $$(\widetilde a} \def\btil{\widetilde b} \def\ctil{\widetilde c_i,\btil_i)=(a_i,b_i)\quad (i=1,2,3),\quad (\widetilde a} \def\btil{\widetilde b} \def\ctil{\widetilde c_4,\btil_4)=(0,d_5)\quad (\widetilde a} \def\btil{\widetilde b} \def\ctil{\widetilde c_5,\btil_5)=(c_5,0).$$ As in the discussion of \S5, we then define the generating functions $\widetilde F} \def\Gtil{\widetilde G} \def\Htil{\widetilde H({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})$, $\Gtil({\alpha}} \def\bfalp{{\boldsymbol \alpha})$ and $\Htil({\beta}} \def\bfbet{{\boldsymbol \beta})$ as in the respective definitions of $F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})$, $G({\alpha}} \def\bfalp{{\boldsymbol \alpha})$ and $H({\beta}} \def\bfbet{{\boldsymbol \beta})$ in (\ref{2.2}) and (\ref{2.3}), save that in the present context the integers $l$, $m$, $n$, and the coefficients $a_i$, $b_i$, $c_j$ and $d_k$, are to be decorated by tildes. Further notation from \S\S2 and 3 is again understood to have the meaning naturally inferred in like manner when decorated by a tilde. An examination of the argument of \S8, leading from the discussion preceding Lemma \ref{lemma8.2} to the conclusion of the section, now reveals that no adjustment is necessary in order to accommodate the change of circumstances implicit in our present analysis. 
Here it is worth noting that, despite the fact that we now have $\widetilde l} \def\mtil{\widetilde m=5$ and $\widetilde a} \def\btil{\widetilde b} \def\ctil{\widetilde c_4=0$, the presence of four non-zero coefficients in the equation (\ref{3.8}) ensures that the analogue of the upper bounds (\ref{8.2}) and (\ref{8.2a}) remain valid. Thus one obtains $\Zettil_1\ll B^7L^{\varepsilon-3}$, and by means of a symmetric argument also $\Zettil_2\ll B^7L^{\varepsilon-3}$. The analogue of Lemma \ref{lemma8.3} delivers the bound $$\Ntil_0(B;\grM,\grM)-\Ntil(B;\grM\times \grM)\ll B^7L^{\varepsilon-3/2},$$ and analogues of Lemmata \ref{lemma8.4} and \ref{lemma8.5} yield $$\Ntil_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^7L^{\varepsilon-1/2}\quad \text{and}\quad \Ntil_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^7L^{\varepsilon-1/2}.$$ We therefore deduce that \begin{align*} \Ntil(B)&\ge \Ntil_0(B;\grM,\grM)+\Ntil_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})+\Ntil_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\\ &=\Ntil(B;\grM\times \grM)+O(B^7L^{\varepsilon-1/2})=N(B;\grM\times \grM)+O(B^7L^{\varepsilon-1/2}). \end{align*} Finally, we conclude from Lemmata \ref{lemma7.1} and \ref{lemma9.1} that $$N(B)=\Ntil(B)\ge N(B;\grN)+O(B^7\mathscr{L}^{-1})=\mathscr{C}} \def\calCbar{{\overline \calC} B^7+O(B^7\mathscr{L}^{-1}),$$ and this completes the proof of Theorem \ref{theorem1.2} for systems of type F.
\section{Systems of type G} Our argument when $(l,m,n)=(5,5,3)$ is motivated by the treatment of systems of type D in \S6.
\begin{lemma}\label{lemma10.1} For systems of type G, one has $N(B;\grK)\ll B^7\mathscr{L}^{-1}$. \end{lemma}
\begin{proof} Define the mean values
$$U_{ij}=\iint_\grK |f(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{9/2}|f(d_j{\beta}} \def\bfbet{{\boldsymbol \beta})^3f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})^2|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}$$ and put
$$\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})=\prod_{i=1}^5|f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})^6f(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|.$$ Then an application of H\"older's inequality reveals that \begin{equation}\label{10.1} N(B;\grK)\ll \Bigl( \sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in{\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\Bigr)^{1/10}\prod_{i=1}^5\prod_{j=1}^3U_{ij}^{1/15}. \end{equation} The argument of the proof of \cite[Lemma 10]{BW2007a} shows that $$\sup_{({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\in{\mathfrak n}}\def\grS{{\mathfrak S}}\def\grP{{\mathfrak P}}\Psi({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})\ll B^{35}Q^{-1/10}.$$ As a consequence of Lemma \ref{lemma7.2}, on the other hand, one has \begin{align*}
U_{ij}&\ll \Bigl( \int_\grM |f(c_i{\alpha}} \def\bfalp{{\boldsymbol \alpha})|^{9/2}{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha}\Bigr) \Bigl(\sup_{{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda}\in{\mathbb R}}\def\dbT{{\mathbb T}}\int_\grM |f(d_j{\beta}} \def\bfbet{{\boldsymbol \beta})^3f(b_i{\beta}} \def\bfbet{{\boldsymbol \beta}+{\lambda}} \def\Lam{{\Lambda}} \def\Lamtil{{\widetilde \Lambda}} \def\bflam{{\boldsymbol \lambda})^2|{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr) \\ &\ll (B^{3/2})(B^2)=B^{7/2}. \end{align*} Combining these estimates within (\ref{10.1}), we conclude that $$N(B;\grK)\ll (B^{35}Q^{-1/10})^{1/10}B^{7/2}\ll B^7Q^{-1/100}.$$ The conclusion of the lemma now follows. \end{proof}
We proceed now as in \S4, adopting the notation introduced in the discussion prior to Lemmata \ref{lemma3.2} and \ref{lemma3.3}. For systems of type G, the truncation parameter is fixed to be $T=B^{l-3}L$. Our immediate goal is to derive a bound for the quantity $\Zet_i$ introduced prior to Lemma \ref{lemma4.2}.
\begin{lemma}\label{lemma10.2} For systems of type G, one has $\Zet_i\ll B^{13/2}L^{\varepsilon-5/2}\quad (i=1,2)$. \end{lemma}
\begin{proof} We apply the argument of the proof of Lemma \ref{lemma8.2}, noting that since $l=5$, in this instance Lemma \ref{lemma7.4} delivers the estimates
$$\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |F({\alpha}} \def\bfalp{{\boldsymbol \alpha},0)|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^{2l-7/2}L^{\varepsilon-5/2}\quad \text{and}\quad \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |F(0,{\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{2l-7/2}L^{\varepsilon-5/2}.$$ Thus we obtain $$\sum_{(u,v)\in \grX_1}\rho(u,v)\ll B^{3-l}L^{-1}((B^{l-2})^2+B^{2l-7/2}L^{\varepsilon-5/2}),$$ and hence $$\Zet_1\ll B^{l-3}L\sum_{(u,v)\in \grX_1}\rho(u,v)\ll B^{2l-7/2}L^{\varepsilon-5/2}.$$ Also, though more directly, $$\Zet_2\ll (B^{l-2})^2+B^{2l-7/2}L^{\varepsilon-5/2}.$$ The conclusion of the lemma now follows for $i=1$ and $2$. \end{proof}
\begin{lemma}\label{lemma10.3} For systems of type G, one has $$N_0(B;\grM,\grM)-N(B;\grM\times \grM)\ll B^7L^{-1}.$$ \end{lemma}
\begin{proof} In the present situation one has $n=3$, and so Lemma \ref{lemma7.4} yields \begin{equation}\label{10.2}
\int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \ll B^{7/2}L^{\varepsilon-3/2}. \end{equation} Following the argument of the proof of Lemma \ref{lemma4.3}, one obtains
$${\Upsilon}} \def\Upshat{{\widehat \Ups}_i\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec \Zet_i^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/2}\ll B^5L^{\varepsilon-2}.$$ But for systems of type G one has $m=5$, and so it follows from Lemma \ref{lemma7.3} that $R_1(u;\grM)\ll B^2$ uniformly in $u$. We therefore conclude that \begin{align*} N(B;\grM\times \grM)-N_0(B;\grM,\grM)&\ll \sum_{(u,v)\in \grX_1\cup \grX_2}\rho(u,v)R_1(u;\grM)R_2(v;\grM)\\ &\ll B^2({\Upsilon}} \def\Upshat{{\widehat \Ups}_1+{\Upsilon}} \def\Upshat{{\widehat \Ups}_2)\ll B^7L^{\varepsilon-2}, \end{align*} and the proof of the lemma is complete. \end{proof}
\begin{lemma}\label{lemma10.4} For systems of type G, one has $N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^7L^{\varepsilon-1}$. \end{lemma}
\begin{proof} Adapting the argument of the proof of Lemma \ref{lemma4.4} to the present situation, one finds that
$$N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^{l-3}L\Bigl( \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|^2{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \Bigr)^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)^{1/2}.$$ The first integral on the right hand side may be estimated via Lemma \ref{lemma7.4}, and the second by means of (\ref{10.2}). Thus one obtains $$N_0(B;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N},[0,1))\ll B^2L(B^{13/2}L^{\varepsilon-5/2})^{1/2}(B^{7/2}L^{\varepsilon-3/2})^{1/2}\ll B^7L^{\varepsilon-1}.$$ This completes the proof of the lemma. \end{proof}
\begin{lemma}\label{lemma10.5} For systems of type G, one has $N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^7L^{-1}$. \end{lemma}
\begin{proof} First, adapting the argument of the proof of Lemma \ref{lemma4.5}, we infer from Lemma \ref{lemma7.3} that \begin{align*}N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})-N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})&=\sum_{(u,v)\in \grX_1\cup \grX_2}\rho(u,v)R_1(u;\grM)R_2(v;{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\\
&\ll B^2(\Zet_1+\Zet_2)^{1/2}\Bigl( \int_0^1|H({\beta}} \def\bfbet{{\boldsymbol \beta})|^2{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/2}. \end{align*} Consequently, from (\ref{10.2}) and Lemma \ref{lemma10.2}, one obtains \begin{align} N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})-N_0(B;\grM,{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})&\ll B^2(B^{13/2}L^{\varepsilon-5/2})^{1/2}(B^{7/2}L^{\varepsilon-3/2})^{1/2}\notag \\ &=B^7L^{\varepsilon-2}.\label{10.3} \end{align}
We next estimate $N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})$, observing that an application of H\"older's inequality together with (\ref{7.2}) and \cite[Theorem 2]{Vau1986} yields \begin{align*}
\int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} F({\alpha}} \def\bfalp{{\boldsymbol \alpha},{\beta}} \def\bfbet{{\boldsymbol \beta})H({\beta}} \def\bfbet{{\boldsymbol \beta}){\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} &\ll \prod_{k=1}^3\Bigl( \int_{\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N} |f(d_k{\beta}} \def\bfbet{{\boldsymbol \beta})|^8{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta} \Bigr)^{1/8}\prod_{i=1}^5\Bigl( \int_0^1|f(a_i{\alpha}} \def\bfalp{{\boldsymbol \alpha}+b_i{\beta}} \def\bfbet{{\boldsymbol \beta})|^8{\,{\rm d}}{\beta}} \def\bfbet{{\boldsymbol \beta}\Bigr)^{1/8}\\ &\ll (B^5L^{\varepsilon-3})^{3/8}(B^5)^{5/8}\ll B^5L^{\varepsilon-9/8}. \end{align*} Thus, one deduces from Lemma \ref{lemma7.3} that
$$N(B;\grM\times {\mathfrak m}}\def\grM{{\mathfrak M}}\def\grN{{\mathfrak N})\ll B^5L^{\varepsilon-9/8}\int_\grM |G({\alpha}} \def\bfalp{{\boldsymbol \alpha})|{\,{\rm d}}{\alpha}} \def\bfalp{{\boldsymbol \alpha} \ll B^7L^{-1}.$$ The conclusion of the lemma now follows by reference to (\ref{10.3}). \end{proof}
The proof of Theorem \ref{theorem1.2} for systems of type G follows by combining (\ref{7.1}) with Lemmata \ref{lemma10.1}, \ref{lemma10.3}, \ref{lemma10.4} and \ref{lemma10.5}, just as in the analogous argument completing the analysis of \S4, and so we arrive at the lower bound $$N(B)\ge N(B;\grN)+O(B^7\mathscr{L}^{-1})=\mathscr{C}} \def\calCbar{{\overline \calC} B^7+O(B^7\mathscr{L}^{-1}).$$
\section{Further applications} The key feature of the systems amenable to our methods is a block structure. Our methods make possible the analysis of Diophantine systems of the shape $$\left.\begin{aligned} &\phi(x_1,\ldots ,x_l)+\psi(y_1,\ldots,y_m)&&=0,\\ &\chi(x_1,\ldots ,x_l)&+{\omega}} \def\Ome{{\Omega}(z_1,\ldots ,z_n)&=0, \end{aligned}\, \right\}$$ for homogeneous polynomials $\phi$, $\psi$, $\chi$, ${\omega}} \def\Ome{{\Omega}$ of degree $d$, provided that $l$, $m$, $n$ are suitably large. The simplest situations to describe are those wherein one has non-trivial minor arc estimates in mean square for each of the polynomials $\phi$, $\psi$, $\chi$, ${\omega}} \def\Ome{{\Omega}$. Such is the case, for example, when these polynomials are suitably non-singular forms in a number of variables exceeding $(d-1)2^{d-1}$, as a consequence of the work of Birch \cite{Bir1961}, and also when these polynomials are diagonal forms of degree $d$ in $d^2$ variables (see \cite{Woo2011a, Woo2011b}). In the latter case, moreover, if one restricts the variables to be smooth then one can reduce the number of variables required to $\frac{1}{2}d(\log d+\log \log d+O(1))$ (see the methods of \cite{Woo1992, Woo1995a}).\par
It may be worthwhile to be more specific concerning the diagonal examples alluded to above. Consider then the Diophantine system \begin{equation}\label{11.1} \left.\begin{aligned} &a_1x_1^d+\ldots +a_lx_l^d+c_1y_1^d+\ldots +c_my_m^d&&=0,\\ &b_1x_1^d+\ldots +b_lx_l^d&+d_1z_1^d+\ldots +d_nz_n^d&=0, \end{aligned}\, \right\} \end{equation}
wherein $l$, $m$, $n$ are each at least $\frac{1}{2}d(\log d+\log \log d+O(1))$. Also, let $N(B)$ denote the number of integral solutions of (\ref{11.1}) with $|x_i|,|y_i|,|z_i|\leqslant} \def\ge{\geqslant} \def\pleq{\preccurlyeq} \def\ple{\prec B$. Provided that the system (\ref{11.1}) admits non-singular real and $p$-adic solutions for each prime number $p$, then one may prove via our methods that $N(B)\gg B^{s-2d}$, where $s=l+m+n$. Such systems, then, are accessible to our methods when $s\ge (\frac{3}{2}+o(1))d\log d$, previous approaches being applicable only for $s\ge (2+o(1))d\log d$. With $\mathscr{C}} \def\calCbar{{\overline \calC}$ defined to be the product of local densities associated with the system (\ref{11.1}), on the other hand, one may obtain the lower bound $N(B)\ge (\mathscr{C}} \def\calCbar{{\overline \calC}+o(1))B^{s-2d}$ whenever $s\ge 3d^2$. Hitherto, such a conclusion would be available only for $s\ge 4d^2$ or thereabouts.\par
We finish by noting that at the cost of additional complications our methods may be generalised so as to be applicable to systems of three or more equations. Thus, a system of $r$ equations partitioned appropriately into $r+1$ blocks may be successfully analysed by recourse to higher moment estimates along the lines contained in our previous work \cite{BW2010a}. The conditions that must be imposed on the number of variables comprising each block become progressively more complicated to analyse as $r$ increases. When the number of blocks exceeds $r+1$, on the other hand, although inspiration may be drawn from the investigations of this paper, it seems fair to comment that the situation remains highly experimental.
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\end{document} | arXiv | {
"id": "1208.2176.tex",
"language_detection_score": 0.44065672159194946,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{An absolutely stable $hp$-HDG method for the time-harmonic Maxwell equations with high wave number}
\author{Peipei Lu} \address{School of Mathematics Sciences, Soochow University, Suzhou, 215006, China} \email{pplu@suda.edu.cn}
\author{Huangxin Chen} \address{School of Mathematical Sciences and Fujian Provincial Key Laboratory on Mathematical Modeling and High Performance Scientific Computing, Xiamen University, Fujian, 361005, China} \email{chx@xmu.edu.cn}
\author{Weifeng Qiu} \address{Department of Mathematics, City University of Hong Kong, 83 Tat Chee Avenue, Kowloon, Hong Kong, China} \email{weifeqiu@cityu.edu.hk} \thanks{Corresponding author: Weifeng Qiu}
\thanks{All authors contributed equally in this paper. The work of the first author was supported by the NSF of China (Grant No.11401417), the Program of Natural Science Research of Jiangsu Higher Education Institutions of China (Grant No. 14KJB110021) and Jiangsu Provincial Key Laboratory for Numerical Simulation of Large Scale Complex Systems (No. 201404). The work of the second author was supported by the NSF of China (Grant No. 11201394) and the Fundamental Research Funds for the Central Universities (Grant No. 20720150005). The work of the third author was partially supported by a grant from the Research Grants Council of the Hong Kong Special Administrative Region, China (Project No. CityU 11302014). }
\begin{abstract} We present and analyze a hybridizable discontinuous Galerkin (HDG) method for the time-harmonic Maxwell equations. The divergence-free condition is enforced on the electric field, then a Lagrange multiplier is introduced, and the problem becomes the solution of a mixed curl-curl formulation of the Maxwell's problem. The method is shown to be an absolutely stable HDG method for the indefinite time-harmonic Maxwell equations with high wave number. By exploiting the duality argument, the dependence of convergence of the HDG method on the wave number $\kappa$, the mesh size $h$ and the polynomial order $p$ is obtained. Numerical results are given to verify the theoretical analysis. \end{abstract}
\subjclass[2000]{65N12, 65N15, 65N30}
\keywords{hybridizable discontinuous Galerkin method, time-harmonic Maxwell equations, Lagrange multiplier, high wave number}
\maketitle
\markboth{P. Lu, H. Chen and W. Qiu}{HDG method for Maxwell equations}
\section{Introduction}\label{introduction} The time-harmonic Maxwell boundary value problem reads as follows: \begin{subequations} \label{pde_original} \begin{align} \label{pde_original_1} \Vc\Vu\Vr\Vl\,\Vc\Vu\Vr\Vl\,{\boldsymbol{u}} - \kappa^2 {\boldsymbol{u}} &= \widetilde{{\boldsymbol{f}}} \qquad \rm{in}\ \Omega,\\ \label{BC-PDE} \Vc\Vu\Vr\Vl\, {\boldsymbol{u}} \times {\boldsymbol{n}} + \mathbf{i} \kappa {\boldsymbol{u}}^t &= \widetilde{{\boldsymbol{g}}} \qquad \rm{on}\ \partial\Omega, \end{align} \end{subequations} where $\Omega\subset \mathbb{R}^3$ is a bounded, uniformly star-shaped polyhedral domain, the wave number $\kappa$ is real and positive, $\mathbf{i}$ denotes the imaginary unit, ${\boldsymbol{n}}$ denotes the unit outward normal to $\partial \Omega$, and ${\boldsymbol{u}}^t = ( {\boldsymbol{n}} \times {\boldsymbol{u}} ) \times {\boldsymbol{n}}$ denotes the tangential component of the electric field ${\boldsymbol{u}}$. Equation (\ref{BC-PDE}) is the standard impedance boundary condition which requires $\widetilde{{\boldsymbol{g}}} \cdot {\boldsymbol{n}} = 0$, thus, $\widetilde{{\boldsymbol{g}}}^t = \widetilde{{\boldsymbol{g}}}$. The above Maxwell equations are of considerable importance in the engineering and scientific computation. In this paper we assume the current density is divergence-free (namely $\Vd\Vi\Vv \,\widetilde{{\boldsymbol{f}}} =0$), hence the electric field ${\boldsymbol{u}}$ is also divergence-free.
The Maxwell operator is strongly indefinite for high wave number $\kappa$, which brings difficulties both in theoretical analysis and in numerical simulation. Various numerical methods, including finite element methods (FEM) \cite{Nedelec80,Nedelec86,Hiptmair02,Hiptmair11,Brenner07,Zhong09}, discontinuous Galerkin (DG) methods \cite{Perugia02,Perugia03, Cockburn04,Houston04,Houston05,Nguyena11,R.Hiptmair13,FW2014} and the weak Galerkin finite element method \cite{MWYS15}, have been developed to solve the Maxwell problem. In particular, Feng and Wu \cite{FW2014} recently proposed and analyzed an interior penalty discontinuous Galerkin (IPDG) method for the problem (\ref{pde_original}) with high wave number, which is uniquely solvable without any mesh constraint. DG methods have several attractive features, which include the capabilities to handle complex geometries, to provide high-order accurate solutions, etc. However, the dimension of the DG approximation space is much larger than the dimension of the corresponding conforming space. Hybridizable discontinuous Galerkin (HDG) methods \cite{Cockburn09} were recently introduced to address this issue. The HDG methods retain the advantages of standard DG methods, while the resulting global system involves only the unknowns on the skeleton of the mesh.
Two HDG methods were presented in \cite{Nguyena11} for the numerical solution of the Maxwell problem. The first HDG method enforces the divergence-free condition on the electric field and introduces a Lagrange multiplier. It produces a linear system for the degrees of freedom (DOF) of the approximate traces of both the tangential component of the vector field and the Lagrange multiplier. The second HDG method does not enforce the divergence-free condition and results in a linear system only for the DOF of the approximate trace of the tangential component of the vector field. Compared to the IPDG method for the time-harmonic Maxwell equations in \cite{Houston05,FW2014}, the two HDG methods have less globally coupled unknowns. The well-posedness, conservativity and consistence of the two HDG methods, together with a numerical demonstration, have been shown in \cite{Nguyena11}. However, no convergence analysis is given in \cite{Nguyena11}. Recently, the $h$-convergence analysis of the second HDG method was considered in \cite{FLX2015}. In this paper we are interested in the $hp$-convergence analysis for the first HDG method mentioned in \cite{Nguyena11} which solves a mixed curl-curl formulation of the time-harmonic Maxwell equation \begin{subequations} \label{pde_mixed} \begin{align} \label{maxwell_p} \Vc\Vu\Vr\Vl\,\Vc\Vu\Vr\Vl\,{\boldsymbol{u}} - \kappa^2 {\boldsymbol{u}} + \nabla \widetilde{\sigma}&= \widetilde{{\boldsymbol{f}}} \qquad \rm{in}\ \Omega,\\ \Vd\Vi\Vv \, {\boldsymbol{u}} &= 0 \qquad \rm{in}\ \Omega,\\ \label{BC-PDE-1} \Vc\Vu\Vr\Vl\, {\boldsymbol{u}} \times {\boldsymbol{n}} + \mathbf{i} \kappa {\boldsymbol{u}}^t &= \widetilde{{\boldsymbol{g}}} \qquad \rm{on}\ \partial\Omega, \\ \label{BC_p} \widetilde{\sigma}&= 0 \qquad \rm{on}\ \partial\Omega, \end{align} \end{subequations} where $\widetilde{\sigma}$ is a scalar Lagrange multiplier used to enforce the divergence-free condition. 
Taking the divergence of the equation (\ref{maxwell_p}) yields $ \Delta \widetilde{\sigma} = 0$, which together with the boundary condition (\ref{BC_p}) implies that $\widetilde{\sigma} = 0$ throughout the domain. Hence, under the divergence-free condition of the current density, the equations (\ref{pde_original}) and (\ref{pde_mixed}) are equivalent.
We aim to develop an HDG method which is absolutely stable without any mesh constraint for the above mixed curl-curl formulation (\ref{pde_mixed}) and reveal the dependence of convergence for the HDG method on the wave number $\kappa$, the mesh size $h$ and the polynomial order $p$. We mention that only simple $L^2$-projections are used in our analysis which is different from the projection-based error analysis in \cite{Cockburn10}, and the $p$-dependence of the stability estimate and the convergence can be derived.
We also mention that the stabilization parameters in our HDG method are different from those in \cite{Nguyena11}. The focus of our analysis is to apply the duality argument to establish the rigorous stability estimate and error analysis for the HDG method proposed for the mixed curl-curl formulation (\ref{pde_mixed}). Intrinsically, the regularity estimate of the solution of the dual problem used in this paper can be obtained due to the introduction of a Lagrange multiplier in the mixed curl-curl formulation. This is also the reason why the Helmholtz decomposition technique can be avoided in the analysis and the $p$-estimate can be derived. We first apply the duality argument to obtain the estimates for $\|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}$ and the error $\|{\boldsymbol{u}}-{\boldsymbol{u}}_h\|_{{\mathcal T}_h}$; then the estimates for the other variables of the HDG method can be further obtained. To the best of our knowledge, we give the first $p$-estimate of numerical methods using piecewise polynomial solution spaces for solving the time-harmonic Maxwell equations with high wave number.
The remainder of this paper is organized as follows. We give some notations, introduce the HDG method for the mixed curl-curl formulation of the time-harmonic Maxwell equations (\ref{pde_mixed}) and present the main results of stability estimates and error estimates in the next section. Sections \ref{stability_estimate} and \ref{error_analysis} are devoted to providing detailed proofs of the stability estimates and error estimates, respectively. In section \ref{ideal_case}, we discuss the stability estimates and error estimates for the HDG method under some ideal assumptions on the problem (\ref{pde_original}) and the associated dual problem. In the final section, we give some numerical results to confirm our theoretical analysis.
\section{Notation, HDG method and main results}\label{HDG_sec2}
Let ${\boldsymbol{f}}:=-{\mathbf{i}} \widetilde{{\boldsymbol{f}}}, \sigma:=-{\mathbf{i}} \widetilde{\sigma}$ and ${\boldsymbol{g}} := -{\mathbf{i}} \widetilde{{\boldsymbol{g}}}$. The HDG scheme for the equation (\ref{pde_mixed}) is based on a first-order system of this equation, which can be rewritten in a mixed formulation as follows: \begin{subequations} \label{pde_mixed_first_order} \begin{align} {\mathbf{i}} {\boldsymbol{w}}-\Vc\Vu\Vr\Vl\, {\boldsymbol{u}} &= 0 \qquad \rm{in}\ \Omega,\\ \Vc\Vu\Vr\Vl\,{\boldsymbol{w}} + {\mathbf{i}} \kappa^2 {\boldsymbol{u}} + \nabla \sigma&= {\boldsymbol{f}} \qquad \rm{in}\ \Omega,\\ \Vd\Vi\Vv \, {\boldsymbol{u}} &= 0 \qquad \rm{in}\ \Omega,\\ {\boldsymbol{w}} \times {\boldsymbol{n}} + \kappa {\boldsymbol{u}}^t &= {\boldsymbol{g}} \qquad \rm{on}\ \partial\Omega, \\ \sigma&= 0 \qquad \rm{on}\ \partial\Omega. \end{align} \end{subequations}
Throughout the paper we use the standard notations and definitions for Sobolev spaces (see, e.g., Adams \cite{Adams}). We denote by ${\mathcal T}_h$ a conforming triangulation of $\Omega$ made of shape-regular simplicial elements. We denote by $h_T$ the diameter of $T \in {\mathcal T}_h$ and $h = \max_{T \in {\mathcal T}_h} h_T$, the collection of faces is denoted by ${\mathcal E}_h$, with the collection of interior faces by ${\mathcal E}^0_h$ and the collection of boundary faces
by ${\mathcal E}^\partial_h$, the collection of element boundaries by $\partial {\mathcal T}_h :=\{ \partial T | T \in {\mathcal T}_h \}$. We let $C$ denote a generic positive constant independent of the mesh size, polynomial order and wave number, which may take different values at different occurrences. The corresponding finite element spaces for the HDG method for the first-order system (\ref{pde_mixed_first_order}) are defined to be \begin{align*}
{\boldsymbol{V}}_h &: = \{ {\boldsymbol{r}} \in {\boldsymbol{L}}^2(\Omega) \, : \, {\boldsymbol{r}}|_T \in {\boldsymbol{P}}_p(T), \forall \, T \in {\mathcal T}_h \},\\
{\boldsymbol{U}}_h &: = \{ {\boldsymbol{v}} \in {\boldsymbol{L}}^2(\Omega) \, : \, {\boldsymbol{v}}|_T \in {\boldsymbol{P}}_p(T), \forall \, T \in {\mathcal T}_h \},\\
{\boldsymbol{M}}^t_h &: = \{ \boldsymbol{\eta} \in {\boldsymbol{L}}^2({\mathcal E}_h) \, : \, \boldsymbol{\eta}|_F \in {\boldsymbol{P}}_p(F),
(\boldsymbol{\eta} \cdot {\boldsymbol{n}})|_F=0, \forall \, F \in {\mathcal E}_h \},\\
Q_h &: = \{ q \in L^2(\Omega) \, : \, q|_T \in P_p(T) , \forall \, T \in {\mathcal T}_h \},\\
M_h &:= \{\xi \in L^2({\mathcal E}_h) \, : \, \xi|_F \in P_p(F), \forall \, F \in {\mathcal E}_h \}, \end{align*} where the polynomial order $p\geq 1$, ${\boldsymbol{L}}^2(\Omega) = [L^2(\Omega)]^3$, ${\boldsymbol{L}}^2({\mathcal E}_h) = [L^2({\mathcal E}_h)]^3$, $ {\boldsymbol{P}}_p(T)=[ P_p(T)]^3$ and $ {\boldsymbol{P}}_p(F) = [P_p(F)]^3$. Here, $P_p(D)$ denotes the space of complex-valued polynomials of degree at most $p$ on $D$. Let $P_M$ denote the standard $L^2$-projection operator from $L^2({\mathcal E}_h)$ onto $P_p({\mathcal E}_h)$. In addition, we set $M_h(g):=\{ \xi \in M_h \, : \, \xi = P_M g \ \rm{on}\ \partial\Omega\}$. Similarly, ${\boldsymbol{P}}_{{\boldsymbol{M}}}$ denotes the standard $L^2$-projection operator from ${\boldsymbol{L}}^2({\mathcal E}_h)$ onto ${\boldsymbol{P}}_p({\mathcal E}_h)$. We use $\boldsymbol{\Pi_V}, \boldsymbol{\Pi_U},\Pi_Q$ to denote the standard $L^2$-projection onto ${\boldsymbol{V}}_h,{\boldsymbol{U}}_h$ and $Q_h$ respectively. In the analysis, we shall use the following approximation results of $L^2$-projections: \begin{subequations} \begin{align} \label{es_pj_1}
\| {\boldsymbol{w}}-\boldsymbol{\Pi_V}{\boldsymbol{w}} \|_{{\mathcal T}_h}& \leq C{h}^t/{p}^t \| {\boldsymbol{w}} \|_{t,\Omega} \qquad 0\leq t\leq p+1,\\
\label{es_pj_2}
\| {\boldsymbol{u}}-\boldsymbol{\Pi_U}{\boldsymbol{u}} \|_{{\mathcal T}_h} &\leq C{h}^s/{p}^s \| {\boldsymbol{u}} \|_{s,\Omega} \qquad 0\leq s\leq p+1,\\
\label{es_pj_3}
\| \sigma-{\Pi_Q}\sigma \|_{{\mathcal T}_h} &\leq C{h}^\beta/{p}^\beta \| \sigma \|_{\beta,\Omega} \qquad 0\leq \beta\leq p+1,\\
\label{es_pj_4}
\| {\boldsymbol{w}}-\boldsymbol{\Pi_V}{\boldsymbol{w}} \|_{0,\partial T} &\leq C{h}^{t-\frac{1}{2}}/{p}^{t-\frac{1}{2}} \| {\boldsymbol{w}} \|_{t,T} \qquad \forall T \in {\mathcal T}_h,\ 0\leq t\leq p+1,\\
\label{es_pj_5}
\| {\boldsymbol{u}}-\boldsymbol{\Pi_U}{\boldsymbol{u}} \|_{0,\partial T} &\leq C{h}^{s-\frac{1}{2}}/{p}^{s-\frac{1}{2}} \| {\boldsymbol{u}} \|_{s,T} \qquad \forall T \in {\mathcal T}_h,\ 0\leq s\leq p+1,\\
\label{es_pj_6}
\| \sigma-{\Pi_Q}\sigma \|_{0,\partial T} &\leq C{h}^{\beta-\frac{1}{2}}/{p}^{\beta-\frac{1}{2}} \| \sigma \|_{\beta,T} \qquad \forall T \in {\mathcal T}_h,\ 0\leq \beta\leq p+1,\\
\label{es_pj_7}
\| {\boldsymbol{w}}-\boldsymbol{P_M}{\boldsymbol{w}} \|_{\partial {\mathcal T}_h} &\leq C{h}^{t-\frac{1}{2} }/{p}^{t-\frac{1}{2} } \| {\boldsymbol{w}} \|_{t,\Omega} \qquad 0\leq t\leq p+1,\\
\label{es_pj_8}
\| {\boldsymbol{u}}-\boldsymbol{P_M}{\boldsymbol{u}} \|_{\partial {\mathcal T}_h} &\leq C{h}^{s-\frac{1}{2} }/{p}^{s-\frac{1}{2} } \| {\boldsymbol{u}} \|_{s,\Omega} \qquad 0\leq s\leq p+1,\\
\label{es_pj_9}
\| \sigma-{P_M}\sigma \|_{\partial {\mathcal T}_h} &\leq C{h}^{\beta-\frac{1}{2}}/{p}^{\beta-\frac{1}{2}} \| \sigma \|_{\beta,\Omega} \qquad 0\leq \beta\leq p+1. \end{align} \end{subequations}
Here $\|\cdot\|_{{\mathcal T}_h} = ( \sum_{T\in {\mathcal T}_h} \|\cdot\|^2_{0,T} )^{\frac{1}{2}}$ and $\|\cdot\|_{\partial {\mathcal T}_h} = ( \sum_{T\in {\mathcal T}_h} \|\cdot\|^2_{0,\partial T} )^{\frac{1}{2}}$. The above results hold due to the $hp$ approximation theory of polynomials and trace inequality when ${\mathcal T}_h$ consists of shape-regular simplices (cf. \cite{Schwab98,FW2011,Chernov2012,Egger13,Melenk14,Cangiani14}). The above $h$-dependence approximation results hold when ${\mathcal T}_h$ consists of shape-regular polyhedral elements. Thus when we only consider $\kappa$- and $h$-dependence in our analysis, ${\mathcal T}_h$ can be a conforming mesh consisting of shape-regular polyhedral elements. This is due to the fact that only the approximation results (\ref{es_pj_1})-(\ref{es_pj_3}) have been deduced recently in the literature (cf. \cite{Cangiani14}) when the mesh consists of general polyhedral elements. The $p$-dependence of convergence for the trace estimate of the polynomial $L^2$-projection (cf. (\ref{es_pj_4})-(\ref{es_pj_6})) was first studied in \cite{Chernov2012} on simplicial element, and as far as we know, no extension of the estimates (\ref{es_pj_4})-(\ref{es_pj_6}) to the $L^2$-projection defined on the general polyhedral element has been obtained.
We define the bilinear forms \begin{align*} (\boldsymbol{\eta},\boldsymbol{\zeta})_{\mathcal T_h}:=\sum_{T\in \mathcal T_h}(\boldsymbol{\eta},\boldsymbol{\zeta})_{T}, \quad \langle \boldsymbol{\eta},\boldsymbol{\zeta} \rangle_{ \partial \mathcal T_h} := \sum_{T\in \mathcal T_h}\langle \boldsymbol{\eta},\boldsymbol{\zeta} \rangle_{\partial T}\\ (\eta,\zeta)_{\mathcal T_h}:=\sum_{T\in \mathcal T_h}(\eta,\zeta)_T, \quad \ \langle \eta,\zeta \rangle_{\partial \mathcal T_h}:=\sum_{T\in \mathcal T_h}\langle \eta,\zeta\rangle_{\partial T}, \end{align*} where $(\boldsymbol{\eta},\boldsymbol{\zeta})_{D}$ (respectively, $(\eta,\zeta)_{D}$) denotes the integral of $\boldsymbol{\eta}\cdot \overline{\boldsymbol{\zeta}}$ (respectively, $\eta\overline{\zeta}$) over $D \subset \mathbb{R}^3$ and $\langle\boldsymbol{\eta},\boldsymbol{\zeta}\rangle_{D}$ (respectively, $\langle\eta,\zeta\rangle_{D}$) denotes the integral of $\boldsymbol{\eta}\cdot\overline{\boldsymbol{\zeta}}$ (respectively, $\eta\overline{\zeta}$) over $D \subset \mathbb{R}^2$.
The HDG method for the first-order system (\ref{pde_mixed_first_order}) yields a solution $({\boldsymbol{w}}_h,{\boldsymbol{u}}_h,\widehat{{\boldsymbol{u}}}^t_h,\sigma_h, \widehat{\sigma}_h) \in {\boldsymbol{V}}_h \times {\boldsymbol{U}}_h \times {\boldsymbol{M}}^t_h\times Q_h \times M_h(0)$ such that \begin{subequations} \label{discrete_mixed_form} \begin{align} \label{discrete_mixed_form_a} &({\mathbf{i}} {\boldsymbol{w}}_h,{\boldsymbol{r}}_h)_{{\mathcal T}_h} - ({\boldsymbol{u}}_h,\Vc\Vu\Vr\Vl\, {\boldsymbol{r}}_h)_{{\mathcal T}_h} + \langle\widehat{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}}, {\boldsymbol{r}}_h \rangle_{\partial {\mathcal T}_h} =0 \\ \label{discrete_mixed_form_b} &({\boldsymbol{w}}_h,\Vc\Vu\Vr\Vl\, {\boldsymbol{v}}_h)_{{\mathcal T}_h} - \langle \widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}},{\boldsymbol{v}}_h \rangle_{\partial {\mathcal T}_h} + ( {\mathbf{i}} \kappa^2 {\boldsymbol{u}}_h,{\boldsymbol{v}}_h )_{{\mathcal T}_h}\nonumber\\ &\qquad\qquad\qquad \ \ - ( \sigma_h,\Vd\Vi\Vv \, {\boldsymbol{v}}_h )_{{\mathcal T}_h} + \langle \widehat{\sigma}_h, {\boldsymbol{v}}_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} = ({\boldsymbol{f}}, {\boldsymbol{v}}_h)_{{\mathcal T}_h},\\ \label{discrete_mixed_form_c} &-({\boldsymbol{u}}_h,\nabla q_h)_{{\mathcal T}_h} + \langle \widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}}, q_h \rangle_{\partial {\mathcal T}_h} = 0,\\ \label{discrete_mixed_form_d} &\langle \widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}}, \boldsymbol{\eta}_h \rangle_{\partial {\mathcal T}_h \setminus \partial \Omega}=0,\\ \label{discrete_mixed_form_e} & \langle \widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}}, \boldsymbol{\eta}_h \rangle_{\partial \Omega} + \langle \kappa \widehat{{\boldsymbol{u}}}^t_h, \boldsymbol{\eta}_h \rangle_{\partial \Omega} = \langle {\boldsymbol{g}}, \boldsymbol{\eta}_h \rangle_{\partial \Omega},\\ \label{discrete_mixed_form_f} & \langle \widehat{{\boldsymbol{u}}}^n_h \cdot 
{\boldsymbol{n}}, {\xi}_h\rangle_{\partial {\mathcal T}_h} = 0, \end{align} \end{subequations} for all $({\boldsymbol{r}}_h, {\boldsymbol{v}}_h, \boldsymbol{\eta}_h, q_h,\xi_h) \in {\boldsymbol{V}}_h \times {\boldsymbol{U}}_h \times {\boldsymbol{M}}^t_h \times Q_h \times M_h(0)$, where \begin{align} \label{hat_definition} \widehat{{\boldsymbol{w}}}_h = {\boldsymbol{w}}_h + \tau_t({\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h)\times {\boldsymbol{n}}, \quad \widehat{{\boldsymbol{u}}}^n_h = {{\boldsymbol{u}}}^n_h + \tau_n(\sigma_h-\widehat{\sigma}_h) {\boldsymbol{n}}. \end{align} Here, for any vector ${\boldsymbol{r}} \in \mathbb{R}^3$, ${\boldsymbol{r}}^n = ({\boldsymbol{r}} \cdot {\boldsymbol{n}}) {\boldsymbol{n}}$ denotes the normal component of the vector ${\boldsymbol{r}}$. The parameters $\tau_t$ and $\tau_n$ are the so-called {\it local stabilization parameters} which have an important effect on both the stability of the solution and the accuracy of the HDG scheme. We choose $\tau_t = p/h$ and $\tau_n = (1+\kappa) h/p$ in this paper.
\begin{remark} The mixed curl-curl formulation (\ref{pde_mixed}) can also be applied to the Maxwell equations (\ref{pde_original}) with $\Vd\Vi\Vv \, {\boldsymbol{f}} \neq 0$. In this case $\Vd\Vi\Vv \, {\boldsymbol{u}} = {\theta} \neq 0$ with ${\theta}$ a given variable. Indeed, taking the divergence of the equation (\ref{pde_original_1}) implies that ${\theta}$ satisfies that $-\kappa^2 {\theta} = \Vd\Vi\Vv \, \widetilde{{\boldsymbol{f}}}$. Then taking the divergence of the equation (\ref{maxwell_p}) again yields $ \Delta \widetilde{\sigma} =\Vd\Vi\Vv \, \widetilde{{\boldsymbol{f}}} + \kappa^2{\theta} = 0$, which together with the boundary condition (\ref{BC_p}) also implies that $\widetilde{\sigma} = 0$. Hence, the HDG scheme in this paper can also be used for the Maxwell equations (\ref{pde_original}) with $\Vd\Vi\Vv \, \widetilde{{\boldsymbol{f}}} \neq 0$. However, we mention that if the HDG method is used with non divergence-free current density, the regularity estimates in \cite{Hiptmair11} can not be applied. Thus, the theoretical analysis throughout this paper holds only under the assumption of divergence-free current density in (\ref{pde_original_1}). \end{remark}
When ${\boldsymbol{f}}$ is divergence-free and ${\boldsymbol{g}}\in {\boldsymbol{H}}_T^{\frac{1}{2}}(\partial\Omega)$, the solution of the first-order system (\ref{pde_mixed_first_order}) satisfies that ${\boldsymbol{u}} \in {\boldsymbol{H}}^{\frac{1}{2}+\alpha}(\Omega)$ and ${\boldsymbol{w}} \in {\boldsymbol{H}}^{\frac{1}{2}+\alpha}(\Omega)$, and there holds (cf. \cite{Hiptmair11}) \begin{align}
\kappa \| {\boldsymbol{u}} \|_{\frac{1}{2}+\alpha,\Omega} + \|{\boldsymbol{w}}\|_{\frac{1}{2}+\alpha,\Omega} \leq C ( 1 + \kappa ) {\boldsymbol{M}}({\boldsymbol{f}},{\boldsymbol{g}}) +C \|{\boldsymbol{g}}\|_{\frac{1}{2},\partial \Omega}. \label{sta_weak_new} \end{align} To state our main results, we need a regularity assumption of the dual problem. Let $\boldsymbol{\Psi}$ and $\varphi$ be the solution of the following dual problem: \begin{subequations} \label{dual_problem} \begin{align} \label{dual_p_1} \Vc\Vu\Vr\Vl\, \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi} - \kappa^2 \boldsymbol{\Psi} + {\mathbf{i}} \nabla \varphi &= {\mathbf{i}} {\boldsymbol{J}} \quad \ \rm{in}\ \Omega,\\ \label{dual_p_2} \Vd\Vi\Vv \, \boldsymbol{\Psi} &= 0\qquad \rm{in}\ \Omega,\\ \label{bc_dual_3} \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi} \times {\boldsymbol{n}} - {\mathbf{i}} \kappa \boldsymbol{\Psi}^t &= 0 \qquad \rm{on}\ \partial\Omega,\\ \varphi &= 0 \qquad \rm{on}\ \partial\Omega, \end{align} \end{subequations} where $\forall {\boldsymbol{J}} \in {\boldsymbol{U}}_h \subset {\boldsymbol{L}}^2(\Omega)$. Due to the fact that $\Omega$ is a bounded, uniformly star-shaped polyhedral domain, the solution $(\boldsymbol{\Psi}, \varphi)$ has the following regularity estimate (cf. \cite{Hiptmair11,FW2014}): \begin{align} \label{est_dual_1}
\kappa \|\boldsymbol{\Psi}\|_{\frac{1}{2}+\alpha,\Omega} + \|\Vc\Vu\Vr\Vl\, \boldsymbol{\Psi}\|_{\frac{1}{2}+\alpha,\Omega}
+ \kappa(1+\kappa) \| \boldsymbol{\Psi}\|_{0,\Omega} +(1+ \kappa) \|\nabla \varphi\|_{0,\Omega}
\leq C (1+\kappa) \| {\boldsymbol{J}}\|_{0,\Omega}, \end{align} where $0<\alpha\leq \widetilde{\alpha}$ and $0<\widetilde{\alpha}<\frac{1}{2}$ is a parameter only depending on $\Omega$. When $\Omega$ is convex, the above estimate holds true for all $0<\alpha<\frac{1}{2}$. Moreover, when $\Omega$ is also a $C^2$ star-shaped domain, (\ref{est_dual_1}) holds true for $\alpha=\frac{1}{2}$. In the following, we show that (\ref{est_dual_1}) holds true under the assumption of the domain $\Omega$. It is easy to see that $\boldsymbol{\Psi}$ satisfies \begin{align} \label{dual_proof_p_1} \Vc\Vu\Vr\Vl\, \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi} - \kappa^2 \boldsymbol{\Psi} &= {\mathbf{i}} ({\boldsymbol{J}} - \nabla \varphi) \quad \ \rm{in}\ \Omega,\\ \label{dual_proof_p_2} \Vd\Vi\Vv \, \boldsymbol{\Psi} &= 0\qquad \rm{in}\ \Omega,\\ \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi} \times {\boldsymbol{n}} - {\mathbf{i}} \kappa \boldsymbol{\Psi}^t &= 0 \qquad \rm{on}\ \partial\Omega. \end{align} By (\ref{dual_proof_p_1}), we have \begin{align*} (\Vc\Vu\Vr\Vl\, \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi} - \kappa^2 \boldsymbol{\Psi}, \nabla q)_{\Omega} = ({\mathbf{i}} ({\boldsymbol{J}} - \nabla \varphi), \nabla q)_{\Omega}\qquad \forall \,q\in C_{0}^{\infty}(\Omega). \end{align*} By doing integration by parts and (\ref{dual_proof_p_2}), we have \begin{align*} ({\boldsymbol{J}}-\nabla \varphi , \nabla q)_{\Omega} = 0\qquad \forall \,q\in C_{0}^{\infty}(\Omega). \end{align*} By density argument, we have \begin{align*} ({\boldsymbol{J}} - \nabla \varphi , \nabla q)_{\Omega} = 0 \qquad \forall \, q \in H^1_0(\Omega). \end{align*}
We easily obtain $\Vd\Vi\Vv \, ({\boldsymbol{J}}-\nabla \varphi) =0$ and $\|\nabla \varphi\|_{0,\Omega} \leq \|{\boldsymbol{J}}\|_{0,\Omega}$. So, we can conclude that the estimate (\ref{est_dual_1}) holds true when $\Omega$ is a bounded, uniformly star-shaped polyhedron (cf. \cite{Hiptmair11}).
We are now ready to state the main results: the stability estimates for the discrete solutions of the HDG method (\ref{discrete_mixed_form}) and the associated error estimates.
\begin{theorem}\label{stability_thm} Let $({\boldsymbol{w}},{\boldsymbol{u}},\sigma)$ and $({\boldsymbol{w}}_h,{\boldsymbol{u}}_h,\widehat{{\boldsymbol{u}}}^t_h,\sigma_h, \widehat{\sigma}_h) $ solve the equations (\ref{pde_mixed_first_order}) and (\ref{discrete_mixed_form}). We assume that (\ref{est_dual_1}) holds with $0<\alpha \leq \widetilde{\alpha}< \frac{1}{2}$ and $({\boldsymbol{w}},{\boldsymbol{u}}) \in {\boldsymbol{H}}^{\frac{1}{2}+\alpha}(\Omega)\times {\boldsymbol{H}}^{\frac{1}{2}+\alpha}(\Omega)$. Then the HDG method (\ref{discrete_mixed_form}) is absolutely stable. When $\kappa h / p \geq C_0$, we have \begin{align} \label{stability_u_h1}
\| {\boldsymbol{u}}_h \|_{{\mathcal T}_h} &\leq C\Big( C^2_{\rm stab}\|
\frac{{\boldsymbol{f}}}{\kappa} \|_{0,\Omega} + C_{\rm stab}
\|\frac{{\boldsymbol{g}}}{\kappa}\|_{0,\partial \Omega} \Big) ,\\ \label{stability_w_h1}
\| {\boldsymbol{w}}_h \|_{{\mathcal T}_h} &\leq C\Big( (\frac{1}{\kappa}+C^2_{\rm stab}) \| {\boldsymbol{f}} \|_{0,\Omega} +(\frac{1}{\kappa^{\frac{1}{2}}}+C_{\rm stab}) \|{\boldsymbol{g}}\|_{0,\partial \Omega} \Big), \\ \label{stability_ut_h1}
\| \widehat{ {\boldsymbol{u}}}^t_h \|_{\partial {\mathcal T}_h}& \leq C\big( (\frac{\kappa h}{p})^{\frac{1}{2}}
+ ph^{-\frac{1}{2}} \big)\Big( C^2_{\rm stab}\|
\frac{{\boldsymbol{f}}}{\kappa} \|_{0,\Omega} +C_{\rm stab}
\|\frac{{\boldsymbol{g}}}{\kappa}\|_{0,\partial \Omega} \Big), \end{align} where $ C_{\rm stab} := 1 + { \frac{\kappa (1+\kappa) h^{2} }{p^{2} }} +{ \frac{\kappa (1+\kappa)^2 h^{2\alpha+1} }{p^{2\alpha+1} }}+ \frac{(1+\kappa)^2 h^{2\alpha}}{p^{2\alpha}}. $ \end{theorem}
\begin{theorem}\label{error_thm} Let $({\boldsymbol{w}},{\boldsymbol{u}},\sigma)$ and $({\boldsymbol{w}}_h,{\boldsymbol{u}}_h,\widehat{{\boldsymbol{u}}}^t_h,\sigma_h, \widehat{\sigma}_h) $ solve the equations (\ref{pde_mixed_first_order}) and (\ref{discrete_mixed_form}). We assume that (\ref{est_dual_1}) holds with $0<\alpha \leq \widetilde{\alpha}< \frac{1}{2}$ and $({\boldsymbol{w}},{\boldsymbol{u}}) \in {\boldsymbol{H}}^{\frac{1}{2}+\alpha}(\Omega)\times {\boldsymbol{H}}^{\frac{1}{2}+\alpha}(\Omega)$. When $\kappa h / p \geq C_0$, we have \begin{align} \label{error_ut_h1}
\|{\boldsymbol{u}}-{\boldsymbol{u}}_h\|_{{\mathcal T}_h} &\leq C \big( R_{{\boldsymbol{w}}} \| {\boldsymbol{w}} \|_{\frac{1}{2}+\alpha,\Omega}+ R_{{\boldsymbol{u}}} \| {\boldsymbol{u}} \|_{\frac{1}{2}+\alpha,\Omega}\big),\\ \label{error_wt_h1}
\|{\boldsymbol{w}}-{\boldsymbol{w}}_h\|_{{\mathcal T}_h}& \leq C\big( (\frac{h}{p})^{\frac{1}{2}+\alpha} + \kappa R_{{\boldsymbol{w}}} \big) \| {\boldsymbol{w}} \|_{\frac{1}{2}+\alpha,\Omega} \nonumber \\& \quad +
C\big( \kappa R_{{\boldsymbol{u}}} + \kappa( 1+ (1+\kappa)^{-\frac{1}{2}})(\frac{ h}{p})^{\frac{1}{2}+\alpha} \big) \| {\boldsymbol{u}} \|_{\frac{1}{2}+\alpha,\Omega} , \end{align} where $
R_{{\boldsymbol{w}}} := \frac{(1+\kappa) h^{2\alpha+1}}{p^{2\alpha+1}} + \frac{(1+\kappa)^{\frac{1}{2}} h^{\alpha+\frac{3}{2}}}{p^{\alpha+\frac{3}{2}}} $ and $ R_{{\boldsymbol{u}}} := (1+(1+\kappa)^{\frac{1}{2}}) (\frac{ h}{p})^{\frac{1}{2}+\alpha} + (1+\kappa + (1+\kappa)^{\frac{1}{2}}) (\frac{ h}{p})^{2\alpha} . $ \end{theorem}
\begin{remark} For the solutions of the first-order system (\ref{pde_mixed_first_order}) which admit the regularity as in (\ref{sta_weak_new}), when $\kappa h/p\leq C_0$, one may tune the parameters $\tau_t$ and $\tau_n$ (cf. Remark \ref{remark_sta_1}) and also get the stability estimates and error estimates for the discrete solutions of the HDG method (\ref{discrete_mixed_form}). When we consider only $\kappa$- and $h$-dependence, the above results hold when ${\mathcal T}_h$ consists of general polyhedral elements. \end{remark}
\section{Stability estimate}\label{stability_estimate} In this section we shall show that the HDG method (\ref{discrete_mixed_form}) is absolutely stable. We first present a lemma which shall be used to derive the stability estimate of ${\boldsymbol{u}}_h$. \begin{lemma} \label{stability_lemma} Let $({\boldsymbol{w}}_h,{\boldsymbol{u}}_h,\widehat{{\boldsymbol{u}}}^t_h,\sigma_h, \widehat{\sigma}_h) $ be the solution of the problem (\ref{discrete_mixed_form}). It holds that \begin{align} \label{estimate_mixed_1}
&\|\tau_t^{\frac{1}{2}}( {\boldsymbol{u}}^t_h - \widehat{{\boldsymbol{u}}}^t_h ) \|^2_{\partial {\mathcal T}_h} + \|\tau_n^{\frac{1}{2}}( \sigma_h - \widehat{\sigma}_h ) \|^2_{\partial {\mathcal T}_h} + \frac{\kappa}{2} \|\widehat{{\boldsymbol{u}}}^t_h\|^2_{0,\partial \Omega} \leq \|{\boldsymbol{f}}\|_{0,\Omega} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h} + \frac{1}{2\kappa} \|{\boldsymbol{g}}\|^2_{0,\partial \Omega},\\ \label{estimate_mixed_2}
&\|{\boldsymbol{w}}_h\|^2_{{\mathcal T}_h} \leq \kappa^2 \|{\boldsymbol{u}}_h\|^2_{{\mathcal T}_h} + 2\|{\boldsymbol{f}}\|_{0,\Omega} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h} + \frac{1}{\kappa} \|{\boldsymbol{g}}\|^2_{0,\partial \Omega}. \end{align} \end{lemma} \begin{proof} We first choose ${\boldsymbol{r}}_h = {\boldsymbol{w}}_h, {\boldsymbol{v}}_h = {\boldsymbol{u}}_h, \boldsymbol{\eta}_h = \widehat{{\boldsymbol{u}}}^t_h, q_h=\sigma_h,\xi_h = \widehat{\sigma}_h$ in (\ref{discrete_mixed_form_a})-(\ref{discrete_mixed_form_f}) to get the following equalities:\begin{subequations} \begin{align} &({\mathbf{i}} {\boldsymbol{w}}_h, {\boldsymbol{w}}_h )_{{\mathcal T}_h} - ({\boldsymbol{u}}_h,\Vc\Vu\Vr\Vl\, {\boldsymbol{w}}_h)_{{\mathcal T}_h} + \langle \widehat{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}},{\boldsymbol{w}}_h\rangle_{\partial {\mathcal T}_h} = 0, \label{pre_sta_pr_1}\\ & (\Vc\Vu\Vr\Vl\, {\boldsymbol{w}}_h, {\boldsymbol{u}}_h)_{{\mathcal T}_h} + \langle {\boldsymbol{w}}^t_h \times {\boldsymbol{n}}, {\boldsymbol{u}}_h\rangle_{\partial {\mathcal T}_h} - \langle \widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}}, {\boldsymbol{u}}_h\rangle_{\partial {\mathcal T}_h} \nonumber\\& \qquad \qquad \qquad \quad + ({\mathbf{i}} \kappa^2 {\boldsymbol{u}}_h,{\boldsymbol{u}}_h)_{{\mathcal T}_h} - (\sigma_h, \Vd\Vi\Vv \, {\boldsymbol{u}}_h)_{{\mathcal T}_h} + \langle \widehat{\sigma}_h,{\boldsymbol{u}}^n_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} = ({\boldsymbol{f}}, {\boldsymbol{u}}_h)_{{\mathcal T}_h},\label{pre_sta_pr_2}\\ & (\Vd\Vi\Vv \, {\boldsymbol{u}}_h , \sigma_h)_{{\mathcal T}_h} - \langle {\boldsymbol{u}}^n_h \cdot {\boldsymbol{n}} ,\sigma_h\rangle_{\partial {\mathcal T}_h} + \langle \widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}},\sigma_h \rangle_{\partial {\mathcal T}_h} =0,\label{pre_sta_pr_3}\\ &\langle \widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}}, \widehat{{\boldsymbol{u}}}^t_h \rangle_{\partial {\mathcal T}_h \setminus 
\partial \Omega}=0,\label{pre_sta_pr_4}\\ & \langle \widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}}, \widehat{{\boldsymbol{u}}}^t_h \rangle_{\partial \Omega} + \langle \kappa \widehat{{\boldsymbol{u}}}^t_h, \widehat{{\boldsymbol{u}}}^t_h \rangle_{\partial \Omega} = \langle {\boldsymbol{g}}, \widehat{{\boldsymbol{u}}}^t_h \rangle_{\partial \Omega},\label{pre_sta_pr_5}\\ & \langle \widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}}, \widehat{\sigma}_h \rangle_{\partial {\mathcal T}_h} = 0,\label{pre_sta_pr_6} \end{align} \end{subequations} where (\ref{pre_sta_pr_2}) and (\ref{pre_sta_pr_3}) are obtained by integration by parts. Furthermore, noting the definitions of $\widehat{{\boldsymbol{w}}}_h$ in (\ref{hat_definition}) and applying complex conjugation to (\ref{pre_sta_pr_1}), (\ref{pre_sta_pr_3}) and (\ref{pre_sta_pr_6}), we get the following equalities after simple manipulations: \begin{align*} &-({\mathbf{i}} {\boldsymbol{w}}_h, {\boldsymbol{w}}_h )_{{\mathcal T}_h} - (\Vc\Vu\Vr\Vl\, {\boldsymbol{w}}_h, {\boldsymbol{u}}_h)_{{\mathcal T}_h} - \langle \tau_t ({\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h)^t, \widehat{{\boldsymbol{u}}}^t_h\rangle_{\partial {\mathcal T}_h} + \langle \kappa \widehat{{\boldsymbol{u}}}^t_h,\widehat{{\boldsymbol{u}}}^t_h\rangle_{\partial \Omega} = \langle {\boldsymbol{g}}, \widehat{{\boldsymbol{u}}}^t_h\rangle_{\partial \Omega},\\
& (\sigma_h, \Vd\Vi\Vv \, {\boldsymbol{u}}_h )_{{\mathcal T}_h} - \langle \sigma_h,{\boldsymbol{u}}^n_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} + \langle \sigma_h,\widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} =0,\\ & - \langle \widehat{\sigma}_h, \widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} = 0. \end{align*} Adding the above three equalities and (\ref{pre_sta_pr_2}) together and noting that $ ({\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h)^t ={\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h $, we have \begin{align*} &-({\mathbf{i}} {\boldsymbol{w}}_h, {\boldsymbol{w}}_h )_{{\mathcal T}_h} + \langle \tau_t ({\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h), {\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h\rangle_{\partial {\mathcal T}_h} + \langle \kappa \widehat{{\boldsymbol{u}}}^t_h,\widehat{{\boldsymbol{u}}}^t_h\rangle_{\partial \Omega}\\ &\qquad\qquad\qquad \ + \langle \tau_n (\sigma_h-\widehat{\sigma}_h), \sigma_h-\widehat{\sigma}_h\rangle_{\partial {\mathcal T}_h} + ({\mathbf{i}} \kappa^2 {\boldsymbol{u}}_h,{\boldsymbol{u}}_h)_{{\mathcal T}_h} = ({\boldsymbol{f}}, {\boldsymbol{u}}_h)_{{\mathcal T}_h}+ \langle {\boldsymbol{g}}, \widehat{{\boldsymbol{u}}}^t_h\rangle_{\partial \Omega}, \end{align*} which implies the lemma by the Cauchy-Schwarz inequality. \end{proof}
Next we shall utilize the dual argument to give the $L^2$-norm estimate of ${\boldsymbol{u}}_h$. Given ${\boldsymbol{u}}_h \in {\boldsymbol{L}}^2(\Omega)$, we introduce the first-order system of the dual problem (\ref{dual_problem}) with ${\boldsymbol{J}} = {\boldsymbol{u}}_h$: \begin{subequations} \label{dual_FOS} \begin{align} {\mathbf{i}} \boldsymbol{\Phi} - \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi} &= 0\qquad \rm{in}\ \Omega, \\ \Vc\Vu\Vr\Vl\, \boldsymbol{\Phi} + {\mathbf{i}}\kappa^2 \boldsymbol{\Psi} + \nabla \varphi &= {\boldsymbol{u}}_h \quad \ \rm{in}\ \Omega,\\ \label{dual_FOS_1} \Vd\Vi\Vv \, \boldsymbol{\Psi} &= 0\qquad \rm{in}\ \Omega,\\ \label{bc1_dual_FOS} \boldsymbol{\Phi} \times {\boldsymbol{n}} - \kappa \boldsymbol{\Psi}^t &= 0 \qquad \rm{on}\ \partial\Omega,\\ \varphi &= 0 \qquad \rm{on}\ \partial\Omega. \end{align} \end{subequations} Due to $\varphi \in H^1_0(\Omega)$, we easily obtain \begin{align} \label{est_dual_3_p}
\|\varphi\|_{1,\Omega} \leq C \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}. \end{align} When the estimate (\ref{est_dual_1}) holds, taking ${\boldsymbol{J}} = {\boldsymbol{u}}_h$ in (\ref{est_dual_1}) we have \begin{align} \label{est_dual_3_new}
\kappa \| \boldsymbol{\Psi} \|_{\frac{1}{2}+\alpha,\Omega} + \|\Vc\Vu\Vr\Vl\, \boldsymbol{\Psi}\|_{\frac{1}{2}+\alpha,\Omega} + \kappa(1+\kappa) \| \boldsymbol{\Psi}\|_{0,\Omega} \leq C(1+\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}, \end{align} which implies \begin{align} \label{est_dual_3_1}
\| \boldsymbol{\Phi}\|_{\frac{1}{2}+\alpha,\Omega} \leq C (1+\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}. \end{align} By the equation (\ref{dual_p_1}) with ${\boldsymbol{J}} = {\boldsymbol{u}}_h$, we directly have \[ (\Vc\Vu\Vr\Vl\, \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi},\boldsymbol{\Psi})_{\Omega} - \kappa^2 (\boldsymbol{\Psi},\boldsymbol{\Psi})_{\Omega} + ({\mathbf{i}} \nabla \varphi, \boldsymbol{\Psi})_{\Omega} = ({\mathbf{i}} {\boldsymbol{u}}_h, \boldsymbol{\Psi})_{\Omega}, \] which together with the fact $( \nabla \varphi, \boldsymbol{\Psi})_{\Omega}=0$ and the boundary condition (\ref{bc_dual_3}) yields \[ (\Vc\Vu\Vr\Vl\, \boldsymbol{\Psi}, \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi})_{\Omega} - \kappa^2 (\boldsymbol{\Psi},\boldsymbol{\Psi})_{\Omega} - \langle {\mathbf{i}} \kappa \boldsymbol{\Psi}^t, \boldsymbol{\Psi}^t \rangle_{\partial \Omega} = ({\mathbf{i}} {\boldsymbol{u}}_h, \boldsymbol{\Psi})_{\Omega}. \] Thus, taking the imaginary part of the left-hand side of the above equation and using (\ref{est_dual_3_new}) we have \begin{align} \label{est_dual_4}
\kappa \|\boldsymbol{\Psi}^t\|^2_{0, \partial \Omega} \leq \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h} \|\boldsymbol{\Psi}\|_{0,\Omega} \leq C \kappa^{-1} \|{\boldsymbol{u}}_h\|^2_{{\mathcal T}_h}. \end{align}
Next we present a key equality.
\begin{lemma} \label{lemma_equality_sta} Let $(\boldsymbol{\Phi},\boldsymbol{\Psi},\varphi)$ be the solution of the dual problem (\ref{dual_FOS}). We have \[
\|{\boldsymbol{u}}_h\|^2_{{\mathcal T}_h} = \sum_{k=1}^6 T_k, \] where \begin{align*} T_1 &= \langle {\boldsymbol{u}}^t_h \times {\boldsymbol{n}} - \widehat{{\boldsymbol{u}}}^t_h \times {\boldsymbol{n}} , \boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} \rangle_{\partial {\mathcal T}_h},\\ T_2& = \langle {\boldsymbol{u}}^n_h \cdot {\boldsymbol{n}} - \widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}} , \varphi - \Pi_Q \varphi \rangle_{\partial {\mathcal T}_h},\\ T_3 &= - \langle\tau_t ({\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h), \boldsymbol{\Psi} - \boldsymbol{\Pi_U\Psi}\rangle_{\partial {\mathcal T}_h},\\ T_4 &=- \langle \kappa \widehat{{\boldsymbol{u}}}^t_h+\widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}}, \boldsymbol{\Psi}^t \rangle_{\partial \Omega} ,\\ T_5 &= - ({\boldsymbol{f}}, \boldsymbol{\Pi_U\Psi})_{ {\mathcal T}_h},\\ T_6 &= (\sigma_h, \Vd\Vi\Vv \,( \boldsymbol{\Psi} -\boldsymbol{\Pi_U\Psi} ))_{ {\mathcal T}_h} - \langle \widehat{\sigma}_h, ( \boldsymbol{\Psi} -\boldsymbol{\Pi_U\Psi})\cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h}. \end{align*} \end{lemma} \begin{proof} Using the dual first-order system (\ref{dual_FOS}), we obtain \begin{align}
\|{\boldsymbol{u}}_h\|^2_{{\mathcal T}_h} &= ({\boldsymbol{u}}_h, \Vc\Vu\Vr\Vl\, \boldsymbol{\Phi} + {\mathbf{i}}\kappa^2 \boldsymbol{\Psi} + \nabla \varphi )_{{\mathcal T}_h} + ({\boldsymbol{w}}_h,{\mathbf{i}} \boldsymbol{\Phi} - \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi})_{{\mathcal T}_h}\nonumber\\ & = ({\boldsymbol{u}}_h, \Vc\Vu\Vr\Vl\, \boldsymbol{\Pi_V \Phi} + {\mathbf{i}}\kappa^2 \boldsymbol{\Pi_U \Psi} + \nabla \Pi_Q \varphi)_{{\mathcal T}_h} + ({\boldsymbol{w}}_h,{\mathbf{i}} \boldsymbol{\Pi_V \Phi} - \Vc\Vu\Vr\Vl\, \boldsymbol{\Pi_U \Psi})_{{\mathcal T}_h}\nonumber\\ &\quad \ + ({\boldsymbol{u}}_h,\Vc\Vu\Vr\Vl\, (\boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} ) )_{{\mathcal T}_h} + ({\boldsymbol{u}}_h,{\mathbf{i}}\kappa^2( \boldsymbol{\Psi} - \boldsymbol{\Pi_U\Psi} ))_{{\mathcal T}_h} + ({\boldsymbol{u}}_h, \nabla(\varphi- \Pi_Q \varphi))_{{\mathcal T}_h}\nonumber\\ &\quad \ + ({\boldsymbol{w}}_h,{\mathbf{i}}(\boldsymbol{\Phi} -\boldsymbol{\Pi_V\Phi} ))_{{\mathcal T}_h} - ({\boldsymbol{w}}_h, \Vc\Vu\Vr\Vl\, (\boldsymbol{\Psi}-\boldsymbol{\Pi_U\Psi} ))_{{\mathcal T}_h}. \label{proof_sta_1} \end{align} By the definitions of $\boldsymbol \Pi_U$ and $\boldsymbol \Pi_V$, we have $({\boldsymbol{u}}_h,{\mathbf{i}}\kappa^2( \boldsymbol{\Psi} - \boldsymbol{\Pi_U\Psi} ))_{{\mathcal T}_h} =0$ and $({\boldsymbol{w}}_h,{\mathbf{i}}(\boldsymbol{\Phi} -\boldsymbol{\Pi_V\Phi} ))_{{\mathcal T}_h}=0$. 
Integrating by parts and applying the property of the $L^2$-projections yields \begin{align} ({\boldsymbol{u}}_h,\Vc\Vu\Vr\Vl\, (\boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} ) )_{{\mathcal T}_h} &= (\Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h, \boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi})_{{\mathcal T}_h} + \langle {\boldsymbol{u}}_h \times {\boldsymbol{n}}, \boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} \rangle_{\partial {\mathcal T}_h} \nonumber\\ &= \langle {\boldsymbol{u}}_h \times {\boldsymbol{n}}, \boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} \rangle_{\partial {\mathcal T}_h}\nonumber\\ &= \langle {\boldsymbol{u}}^t_h \times {\boldsymbol{n}}, \boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} \rangle_{\partial {\mathcal T}_h},\label{sta_equ_pre_1} \end{align} \begin{align}
({\boldsymbol{u}}_h, \nabla(\varphi - \Pi_Q \varphi))_{{\mathcal T}_h} &= -(\Vd\Vi\Vv \, {\boldsymbol{u}}_h,\varphi-\Pi_Q \varphi)_{{\mathcal T}_h}+ \langle {\boldsymbol{u}}^n_h \cdot {\boldsymbol{n}}, \varphi-\Pi_Q\varphi\rangle_{\partial {\mathcal T}_h} \nonumber\\
& = \langle {\boldsymbol{u}}^n_h \cdot {\boldsymbol{n}}, \varphi-\Pi_Q\varphi\rangle_{\partial {\mathcal T}_h} ,\label{sta_equ_pre_2} \end{align} and \begin{align} - ({\boldsymbol{w}}_h, \Vc\Vu\Vr\Vl\, (\boldsymbol{\Psi}-\boldsymbol{\Pi_U\Psi} ))_{{\mathcal T}_h} &= -( \Vc\Vu\Vr\Vl\, {\boldsymbol{w}}_h, \boldsymbol{\Psi}-\boldsymbol{\Pi_U\Psi} )_{{\mathcal T}_h} - \langle {\boldsymbol{w}}_h \times {\boldsymbol{n}}, \boldsymbol{\Psi}-\boldsymbol{\Pi_U\Psi}\rangle_{\partial {\mathcal T}_h}\nonumber \\ & = - \langle {\boldsymbol{w}}^t_h \times {\boldsymbol{n}}, \boldsymbol{\Psi}^t \rangle_{\partial {\mathcal T}_h} + \langle {\boldsymbol{w}}^t_h \times {\boldsymbol{n}},\boldsymbol{\Pi_U\Psi}\rangle_{\partial {\mathcal T}_h}.\label{sta_equ_pre_3} \end{align} Taking ${\boldsymbol{r}}_h = \boldsymbol{\Pi_V \Phi}$ in the equation (\ref{discrete_mixed_form_a}), noting that $\widehat{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}}$ is continuous across each interior face and using the boundary condition (\ref{bc1_dual_FOS}), we obtain \begin{align} ({\boldsymbol{u}}_h, \Vc\Vu\Vr\Vl\, \boldsymbol{\Pi_V \Phi} )_{{\mathcal T}_h} &= ({\mathbf{i}} {\boldsymbol{w}}_h,\boldsymbol{\Pi_V \Phi} )_{{\mathcal T}_h} + \langle \widehat{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}}, \boldsymbol{\Pi_V \Phi}\rangle_{\partial {\mathcal T}_h} \nonumber\\ & = ({\mathbf{i}} {\boldsymbol{w}}_h,\boldsymbol{\Pi_V \Phi} )_{{\mathcal T}_h} + \langle \widehat{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}}, \boldsymbol{\Pi_V \Phi} - \boldsymbol{ \Phi} \rangle_{\partial {\mathcal T}_h} - \langle \widehat{{\boldsymbol{u}}}^t_h, \boldsymbol{ \Phi}\times{\boldsymbol{n}}\rangle_{\partial \Omega}\nonumber\\ & = ({\mathbf{i}} {\boldsymbol{w}}_h,\boldsymbol{\Pi_V \Phi} )_{{\mathcal T}_h} + \langle \widehat{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}}, \boldsymbol{\Pi_V \Phi} - \boldsymbol{ \Phi} \rangle_{\partial {\mathcal T}_h} - \langle \widehat{{\boldsymbol{u}}}^t_h, \kappa \boldsymbol{\Psi}^t \rangle_{\partial 
\Omega}.\label{sta_equ_pre_4} \end{align} Taking $q_h = \Pi_Q \varphi$ in (\ref{discrete_mixed_form_c}) and noting that $\widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}}$ is continuous across each interior face and $\varphi \in H^1_0(\Omega)$ we have \begin{align} ({\boldsymbol{u}}_h,\nabla \Pi_Q \varphi )_{{\mathcal T}_h} = \langle \widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}}, \Pi_Q \varphi \rangle_{\partial {\mathcal T}_h} = \langle \widehat{{\boldsymbol{u}}}^n_h \cdot {\boldsymbol{n}}, \Pi_Q \varphi - \varphi \rangle_{\partial {\mathcal T}_h} .\label{sta_equ_pre_5} \end{align} We further take ${\boldsymbol{v}}_h = \boldsymbol{\Pi_U \Psi} $ in (\ref{discrete_mixed_form_b}) to get \begin{align} &-( {\boldsymbol{w}}_h,\Vc\Vu\Vr\Vl\, \boldsymbol{\Pi_U\Psi})_{{\mathcal T}_h} \nonumber\\ &= - \langle \widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}},\boldsymbol{\Pi_U\Psi}\rangle_{\partial {\mathcal T}_h} + ( {\mathbf{i}} \kappa^2 {\boldsymbol{u}}_h,\boldsymbol{\Pi_U\Psi})_{{\mathcal T}_h} - ( \sigma_h,\Vd\Vi\Vv \, \boldsymbol{\Pi_U\Psi} )_{{\mathcal T}_h} \nonumber\\ &\quad + \langle \widehat{\sigma}_h,\boldsymbol{\Pi_U\Psi} \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} - ({\boldsymbol{f}}, \boldsymbol{\Pi_U\Psi})_{{\mathcal T}_h}\nonumber\\ &= - \langle {{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}} - \tau_t ({\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h),\boldsymbol{\Pi_U\Psi} \rangle_{\partial {\mathcal T}_h} + ( {\mathbf{i}} \kappa^2 {\boldsymbol{u}}_h,\boldsymbol{\Pi_U\Psi} )_{{\mathcal T}_h} - ( \sigma_h,\Vd\Vi\Vv \,\boldsymbol{\Pi_U\Psi} -\Vd\Vi\Vv \, \boldsymbol{\Psi} )_{{\mathcal T}_h} \nonumber\\ &\quad + \langle \widehat{\sigma}_h,\boldsymbol{\Pi_U\Psi} \cdot {\boldsymbol{n}} - \Psi \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} - ({\boldsymbol{f}}, \boldsymbol{\Pi_U\Psi})_{{\mathcal T}_h},\label{sta_equ_pre_6} \end{align} where the above second equality holds due to the fact that $\Vd\Vi\Vv \, 
\boldsymbol{\Psi}=0$, $\widehat{\sigma}_h$ is continuous across each interior face and $\widehat{\sigma}_h=0$ on ${\mathcal E}^{\partial}_h$. Inserting the above equalities (\ref{sta_equ_pre_1})-(\ref{sta_equ_pre_6}) into the right-hand side of (\ref{proof_sta_1}), we obtain the result. This completes the proof. \end{proof}
We can now give the proof of Theorem \ref{stability_thm}. \begin{proof} (Proof of Theorem \ref{stability_thm}) We derive the upper bounds for $T_1,\ldots,T_6$ in Lemma \ref{lemma_equality_sta} under the assumptions in Theorem \ref{stability_thm}. By the Cauchy--Schwarz inequality, the approximation properties of standard $L^2$-projections, and the inequalities (\ref{est_dual_3_p}) and (\ref{est_dual_3_new}), we have \begin{align*}
T_1& \leq C \| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \tau_t^{-\frac{1}{2}} (\frac{h}{p})^{\alpha}\|\boldsymbol{\Phi}\|_{\frac{1}{2}+\alpha,\Omega} \leq C \| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \tau_t^{-\frac{1}{2}} (\frac{h}{p})^{\alpha} (1+\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h},\\
T_2 &= - \langle \tau_n(\sigma_h - \widehat{\sigma}_h) , \varphi - \Pi_Q \varphi \rangle_{\partial {\mathcal T}_h} \leq C \| \tau^{\frac{1}{2}}_n ( \sigma_h - \widehat{\sigma}_h ) \|_{\partial {\mathcal T}_h} \tau_n^{\frac{1}{2}} (\frac{h}{p})^{\frac{1}{2}} \| \varphi \|_{1,\Omega} \\&\leq C \| \tau^{\frac{1}{2}}_n ( \sigma_h - \widehat{\sigma}_h ) \|_{\partial {\mathcal T}_h} \tau_n^{\frac{1}{2}} (\frac{h}{p})^{\frac{1}{2}} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h},\\
T_3 &\leq C \| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \tau_t^{\frac{1}{2}} (\frac{h}{p})^{\alpha}\|\boldsymbol{\Psi}\|_{\frac{1}{2}+\alpha,\Omega} \leq C \| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \tau_t^{\frac{1}{2}} (\frac{h}{p})^{\alpha} (1+1/\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}. \end{align*} Taking $\boldsymbol{\eta}_h = {\boldsymbol{P}}_{{\boldsymbol{M}}} \boldsymbol{\Psi}^t$ in (\ref{discrete_mixed_form_e}) and using the property of the $L^2$-projection operator ${\boldsymbol{P}}_{{\boldsymbol{M}}}$ on ${\mathcal E}^{\partial}_h$ and the inequality (\ref{est_dual_4}) yields \[
T_4 = - \langle \kappa \widehat{{\boldsymbol{u}}}^t_h+\widehat{{\boldsymbol{w}}}^t_h \times {\boldsymbol{n}}, {\boldsymbol{P}}_{{\boldsymbol{M}}}\boldsymbol{\Psi}^t \rangle_{\partial \Omega} = -\langle {\boldsymbol{g}} , {\boldsymbol{P}}_{{\boldsymbol{M}}}\boldsymbol{\Psi}^t \rangle_{\partial \Omega} \leq \|{\boldsymbol{g}}\|_{0, \partial \Omega} \| \boldsymbol{\Psi}^t \|_{0,\partial \Omega} \leq C \kappa^{-1} \|{\boldsymbol{g}}\|_{0,\partial \Omega} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}. \] For the estimate of $T_5$, we easily deduce \begin{align*}
T_5 \leq \| {\boldsymbol{f}} \|_{0,\Omega} \| \boldsymbol{\Psi} \|_{0,\Omega} \leq C \| {\boldsymbol{f}} \|_{0,\Omega} \kappa^{-1} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}. \end{align*} Applying integration by parts on $T_6$, we have \begin{align*} T_6 &= -(\nabla \sigma_h, \boldsymbol{\Psi}-\boldsymbol{\Pi_U\Psi} )_{ {\mathcal T}_h} + \langle \sigma_h - \widehat{\sigma}_h, (\boldsymbol{\Psi} -\boldsymbol{\Pi_U\Psi} )\cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} = \langle \sigma_h - \widehat{\sigma}_h, (\boldsymbol{\Psi} -\boldsymbol{\Pi_U\Psi} )\cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} \\
&\leq C \| \tau^{\frac{1}{2}}_n (\sigma_h - \widehat{\sigma}_h) \|_{\partial {\mathcal T}_h} \tau^{-\frac{1}{2}}_n (\frac{h}{p})^{\alpha} \|\boldsymbol{\Psi} \|_{\frac{1}{2}+\alpha,\Omega} \leq C \| \tau^{\frac{1}{2}}_n (\sigma_h - \widehat{\sigma}_h) \|_{\partial {\mathcal T}_h} \tau^{-\frac{1}{2}}_n (\frac{h}{p})^{\alpha} (1+1/\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}. \end{align*} Combining the above estimates for $T_1,\cdots, T_6$, we obtain \begin{align}
\|{\boldsymbol{u}}_h\|^2_{{\mathcal T}_h} & \leq C \kappa^{-1}\left(\| {\boldsymbol{f}} \|_{0,\Omega}+ \|{\boldsymbol{g}}\|_{0,\partial \Omega} \right)\|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}\nonumber \\ &\quad + C
\big(\tau_t^{-\frac{1}{2}} (\frac{h}{p})^{\alpha}(1+\kappa) + \tau_t^{\frac{1}{2}} (\frac{h}{p})^{\alpha} (1+1/\kappa) \big)\| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h} \nonumber\\
& \quad + C \big( \tau_n^{\frac{1}{2}} (\frac{h}{p})^{\frac{1}{2}}+\tau^{-\frac{1}{2}}_n (\frac{h}{p})^{\alpha} (1+1/\kappa) \big) \| \tau^{\frac{1}{2}}_n (\sigma_h - \widehat{\sigma}_h) \|_{\partial {\mathcal T}_h} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}. \label{uh_estimate_basis} \end{align} Here we choose $\tau_t = \frac{p}{h}$ and $\tau_n = \frac{(1+\kappa) h}{p}$. By Young's inequality, we have \begin{align}
\|{\boldsymbol{u}}_h\|^2_{{\mathcal T}_h} \leq C \Big(& \kappa^{-2}\| {\boldsymbol{f}} \|^2_{0,\Omega}+ \kappa^{-2}\|{\boldsymbol{g}}\|^2_{0,\partial \Omega} \nonumber\\
&+ \big((1+\kappa)^2 (\frac{h}{p})^{2\alpha+1} +(1+1/\kappa)^2(\frac{h}{p})^{2\alpha-1} \big)\| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|^2_{\partial {\mathcal T}_h} \nonumber\\
&+ \big((1+\kappa) (\frac{h}{p})^2 + \frac{(1+\kappa)}{\kappa^2} (\frac{h}{p})^{2\alpha-1}\big)\| \tau^{\frac{1}{2}}_n (\sigma_h - \widehat{\sigma}_h) \|^2_{\partial {\mathcal T}_h} \Big).\nonumber \end{align}
Combining the above estimate and (\ref{estimate_mixed_1}), the absolute stability of $\|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}$ can be easily observed, and we can further obtain (\ref{stability_u_h1}) by Young's inequality in the regime $\kappa h/p \geq C_0$. Then, by the estimate (\ref{estimate_mixed_2}), we can also see the absolute stability of $\|{\boldsymbol{w}}_h\|_{{\mathcal T}_h}$ and have \begin{align}
\|{\boldsymbol{w}}_h\|^2_{{\mathcal T}_h} \leq2 \kappa^2 \|{\boldsymbol{u}}_h\|^2_{{\mathcal T}_h} + \|\frac{{\boldsymbol{f}}}{\kappa}\|^2_{0,\Omega} + \frac{1}{\kappa} \|{\boldsymbol{g}}\|^2_{0,\partial \Omega}.\nonumber \end{align} Then (\ref{stability_w_h1}) is derived by (\ref{stability_u_h1}). Furthermore, combining the fact that (cf. \cite{Schwab98}) \begin{align}
\| {\boldsymbol{u}}^t_h \|_{\partial {\mathcal T}_h}\leq C p h^{-\frac{1}{2}} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h},\nonumber \end{align}
(\ref{estimate_mixed_1}), (\ref{stability_u_h1}) and the triangle inequality yields the absolute stability of $\| \widehat{{\boldsymbol{u}}}^t_h \|_{\partial {\mathcal T}_h}$ and the estimate (\ref{stability_ut_h1}).
When ${\boldsymbol{f}} =0$ and ${\boldsymbol{g}}=0$ in the first-order system (\ref{pde_mixed_first_order}), the estimates (\ref{stability_u_h1})-(\ref{stability_ut_h1}) and Lemma \ref{stability_lemma} imply ${\boldsymbol{w}}_h=0,{\boldsymbol{u}}_h=0$ on ${\mathcal T}_h$ and $\widehat{{\boldsymbol{u}}}^t_h=0, \sigma_h = \widehat{\sigma}_h$ on $\partial {\mathcal T}_h$. It then follows from (\ref{discrete_mixed_form_b}) that for any ${\boldsymbol{v}}_h \in {\boldsymbol{U}}_h$, \[ - ( \sigma_h,\Vd\Vi\Vv \, {\boldsymbol{v}}_h )_{{\mathcal T}_h} + \langle \widehat{\sigma}_h, {\boldsymbol{v}}_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h}= ( \nabla \sigma_h, {\boldsymbol{v}}_h )_{{\mathcal T}_h} - \langle {\sigma}_h, {\boldsymbol{v}}_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h}+\langle \widehat{\sigma}_h, {\boldsymbol{v}}_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} = 0, \] which implies $\sigma_h$ is piecewise constant on ${\mathcal T}_h$. Due to the fact that $\sigma_h = \widehat{\sigma}_h=0$ on $\partial \Omega$, we have $\sigma_h = 0$ on ${\mathcal T}_h$ and $\widehat{\sigma}_h=0$ on $\partial {\mathcal T}_h$. Hence, the well-posedness of the HDG method (\ref{discrete_mixed_form}) always holds without imposing any mesh constraint, i.e., the HDG method (\ref{discrete_mixed_form}) is absolutely stable. \end{proof}
Moreover, under the assumptions made in Theorem \ref{stability_thm}, we can further get the upper bounds for $\|\Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h\|_{{\mathcal T}_h}$ and $\|\Vd\Vi\Vv \, {\boldsymbol{u}}_h \|_{{\mathcal T}_h}$. We take ${\boldsymbol{r}}_h = \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h$ in (\ref{discrete_mixed_form_a}) to get \[ ({\mathbf{i}} {\boldsymbol{w}}_h,\Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h)_{{\mathcal T}_h} - ({\boldsymbol{u}}_h,\Vc\Vu\Vr\Vl\, \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h)_{{\mathcal T}_h} + \langle\widehat{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}}, \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h \rangle_{\partial {\mathcal T}_h} =0 . \] Using integration by parts on the above equation, we have \[ ({\mathbf{i}} {\boldsymbol{w}}_h,\Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h)_{{\mathcal T}_h} - ( \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h,\Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h )_{{\mathcal T}_h} - \langle{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}}, \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h \rangle_{\partial {\mathcal T}_h} + \langle\widehat{{\boldsymbol{u}}}^t_h\times {\boldsymbol{n}}, \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h \rangle_{\partial {\mathcal T}_h} =0, \] which directly yields \[
\| \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h \|^2_{{\mathcal T}_h} \leq \| {\boldsymbol{w}}_h \|_{{\mathcal T}_h}\| \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h \|_{{\mathcal T}_h} + C \| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \tau_t^{-\frac{1}{2}} p h^{-\frac{1}{2}} \| \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h \|_{{\mathcal T}_h} . \] Combining the above inequality, (\ref{estimate_mixed_1}), (\ref{stability_u_h1}) and (\ref{stability_w_h1}), then we get \[
\| \Vc\Vu\Vr\Vl\, {\boldsymbol{u}}_h \|_{{\mathcal T}_h}\leq C \Big( \big(\frac{1}{\kappa} + p^{\frac{1}{2}} +(1+\frac{p^{\frac{1}{2}}}{\kappa})C^2_{\rm stab} \big)\|{\boldsymbol{f}}\|_{0,\Omega}+ \big( \frac{1}{\kappa^{\frac{1}{2}}} + \frac{p^{\frac{1}{2}}}{ \kappa^{\frac{1}{2}} } +(1+\frac{p^{\frac{1}{2}}}{\kappa})C_{\rm stab} \big)\|{\boldsymbol{g}}\|_{0,\partial\Omega} \Big). \] Taking $q_h = \Vd\Vi\Vv \, {\boldsymbol{u}}_h$ in (\ref{discrete_mixed_form_c}) and using integration by parts, we have \[
\|\Vd\Vi\Vv \, {\boldsymbol{u}}_h \|^2_{{\mathcal T}_h} = \langle {{\boldsymbol{u}}}^n_h\cdot {\boldsymbol{n}}-\widehat{{\boldsymbol{u}}}^n_h\cdot {\boldsymbol{n}},\Vd\Vi\Vv \, {\boldsymbol{u}}_h \rangle_{\partial {\mathcal T}_h}\leq C \| \tau^{\frac{1}{2}}_n ( \sigma_h - \widehat{\sigma}_h) \|_{\partial {\mathcal T}_h} \tau^{\frac{1}{2}}_n p h^{-\frac{1}{2}} \|\Vd\Vi\Vv \, {\boldsymbol{u}}_h \|_{{\mathcal T}_h}. \]
Then we obtain the upper bound for $\|\Vd\Vi\Vv \, {\boldsymbol{u}}_h \|_{{\mathcal T}_h}$ by the above estimate, (\ref{estimate_mixed_1}) and (\ref{stability_u_h1}) as follows: \[
\| \Vd\Vi\Vv \, {\boldsymbol{u}}_h \|_{{\mathcal T}_h} \leq C (1+\kappa)^{\frac{1}{2}}p^{\frac{1}{2}}\Big( (1+\frac{C^2_{\rm stab}}{\kappa}) \| {\boldsymbol{f}} \|_{0,\Omega} + \big( \frac{1}{\kappa^{\frac{1}{2}}} + \frac{C_{\rm stab}}{\kappa} \big) \|{\boldsymbol{g}}\|_{0,\partial \Omega} \Big). \]
\begin{remark}\label{remark_sta_1}
By the estimates (\ref{estimate_mixed_1}) and (\ref{stability_u_h1}), we can get the upper bound for $\| \tau^{\frac{1}{2}}_n ( \sigma_h - \widehat{\sigma}_h) \|_{\partial {\mathcal T}_h} $. Moreover, taking ${\boldsymbol{v}}_h = \nabla \sigma_h$ in (\ref{discrete_mixed_form_b}) and applying integration by parts, the Cauchy-Schwarz inequality, trace inequality and the estimates in Lemma \ref{stability_lemma} and Theorem \ref{stability_thm}, we can also get the stability estimate for $\| \nabla \sigma_h \|_{{\mathcal T}_h}$. When $\kappa h/p \leq C_0$, one may tune the parameters $\tau_t$ and $\tau_n$ according to the derivation of upper bound for the right-hand side of (\ref{uh_estimate_basis}) and get the stability estimates. \end{remark}
\section{Error analysis}\label{error_analysis} In this section we provide detailed proofs of the a priori error estimates in Theorem \ref{error_thm}. We denote \begin{align*} &{\boldsymbol{e}}_{{\boldsymbol{w}}} = \boldsymbol{\Pi}_{{\boldsymbol{V}}} {\boldsymbol{w}}-{\boldsymbol{w}}_h,\quad {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} = {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{w}}^t-\widehat{{\boldsymbol{w}}}^t_h,\quad {\boldsymbol{e}}_{{\boldsymbol{u}}} = \boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}}-{\boldsymbol{u}}_h,\quad {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} = {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^t-\widehat{{\boldsymbol{u}}}^t_h,\\ &{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n} = {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^n-\widehat{{\boldsymbol{u}}}^n_h,\quad e_{\sigma} = \Pi_{Q} \sigma - \sigma_h, \quad e_{\widehat{\sigma}} = P_M \sigma - \widehat{\sigma}_h. \end{align*} In the following we first present the error equation for the analysis.
\begin{lemma} Let $({\boldsymbol{w}},{\boldsymbol{u}},\sigma)$ and $({\boldsymbol{w}}_h,{\boldsymbol{u}}_h,\widehat{{\boldsymbol{u}}}^t_h,\sigma_h, \widehat{\sigma}_h) $ solve the equations (\ref{pde_mixed_first_order}) and (\ref{discrete_mixed_form}). We have \begin{subequations} \label{error_eq} \begin{align} \label{error_eq_a} &({\mathbf{i}} {\boldsymbol{e}}_{{\boldsymbol{w}}},{\boldsymbol{r}}_h)_{{\mathcal T}_h} - ({\boldsymbol{e}}_{{\boldsymbol{u}}},\Vc\Vu\Vr\Vl\, {\boldsymbol{r}}_h)_{{\mathcal T}_h} + \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} \times {\boldsymbol{n}}, {\boldsymbol{r}}_h\rangle_{\partial {\mathcal T}_h} = 0,\\ \label{error_eq_b} &({\boldsymbol{e}}_{{\boldsymbol{w}}},\Vc\Vu\Vr\Vl\, {\boldsymbol{v}}_h)_{{\mathcal T}_h} - \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} \times {\boldsymbol{n}}, {\boldsymbol{v}}_h\rangle_{\partial {\mathcal T}_h} + ({\mathbf{i}} \kappa^2{\boldsymbol{e}}_{{\boldsymbol{u}}},{\boldsymbol{v}}_h)_{{\mathcal T}_h} \nonumber\\ &\qquad \qquad \qquad \ - (e_{\sigma},\Vd\Vi\Vv \, {\boldsymbol{v}}_h)_{{\mathcal T}_h}+ \langle e_{\widehat{\sigma}},{\boldsymbol{v}}_h \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h}=0,\\ \label{error_eq_c} &-({\boldsymbol{e}}_{{\boldsymbol{u}}},\nabla q_h)_{{\mathcal T}_h}+ \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n} \cdot {\boldsymbol{n}}, q_h\rangle_{\partial {\mathcal T}_h} = 0,\\ \label{error_eq_d} & \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} \times {\boldsymbol{n}}, \boldsymbol{\eta}_h\rangle_{\partial {\mathcal T}_h \setminus \partial \Omega}=0,\\ \label{error_eq_e} &\langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} \times {\boldsymbol{n}}, \boldsymbol{\eta}_h\rangle_{ \partial \Omega}+ \langle \kappa {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}, \boldsymbol{\eta}_h\rangle_{ \partial \Omega}=0,\\ \label{error_eq_f} &\langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n} \cdot {\boldsymbol{n}},{\xi}_h \rangle_{\partial 
{\mathcal T}_h}=0, \end{align} \end{subequations} for all $({\boldsymbol{r}}_h, {\boldsymbol{v}}_h, \boldsymbol{\eta}_h, q_h,\xi_h) \in {\boldsymbol{V}}_h \times {\boldsymbol{U}}_h \times {\boldsymbol{M}}^t_h \times Q_h \times M_h(0)$. \end{lemma} \begin{proof}
We notice that the exact solution $({\boldsymbol{w}},{\boldsymbol{u}},{\boldsymbol{u}}^t|_{{\mathcal E}_h},\sigma,\sigma|_{{\mathcal E}_h})$ also satisfies the equation (\ref{discrete_mixed_form}). Hence, due to the property of standard $L^2$-projections, the solutions ${\boldsymbol{w}}_h$, $\widehat{{\boldsymbol{w}}}^t_h|_{{\mathcal E}_h}$, ${\boldsymbol{u}}_h$, $\widehat{{\boldsymbol{u}}}^t_h|_{{\mathcal E}_h}$, $\widehat{{\boldsymbol{u}}}^n_h|_{{\mathcal E}_h}$, $\sigma_h$, $\widehat{\sigma}_h|_{{\mathcal E}_h} $ in the equation (\ref{discrete_mixed_form}) can be replaced by $\boldsymbol{\Pi_V}{\boldsymbol{w}}$, $\boldsymbol{P_M}{\boldsymbol{w}}^t|_{{\mathcal E}_h}$, $\boldsymbol{\Pi_U}{\boldsymbol{u}}$, $\boldsymbol{P_M}{\boldsymbol{u}}^t|_{{\mathcal E}_h}$, $\boldsymbol{P_M}{\boldsymbol{u}}^n|_{{\mathcal E}_h}$, $\Pi_Q\sigma, P_M\sigma|_{{\mathcal E}_h} $, respectively, to derive a new equation; subtracting the equation (\ref{discrete_mixed_form}) from this new equation yields the result. \end{proof}
Next we are going to present our first error estimate. \begin{lemma} \label{error_analysis_1} If we choose $\tau_t = \frac{p}{h}$ and $\tau_n = \frac{(1+\kappa) h}{p}$, we have \begin{align}
& \|\kappa^{\frac{1}{2}} {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} \|_{0, \partial \Omega } + \| \tau_t^{\frac{1}{2}}({\boldsymbol{e}}_{{\boldsymbol{u}}}^t- {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} ) \|_{\partial {\mathcal T}_h} + \|\tau_n^{\frac{1}{2}} ( e_\sigma- e_{\widehat{\sigma}} ) \|_{\partial {\mathcal T}_h} \leq C \eta({\boldsymbol{w}},{\boldsymbol{u}}) , \label{pre_error_analysis_1} \\
&\| {\boldsymbol{e}}_{{\boldsymbol{w}}} \|_{{\mathcal T}_h} \leq \kappa \| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h} + C\eta({\boldsymbol{w}},{\boldsymbol{u}}),\label{pre_error_analysis_2} \end{align}
where $\eta({\boldsymbol{w}},{\boldsymbol{u}}) = \frac{h^t}{p^t}\|{\boldsymbol{w}}\|_{t,\Omega} + \big(1+(1+\kappa) ^{-\frac{1}{2}}\big) \frac{h^{s-1}}{p^{s-1}} \|{\boldsymbol{u}}\|_{s,\Omega} $, $s>\frac{1}{2},t>\frac{1}{2}$. \end{lemma} \begin{proof} Let ${\boldsymbol{r}}_h = {\boldsymbol{e}}_{{\boldsymbol{w}}}, {\boldsymbol{v}}_h = {\boldsymbol{e}}_{{\boldsymbol{u}}}, \boldsymbol{\eta}_h = {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}, q_h=e_{\sigma}, \xi_h = e_{\widehat{\sigma}}$ in the error equation (\ref{error_eq}). Then we get the following equalities after some simple manipulations which includes applying integration by parts: \begin{subequations} \label{error_eq1} \begin{align} \label{error_eq1_a} &-({\mathbf{i}} {\boldsymbol{e}}_{{\boldsymbol{w}}},{\boldsymbol{e}}_{{\boldsymbol{w}}})_{{\mathcal T}_h} - ({\boldsymbol{e}}_{{\boldsymbol{w}}},\Vc\Vu\Vr\Vl\, {\boldsymbol{e}}_{{\boldsymbol{u}}})_{{\mathcal T}_h} + \langle {\boldsymbol{e}}^t_{{\boldsymbol{w}}}\times {\boldsymbol{n}}, {\boldsymbol{e}}^t_{{\boldsymbol{u}}}\rangle_{\partial {\mathcal T}_h} - \langle {\boldsymbol{e}}^t_{{\boldsymbol{w}}}\times {\boldsymbol{n}}, {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\rangle_{\partial {\mathcal T}_h} = 0,\\ &({\boldsymbol{e}}_{{\boldsymbol{w}}},\Vc\Vu\Vr\Vl\, {\boldsymbol{e}}_{{\boldsymbol{u}}})_{{\mathcal T}_h} - \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} \times {\boldsymbol{n}}, {\boldsymbol{e}}^t_{{\boldsymbol{u}}}\rangle_{\partial {\mathcal T}_h} + ({\mathbf{i}} \kappa^2{\boldsymbol{e}}_{{\boldsymbol{u}}},{\boldsymbol{e}}_{{\boldsymbol{u}}})_{{\mathcal T}_h} - (e_{\sigma},\Vd\Vi\Vv \, {\boldsymbol{e}}_{{\boldsymbol{u}}})_{{\mathcal T}_h}+ \langle e_{\widehat{\sigma}},{\boldsymbol{e}}^n_{{\boldsymbol{u}}} \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h}=0,\\ &(e_\sigma,\Vd\Vi\Vv \, {\boldsymbol{e}}_{{\boldsymbol{u}}})_{{\mathcal T}_h} - \langle e_\sigma, {\boldsymbol{e}}^n_{{\boldsymbol{u}}} \cdot {\boldsymbol{n}}\rangle_{\partial 
{\mathcal T}_h} + \langle e_\sigma, {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n} \cdot {\boldsymbol{n}}\rangle_{\partial {\mathcal T}_h} = 0,\\ & \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} \times {\boldsymbol{n}}, {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\rangle_{\partial {\mathcal T}_h \setminus \partial \Omega}=0,\\ \label{error_eq1_e} &\langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} \times {\boldsymbol{n}}, {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\rangle_{ \partial \Omega}+ \langle \kappa {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} , {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\rangle_{ \partial \Omega}=0,\\ \label{error_eq1_f} &-\langle e_{\widehat{\sigma}}, {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n} \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h}=0. \end{align} \end{subequations} Adding the above equalities (\ref{error_eq1_a})-(\ref{error_eq1_f}) together yields \begin{align} -({\mathbf{i}} {\boldsymbol{e}}_{{\boldsymbol{w}}},{\boldsymbol{e}}_{{\boldsymbol{w}}})_{{\mathcal T}_h}&+({\mathbf{i}} \kappa^2{\boldsymbol{e}}_{{\boldsymbol{u}}},{\boldsymbol{e}}_{{\boldsymbol{u}}})_{{\mathcal T}_h} + \langle ({\boldsymbol{e}}^t_{{\boldsymbol{w}}}-{\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t})\times {\boldsymbol{n}}, {\boldsymbol{e}}^t_{{\boldsymbol{u}}}-{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\rangle_{\partial {\mathcal T}_h}\nonumber \\ &+\langle \kappa {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} , {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\rangle_{ \partial \Omega} - \langle e_\sigma- e_{\widehat{\sigma}},({\boldsymbol{e}}^n_{{\boldsymbol{u}}} - {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n}) \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h}=0. 
\label{error_eq2} \end{align} By the definition of $\widehat{{\boldsymbol{w}}}_h$ in (\ref{hat_definition}) we have \begin{align} ({\boldsymbol{e}}^t_{{\boldsymbol{w}}}-{\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t})\times {\boldsymbol{n}} & = (\boldsymbol{\Pi}_{{\boldsymbol{V}}} {\boldsymbol{w}}-{\boldsymbol{w}}_h)^t \times {\boldsymbol{n}} - ({\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{w}}^t-\widehat{{\boldsymbol{w}}}^t_h)\times {\boldsymbol{n}} \nonumber \\ & = (\boldsymbol{\Pi}_{{\boldsymbol{V}}} {\boldsymbol{w}})^t\times {\boldsymbol{n}}-{\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{w}}^t\times {\boldsymbol{n}} - \tau_t ({\boldsymbol{u}}^t_h-\widehat{{\boldsymbol{u}}}^t_h)\label{error_eq3} \\ & = (\boldsymbol{\Pi}_{{\boldsymbol{V}}} {\boldsymbol{w}})^t\times {\boldsymbol{n}}-{\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{w}}^t\times {\boldsymbol{n}} - \tau_t \big( (\boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}} - {\boldsymbol{e}}_{{\boldsymbol{u}}})^t - ({\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^t- {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} ) \big)\nonumber \\ & = (\boldsymbol{\Pi}_{{\boldsymbol{V}}} {\boldsymbol{w}})^t\times {\boldsymbol{n}}-{\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{w}}^t\times {\boldsymbol{n}} +\tau_t({\boldsymbol{e}}_{{\boldsymbol{u}}}^t- {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} )-\tau_t \big( (\boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}})^t - {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^t \big).\nonumber \end{align} Moreover, by the definition of $\widehat{{\boldsymbol{u}}}^n_h$ in (\ref{hat_definition}), we have \begin{align} ({\boldsymbol{e}}^n_{{\boldsymbol{u}}} - {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n}) \cdot {\boldsymbol{n}} & = ( \boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}} -{\boldsymbol{u}}_h )\cdot{\boldsymbol{n}} - ({\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^n-\widehat{{\boldsymbol{u}}}^n_h)\cdot{\boldsymbol{n}} \nonumber\\ & = ( 
\boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}}- {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^n )\cdot{\boldsymbol{n}}+\tau_n(\sigma_h-\widehat{\sigma}_h)\nonumber\\ & = ( \boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}}- {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^n )\cdot{\boldsymbol{n}}+ \tau_n \big( ( \Pi_Q \sigma - e_\sigma ) -( P_M \sigma - e_{\widehat{\sigma}} ) \big)\nonumber\\ & = ( \boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}}- {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^n )\cdot{\boldsymbol{n}}- \tau_n( e_\sigma- e_{\widehat{\sigma}})+\tau_n ( \Pi_Q \sigma - P_M \sigma).\label{error_eq4} \end{align} Inserting (\ref{error_eq3}) and (\ref{error_eq4}) into (\ref{error_eq2}), we obtain \begin{align}
&-{\mathbf{i}} \| {\boldsymbol{e}}_{{\boldsymbol{w}}} \|^2_{{\mathcal T}_h}+{\mathbf{i}} \kappa^2\|{\boldsymbol{e}}_{{\boldsymbol{u}}}\|^2_{{\mathcal T}_h} +\kappa \| {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} \|^2_{0, \partial \Omega } + \| \tau_t^{\frac{1}{2}}({\boldsymbol{e}}_{{\boldsymbol{u}}}^t- {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} ) \|^2_{\partial {\mathcal T}_h} + \|\tau_n^{\frac{1}{2}} ( e_\sigma- e_{\widehat{\sigma}} ) \|^2_{\partial {\mathcal T}_h} \nonumber\\ &= - \langle (\boldsymbol{\Pi}_{{\boldsymbol{V}}} {\boldsymbol{w}})^t\times {\boldsymbol{n}}-{\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{w}}^t\times {\boldsymbol{n}} , {\boldsymbol{e}}_{{\boldsymbol{u}}}^t- {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} \rangle_{\partial {\mathcal T}_h} + \langle \tau_t \big( (\boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}})^t - {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^t \big), {\boldsymbol{e}}_{{\boldsymbol{u}}}^t- {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} \rangle_{\partial {\mathcal T}_h} \nonumber\\ &\quad+ \langle e_\sigma- e_{\widehat{\sigma}}, ( \boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}}- {\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^n )\cdot{\boldsymbol{n}} \rangle_{\partial{\mathcal T}_h} + \langle e_\sigma- e_{\widehat{\sigma}},\tau_n ( \Pi_Q \sigma - P_M \sigma) \rangle_{\partial{\mathcal T}_h} \nonumber\\ & = - \langle (\boldsymbol{\Pi}_{{\boldsymbol{V}}} {\boldsymbol{w}}-{\boldsymbol{w}})\times {\boldsymbol{n}} , {\boldsymbol{e}}_{{\boldsymbol{u}}}^t- {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} \rangle_{\partial {\mathcal T}_h} + \langle \tau_t ( \boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}}- {\boldsymbol{u}} ), {\boldsymbol{e}}_{{\boldsymbol{u}}}^t- {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} \rangle_{\partial {\mathcal T}_h} \nonumber\\ & \quad+ \langle e_\sigma- e_{\widehat{\sigma}}, ( \boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}}- {\boldsymbol{u}} 
)\cdot{\boldsymbol{n}} \rangle_{\partial{\mathcal T}_h} + \langle e_\sigma- e_{\widehat{\sigma}},\tau_n ( \Pi_Q \sigma - \sigma) \rangle_{\partial{\mathcal T}_h},\label{error_eq5} \end{align} where the second equality is derived by the properties of $L^2$-projections ${\boldsymbol{P}}_{{\boldsymbol{M}}}$ and $P_M$. Based on (\ref{error_eq5}), taking respectively the real and imaginary parts of the left-hand side of (\ref{error_eq5}), the estimates (\ref{pre_error_analysis_1}) and (\ref{pre_error_analysis_2}) can be obtained by the approximation properties of standard $L^2$-projections, Young's inequality and the fact that $\sigma=0$. This completes the proof. \end{proof}
Now we start to use the duality argument to get an estimate for ${\boldsymbol{e}}_{{\boldsymbol{u}}}$. Given ${\boldsymbol{e}}_{{\boldsymbol{u}}} \in {\boldsymbol{L}}^2(\Omega)$, we introduce the first-order system of the dual problem (\ref{dual_problem}) with ${\boldsymbol{J}} = {\boldsymbol{e}}_{{\boldsymbol{u}}}$: \begin{subequations} \label{dual_FOS_error} \begin{align} {\mathbf{i}} \boldsymbol{\Phi} - \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi} &= 0\qquad \rm{in}\ \Omega, \\ \Vc\Vu\Vr\Vl\, \boldsymbol{\Phi} + {\mathbf{i}}\kappa^2 \boldsymbol{\Psi} + \nabla \varphi &= {\boldsymbol{e}}_{{\boldsymbol{u}}} \quad \ \rm{in}\ \Omega,\\ \label{dual_FOS_error_1} \Vd\Vi\Vv \, \boldsymbol{\Psi} &= 0\qquad \rm{in}\ \Omega,\\ \label{bc1_dual_error_FOS} \boldsymbol{\Phi} \times {\boldsymbol{n}} - \kappa \boldsymbol{\Psi}^t &= 0 \qquad \rm{on}\ \partial\Omega,\\ \varphi &= 0 \qquad \rm{on}\ \partial\Omega. \end{align} \end{subequations} Similar to the estimates in (\ref{est_dual_3_p})-(\ref{est_dual_3_1}), we have \begin{align} \label{est_dual_error_s}
&\|\varphi\|_{1,\Omega} \leq C \|{\boldsymbol{e}}_{{\boldsymbol{u}}}\|_{{\mathcal T}_h}, \\ \label{est_dual_error_1_new}
&\| \boldsymbol{\Phi} \|_{\frac{1}{2}+\alpha,\Omega} + \kappa \| \boldsymbol{\Psi} \|_{\frac{1}{2}+\alpha,\Omega} + \|\Vc\Vu\Vr\Vl\, \boldsymbol{\Psi}\|_{\frac{1}{2}+\alpha,\Omega} + (1+\kappa)\kappa \| \boldsymbol{\Psi}\|_{0,\Omega} \leq C (1+\kappa) \|{\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h}. \end{align}
We next present an important equality. \begin{lemma} \label{lemma_equality_error_eu} Let $(\boldsymbol{\Phi},\boldsymbol{\Psi},\varphi) $ be the solution of the dual problem (\ref{dual_FOS_error}). It holds that \begin{align} \label{equality_error_eu}
\| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|^2_{{\mathcal T}_h} = \sum^{5}_{k=1} E_k, \end{align} where \begin{align*} E_1 &= \langle ({\boldsymbol{e}}^t_{{\boldsymbol{u}}} -{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} )\times {\boldsymbol{n}}, \boldsymbol{\Phi} - \boldsymbol{\Pi_V \Phi} \rangle_{\partial {\mathcal T}_h},\\ E_2 & = \langle ({\boldsymbol{e}}^n_{{\boldsymbol{u}}} -{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n} )\cdot {\boldsymbol{n}}, \varphi - \Pi_Q \varphi\rangle_{\partial {\mathcal T}_h},\\ E_3&=\langle ({\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} - {\boldsymbol{e}}_{{\boldsymbol{w}}}^t )\times {\boldsymbol{n}},\boldsymbol{\Psi} - \boldsymbol{\Pi_U \Psi} \rangle_{\partial {\mathcal T}_h},\\ E_4 &= -\langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} \times {\boldsymbol{n}}+ \kappa {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} , \boldsymbol{\Psi}^t \rangle_{\partial \Omega} , \\
E_5 & = (e_\sigma, \Vd\Vi\Vv \, (\boldsymbol{\Psi} - \boldsymbol{\Pi_U \Psi} ) )_{{\mathcal T}_h} - \langle e_{\widehat{\sigma}}, (\boldsymbol{\Psi} - \boldsymbol{\Pi_U \Psi} )\cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h}. \end{align*} \end{lemma}
\begin{proof} By the dual problem (\ref{dual_FOS_error}), we have \begin{align}
\| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|^2_{{\mathcal T}_h} & = ({\boldsymbol{e}}_{{\boldsymbol{u}}} , \Vc\Vu\Vr\Vl\, \boldsymbol{\Phi} + {\mathbf{i}}\kappa^2 \boldsymbol{\Psi} + \nabla \varphi)_{{\mathcal T}_h} + ({\boldsymbol{e}}_{{\boldsymbol{w}}} , {\mathbf{i}} \boldsymbol{\Phi} - \Vc\Vu\Vr\Vl\, \boldsymbol{\Psi})_{{\mathcal T}_h}\nonumber\\ & = ({\boldsymbol{e}}_{{\boldsymbol{u}}} , \Vc\Vu\Vr\Vl\, \boldsymbol{\Pi_V \Phi} + {\mathbf{i}}\kappa^2 \boldsymbol{\Pi_U \Psi} + \nabla \Pi_Q \varphi )_{{\mathcal T}_h} + ({\boldsymbol{e}}_{{\boldsymbol{w}}} ,{\mathbf{i}} \boldsymbol{\Pi_V \Phi} - \Vc\Vu\Vr\Vl\, \boldsymbol{\Pi_U \Psi})_{{\mathcal T}_h}\nonumber\\ &\quad \ + ({\boldsymbol{e}}_{{\boldsymbol{u}}} ,\Vc\Vu\Vr\Vl\, (\boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} ) )_{{\mathcal T}_h} + ({\boldsymbol{e}}_{{\boldsymbol{u}}} ,{\mathbf{i}}\kappa^2( \boldsymbol{\Psi} - \boldsymbol{\Pi_U\Psi} ))_{{\mathcal T}_h} + ({\boldsymbol{e}}_{{\boldsymbol{u}}} , \nabla(\varphi - \Pi_Q \varphi))_{{\mathcal T}_h}\nonumber\\ &\quad \ + ({\boldsymbol{e}}_{{\boldsymbol{w}}} ,{\mathbf{i}}(\boldsymbol{\Phi} -\boldsymbol{\Pi_V\Phi} ))_{{\mathcal T}_h} - ({\boldsymbol{e}}_{{\boldsymbol{w}}} , \Vc\Vu\Vr\Vl\, (\boldsymbol{\Psi}-\boldsymbol{\Pi_U\Psi} ))_{{\mathcal T}_h}.\label{err_equ_pre_0} \end{align} By the definitions of $\boldsymbol \Pi_U$ and $\boldsymbol \Pi_V$, we have $({\boldsymbol{e}}_{{\boldsymbol{u}}},{\mathbf{i}}\kappa^2( \boldsymbol{\Psi} - \boldsymbol{\Pi_U\Psi} ))_{{\mathcal T}_h} =0$ and $({\boldsymbol{e}}_{{\boldsymbol{w}}},{\mathbf{i}}(\boldsymbol{\Phi} -\boldsymbol{\Pi_V\Phi} ))_{{\mathcal T}_h}=0$. 
Similar to the derivations of (\ref{sta_equ_pre_1})-(\ref{sta_equ_pre_3}), we have \begin{align} ({\boldsymbol{e}}_{{\boldsymbol{u}}},\Vc\Vu\Vr\Vl\, (\boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} ) )_{{\mathcal T}_h} &= \langle {\boldsymbol{e}}_{{\boldsymbol{u}}}^t \times {\boldsymbol{n}}, \boldsymbol{\Phi} -\boldsymbol{\Pi_V \Phi} \rangle_{\partial {\mathcal T}_h},\label{err_equ_pre_1} \\
({\boldsymbol{e}}_{{\boldsymbol{u}}}, \nabla(\varphi - \Pi_Q \varphi))_{{\mathcal T}_h} &= \langle {\boldsymbol{e}}_{{\boldsymbol{u}}}^n \cdot {\boldsymbol{n}}, \varphi-\Pi_Q \varphi\rangle_{\partial {\mathcal T}_h} ,\label{err_equ_pre_2} \\ - ({\boldsymbol{e}}_{{\boldsymbol{w}}}, \Vc\Vu\Vr\Vl\, (\boldsymbol{\Psi}-\boldsymbol{\Pi_U\Psi} ))_{{\mathcal T}_h} &= - \langle {\boldsymbol{e}}^t_{{\boldsymbol{w}}} \times {\boldsymbol{n}}, \boldsymbol{\Psi}-\boldsymbol{\Pi_U\Psi}\rangle_{\partial {\mathcal T}_h}.\label{err_equ_pre_3} \end{align} Taking ${\boldsymbol{r}}_h = \boldsymbol{\Pi_V \Phi}$ in the equation (\ref{error_eq_a}), noting that ${\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\times {\boldsymbol{n}}$ is continuous across each interior face and using the boundary condition (\ref{bc1_dual_error_FOS}), we obtain \begin{align} ({\boldsymbol{e}}_{{\boldsymbol{u}}}, \Vc\Vu\Vr\Vl\, \boldsymbol{\Pi_V \Phi} )_{{\mathcal T}_h} &= ({\mathbf{i}} {\boldsymbol{e}}_{{\boldsymbol{w}}},\boldsymbol{\Pi_V \Phi} )_{{\mathcal T}_h} + \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\times {\boldsymbol{n}}, \boldsymbol{\Pi_V \Phi}\rangle_{\partial {\mathcal T}_h} \nonumber\\ & = ({\mathbf{i}} {\boldsymbol{e}}_{{\boldsymbol{w}}},\boldsymbol{\Pi_V \Phi} )_{{\mathcal T}_h} + \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\times {\boldsymbol{n}}, \boldsymbol{\Pi_V \Phi} - \boldsymbol{ \Phi} \rangle_{\partial {\mathcal T}_h} - \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}, \boldsymbol{ \Phi}\times{\boldsymbol{n}}\rangle_{\partial \Omega}\nonumber\\ & = ({\mathbf{i}} {\boldsymbol{e}}_{{\boldsymbol{w}}},\boldsymbol{\Pi_V \Phi} )_{{\mathcal T}_h} + \langle{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}\times {\boldsymbol{n}}, \boldsymbol{\Pi_V \Phi} - \boldsymbol{ \Phi} \rangle_{\partial {\mathcal T}_h} - \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}, \kappa \boldsymbol{\Psi}^t \rangle_{\partial \Omega}.\label{err_equ_pre_4} \end{align} Note that 
${\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n}\cdot {\boldsymbol{n}}$ is continuous across each interior face and $\varphi \in H^1_0(\Omega)$. We let $q_h = \Pi_Q \varphi$ in (\ref{error_eq_c}) to obtain \begin{align} ({\boldsymbol{e}}_{{\boldsymbol{u}}},\nabla \Pi_Q \varphi)_{{\mathcal T}_h} = \langle{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n} \cdot {\boldsymbol{n}}, \Pi_Q \varphi \rangle_{\partial {\mathcal T}_h} = \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n}\cdot {\boldsymbol{n}}, \Pi_Q \varphi - \varphi \rangle_{\partial {\mathcal T}_h} .\label{err_equ_pre_5} \end{align} We further take ${\boldsymbol{v}}_h = \boldsymbol{\Pi_U \Psi} $ in (\ref{error_eq_b}) to get \begin{align} &-( {\boldsymbol{e}}_{{\boldsymbol{w}}},\Vc\Vu\Vr\Vl\, \boldsymbol{\Pi_U\Psi})_{{\mathcal T}_h} \nonumber\\ &= - \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t}\times {\boldsymbol{n}},\boldsymbol{\Pi_U\Psi} \rangle_{\partial {\mathcal T}_h} + ( {\mathbf{i}} \kappa^2 {\boldsymbol{e}}_{{\boldsymbol{u}}},\boldsymbol{\Pi_U\Psi} )_{{\mathcal T}_h} - ( e_\sigma,\Vd\Vi\Vv \, \boldsymbol{\Pi_U\Psi})_{{\mathcal T}_h} + \langle e_{\widehat{\sigma}}, \boldsymbol{\Pi_U\Psi} \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} ,\nonumber\\ &= - \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t}\times {\boldsymbol{n}},\boldsymbol{\Pi_U\Psi} - \boldsymbol{\Psi}\rangle_{\partial {\mathcal T}_h} - \langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t}\times {\boldsymbol{n}},\boldsymbol{\Psi}^t\rangle_{\partial \Omega} + ( {\mathbf{i}} \kappa^2 {\boldsymbol{e}}_{{\boldsymbol{u}}},\boldsymbol{\Pi_U\Psi} )_{{\mathcal T}_h} \nonumber\\ &\quad - ( e_\sigma,\Vd\Vi\Vv \, \boldsymbol{\Pi_U\Psi} - \Vd\Vi\Vv \, \boldsymbol{\Psi} )_{{\mathcal T}_h} + \langle e_{\widehat{\sigma}}, \boldsymbol{\Pi_U\Psi} \cdot {\boldsymbol{n}} -\boldsymbol{\Psi} \cdot {\boldsymbol{n}} \rangle_{\partial {\mathcal T}_h} ,\label{err_equ_pre_6} \end{align} where the above second equality holds due to the 
fact that $\Vd\Vi\Vv \, \boldsymbol{\Psi}=0$, $ {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t}\times {\boldsymbol{n}}$ and $e_{\widehat{\sigma}}$ are continuous across each interior face, and $e_{\widehat{\sigma}}=0$ on ${\mathcal E}^{\partial}_h$. Then, inserting (\ref{err_equ_pre_1})-(\ref{err_equ_pre_6}) into (\ref{err_equ_pre_0}) yields the result. \end{proof}
Based on the above lemma, we can obtain the estimate for $\|{\boldsymbol{e}}_{{\boldsymbol{u}}}\|_{{\mathcal T}_h}$.
\begin{lemma}\label{eu_pre} If the regularity property (\ref{est_dual_error_s}) holds, and $\tau_t,\tau_n$ are chosen as in Lemma \ref{error_analysis_1}, we have \begin{align} \label{est_eu_result}
\| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h} \leq C \big( R_{{\boldsymbol{w}}} \| {\boldsymbol{w}} \|_{\frac{1}{2}+\alpha,\Omega}+ R_{{\boldsymbol{u}}} \| {\boldsymbol{u}} \|_{\frac{1}{2}+\alpha,\Omega}\big), \end{align} where $R_{{\boldsymbol{w}}}$ and $R_{{\boldsymbol{u}}}$ are defined as in Theorem \ref{error_thm}. \end{lemma}
\begin{proof} We need to derive the upper bounds for $E_1,\cdots,E_5$ in Lemma \ref{lemma_equality_error_eu}. By the Cauchy-Schwarz inequality and the approximation property of $\boldsymbol{\Pi_V}$, we obtain \begin{align*}
E_1 \leq C \| \tau_t^{\frac{1}{2}}({\boldsymbol{e}}^t_{{\boldsymbol{u}}} -{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} ) \|_{\partial {\mathcal T}_h} \tau_t^{-\frac{1}{2}} (\frac{h}{p})^{\alpha} \| \boldsymbol{\Phi} \|_{\frac{1}{2}+\alpha,\Omega} \leq C\| \tau_t^{\frac{1}{2}}({\boldsymbol{e}}^t_{{\boldsymbol{u}}} -{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t}) \|_{\partial {\mathcal T}_h} (1+\kappa)(\frac{h}{p} )^{\frac{1}{2}+\alpha}\| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h}. \end{align*} By the identity (\ref{error_eq4}) for $ ({\boldsymbol{e}}^n_{{\boldsymbol{u}}} -{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^n} )\cdot {\boldsymbol{n}}$ and the fact that $\sigma=0$, we can derive that \begin{align*} E_2& = \langle ( \boldsymbol{\Pi}_{{\boldsymbol{U}}} {\boldsymbol{u}}- {\boldsymbol{u}}+{\boldsymbol{u}}^n-{\boldsymbol{P}}_{{\boldsymbol{M}}} {\boldsymbol{u}}^n )\cdot{\boldsymbol{n}}- \tau_n( e_\sigma- e_{\widehat{\sigma}})+\tau_n ( \Pi_Q \sigma - P_M \sigma) , \varphi - \Pi_Q \varphi\rangle_{\partial {\mathcal T}_h}\\
&\leq C\big( (\frac{h}{p})^{\alpha} \| {\boldsymbol{u}} \|_{\frac{1}{2}+\alpha,\Omega}
+\tau_n^{\frac{1}{2}} \| \tau_n^{\frac{1}{2}} (e_\sigma -e_{\widehat{\sigma}})\|_{\partial {\mathcal T}_h} \big) (\frac{h}{p})^{\frac{1}{2}} \| \varphi \|_{1,\Omega}\\
&\leq C\big((\frac{h}{p})^{\frac{1}{2}+\alpha} \| {\boldsymbol{u}} \|_{\frac{1}{2}+\alpha,\Omega} + \frac{ (1+\kappa)^{\frac{1}{2}}h}{p} \| \tau_n^{\frac{1}{2}} (e_\sigma -e_{\widehat{\sigma}})\|_{\partial {\mathcal T}_h} \big) \| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h}. \end{align*} Moreover, by the identity (\ref{error_eq3}) for $({\boldsymbol{e}}^t_{{\boldsymbol{w}}}-{\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t})\times {\boldsymbol{n}} $ and the triangle inequality, we get \begin{align*} E_3&=-\langle \tau_t ( {\boldsymbol{e}}^t_{{\boldsymbol{u}}} -{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} ),\boldsymbol{\Psi} - \boldsymbol{\Pi_U \Psi} \rangle_{\partial {\mathcal T}_h}+\langle {\boldsymbol{P}}_{{\boldsymbol{M}}}{\boldsymbol{w}}^t \times {\boldsymbol{n}}-(\boldsymbol{\Pi_V {\boldsymbol{w}}} )^t \times {\boldsymbol{n}},\boldsymbol{\Psi} - \boldsymbol{\Pi_U \Psi} \rangle_{\partial {\mathcal T}_h}\\ &\quad +\tau_t \langle ( \boldsymbol{\Pi_U {\boldsymbol{u}}} )^t - {\boldsymbol{P}}_{{\boldsymbol{M}}}{\boldsymbol{u}}^t ,\boldsymbol{\Psi} - \boldsymbol{\Pi_U \Psi} \rangle_{\partial {\mathcal T}_h} \\
&\leq C \Big( \frac{(1+\kappa)}{\kappa}(\frac{h}{p})^{\alpha-\frac{1}{2}} \| \tau_t^{\frac{1}{2}}({\boldsymbol{e}}^t_{{\boldsymbol{u}}} -{\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} )\|_{\partial {\mathcal T}_h} + \frac{(1+\kappa)}{\kappa}(\frac{h}{p})^{2\alpha} \| {\boldsymbol{w}} \|_{\frac{1}{2}+\alpha,\Omega}\\
&\quad +\frac{(1+\kappa)}{\kappa}(\frac{h}{p})^{2\alpha-1} \|{\boldsymbol{u}}\|_{\frac{1}{2}+\alpha,\Omega} \Big) \| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h}. \end{align*} By the boundary condition (\ref{error_eq_e}), we have $E_4 =-\langle {\boldsymbol{e}}_{\widehat{{\boldsymbol{w}}}^t} \times {\boldsymbol{n}}+ \kappa {\boldsymbol{e}}_{\widehat{{\boldsymbol{u}}}^t} , \boldsymbol{P_M}\boldsymbol{\Psi}^t \rangle_{\partial \Omega}=0 $. Applying integration by parts, we obtain the estimate for $E_5$ as follows: \begin{align*} E_5 & = ( \nabla e_\sigma, \boldsymbol{ \Psi-\Pi_U \Psi } )+\langle e_\sigma-e_{\widehat{\sigma}}, ( \boldsymbol{ \Psi-\Pi_U \Psi } )\cdot {\boldsymbol{n}}\rangle_{\partial {\mathcal T}_h} \\
&= \langle e_\sigma-e_{\widehat{\sigma}}, ( \boldsymbol{ \Psi-\Pi_U \Psi } )\cdot {\boldsymbol{n}}\rangle_{\partial {\mathcal T}_h} \leq C \frac{(1+\kappa)^{\frac{1}{2}}}{\kappa}(\frac{h}{p})^{\alpha-\frac{1}{2}}\| \tau_n^{\frac{1}{2}} (e_\sigma -e_{\widehat{\sigma}})\|_{\partial {\mathcal T}_h} \| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h}. \end{align*} Finally, combining the above estimates for $E_1,\cdots,E_5$ and the estimate (\ref{pre_error_analysis_1}), we can conclude the result. \end{proof}
We can now give the proof of Theorem \ref{error_thm}. \begin{proof} (Proof of Theorem \ref{error_thm}) By the triangle inequality, we have \[
\|{\boldsymbol{u}}-{\boldsymbol{u}}_h\|_{{\mathcal T}_h} \leq \| {\boldsymbol{u}} - \boldsymbol{\Pi_U u} \|_{{\mathcal T}_h} + \|{\boldsymbol{e}}_{{\boldsymbol{u}}}\|_{{\mathcal T}_h}. \]
The error estimate (\ref{error_ut_h1}) can be obtained by the approximation property of $\boldsymbol{\Pi_U}$ and the estimate (\ref{est_eu_result}) for $\|{\boldsymbol{e}}_{{\boldsymbol{u}}}\|_{{\mathcal T}_h}$. Similarly, (\ref{error_wt_h1}) can be obtained by the triangle inequality, the approximation property of $\boldsymbol{\Pi_V}$, (\ref{pre_error_analysis_2}) and (\ref{est_eu_result}). \end{proof}
\begin{remark}
Besides the error estimate for $\|\tau_n^{\frac{1}{2}} ( e_\sigma- e_{\widehat{\sigma}} ) \|_{\partial {\mathcal T}_h}$ in (\ref{pre_error_analysis_1}), we can also obtain the error estimate for $\|\nabla e_\sigma\|_{{\mathcal T}_h}$. Actually, this can be derived similarly to the stability estimate for $\|\nabla \sigma_h\|_{{\mathcal T}_h}$ (cf. Remark \ref{remark_sta_1}) by taking ${\boldsymbol{v}}_h = \nabla e_\sigma$ in the error equation (\ref{error_eq_b}). Then the error estimate for $\| \nabla(\sigma-\sigma_h) \|_{{\mathcal T}_h}$ can be further deduced by the triangle inequality. When $\kappa h/p \leq C_0$, one may tune the parameters $\tau_t$ and $\tau_n$ (cf. Remark \ref{remark_sta_1}) and get the error estimates. \end{remark}
\section{Stability and error estimates for the ideal case}\label{ideal_case} In this section, we consider the stability estimates and error estimates of the HDG method (\ref{discrete_mixed_form}) under some ideal assumptions of the problem (\ref{pde_original}) and the dual problem (\ref{dual_problem}). We assume that when $\Omega$ is a smooth star-shaped domain, the solutions of the first-order system (\ref{pde_mixed_first_order}) satisfy that ${\boldsymbol{u}} \in {\boldsymbol{H}}^2(\Omega)$ and ${\boldsymbol{w}} \in {\boldsymbol{H}}^1(\Omega)$. When ${\boldsymbol{f}} $ is divergence-free and ${\boldsymbol{g}}\in {\boldsymbol{H}}_T^{\frac{1}{2}}(\partial\Omega):=\{{\boldsymbol{g}}\in [H^{\frac{1}{2}}(\partial\Omega)]^3,\ {\boldsymbol{g}}\cdot\boldsymbol{{\boldsymbol{n}}}=0\ {\textrm {on}}\ \partial \Omega\}$, we assume that the following estimate, which has been mentioned in \cite{FW2014}, holds: \[
\| {\boldsymbol{u}} \|_{2,\Omega} + \|{\boldsymbol{w}}\|_{1,\Omega} \leq C ( 1 + \kappa ) {\boldsymbol{M}}({\boldsymbol{f}},{\boldsymbol{g}}) + C\|{\boldsymbol{g}}\|_{\frac{1}{2},\partial \Omega}, \]
where ${\boldsymbol{M}}({\boldsymbol{f}},{\boldsymbol{g}}) = \|{\boldsymbol{f}}\|_{0,\Omega} +\|{\boldsymbol{g}}\|_{0,\partial \Omega} $. In this ideal case, we can also assume the solution of the dual problem (\ref{dual_problem}) satisfies that $\boldsymbol{\Psi} \in {\boldsymbol{H}}^2(\Omega)$, the estimate (\ref{est_dual_1}) holds with $\alpha=\frac{1}{2}$ and there also holds \begin{align} \label{est_dual_2}
\|\boldsymbol{\Psi}\|_{2,\Omega} \leq C (1+\kappa) \| {\boldsymbol{J}}-\nabla \varphi \|_{0,\Omega} \leq C( 1+\kappa) \| {\boldsymbol{J}}\|_{0,\Omega}. \end{align}
% NOTE(review): the range below cites (\ref{es_pj_1}) twice; the second reference is presumably a different projection estimate --- confirm the intended label.
We assume the approximation results of $L^2$-projections in (\ref{es_pj_1})-(\ref{es_pj_1}) still hold; then we have the following stability estimates and error estimates for the HDG method (\ref{discrete_mixed_form}). \begin{lemma} \label{stability_thm0} We assume that (\ref{est_dual_1}) holds with $\alpha = \frac{1}{2} $ and (\ref{est_dual_2}) also holds true. Let $({\boldsymbol{w}}_h,{\boldsymbol{u}}_h,\widehat{{\boldsymbol{u}}}^t_h,\sigma_h, \widehat{\sigma}_h)$ be the solution of the problem (\ref{discrete_mixed_form}). We have \begin{align} \label{stability_u_h0}
&\| {\boldsymbol{u}}_h \|_{{\mathcal T}_h} \leq C\Big( \widetilde{C}^2_{\rm stab}\|
\frac{{\boldsymbol{f}}}{\kappa} \|_{0,\Omega} + \widetilde{C}_{\rm stab}
\|\frac{{\boldsymbol{g}}}{\kappa}\|_{0,\partial \Omega} \Big) ,\\ \label{stability_w_h0}
&\| {\boldsymbol{w}}_h \|_{{\mathcal T}_h} \leq C\Big( (\frac{1}{\kappa}+\widetilde{C}^2_{\rm stab}) \| {\boldsymbol{f}} \|_{0,\Omega} +(\frac{1}{\kappa^{\frac{1}{2}}}+\widetilde{C}_{\rm stab}) \|{\boldsymbol{g}}\|_{0,\partial \Omega} \Big), \\ \label{stability_ut_h0}
&\| \widehat{ {\boldsymbol{u}}}^t_h \|_{\partial {\mathcal T}_h} \leq C\big( (\frac{\kappa h}{p})^{\frac{1}{2}}
+ ph^{-\frac{1}{2}} \big)\Big( \widetilde{C}^2_{\rm stab}\|
\frac{{\boldsymbol{f}}}{\kappa} \|_{0,\Omega} + \widetilde{C}_{\rm stab}
\|\frac{{\boldsymbol{g}}}{\kappa}\|_{0,\partial \Omega} \Big), \end{align} where $\widetilde{C}_{\rm stab} := 1 + \frac{(1+\kappa)\kappa^{\frac{1}{2}} h}{p}+\frac{(1+\kappa)^{\frac{1}{2}}\kappa^{\frac{1}{2}} h}{p}$. \end{lemma} \begin{proof}
In order to get the upper bound for $\| {\boldsymbol{u}}_h \|_{{\mathcal T}_h}$, it suffices to bound the terms $T_1,\cdots,T_6$ as in the proof of Theorem \ref{stability_thm}. When (\ref{est_dual_1}) holds with $\alpha = \frac{1}{2} $ and (\ref{est_dual_2}) also holds, we have the following regularity estimate for the dual problem (\ref{dual_FOS}), \begin{align}
\| \boldsymbol{\Phi}\|_{1,\Omega} + \| \boldsymbol{\Psi} \|_{2,\Omega} + \kappa \| \boldsymbol{\Psi} \|_{1,\Omega} + \|\Vc\Vu\Vr\Vl\, \boldsymbol{\Psi}\|_{1,\Omega} + \kappa(1+\kappa) \| \boldsymbol{\Psi}\|_{0,\Omega} \leq C(1+\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}.\nonumber \end{align} By the above regularity estimate, we have \begin{align*}
T_1& \leq C \| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \tau_t^{-\frac{1}{2}} (\frac{h}{p})^{\frac{1}{2}} (1+\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h},\\
T_3 &\leq C \| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \tau_t^{\frac{1}{2}} (\frac{h}{p})^{\frac{3}{2}} (1+\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h},\\
T_6 & \leq C \| \tau^{\frac{1}{2}}_n (\sigma_h - \widehat{\sigma}_h) \|_{\partial {\mathcal T}_h} \tau^{-\frac{1}{2}}_n (\frac{h}{p})^{\frac{3}{2}} (1+\kappa) \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h} \end{align*} and the estimates for $T_2,T_4,T_5$ are the same as the estimates in the proof of Theorem \ref{stability_thm}. Combining the estimates for $T_1,\cdots, T_6$ again, we obtain \begin{align}
\|{\boldsymbol{u}}_h\|^2_{{\mathcal T}_h} & \leq C \kappa^{-1}\left(\| {\boldsymbol{f}} \|_{0,\Omega}+ \|{\boldsymbol{g}}\|_{0,\partial \Omega} \right)\|{\boldsymbol{u}}_h\|_{{\mathcal T}_h} \nonumber\\ &\quad + C
\big(\tau_t^{-\frac{1}{2}} (\frac{h}{p})^{\frac{1}{2}} + \tau_t^{\frac{1}{2}} (\frac{h}{p})^{\frac{3}{2}} \big)(1+\kappa) \| \tau_t^{\frac{1}{2}} ({\boldsymbol{u}}^t_h- \widehat{{\boldsymbol{u}}}^t_h) \|_{\partial {\mathcal T}_h} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h} \nonumber\\
& \quad + C \big( \tau_n^{\frac{1}{2}} (\frac{h}{p})^{\frac{1}{2}}+\tau^{-\frac{1}{2}}_n (\frac{h}{p})^{\frac{3}{2}} (1+\kappa) \big) \| \tau^{\frac{1}{2}}_n (\sigma_h - \widehat{\sigma}_h) \|_{\partial {\mathcal T}_h} \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}. \label{sta_uh_new0} \end{align} Here we choose $\tau_t = \frac{p}{h}$ and $\tau_n = \frac{(1+\kappa) h}{p}$. Then, the stability estimate (\ref{stability_u_h0}) can be obtained by (\ref{sta_uh_new0}), (\ref{estimate_mixed_1}) and the Young's inequality, and the stability estimates (\ref{stability_w_h0}) and (\ref{stability_ut_h0}) can be further derived as the analysis in Theorem \ref{stability_thm}. \end{proof}
\begin{lemma}\label{error_ideal_lemma} We assume that (\ref{est_dual_1}) holds with $\alpha = \frac{1}{2} $ and (\ref{est_dual_2}) also holds true. Let $({\boldsymbol{w}}_h,{\boldsymbol{u}}_h,\widehat{{\boldsymbol{u}}}^t_h,\sigma_h, \widehat{\sigma}_h)$ be the solution of the problem (\ref{discrete_mixed_form}). We have \begin{align} \label{error_eu_thm0}
&\| {\boldsymbol{u}} - {\boldsymbol{u}}_h \|_{{\mathcal T}_h} \leq C \big( \widetilde{R}_{{\boldsymbol{w}}} \| {\boldsymbol{w}} \|_{t,\Omega}+ \widetilde{R}_{{\boldsymbol{u}}} \| {\boldsymbol{u}} \|_{s,\Omega}\big) ,\\ \label{error_ew_thm0}
&\| {\boldsymbol{w}} - {\boldsymbol{w}}_h \|_{{\mathcal T}_h} \leq C \Big( \big( \frac{h^t}{p^t}+\kappa \widetilde{R}_{{\boldsymbol{w}}}\big) \| {\boldsymbol{w}} \|_{t,\Omega}+\big( (1+(1+\kappa)^{-\frac{1}{2}}) \frac{h^{s-1}}{p^{s-1}}+ \kappa \widetilde{R}_{{\boldsymbol{u}}} \big) \| {\boldsymbol{u}} \|_{s,\Omega} \Big), \end{align} where $s\geq 1, t\geq 1$, $\widetilde{R}_{{\boldsymbol{w}}}:=\frac{(1+\kappa)^{\frac{1}{2}} h^{t+1} }{p^{t+1}} +\frac{(1+\kappa) h^{t+1} }{p^{t+1}}$ and $\widetilde{R}_{{\boldsymbol{u}}}: = \frac{h^s}{p^s}+ \frac{(1+\kappa)^{\frac{1}{2}} h^{s} }{p^{s}} +\frac{(1+\kappa) h^{s} }{p^{s}} $. \end{lemma}
\begin{proof} When (\ref{est_dual_1}) holds with $\alpha = \frac{1}{2} $ and (\ref{est_dual_2}) also holds, we have the following regularity estimate for the dual problem (\ref{dual_FOS_error}), \begin{align*}
\| \boldsymbol{\Phi} \|_{1,\Omega} + \| \boldsymbol{\Psi} \|_{2,\Omega} + \kappa \| \boldsymbol{\Psi} \|_{1,\Omega} + \|\Vc\Vu\Vr\Vl\, \boldsymbol{\Psi}\|_{1,\Omega} + (1+\kappa)\kappa \| \boldsymbol{\Psi}\|_{0,\Omega} \leq C (1+\kappa) \|{\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h}. \end{align*} Repeating the similar estimates in Lemma \ref{eu_pre} for $E_1,\cdots,E_5$ and using (\ref{pre_error_analysis_1}), we obtain \begin{align}
\| {\boldsymbol{e}}_{{\boldsymbol{u}}} \|_{{\mathcal T}_h} \leq C \big( \widetilde{R}_{{\boldsymbol{w}}} \| {\boldsymbol{w}} \|_{t,\Omega}+ \widetilde{R}_{{\boldsymbol{u}}} \| {\boldsymbol{u}} \|_{s,\Omega}\big).\label{last_est0} \end{align} Then the error estimate (\ref{error_eu_thm0}) is obtained directly by the triangle inequality, the approximation property of $\boldsymbol{\Pi_U}$ and the above estimate. The error estimate (\ref{error_ew_thm0}) can also be obtained by the triangle inequality, the approximation property of $\boldsymbol{\Pi_V}$, (\ref{pre_error_analysis_2}) and (\ref{last_est0}). \end{proof}
\begin{remark} By Lemma \ref{error_ideal_lemma}, under the assumptions made in the section, we have \begin{align*}
\| {\boldsymbol{u}} - {\boldsymbol{u}}_h \|_{{\mathcal T}_h} &\leq C \big( \frac{\kappa h^2}{p^2} + \frac{\kappa^{\frac{3}{2}} h^2}{p^2} + \frac{\kappa^2 h^2}{p^2} \big) \widehat{{\boldsymbol{M}}}({\boldsymbol{f}},{\boldsymbol{g}}) \leq C( \frac{\kappa h^2}{p^2} + \frac{\kappa^2 h^2}{p^2} ),\\
\| {\boldsymbol{w}} - {\boldsymbol{w}}_h \|_{{\mathcal T}_h}& \leq C \big( \frac{\kappa h}{p} + \frac{\kappa^2 h^2}{p^2} + \frac{\kappa^{\frac{5}{2}} h^2}{p^2} + \frac{\kappa^3 h^2}{p^2} \big) \widehat{{\boldsymbol{M}}}({\boldsymbol{f}},{\boldsymbol{g}}) \leq C( \frac{\kappa h}{p} + \frac{\kappa^3 h^2}{p^2} ). \end{align*}
Here $\kappa > 1$ and $\widehat{{\boldsymbol{M}}}({\boldsymbol{f}},{\boldsymbol{g}}) = {{\boldsymbol{M}}}({\boldsymbol{f}},{\boldsymbol{g}}) + \frac{1}{\kappa} \|{\boldsymbol{g}}\|_{\frac{1}{2},\partial \Omega} $. The above estimates indicate that the error $\| {\boldsymbol{w}} - {\boldsymbol{w}}_h \|_{{\mathcal T}_h} $ cannot be controlled by $\frac{\kappa h}{p}$, and the pollution term is of order $O( \frac{\kappa^3 h^2}{p^2})$. This provides evidence of the existence of the so-called ``pollution effect''. When $\frac{\kappa^3 h^2}{p^2}\leq C$, the discrete stability estimates for $\|{\boldsymbol{u}}_h\|_{{\mathcal T}_h}$ and $\|{\boldsymbol{w}}_h\|_{{\mathcal T}_h}$ can be improved to $ \|{\boldsymbol{u}}_h\|_{{\mathcal T}_h} \leq \frac{C}{\kappa} {{\boldsymbol{M}}}({\boldsymbol{f}},{\boldsymbol{g}})$ and $\|{\boldsymbol{w}}_h\|_{{\mathcal T}_h} \leq C {{\boldsymbol{M}}}({\boldsymbol{f}},{\boldsymbol{g}})$. \end{remark}
\section{Numerical results} In this section, we present numerical results of the HDG method for the following time-harmonic Maxwell problem (cf. \cite{FW2014}) in a unit cube $\Omega=[0,1]\times[0,1]\times[0,1] $: \begin{align*} {\bf curl}\, {\bf curl}\, {{\boldsymbol{u}}}-\kappa^2 {{\boldsymbol{u}}}&={\bf 0 } \qquad {\rm in }\ \Omega,\\ {\bf curl}\, {{\boldsymbol{u}}}\times \boldsymbol{n} -{\bf i}\kappa {{\boldsymbol{u}}}^t&=\widetilde{{\boldsymbol{g}}} \qquad {\rm on }\ \partial\Omega. \end{align*} Here $\widetilde{{\boldsymbol{g}}}$ is chosen such that the exact solution is given by $${\boldsymbol{u}}=(e^{{\bf i}\kappa z},e^{{\bf i}\kappa x},e^{{\bf i}\kappa y})^T.$$
The time-harmonic Maxwell problem (\ref{pde_original}) is an approximation of the electromagnetic scattering problem with time dependence $e^{{\bf i} \omega t}$, where $\omega$ is the frequency. If the problem is posed with time dependence $e^{-{\bf i} \omega t}$, then the sign before ${\bf i}$ in (\ref{BC-PDE}) is negative. The analysis of the HDG method in this paper applies to both cases. In the following experiment, we apply the HDG method with piecewise linear (HDG-P1), piecewise quadratic (HDG-P2) and piecewise cubic (HDG-P3) finite element spaces respectively to the second case. For a fixed wave number $\kappa$, we first show the dependence of the convergence of
$\|{\boldsymbol{u}}-{\boldsymbol{u}}_h\|_{0,\Omega}$ and $\|{\boldsymbol{w}}-{\boldsymbol{w}}_h\|_{0,\Omega}$ on the polynomial order $p$ and the mesh size $h$. Figure \ref{fig1} displays the above errors for $\kappa=20$ by the HDG-P1, HDG-P2, and HDG-P3 approximations. The pollution errors always appear on the coarse meshes. However, we find that the errors converge almost at the rate $O(\kappa h^2/p^2)$ on the fine meshes, which is slightly better than the theoretical prediction for $\|{\boldsymbol{w}}-{\boldsymbol{w}}_h\|_{0,\Omega}$. On the other hand, for the cases of $\kappa=30$ and $\kappa=50$, Figure \ref{fig2}
shows that the errors of $\|{\boldsymbol{u}}-{\boldsymbol{u}}_h\|_{0,\Omega}$ and
$\|{\boldsymbol{w}}-{\boldsymbol{w}}_h\|_{0,\Omega}$ always decrease for high order polynomial approximations.
\begin{figure}\label{fig1}
\end{figure}
\begin{figure}\label{fig2}
\end{figure}
Figure \ref{fig3} displays the relative errors $\|{\boldsymbol{u}}-{\boldsymbol{u}}_h\|_{0,\Omega}/\|{\boldsymbol{u}}\|_{0,\Omega}$ and $\|{\boldsymbol{w}}-{\boldsymbol{w}}_h\|_{0,\Omega}/\|{\boldsymbol{w}}\|_{0,\Omega}$ for the HDG-P1 approximation according to different mesh size conditions. The left graph of Figure \ref{fig3} shows the relationship between the relative errors and the wave number $\kappa$ under the mesh condition $\kappa h = 2$ for the HDG-P1 approximation. We observe that the relative errors cannot be controlled by $\kappa h $ and increase with $\kappa $, which indicates the existence of the pollution error. The right graph of Figure \ref{fig3} shows the relative errors of the HDG-P1 approximation under the mesh condition $\kappa^3h^2 = 2$. It shows that under this mesh condition, the relative errors do not increase with $\kappa$.
\begin{figure}\label{fig3}
\end{figure}
For a fixed wave number $\kappa$, we show the relative error $\|{\boldsymbol{w}}-{\boldsymbol{w}}_h\|_{0,\Omega}/\|{\boldsymbol{w}}\|_{0,\Omega}$ for the HDG-P1 approximation in comparison with the relative error $\|{\bf curl} ({\boldsymbol{u}}- {\boldsymbol{u}}_h)\|_{0,\Omega}/\|{\bf curl}\, {\boldsymbol{u}}\|_{0,\Omega}$ for the standard lowest-order edge element approximation of the second type. The left graph of Figure \ref{fig4} displays the relative error of the HDG-P1 solution for $\kappa= 10, 20, 30$, while the right one shows the relative error for the same cases based on the standard lowest-order edge element method. We find that the relative error for the HDG-P1 approximation stays around 100\% while the relative error for the standard edge element approximation oscillates around 100\% before they drop below 100\%, which confirms the stability property of our theoretical analysis for the HDG method and indicates that the HDG method is more stable than the standard edge element method for the time-harmonic Maxwell problem with high wave number.
\begin{figure}\label{fig4}
\end{figure}
\begin{table}[htbp]
\caption{\footnotesize The relative error $\|{\bf curl} ({\boldsymbol{u}}- {\boldsymbol{u}}_h)\|_{0,\Omega}/\|{\bf curl} \,{\boldsymbol{u}}\|_{0,\Omega}$ for the lowest-order edge element (the second type) approximation for the case $\kappa=50$ and the relative error $\|{\boldsymbol{w}}-{\boldsymbol{w}}_h\|_{0,\Omega}/\|{\boldsymbol{w}}\|_{0,\Omega}$ for the HDG-P1, HDG-P2 and HDG-P3 approximations with respect to different DOFs.}\label{tab1} \begin{center} \footnotesize
\begin{tabular}{|c|c|c|cccc|} \hline \multirow{2}{*}{Edge element}
& \multicolumn{2}{|c|}{DOFs} & 8368 & 62048 & 477376 & 3744128 \\ \cline{2-7}
& \multicolumn{2}{|c|}{The relative error } & 111.9\% & 115.8\% & 109.1\% & 42.7\% \\
\hline \multirow{2}{*}{HDG-P1}
& \multicolumn{2}{|c|}{DOFs} & 9792 & 76032 & 599040 & 4755456 \\ \cline{2-7}
& \multicolumn{2}{|c|}{The relative error } & 96.8\% & 96.7\% & 82.3\% & 30\% \\
\hline \multirow{2}{*}{HDG-P2}
& \multicolumn{2}{|c|}{DOFs} & --- & 19584 & 152064 & 1198080 \\ \cline{2-7}
& \multicolumn{2}{|c|}{The relative error } & --- & 100\% & 89.4\% & 21.6\% \\
\hline \multirow{2}{*}{HDG-P3}
& \multicolumn{2}{|c|}{DOFs} & --- & 32640 & 253440 & 1996800 \\ \cline{2-7}
& \multicolumn{2}{|c|}{The relative error } & --- & 100\% & 54.3\% & 2\% \\
\hline
\end{tabular} \end{center} \end{table}
Table \ref{tab1} shows the numbers of degrees of freedom (DOFs) and the relative error $\|{\bf curl} ({\boldsymbol{u}}- {\boldsymbol{u}}_h)\|_{0,\Omega}/\|{\bf curl}\, {\boldsymbol{u}}\|_{0,\Omega}$ for the standard edge element approximation with respect to the relative error $\|{\boldsymbol{w}}-{\boldsymbol{w}}_h\|_{0,\Omega}/\|{\boldsymbol{w}}\|_{0,\Omega}$ for the HDG-P1, HDG-P2 and HDG-P3 approximations. It can be observed that the HDG-P1 approximation performs better than the standard edge element method when the numbers of DOFs are close. We can also find that the HDG method with higher order polynomial approximation may reach more accurate solutions with fewer DOFs, which indicates the efficiency of the HDG method with high polynomial order for the time-harmonic Maxwell problem with high wave number. We should note that the numerical results in \cite{FW2014} show the stability of the IPDG method based on the piecewise linear polynomial approximation for the time-harmonic Maxwell problem with high wave number. Here, our HDG method preserves the advantages of the IPDG method in \cite{FW2014}, and it results in a discrete system with significantly reduced DOFs when it is applied with high-order polynomial approximations.
\begin{figure}
\caption{\footnotesize The traces of the real part of the first component of the HDG-P1 solutions for $\kappa=30$ and $\kappa=50$ (left and right) on the meshes with $h=1/16$ and $h=1/32$ (top and bottom). The traces of the real part of the first component of the exact solution are plotted in the blue lines.}
\label{fig5}
\end{figure}
\begin{figure}
\caption{\footnotesize The traces of the real part of the first component of the HDG-P2 and HDG-P3 solutions (top and bottom) for $\kappa=30$ and $\kappa=50$ (left and right) on the mesh with $h=1/16$. The traces of the real part of the first component of the exact solution are plotted in the blue lines.}
\label{fig6}
\end{figure}
For more detailed comparison between the HDG methods with different polynomial order approximations, we consider the problems with wave number $\kappa=30,50$. We restrict the solution plot in the line segment $\{(x,y,z): x=0.5, y=0.5, 0\leq z \leq 1\}$ and observe the traces of the real part of the first component of the HDG solutions. The traces of the real part of the first component of the exact solution are also plotted in the blue lines in Figure \ref{fig5} and Figure \ref{fig6}. The left graphs of Figure \ref{fig5} display the traces of the real part of the first component of the HDG-P1 solution on the meshes with $h=1/16$ and $h=1/32$ for $\kappa=30$, while the right graphs of Figure \ref{fig5} show the same traces for $\kappa=50$. Figure \ref{fig6} displays the traces of the real part of the first component of the HDG-P2 and HDG-P3 solutions on the mesh with $h=1/16$ for $\kappa=30,50$ (left, right). On the coarse mesh with $h=1/16$, the shapes of the HDG-P2 and HDG-P3 solutions are roughly the same as the exact solution while the shape of the HDG-P1 solution does not match the exact solution well. We can also observe that the HDG solutions of high order polynomial approximations on the mesh with $h=1/16$ perform even better than the HDG-P1 solution on the mesh with $h=1/32$ especially for $\kappa=50$, which shows the advantage of the HDG method with high order polynomial approximation for the time-harmonic Maxwell problem with high wave number. Thus, although the phase error appears in the cases of coarse mesh and low order polynomial approximation, it can be reduced in the fine meshes or by high order polynomial approximations.
\section*{Acknowledgment} The authors are very grateful to the anonymous referees and the editor for their many valuable comments and suggestions that led to an improved presentation of this paper.
\end{document} | arXiv | {
"id": "1503.07696.tex",
"language_detection_score": 0.4747737944126129,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Non-uniqueness theory in sampled STFT phase retrieval]{Non-uniqueness theory in sampled STFT phase retrieval}
\author[Philipp Grohs]{Philipp Grohs\thankssymb{1}\textsuperscript{,}\thankssymb{2}\textsuperscript{,}\thankssymb{3}} \address{\thankssymb{1}Faculty of Mathematics, University of Vienna, Oskar-Morgenstern-Platz 1, 1090 Vienna, Austria} \address{\thankssymb{2}Research Network DataScience@UniVie, University of Vienna, Kolingasse 14-16, 1090 Vienna, Austria} \address{\thankssymb{3}Johann Radon Institute of Applied and Computational Mathematics, Austrian Academy of Sciences, Altenbergstrasse 69, 4040 Linz, Austria}
\email{philipp.grohs@univie.ac.at} \author[Lukas Liehr]{Lukas Liehr\thankssymb{1}} \email{lukas.liehr@univie.ac.at}
\date{\today} \maketitle
\begin{abstract}
The reconstruction of a function from its spectrogram (i.e., the absolute value of its short-time Fourier transform) arises as a key problem in several important applications, including coherent diffraction imaging and audio processing. It is a classical result that for suitable windows any function can in principle be uniquely recovered up to a global phase factor from its spectrogram. However, for most practical applications only discrete samples -- typically from a lattice -- of the spectrogram are available. This raises the question of whether lattice samples of the spectrogram contain sufficient information to determine a function $f\in L^2(\mathbb{R}^d)$ up to a global phase factor. In recent work this question was answered in the negative for the case $d=1$, but the higher dimensional case has remained open. In the present paper, we show that the same non-identifiability result holds in arbitrary dimensions. Precisely, given any dimension $d$, any window function $g$ and any (symplectic or separable) lattice $\mathcal{L} \subseteq \mathbb{R}^d$ we construct pairs of functions $f,h\in L^2(\mathbb{R}^d)$ that do not agree up to a global phase factor, but whose spectrograms agree on $\mathcal{L}$. Our techniques are sufficiently flexible to produce counterexamples to unique recoverability under even more stringent assumptions: for example, if the window function is real-valued, the functions $f,h$ can even be chosen to satisfy $|f|=|h|$. Our results thus reveal the non-existence of a critical sampling density in the absence of phase information, a property which is in stark contrast to uniqueness results in time-frequency analysis.
\noindent \textbf{Keywords.} phase retrieval, sampling, lattice, time-frequency analysis, symplectic geometry, metaplectic operator, discretization
\noindent \textbf{AMS subject classifications.} 42A38, 44A15, 94A12, 94A20 \end{abstract}
\section{Introduction}
Given a square-integrable window function $g \in {L^2({\mathbb R}^d)}$ and a set $\mathcal{L} \subseteq {{\mathbb R}^d} \times {{\mathbb R}^d}$, a central problem in time-frequency analysis is the determination of the spanning properties of the Gabor system $\mathcal{G}(g,\mathcal{L}),$ $$ \mathcal{G}(g,\mathcal{L}) = \left \{ e^{2\pi i \lambda' \cdot t} g(t-\lambda) : (\lambda,\lambda') \in \mathcal{L} \right \} \subseteq {L^2({\mathbb R}^d)} . $$ The completeness of the system $\mathcal{G}(g,\mathcal{L})$ in ${L^2({\mathbb R}^d)}$ is equivalent to the validity of the implication $$ \langle f, e^{2 \pi i \lambda' \cdot} g(\cdot - \lambda) \rangle_{{L^2({\mathbb R}^d)}} = 0 \ \forall (\lambda,\lambda') \in \mathcal{L} \implies f = 0 $$ whenever $f \in {L^2({\mathbb R}^d)}$. Alternatively, this means that $f$ is uniquely determined by samples at $\mathcal{L}$ of its short-time Fourier transform (STFT) with respect to the window function $g$, which is the map $$ V_gf(x,\omega) = \langle f, e^{2 \pi i \omega \cdot} g(\cdot - x) \rangle_{{L^2({\mathbb R}^d)}} = \int_{{\mathbb R}^d} f(t) \overline{g(t-x)} e^{-2\pi i \omega \cdot t} \, dt. $$ The recovery of $f$ by samples of $V_gf$, in particular by samples located on a lattice, has been the subject of intensive research and produced a vast literature, possible starting points are \cite{Groechenig, Heil2007}. Classical results in this field state that under some mild conditions on the window function $g$ and a suitable density assumption on $\mathcal{L}$, the Gabor system $\mathcal{G}(g,\mathcal{L})$ forms a frame for ${L^2({\mathbb R}^d)}$ and hence every $f \in {L^2({\mathbb R}^d)}$ is uniquely and stably determined by the STFT-samples $V_gf(\mathcal{L}) \coloneqq \{ V_gf(z) : z \in \mathcal{L} \}$. Suppose now that we have incomplete information about the STFT, namely only samples of its modulus at $\mathcal{L}$, represented by the set $$
|V_gf(\mathcal{L})| \coloneqq \{ |V_gf(z)| : z \in \mathcal{L} \}. $$
The function $|V_gf|$ is called the spectrogram of $f$ with respect to $g$. The problem of trying to invert the map which sends $f$ to the magnitude-only measurements $|V_gf(\mathcal{L})|$, $
f \mapsto |V_gf(\mathcal{L})| $, is known as the \emph{STFT phase retrieval} problem. This non-linear inverse problem enjoys rapidly growing attention due to its occurrence in a remarkably wide number of applications, most notably in the areas of coherent diffraction imaging, radar, audio processing and quantum mechanics \cite{appl1,appl2,appl3,appl4,appl5,appl6,appl7,appl8}. The STFT phase retrieval problem constitutes a challenging mathematical problem, a crucial reason lies in the appearance of severe instabilities \cite{daubcahill,alaifariGrohs,ALAIFARI2021401}.
Observe that two functions $f,h \in {L^2({\mathbb R}^d)}$ which \emph{agree up to a global phase}, i.e. there exists a unimodular constant $\nu \in {\mathbb T} \coloneqq \{ z \in {\mathbb C} : |z|=1 \}$ such that $f=\nu h$, are indistinguishable by their spectrograms since in this situation one has $|V_gf(\mathcal{L})| = |V_gh(\mathcal{L})|$. The uniqueness problem in STFT phase retrieval asks for assumptions on $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ and $g \in {L^2({\mathbb R}^d)}$ such that every $f \in {L^2({\mathbb R}^d)}$ is uniquely determined up to the ambiguity of a global phase factor by $|V_gf(\mathcal{L})|$. Concisely, one seeks to find conditions on $g$ and $\mathcal{L}$ so that the implication \begin{equation}\label{eq:uniqueness_implication}
|V_gf(z)| = |V_gh(z)| \ \forall z \in \mathcal{L} \implies \exists \nu \in {\mathbb T} : f=\nu h \end{equation}
holds true whenever $f,h \in {L^2({\mathbb R}^d)}$. This problem is well-understood if $\mathcal{L}$ is a continuous domain such as an open set. However, in all practical applications the set $\mathcal{L}$ is a discrete set of samples in ${{\mathbb R}^{2d}}$. Deriving uniqueness results from discrete sampling sets is a key step towards a discretization of the infinite-dimensional STFT phase problem and towards making the problem computationally tractable. The discretization problem has attracted recent attention in the mathematical community \cite{grohsLiehrJFAA, grohsliehr1, grohsliehr2, alaifari2020phase, att1, att2,grra}. However, the uniqueness problem from measurements arising from discrete sampling sets remains much less well-understood than its continuous counterpart. In view of practical applications, the most important discrete sampling sets are lattices, i.e. $\mathcal{L} = L{\mathbb Z}^{2d}$ for some invertible matrix $L \in \mathrm{GL}_{2d}({\mathbb R})$. The natural question arises if similar to the case where phase information is present, every map $f \in {L^2({\mathbb R}^d)}$ is determined (up to a global phase) by $|V_gf(\mathcal{L})|$ assuming that the lattice $\mathcal{L}$ satisfies a density condition and $g$ has certain regularity properties. For the univariate case $d=1$ we highlighted in our previous work \cite{grohsLiehrJFAA} a fundamental difference of the uniqueness question in the absence of phase information: there exists \emph{no} window function $g$ and \emph{no} lattice $\mathcal{L}$ such that the implication \eqref{eq:uniqueness_implication} holds true for every $f,h \in {L^2({\mathbb R}^d)}$.
Following this line of research, the present article provides a systematic study of fundamental discretization barriers arising in the STFT phase retrieval problem. Since the univariate case $d=1$ is not sufficient for a majority of applications, we aim to establish a unified approach to the derivation of non-discretizability statements in STFT phase retrieval, independent of the dimension $d$. The derived theorems show general discretization barriers in arbitrary dimensions and various settings, such as in induced Pauli-type problems and real-valued signal regimes. We further reveal the non-existence of a critical sampling density in the absence of phase information, a property which is in stark contrast to uniqueness results in time-frequency analysis. For the precise mathematical statements of our results, we refer to Section \ref{sec:contribution}. To give further motivation for our paper, we now explain an important application where the problem of discretization appears in a natural way.
\subsection{Motivation: Ptychographic imaging}\label{sec:ptychography}
Ptychography is a highly successful method in the field of diffraction imaging. It has seen a great deal of attention in recent years since it provides a methodology which leads to the detailed understanding of the structure and properties of materials \cite{appl3,appl4, appl5}. The principle of Ptychographic imaging is visualized in Figure \ref{fig:pt} and consists of three steps (indicated by (1), (2) and (3) in this figure): \begin{enumerate}
\item A so-called \emph{probe} which is a pinhole with a certain shape and which can be moved in $x_1$ or $x_2$-direction is modelled as the $(x_1,x_2)$-shift of a function $g \in L^2({\mathbb R}^2)$.
\item The probe is set in front of an unknown object which is modeled as a function $f$ of finite energy, $f \in L^2({\mathbb R}^2)$. An electron or x-ray beam enters the probe resp. pinhole which leads to a concentration and localization of the beam. Subsequently, the beam hits a small part of the object.
\item It can be shown that in a certain distance to the unknown object, the beam which gets diffracted at the object is the Fourier transform of the product of $f$ and the $(x_1,x_2)$-shift of the probe $g$, i.e. ${\mathcal{F}}(fT_{(x_1,x_2)}g)$. A digital device such as a camera measures the intensities of this diffracted wave which results in a diffraction pattern. The measured intensities are the absolute value of ${\mathcal{F}}(fT_{(x_1,x_2)}g)$, in other words, $|V_gf((x_1,x_2), \omega)|$ with $\omega \in \mathcal{B} \subseteq {\mathbb R}^2$. Due to the usage of a digital measuring device, the set $\mathcal{B}$ is a subset of a lattice in ${\mathbb R}^2$. \end{enumerate}
For each position $(x_1,x_2) \in {\mathbb R}^2$ of the probe, one obtains a set of measurements of the form $$\{ |V_gf((x_1,x_2),\omega)| : \omega \in \mathcal{B} \}.$$ Since one performs a so called raster-scan of the object (the scanning of the object with overlapping windows), the positions $(x_1,x_2)$ of the probe lie on a second lattice $\mathcal{A} \subseteq {\mathbb R}^2$. The complete data set is therefore a subset of \begin{equation}\label{dataset}
\{ |V_gf(z)| : z \in \mathcal{A} \times \mathcal{B} \}, \end{equation} i.e. the samples are taken on a subset of the lattice $\mathcal{A} \times \mathcal{B} \subseteq {\mathbb R}^4$. The final goal is to reconstruct $f \in L^2({\mathbb R}^2)$ from the data set \eqref{dataset}. The natural question arises if the data set \eqref{dataset} determines the object $f$ uniquely (up to a global phase). The theory developed in the present article shows that this is not the case, independent of the particular choice of $\mathcal{A}$ and $\mathcal{B}$. Since $\mathcal{A} \times \mathcal{B}$ is a separable lattice, i.e. the Cartesian product of two lattices, we pay particular attention to lattices of this structure.
\begin{figure}
\caption{The principle of Ptychographic imaging.}
\label{fig:pt}
\end{figure}
\subsection{Contributions}\label{sec:contribution}
In this section we shall provide a description of the main results of the article. Recall that a set $\mathcal{L} \subseteq {\mathbb R}^{2d}$ is called a lattice if there exists an invertible matrix $L \in \mathrm{GL}_{2d}({\mathbb R})$ such that $\mathcal{L} = L {\mathbb Z}^{2d} = \{ Lz : z \in {\mathbb Z}^{2d} \}$. The matrix $L$ is called a generating matrix of $\mathcal{L}$. A matrix $S \in \mathrm{GL}_{2d}({\mathbb R})$ is called symplectic if $S^T \mathcal{J}S = \mathcal{J}$ where $\mathcal{J}$ denotes the standard symplectic matrix $$ \mathcal{J} \coloneqq \begin{pmatrix} 0 & -I_d \\ I_d & 0 \end{pmatrix}, $$ $I_d$ is the identity matrix in ${\mathbb R}^{d \times d}$ and $0 \coloneqq 0_d$ denotes the zero matrix in ${\mathbb R}^{d \times d}$. The collections of all symplectic matrices in ${\mathbb R}^{2d \times 2d}$ is called the (real) symplectic group and is denoted by $\mathrm{Sp}_{2d}({\mathbb R})$. A lattice $\mathcal{L} = L{\mathbb Z}^{2d}$ is called symplectic if $L=\alpha S$ for some $\alpha >0$ and some $S \in \mathrm{Sp}_{2d}({\mathbb R})$, i.e. $\mathcal{L}$ is generated by a scaled symplectic matrix. In the present exposition we work with the following equivalence relation which indicates that two functions $f_1,f_2$ agree up to a global phase factor: $$ f_1 \sim f_2 \iff \exists \nu \in {\mathbb T} : f_1 = \nu f_2. $$ If $f_1$ and $f_2$ do not agree up to a global phase then we write $f_1 \nsim f_2$.
\begin{theorem}\label{thm:start1} Suppose that $\mathcal{L} \subseteq {\mathbb R}^{2d}$ satisfies one of the following two conditions: \begin{enumerate}
\item $\mathcal{L} = S({\mathbb R}^d \times \Lambda) = \{ S(x,\lambda) : x \in {\mathbb R}^d, \lambda \in \Lambda \}$ with $S\in \mathrm{Sp}_{2d}({\mathbb R})$ a symplectic matrix and $\Lambda \subseteq {\mathbb R}^d$ an arbitrary lattice
\item $\mathcal{L} = L{\mathbb Z}^{2d}$ is a lattice generated by $L \in \mathrm{GL}_{2d}({\mathbb R})$ which satisfies the factorization property $L=ST$ with $S\in \mathrm{Sp}_{2d}({\mathbb R})$ symplectic and $T$ a block diagonal matrix of the form
$$ T = \begin{pmatrix} A & 0 \\ 0 & B \end{pmatrix}, \ \ A,B \in \mathrm{GL}_d({\mathbb R}). $$ \end{enumerate} Then for every window function $g \in {L^2({\mathbb R}^d)}$ there exist two functions $f_1,f_2 \in {L^2({\mathbb R}^d)}$ such that $$
|V_g(f_1)(z)| = |V_g(f_2)(z)| \ \forall z \in \mathcal{L} \ \ \text{and} \ \ f_1 \nsim f_2. $$ \end{theorem}
Crucial consequences of the previous statement are non-uniqueness results for the following class of important lattices which appear throughout time-frequency analysis and its application.
\begin{theorem}\label{thm:start2} Suppose that $\mathcal{L} \subseteq {\mathbb R}^{2d}$ is a lattice which satisfies one of the following four conditions: \begin{enumerate}
\item $\mathcal{L}$ is symplectic, i.e. $\mathcal{L}=\alpha S {\mathbb Z}^{2d}$ for some $\alpha>0$ and some $S \in \mathrm{Sp}_{2d}({\mathbb R})$
\item $\mathcal{L}$ is rectangular, i.e. its generating matrix is diagonal
\item $\mathcal{L}$ is separable, i.e. $\mathcal{L} = \mathcal{A} \times \mathcal{B}$ with $\mathcal{A},\mathcal{B}$ lattices in ${{\mathbb R}^d}$
\item $\mathcal{L}=\alpha {\mathbb Z}^d \times \beta {\mathbb Z}^d$ for some $\alpha,\beta \in {\mathbb R} \setminus \{ 0 \}$. \end{enumerate} Then for every window function $g \in {L^2({\mathbb R}^d)}$ there exist two functions $f_1,f_2 \in {L^2({\mathbb R}^d)}$ such that $$
|V_g(f_1)(z)| = |V_g(f_2)(z)| \ \forall z \in \mathcal{L} \ \ \text{and} \ \ f_1 \nsim f_2. $$ \end{theorem}
Notice that classical results in time-frequency analysis show that under some mild conditions on a window function $g \in {L^2({\mathbb R}^d)}$ and a certain density assumption on a symplectic or separable lattice $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$, every $f \in {L^2({\mathbb R}^d)}$ is determined uniquely by the STFT samples $V_gf(\mathcal{L})$. The conclusions made in Theorem \ref{thm:start2} are in stark contrast to the uniqueness results where phase information is present. The statement highlights that if the lattice $\mathcal{L}$ satisfies one of the properties of Theorem \ref{thm:start2} then in the absence of phase information there exists \emph{no} window function $g \in {L^2({\mathbb R}^d)}$ such that every $f \in {L^2({\mathbb R}^d)}$ is determined (up to a global phase) by $|V_gf(\mathcal{L})|$ and this result is independent of the density of $\mathcal{L}$ (the density of $\mathcal{L}$ is the quantity $|\det L|^{-1}$ where $\mathcal{L}=L{\mathbb Z}^{2d}$). We refer to Section \ref{sec:implications} for a detailed discussion and comparison between the setting where phase information is present resp. absent.
The conclusions of Theorem \ref{thm:start1} and Theorem \ref{thm:start2} show the existence of a function pair $(f_1,f_2)$ with the property that $f_1 \nsim f_2$ and $f_1$ and $f_2$ produce identical phaseless STFT samples on certain subsets of $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$. We now investigate the size of the class of functions which have these properties. To that end, we define for a window function $g \in {L^2({\mathbb R}^d)}$ and a set $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ the function class $\mathcal{N}(g,\mathcal{L})$ by
\begin{equation*}
\begin{split}
& \mathcal{N}(g,\mathcal{L}) \\
& \coloneqq \left \{ f \in {L^2({\mathbb R}^d)} : \exists h \in {L^2({\mathbb R}^d)} \ \text{s.t.} \ h \nsim f \ \text{and} \ |V_gf(\mathcal{L})| = |V_gh(\mathcal{L})| \right \}.
\end{split} \end{equation*}
Clearly, $\mathcal{N}(g,\mathcal{L}) = \emptyset$ if and only if every $f \in {L^2({\mathbb R}^d)}$ is uniquely determined up to a global phase by $|V_gf(\mathcal{L})|$. On the other hand, if $\mathcal{L}$ satisfies one of the assumptions of Theorem \ref{thm:start1} or Theorem \ref{thm:start2} then for every window function $g \in {L^2({\mathbb R}^d)}$ we have $\mathcal{N}(g,\mathcal{L}) \neq \emptyset$ which is equivalent to the statement that uniqueness is not guaranteed in the corresponding phase retrieval problem with window function $g$ and sampling locations $\mathcal{L}$.
Recall that a set $C \subseteq V$ of a real or complex vector space $V$ is called a cone if $$ \kappa C \subseteq C $$ for every $\kappa > 0$. A cone is called infinite-dimensional if it is not contained in any finite-dimensional subspace of $V$.
\begin{theorem}\label{thm:complex_cone} Let $g \in {L^2({\mathbb R}^d)}$ be an arbitrary window function. If $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ satisfies $\mathcal{L} \subseteq S({{\mathbb R}^d} \times \Lambda)$ for some symplectic matrix $S \in \mathrm{Sp}_{2d}({\mathbb R})$ and some lattice $\Lambda \subseteq {{\mathbb R}^d}$ then $\mathcal{N}(g,\mathcal{L})$ contains an infinite-dimensional cone. \end{theorem}
Next, we pay particular attention to the setting where the window function $g$ is assumed to be real-valued and the lattice $\mathcal{L}$ is separable, i.e. there exist lattices $\mathcal{A},\mathcal{B} \subseteq {{\mathbb R}^d}$ such that $\mathcal{L} = \mathcal{A} \times \mathcal{B}$. These are prototypical assumptions which are made in numerous applications, a prime example for a real-valued window function being a Gaussian and frequently appearing lattices being of the form $\mathcal{L} = \alpha {\mathbb Z}^d \times \beta {\mathbb Z}^d$ (often referred to as time-frequency lattices).
A first consequence drawn from the real-valuedness assumption on $g$ and the separability assumption on $\mathcal{L}$ is that the Pauli-problem induced by the STFT is not unique. The Pauli problem dates back to the seminal work by Pauli \cite{Pauli} and concerns the question of whether a function $f \in {L^2({\mathbb R})}$ is determined up to a global phase by its modulus $|f|$ and the modulus of its Fourier transform $|{\mathcal{F}} f|$ \cite{Pauli}. It is well-known that this is not the case, i.e. one can construct so-called Pauli-partners $f_1,f_2$ for which $|f_1|=|f_2|$ and $|{\mathcal{F}} f_1| = |{\mathcal{F}} f_2|$ but $f_1 \nsim f_2$ \cite{corbett_hurst_1977, VOGT1978365}. A natural extension of the Pauli problem replaces the Fourier transform by a different unitary operator or a finite number of unitary operators (see, for instance, Jaming's work on uniqueness from fractional Fourier transform measurements \cite{Jaming} and corresponding non-uniqueness results by Carmeli et al. \cite{CARMELI} or the very recent paper by Jaming and Rathmair on uniqueness from three unitary operators \cite{JamingRathmair}). In Theorem \ref{thm:start2} we showed that a function $f \in {L^2({\mathbb R}^d)}$ is not determined by its spectrogram samples given on a separable lattice, independent of the particular choice of the window function $g$. If the window function is real-valued then in a related spirit as in the Pauli problem, this non-uniqueness property remains valid even under prior knowledge of the modulus of $f$, as we show next.
\begin{theorem}\label{thm:start3} Let $g \in L^2({{\mathbb R}^d},{\mathbb R})$ be a real-valued window function and let $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ be a separable lattice. Then there exist functions $f_1,f_2 \in {L^2({\mathbb R}^d)}$ which satisfy all of the following three properties: \begin{enumerate}
\item $|V_g(f_1)(z)|=|V_g(f_2)(z)|$ for all $z \in \mathcal{L}$
\item $|f_1(t)| = |f_2(t)|$ for all $t \in {{\mathbb R}^d}$
\item $f_1 \nsim f_2$. \end{enumerate} \end{theorem}
Suppose now that we impose a restriction on the underlying signal space. Precisely, we assume that all input functions are real-valued and inquire about the problem of whether such signals are determined up to a sign-factor by spectrogram samples given on a separable lattice. In this case, the corresponding phase retrieval problem is also known as the sign retrieval problem. In recent years, the reconstruction of a real-valued signal belonging to certain shift-invariant spaces (e.g. Paley-Wiener spaces, Gaussian shift-invariant spaces, shift-invariant spaces with totally positive generator) by samples of its modulus was considered by several authors, see for instance \cite{1Thakur2011, 2Groechenig2020, 3Romero2021, 4Alaifari2017}. In the context of STFT phase retrieval, the following theorem demonstrates that the sign retrieval problem fails to be unique.
\begin{theorem}\label{thm:intro:sign_retrieval} Let $g \in L^2({{\mathbb R}^d},{\mathbb R})$ be a real-valued window function and suppose that $\mathcal{L} \subseteq {\mathbb R}^{2d}$ satisfies one of the following two conditions \begin{enumerate}
\item $\mathcal{L} = \Lambda \times {\mathbb R}^d$ with $\Lambda$ a lattice in ${{\mathbb R}^d}$
\item $\mathcal{L}$ is a separable lattice \end{enumerate} Then there exist two real-valued functions $f_1,f_2 \in L^2({{\mathbb R}^d},{\mathbb R})$ such that $$
|V_g(f_1)(z)| = |V_g(f_2)(z)| \ \forall z \in \mathcal{L} \ \ \text{and} \ \ f_1 \nsim f_2. $$ \end{theorem}
In view of numerical applications, there is one consequence of Theorem \ref{thm:intro:sign_retrieval} which is sufficiently important to have the status of a theorem.
\begin{theorem}\label{thm:start5} Suppose that $\mathcal{L} = L{\mathbb Z}^{2d} \subseteq {\mathbb R}^{2d}$ is a lattice which has the property that the generating matrix $L$ has rational entries, $L \in \mathrm{GL}_{2d}({\mathbb Q})$. Then for every real-valued window function $g \in L^2({{\mathbb R}^d},{\mathbb R})$ there exist two real-valued functions $f_1,f_2 \in L^2({{\mathbb R}^d},{\mathbb R})$ such that $$
|V_g(f_1)(z)| = |V_g(f_2)(z)| \ \forall z \in \mathcal{L} \ \ \text{and} \ \ f_1 \nsim f_2. $$ \end{theorem}
In a similar fashion as above we define for a window function $g \in {L^2({\mathbb R}^d)}$ and a subset $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ the set $\mathcal{N}_{\mathbb R}(g,\mathcal{L})$ via \begin{equation*}
\begin{split}
& \mathcal{N}_{\mathbb R}(g,\mathcal{L}) \\ & \coloneqq \left \{ f \in L^2({{\mathbb R}^d}, {\mathbb R}) : \exists h \in L^2({{\mathbb R}^d}, {\mathbb R}) \ \text{s.t.} \ h \nsim f \ \text{and} \ |V_gf(\mathcal{L})| = |V_gh(\mathcal{L})| \right \}.
\end{split} \end{equation*}
If $\mathcal{N}_{\mathbb R}(g,\mathcal{L}) = \emptyset$ then every $f \in L^2({{\mathbb R}^d},{\mathbb R})$ is determined up to a sign factor by $|V_gf(\mathcal{L})|$ within the signal class $L^2({{\mathbb R}^d},{\mathbb R})$. If $g \in L^2({{\mathbb R}^d},{\mathbb R})$ is real-valued and $\mathcal{L} \subseteq {\mathbb R}^{2d}$ is of the form $\mathcal{L}=\Lambda \times {{\mathbb R}^d}$ then according to Theorem \ref{thm:intro:sign_retrieval} it holds that $\mathcal{N}_{\mathbb R}(g,\mathcal{L}) \neq \emptyset$. In fact, $\mathcal{N}_{\mathbb R}(g,\mathcal{L})$ contains an infinite-dimensional cone.
\begin{theorem}\label{thm:real_cone} Let $g \in {L^2({\mathbb R}^d)}$ be an arbitrary real-valued window function. If $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ satisfies $\mathcal{L} \subseteq \Lambda \times {{\mathbb R}^d}$ for some lattice $\Lambda \subseteq {{\mathbb R}^d}$ then $\mathcal{N}_{\mathbb R}(g,\mathcal{L})$ contains an infinite-dimensional cone. \end{theorem}
\subsection{Outline} The paper is structured as follows. Section \ref{sec:preliminaries} presents the necessary background on time-frequency analysis, symplectic matrices, metaplectic operators as well as lattices. Further, this section introduces the concept of shift-invariant spaces induced by metaplectic operators, a key concept which will be used throughout the article. The exposition continues with Section \ref{sec:main_results_3} where the main results of the paper are presented and proved. In Section \ref{sec:spec} we construct function pairs which have the property that their spectrograms agree on certain lattices and semi-discrete sampling sets. In Section \ref{sec:noneq} we characterize those function pairs constructed before, which do not agree up to a global phase. The resulting consequences for the STFT phase retrieval problem (in particular Theorem \ref{thm:start1}, Theorem \ref{thm:start2} and Theorem \ref{thm:complex_cone} above) are presented in Section \ref{sec:implications} together with visualizations in Section \ref{sec:examples_v}. Section \ref{sec:separability} is devoted to the proofs of Theorem \ref{thm:start3}, Theorem \ref{thm:intro:sign_retrieval}, Theorem \ref{thm:start5} and Theorem \ref{thm:real_cone}.
\section{Preliminaries}\label{sec:preliminaries}
In this section we discuss several definitions and basic results which are needed throughout the remainder of the article.
\subsection{Time-frequency analysis}
Let ${L^2({\mathbb R}^d)}$ be the Lebesgue space of all measurable, square-integrable functions $f : {{\mathbb R}^d} \to {\mathbb C}$. The subspace consisting of all real-valued functions in ${L^2({\mathbb R}^d)}$ is denoted by $L^2({{\mathbb R}^d},{\mathbb R})$. The inner product $\langle a,b \rangle \coloneqq \int_{{\mathbb R}^d} a(t)\overline{b(t)} \, dt$ renders ${L^2({\mathbb R}^d)}$ into a Hilbert space with induced norm $\| \cdot \|_2$. For $\tau,\nu \in {{\mathbb R}^d}$, the translation, modulation and reflection operator are denoted by $T_\tau : {L^2({\mathbb R}^d)} \to {L^2({\mathbb R}^d)}, M_\nu : {L^2({\mathbb R}^d)} \to {L^2({\mathbb R}^d)}$ resp. $\mathcal{R} : {L^2({\mathbb R}^d)} \to {L^2({\mathbb R}^d)}$ and are defined by \begin{equation*}
\begin{split}
T_\tau h (t) &= h(t-\tau) \\
M_\nu h(t) &= e^{2 \pi i \nu \cdot t} h(t) \\
\mathcal{R} h(t) &= h(-t).
\end{split} \end{equation*} Note that the quantity $\nu \cdot t \coloneqq \sum_{j=1}^d \nu_j t_j, t=(t_1, \dots, t_d), \nu=(\nu_1,\dots,\nu_d),$ denotes the Euclidean inner product of two vectors in ${{\mathbb R}^d}$. The short-time Fourier transform (STFT) of a function $f \in {L^2({\mathbb R}^d)}$ with respect to a window function $g \in {L^2({\mathbb R}^d)}$ is the map $V_gf : {{\mathbb R}^{2d}} \to {\mathbb C}$, $$ V_gf(x,\omega) \coloneqq \langle f,M_\omega T_x g \rangle = \int_{{\mathbb R}^d} f(t)\overline{g(t-x)} e^{-2\pi i \omega \cdot t} \, dt, \ \ x,\omega \in {{\mathbb R}^d}. $$ Observe that $V_gf(x,\cdot) = {\mathcal{F}}(f\overline{T_x g})$ where ${\mathcal{F}}$ denotes the Fourier transform which is defined on $L^1({{\mathbb R}^d}) \cap {L^2({\mathbb R}^d)}$ via $$ {\mathcal{F}} h(\omega) \coloneqq \hat h(\omega) \coloneqq \int_{{\mathbb R}^d} h(t)e^{-2\pi i \omega \cdot t} \, dt $$ and extends to a unitary operator mapping ${L^2({\mathbb R}^d)}$ bijectively onto ${L^2({\mathbb R}^d)}$. The following lemma summarizes elementary properties of the operators introduced beforehand.
\begin{lemma}\label{lma:stft_properties} Let $\tau,\nu \in {{\mathbb R}^d}$ and let $f,g \in {L^2({\mathbb R}^d)}$. Then the short-time Fourier transform has the following properties: \begin{enumerate}
\item The map $V_gf : {{\mathbb R}^{2d}} \to {\mathbb C}$ is uniformly continuous.
\item The STFT satisfies the covariance property which reads
$$
V_g(T_\tau M_\nu f)(x,\omega) = e^{-2\pi i \tau \cdot \omega} V_gf(x-\tau,\omega-\nu).
$$
\item The $L^2({{\mathbb R}^{2d}})$-norm of $V_gf$ is given by $$\| V_gf\|_{L^2({{\mathbb R}^{2d}})} = \| f \|_{{L^2({\mathbb R}^d)}} \| g \|_{{L^2({\mathbb R}^d)}}.$$ In particular, $V_g : {L^2({\mathbb R}^d)} \to L^2({{\mathbb R}^{2d}})$ is a bounded linear operator. \end{enumerate} Moreover, the operators ${\mathcal{F}}, T_\tau,M_\nu$ and $\mathcal{R}$ obey the relations \begin{enumerate}
\item[(4)] $\overline{{\mathcal{F}} f} = \mathcal{R}{\mathcal{F}} \overline{f}$,
\item[(5)] $\mathcal{R} {\mathcal{F}} = {\mathcal{F}} \mathcal{R}$,
\item[(6)] ${\mathcal{F}} T_\tau = M_{-\tau} {\mathcal{F}}$,
\item[(7)] $\mathcal{R} T_\tau = T_{-\tau} \mathcal{R}$. \end{enumerate} \end{lemma}
\subsection{Symplectic matrices and metaplectic operators}\label{sec:symplectic_geometry}
For $d \in {\mathbb N}$ we denote by $I_d$ the identity matrix in ${\mathbb R}^{d \times d}$. The matrix $\mathcal{J} \in {\mathbb R}^{2d\times 2d}$, defined by $$ \mathcal{J} \coloneqq \begin{pmatrix} 0 & -I_d \\ I_d & 0 \end{pmatrix}, $$ is called the \emph{standard symplectic matrix}. Using the standard symplectic matrix, we can define the symplectic group.
\begin{definition} A matrix $S \in \mathrm{GL}_{2d}({\mathbb R})$ is called symplectic if $$ S^T \mathcal{J} S = \mathcal{J}. $$ The set of all symplectic matrices is called the (real) symplectic group and is denoted by $\mathrm{Sp}_{2d}({\mathbb R})$. \end{definition}
The property of a matrix $S \in \mathrm{GL}_{2d}({\mathbb R})$ being symplectic can be characterized in the following way \cite[Lemma 9.4.2]{Groechenig}.
\begin{lemma}\label{lma:symplecticCharacterization} Let $S \in \mathrm{GL}_{2d}({\mathbb R})$ be an invertible matrix such that $$ S = \begin{pmatrix} A & B \\ C & D \end{pmatrix} $$ with $A,B,C,D$ denoting block matrices in ${\mathbb R}^{d \times d}$. Then $S$ is symplectic if and only if $AC^T = A^TC$, $BD^T=B^TD$ and $A^TD - C^TB = I_d$. \end{lemma}
In the case $d=1$, Lemma \ref{lma:symplecticCharacterization} implies that a symplectic matrix $S \in \mathrm{Sp}_2({\mathbb R})$ with $$ S= \begin{pmatrix} a & b \\ c & d\end{pmatrix}, \ \ a,b,c,d \in {\mathbb R}, $$ satisfies the relation $ad-cb=1$. Hence, $\mathrm{Sp}_2({\mathbb R})$ coincides with the special linear group, $\mathrm{Sp}_2({\mathbb R}) = \mathrm{SL}_{2}({\mathbb R})$. The metaplectic group $\mathrm{Mp}(d)$ is the unitary representation of the double cover of the symplectic group $\mathrm{Sp}_{2d}({\mathbb R})$ on the Hilbert space ${L^2({\mathbb R}^d)}$. One can define it by requiring that the sequence of group homomorphisms $$ 0 \to {\mathbb Z}_2 \to \mathrm{Mp}(d) \to \mathrm{Sp}_{2d}({\mathbb R}) \to 0 $$ is exact. Hence, to every symplectic matrix $S \in \mathrm{Sp}_{2d}({\mathbb R})$ corresponds a pair of unitary operators $U_1,U_2 : {L^2({\mathbb R}^d)} \to {L^2({\mathbb R}^d)}$ differing by a sign, $U_1=-U_2$. Following the notation introduced in \cite{Folland+2016,Groechenig}, we denote this operator (defined up to a sign) by $\mu(S)$ and call it the metaplectic operator corresponding to $S$. Further, if $S,M \in \mathrm{Sp}_{2d}({\mathbb R})$ are two symplectic matrices then, according to Schur's lemma, the metaplectic operator corresponding to the product of $S$ and $M$ satisfies $$ \mu(SM) = c \mu(S)\mu(M), \ \ c \in \{ 1,-1\}. $$ Hence, $\mu$ can be regarded as a group homomorphism up to a global phase. Further we have the relation $$ \mu(S)^{-1} = c \mu(S^{-1}), \ \ c \in \{ 1,-1\}. $$ For certain symplectic block-diagonal matrices one can write out the corresponding metaplectic operator in a concise form \cite[Example 9.4.1(b)]{Groechenig}.
\begin{lemma}\label{lma:block_diag_representation} For $A \in \mathrm{GL}_d({\mathbb R})$ define the block-diagonal matrix $S_A$ via $$ S_A = \begin{pmatrix} A & 0 \\ 0 & A^{-T} \end{pmatrix} $$ where $A^{-T} = (A^T)^{-1}$. Then the metaplectic operator $\mu(S_A)$ corresponding to $S_A$ is given by $$
(\mu(S_A)f)(x) = |\det A |^{-1/2} f(A^{-1}x), \ \ f \in {L^2({\mathbb R}^d)}. $$ \end{lemma}
In addition to the previous Lemma, the present exposition makes extensive use of the following interaction property of symplectic matrices, metaplectic operators and the STFT \cite[Lemma 9.4.3]{Groechenig}.
\begin{lemma}\label{lma:interaction_property} Let $f,g \in {L^2({\mathbb R}^d)}$ and let $S \in \mathrm{Sp}_{2d}({\mathbb R})$ be a symplectic matrix. Then for every $(x,\omega) \in {\mathbb R}^{2d}$ we have $$ V_gf(S(x,\omega)) = e^{\pi i (x \cdot \omega - x' \cdot \omega')} V_{\mu(S)^{-1}g}(\mu(S)^{-1}f)(x,\omega) $$ where $(x',\omega') = S(x,\omega)$ is the image of $(x,\omega)$ under $S$. \end{lemma}
For a detailed study of symplectic matrices and metaplectic operators as well as the significance of symplectic geometry in quantum mechanics, the reader may consult the books by de Gosson \cite{deGosson} and Folland \cite{Folland+2016}.
\subsection{Lattices}
A set $\Lambda \subseteq {{\mathbb R}^d}$ is called a lattice if there exists an invertible matrix $A \in \mathrm{GL}_d({\mathbb R})$ such that $\Lambda = A{\mathbb Z}^d = \{ Az : z \in {\mathbb Z}^d \}$. The matrix $A$ is called the generating matrix of $\Lambda$ and $\Lambda$ is said to be generated by $A$. The reciprocal lattice (or: dual lattice) of $\Lambda$, denoted by $\Lambda^*$, is the lattice generated by $A^{-T} \coloneqq (A^T)^{-1}$. Equivalently, it is the set of all points $\lambda^* \in {{\mathbb R}^d}$ which satisfy $$ \lambda \cdot \lambda^* \in {\mathbb Z} \ \ \ \forall \lambda \in \Lambda. $$
The density of a lattice $\Lambda = A{\mathbb Z}^d$ is given by $|\det A|^{-1}$, a quantity independent of the generating matrix $A$.
Special lattices deserve special names. If $\mathcal{A},\mathcal{B}\subseteq {{\mathbb R}^d}$ are lattices and if $\Lambda = \mathcal{A} \times \mathcal{B}$ then $\Lambda$ is called a separable lattice in ${{\mathbb R}^{2d}}$. If $\Lambda$ is generated by a diagonal matrix then we call it rectangular. Finally, $\Lambda \subseteq {\mathbb R}^{2d}$ is called symplectic if $\Lambda = \alpha S {\mathbb Z}^{2d}$ for some $\alpha \in {\mathbb R} \setminus \{ 0 \}$ and some $S \in \mathrm{Sp}_{2d}({\mathbb R})$. Thus, a symplectic lattice is generated by a scaled symplectic matrix.
Every lattice $\Lambda \subseteq {{\mathbb R}^d}$ gives rise to a tiling of ${{\mathbb R}^d}$ in terms of a fundamental domain. Denoting the standard cube in ${{\mathbb R}^d}$ by $Q_d \coloneqq [0,1]^d$ and assuming that $\Lambda$ is generated by $A \in \mathrm{GL}_d({\mathbb R})$, then the set $$ \mathcal{P}(\Lambda) \coloneqq AQ_d = \left \{ A(x_1, \dots, x_d) : x_j \in [0,1] \ \forall j \in \{ 1, \dots, d \} \right \} $$ is called a fundamental domain of $\Lambda$. Further we have $$ {{\mathbb R}^d} = \bigcup_{\lambda \in \Lambda} \lambda + \mathcal{P}(\Lambda) $$ and the intersection of two shifted fundamental domains $\lambda + \mathcal{P}(\Lambda)$ and $\lambda' + \mathcal{P}(\Lambda)$ with $\lambda,\lambda' \in \Lambda$ has $d$-dimensional Lebesgue measure zero. Note that $\mathcal{P}(\Lambda)$ depends on the choice of the generating matrix $A$. However, in the present exposition the exact shape of the fundamental domain of a lattice will not play a role and all statements are valid for an arbitrary choice of a fundamental domain.
\subsection{Shift-invariant spaces associated to metaplectic operators}
Let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice and let $\phi \in {L^2({\mathbb R}^d)}$. The (principal) shift-invariant space generated by $\phi$ and subjected to the lattice $\Lambda$ is defined to be \begin{equation}\label{classical_si_space}
\mathcal{V}_\Lambda(\phi) \coloneqq \overline{{\mathrm{span} \,} \{ T_\lambda \phi : \lambda \in \Lambda \}} \end{equation} where $T_\lambda$ denotes the shift-operator, $T_\lambda \phi = \phi(\cdot - \lambda)$. The closure is taken with respect to the $L^2$-norm. Shift-invariance means that $T_\lambda h \in \mathcal{V}_\Lambda(\phi)$ provided that $h \in \mathcal{V}_\Lambda(\phi)$ and $\lambda \in \Lambda$. Note that the shift-operator satisfies the relation $$ T_\lambda = \mu(S)T_\lambda\mu(S)^{-1} \ \ \text{for} \ \ S=I_{2d} $$ since in this case $\mu(S) =\mu(I_{2d})$ is the identity operator on ${L^2({\mathbb R}^d)}$. This relation motivates a generalization of a shift-invariant space to the setting where the space is invariant under the operator $\mu(S)T_\lambda\mu(S)^{-1}$ for a general $S \in \mathrm{Sp}_{2d}({\mathbb R})$.
\begin{definition} Let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice, $S \in \mathrm{Sp}_{2d}({\mathbb R})$ a symplectic matrix and let $\phi \in {L^2({\mathbb R}^d)}$ be a generating function. The $S$-shift of $\phi$ by $\lambda \in \Lambda$ is defined by $$ T_\lambda^S \phi \coloneqq \mu(S)T_\lambda\mu(S)^{-1} \phi. $$ Further, we call the space $$ \mathcal{V}_\Lambda^S(\phi) \coloneqq \overline{{\mathrm{span} \,}\{ T_\lambda^S \phi : \lambda \in \Lambda \}} $$ the shift-invariant space generated by $\phi$, associated to $S$-shift, and subjected to the lattice $\Lambda$ (short: $(S,\Lambda)$-shift invariant space). \end{definition}
Analogously to equation \eqref{classical_si_space}, we defined the $(S,\Lambda)$-shift-invariant space $\mathcal{V}_\Lambda^S(\phi)$ as the $L^2$-closure of the $S$-shifts by $\lambda \in \Lambda$ of the generating function $\phi$. Frequently, shift-invariant subspaces of ${L^2({\mathbb R}^d)}$ are defined as the set of all functions \begin{equation}\label{series}
f = \sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \phi \end{equation}
such that $\{ c_\lambda \}$ is a square-summable sequence, $\{ c_\lambda \} \in \ell^2(\Lambda)$. This definition is used if the generator $\phi$ satisfies certain regularity properties which guarantee convergence of the series in \eqref{series}. In the present exposition we aim at results of the highest generality so in our case no prior assumptions on $\phi$ are made. However, whenever $f \in \mathcal{V}_\Lambda^S(\phi)$ can be written as a convergent series of the form $f = \sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \phi$ with $\{ c_\lambda \} \in \ell^2(\Lambda)$ then we call $\{ c_\lambda \}$ a \emph{defining sequence} of $f$. Note that if $\{ c_\lambda \} \in c_{00}(\Lambda)$, the space of all sequences with only finitely many non-zero components, then $f = \sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \phi$ is a well-defined element in $\mathcal{V}_\Lambda^S(\phi)$ and no questions of convergence appear. If the system $\{ T_\lambda^S \phi : \lambda \in \Lambda \}$ forms a Bessel sequence in ${L^2({\mathbb R}^d)}$ then standard theory implies that the series $\sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \phi$ converges unconditionally for every $\{ c_\lambda \} \in \ell^2(\Lambda)$ (see, for instance, \cite[Corollary 3.2.5]{christensenBook}). Recall that a sequence $\{ f_n : n \in {\mathbb N} \} \subseteq X$ of a separable Hilbert space $X$ with inner product $\langle \cdot , \cdot \rangle_X$ and induced norm $\| \cdot \|_X$ is called a Bessel sequence if there exists a constant $B>0$ such that $$
\sum_{n=1}^\infty |\langle f,f_n \rangle_X|^2 \leq B \| f \|_X^2 \ \ \ \forall f \in X. $$
The property of $\{ T_\lambda^S \phi : \lambda \in \Lambda \}$ being a Bessel sequence in ${L^2({\mathbb R}^d)}$ can be phrased in terms of boundedness of a certain lattice periodization. To that end we introduce for a map $\phi \in {L^2({\mathbb R}^d)}$ the $\Lambda$-periodization of $|{\mathcal{F}} \phi|^2$ by $$
\mathfrak{p}_\Lambda[\phi](t) \coloneqq \sum_{\lambda \in \Lambda} |{\mathcal{F}} \phi (t+\lambda)|^2. $$
\begin{proposition}\label{prop:bessel_condition} Let $\phi \in {L^2({\mathbb R}^d)}$ and let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice. If $\mathfrak{p}_{\Lambda^*}[\phi] \in L^\infty(\mathcal{P}(\Lambda^*))$ then $\{ T_\lambda \phi : \lambda \in \Lambda \}$ is a Bessel sequence and the series $$\sum_{\lambda \in \Lambda} c_\lambda T_\lambda \phi$$ converges unconditionally for every $\{ c_\lambda \} \in \ell^2(\Lambda)$. \end{proposition} \begin{proof} The proof is similar to the one presented in \cite[Theorem 9.2.5]{christensenBook}. For the convenience of the reader we include a proof of the statement in the Appendix \hyperref[appendix:A]{A}. \end{proof}
A consequence of the previous Proposition is the following result.
\begin{corollary}\label{cor:besselCorollary} Let $\phi \in {L^2({\mathbb R}^d)}$, $\Lambda \subseteq {{\mathbb R}^d}$ a lattice and $S \in \mathrm{Sp}_{2d}({\mathbb R})$ a symplectic matrix. If $\mathfrak{p}_{\Lambda^*}[\mu(S)^{-1}\phi] \in L^\infty(\mathcal{P}(\Lambda^*))$ then $\{ T_\lambda^S \phi : \lambda \in \Lambda \}$ is a Bessel sequence and the series $\sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \phi$ converges unconditionally for every $\{ c_\lambda \} \in \ell^2(\Lambda)$. \end{corollary} \begin{proof} Proposition \ref{prop:bessel_condition} implies that under the conditions stated above, the system $\{ T_\lambda \mu(S)^{-1} \phi : \lambda \in \Lambda \}$ is a Bessel sequence in ${L^2({\mathbb R}^d)}$. Since the property of being a Bessel sequence is invariant under the action of a bounded linear operator, it follows that $$ \{ \mu(S)T_\lambda\mu(S)^{-1} \phi : \lambda \in \Lambda \} = \{ T_\lambda^S \phi : \lambda \in \Lambda \} $$ is a Bessel sequence. In particular, $\sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \phi$ converges unconditionally for every $\{ c_\lambda \} \in \ell^2(\Lambda)$. \end{proof}
\section{Main results}\label{sec:main_results_3}
The objective of this section is to state and prove the main results of the article. The goal is to derive non-equivalent function pairs $f_1 \nsim f_2$ which produce identical spectrogram samples on certain determined lattices in the time-frequency plane and which satisfy certain structural properties. In particular, we prove the theorems stated in Section \ref{sec:contribution}.
\subsection{Spectrogram equalities}\label{sec:spec}
Let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice, $S \in \mathrm{Sp}_{2d}({\mathbb R})$ a symplectic matrix, $\phi \in {L^2({\mathbb R}^d)}$ a generating function and $f \in \mathcal{V}_\Lambda^S(\phi)$. Whenever $f$ has defining sequence $\{ c_\lambda \}$, i.e. $f$ may be written as a convergent series of the form $f = \sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \phi$, we define a corresponding function $f_\times$ via $$ f_\times \coloneqq \sum_{\lambda \in \Lambda} \overline{c_\lambda} T_\lambda^S \phi. $$ The map $f_\times$ arises from $f$ via complex conjugation of its defining sequence $\{ c_\lambda \}$. We start by inspecting the spectrogram of $f$ and $f_\times$ on sets of the form $S(A \times B) = \{ S(a,b) : a \in A, b \in B \} \subseteq {\mathbb R}^{2d}$ with $A,B \subseteq {{\mathbb R}^d}$ and $S \in \mathrm{Sp}_{2d}({\mathbb R})$.
\begin{theorem}\label{thm:equalSpectrograms} Let $g \in {L^2({\mathbb R}^d)}$ be a window function, $\Lambda \subseteq {{\mathbb R}^d}$ a lattice and $S \in \mathrm{Sp}_{2d}({\mathbb R})$ a symplectic matrix. Suppose that $\{ c_\lambda \} \subseteq {\mathbb C}$ is the defining sequence of $f \in \mathcal{V}_\Lambda^S(\mathcal{R}g)$. Then the following holds: \begin{enumerate}
\item If $\{ c_\lambda \} \in c_{00}(\Lambda)$ then $$
|V_gf(z)| = |V_g(f_\times)(z)| \ \ \forall z \in S({{\mathbb R}^d} \times \Lambda^*). $$ \item If $\mathfrak{p}_{\Lambda^*}[\mu(S)^{-1}\mathcal{R}g] \in L^\infty(\mathcal{P}(\Lambda^*))$ and $\{ c_\lambda \} \in \ell^2(\Lambda)$ then $$
|V_gf(z)| = |V_g(f_\times)(z)| \ \ \forall z \in S({{\mathbb R}^d} \times \Lambda^*). $$ \end{enumerate} \end{theorem} \begin{proof} \textbf{Proof of (1).} Suppose that $f = \sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \mathcal{R}g \in \mathcal{V}_\Lambda^S(\mathcal{R}g)$ has defining sequence $\{ c_\lambda \} \in c_{00}(\Lambda)$. For $(x,\omega) \in {{\mathbb R}^{2d}}$ we obtain the relations \begin{align}\label{eq:specF}
|V_gf(S(x,\omega))| & \textoverset[0]{Lemma \ref{lma:interaction_property}}{=} \left | V_{\mu(S)^{-1} g} \left ( \sum_{\lambda \in \Lambda} c_\lambda T_{\lambda} \mu(S)^{-1} \mathcal{R}g \right ) (x,\omega) \right | \nonumber \\
& \textoverset[0]{linearity}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda V_{\mu(S)^{-1} g} (T_\lambda \mu(S)^{-1} \mathcal{R}g) (x,\omega) \right | \nonumber \\
& \textoverset[0]{Lemma \ref{lma:stft_properties}(2)}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{-2 \pi i \lambda \cdot \omega} V_{\mu(S)^{-1} g}(\mu(S)^{-1}\mathcal{R}g)(x-\lambda,\omega) \right | \nonumber \\
& \textoverset[0]{}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{-2 \pi i \lambda \cdot \omega} {\mathcal{F}} ( (\mu(S)^{-1}\mathcal{R}g) ( \overline{T_{x-\lambda} \mu(S)^{-1} g } ) )(\omega) \right |. \end{align} If we replace $c_\lambda$ with $\overline{c_\lambda}$ in equation \eqref{eq:specF} we obtain the identity \begin{align}\label{eq:specF_cross1}
|V_g(f_\times)(S(x,\omega))| & \textoverset[0]{}{=} \left | \sum_{\lambda \in \Lambda} \overline{c_\lambda} e^{-2 \pi i \lambda \cdot \omega} {\mathcal{F}} ( (\mu(S)^{-1}\mathcal{R}g) ( \overline{T_{x-\lambda} \mu(S)^{-1} g } ) )(\omega) \right | \nonumber \\
& \textoverset[0]{Lemma \ref{lma:stft_properties}(4)}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{2 \pi i \lambda \cdot \omega} \mathcal{R} {\mathcal{F}} ( (\overline{\mu(S)^{-1}\mathcal{R}g}) (T_{x-\lambda} \mu(S)^{-1} g ) )(\omega) \right |. \end{align} We now inspect the operator $\mu(S)^{-1}\mathcal{R}$ appearing in equation \eqref{eq:specF_cross1}. Note that Lemma \ref{lma:block_diag_representation} shows that for every $h \in {L^2({\mathbb R}^d)}$ we have $$
(\mu(-I_{2d}) h)(x) = |\det(-I_{2d})|^{-1/2}h(-x) = \mathcal{R}h(x). $$ Consequently, the metaplectic operator corresponding to the negative of the identity matrix in ${\mathbb R}^{2d \times 2d}$ is the reflection operator $\mathcal{R}$. Using the properties of the map $\mu$ as given in Section \ref{sec:symplectic_geometry} and the fact that every $S \in \mathrm{Sp}_{2d}({\mathbb R})$ commutes with $-I_{2d}$, it follows that there exists a constant $\nu \in \{ 1,-1\}$ such that \begin{equation}\label{R_commuting}
\mu(S)^{-1}\mathcal{R} = \nu \mathcal{R}\mu(S)^{-1}. \end{equation} In other words, $\mu(S)^{-1}$ and $\mathcal{R}$ commute up to a global phase. Proceeding with equation \eqref{eq:specF_cross1} yields \begin{align}\label{eq:specF_cross2}
|V_g(f_\times)(S(x,\omega))| & \textoverset[0]{equation \eqref{R_commuting}}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{2 \pi i \lambda \cdot \omega} \mathcal{R} {\mathcal{F}} ( ( \mathcal{R} \overline{ \mu(S)^{-1} g} ) (T_{x-\lambda} \mu(S)^{-1} g ) )(\omega) \right | \nonumber \\
& \textoverset[0]{Lemma \ref{lma:stft_properties}(5)}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{2 \pi i \lambda \cdot \omega} {\mathcal{F}} ( ( \overline{\mu(S)^{-1} g} ) (\mathcal{R} T_{x-\lambda} \mu(S)^{-1} g ) )(\omega) \right | \nonumber \\
& \textoverset[0]{Lemma \ref{lma:stft_properties}(7)}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{2 \pi i \lambda \cdot \omega} {\mathcal{F}} ( (\overline{\mu(S)^{-1} g} ) (T_{\lambda-x} \mathcal{R} \mu(S)^{-1} g ) )(\omega) \right | \nonumber \\
& \textoverset[0]{equation \eqref{R_commuting}}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{2 \pi i \lambda \cdot \omega} {\mathcal{F}} ( (\overline{\mu(S)^{-1} g} ) (T_{\lambda-x} \mu(S)^{-1} \mathcal{R} g ) )(\omega) \right | \nonumber \\
& \textoverset[0]{}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{2 \pi i \lambda \cdot \omega} {\mathcal{F}} ( T_{\lambda-x} [ ( T_{x-\lambda} \overline{\mu(S)^{-1} g}) ( \mu(S)^{-1} \mathcal{R} g ) ] )(\omega) \right | \nonumber \\
& \textoverset[0]{Lemma \ref{lma:stft_properties}(6)}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{2 \pi i \lambda \cdot \omega} e^{-2\pi i \omega \cdot (\lambda-x)} {\mathcal{F}} ( ( T_{x-\lambda} \overline{\mu(S)^{-1} g}) ( \mu(S)^{-1} \mathcal{R} g ))(\omega) \right | \nonumber \\
& \textoverset[0]{}{=} \left | \sum_{\lambda \in \Lambda} c_\lambda {\mathcal{F}} ( ( \overline{ T_{x-\lambda} \mu(S)^{-1} g}) ( \mu(S)^{-1} \mathcal{R} g ))(\omega) \right |. \end{align} Now suppose that $\omega = \lambda^* \in \Lambda^*$ is an element of the reciprocal lattice of $\Lambda$. Then $ \lambda \cdot \lambda^* \in {\mathbb Z}$ and therefore the phase factor $e^{-2 \pi i \lambda \cdot \omega}$ appearing in equation \eqref{eq:specF} reduces to $$ e^{-2 \pi i \lambda \cdot \omega} = e^{-2 \pi i \lambda \cdot \lambda^*} = 1. $$
Comparing equation \eqref{eq:specF} with equation \eqref{eq:specF_cross2} shows that in the case $\omega \in \Lambda^*$ we have $|V_gf(S(x,\omega))| = |V_g(f_\times)(S(x,\omega))|$. Since $x \in {{\mathbb R}^d}$ was arbitrary we conclude that the spectrogram of $f$ agrees with the spectrogram of $f_\times$ on the set $S({{\mathbb R}^d} \times \Lambda^*)$.
\textbf{Proof of (2).} Now assume that $\mathfrak{p}_{\Lambda^*}[\mu(S)^{-1}\mathcal{R}g] \in L^\infty(\mathcal{P}(\Lambda^*))$. According to Corollary \ref{cor:besselCorollary} the series $f = \sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \mathcal{R}g$ converges unconditionally to an element in $\mathcal{V}_\Lambda^S(\mathcal{R}g)$ whenever $\{ c_\lambda \} \in \ell^2(\Lambda)$. Since the STFT is a continuous operator on ${L^2({\mathbb R}^d)}$, this implies that the chain of equalities leading to equation \eqref{eq:specF} is justified under the weaker assumption that $\{ c_\lambda \} \in \ell^2(\Lambda)$ since one can interchange summation with the application of the STFT. From this point, the proof of statement (2) follows analogously to the proof of statement (1). \end{proof}
The foregoing theorem dealt with equality of two spectrograms on sets of the form $S({{\mathbb R}^d} \times \mathcal{B})$ with $\mathcal{B}$ a lattice. Replacing $S({{\mathbb R}^d} \times \mathcal{B})$ by $S(\mathcal{A} \times \mathcal{B})$ with $\mathcal{A} \subseteq {{\mathbb R}^d}$ a lattice leads to sufficient conditions on matrices $L\in \mathrm{GL}_{2d}({\mathbb R})$ so that the spectrograms of $f$ and $f_\times$ agree on the lattice generated by $L$.
\begin{corollary}\label{cor:ST} Let $L \in \mathrm{GL}_{2d}({\mathbb R})$ be an invertible matrix which factors into $L=ST$ where $S \in \mathrm{Sp}_{2d}({\mathbb R})$ is a symplectic matrix and $T \in \mathrm{GL}_{2d}({\mathbb R})$ is a block-diagonal matrix of the form $$ T = \begin{pmatrix} A & 0 \\ 0 & B \end{pmatrix}, \ \ A,B \in \mathrm{GL}_d({\mathbb R}). $$ Further, let $\mathcal{L}=L{\mathbb Z}^{2d}$ be the lattice generated by $L$ and let $\mathcal{B}=B{\mathbb Z}^d$ be the lattice generated by $B$. Let $g \in {L^2({\mathbb R}^d)}$ be a window function and suppose that $\{ c_\lambda : \lambda \in \mathcal{B}^* \} \subseteq {\mathbb C}$ is the defining sequence of $f \in \mathcal{V}_{\mathcal{B}^*}^S(\mathcal{R}g)$. Then the following holds: \begin{enumerate}
\item If $\{ c_\lambda \} \in c_{00}(\mathcal{B}^*)$ then $$
|V_gf(z)| = |V_g(f_\times)(z)| \ \ \forall z \in \mathcal{L}. $$ \item If $\mathfrak{p}_\mathcal{B}[\mu(S)^{-1}\mathcal{R}g] \in L^\infty(\mathcal{P}(\mathcal{B}))$ and $\{ c_\lambda \} \in \ell^2(\mathcal{B}^*)$ then $$
|V_gf(z)| = |V_g(f_\times)(z)| \ \ \forall z \in \mathcal{L}. $$ \end{enumerate} \end{corollary} \begin{proof} \textbf{Proof of (1).} Suppose that $\{ c_\lambda \} \in c_{00}(\mathcal{B}^*)$ is the defining sequence of $f \in \mathcal{V}_{\mathcal{B}^*}^S(\mathcal{R}g)$. Since $(\mathcal{B}^*)^* = \mathcal{B}$, Theorem \ref{thm:equalSpectrograms}(1) and the choice of $f$ implies that \begin{equation}\label{eq:s1}
|V_gf(z)| = |V_g(f_\times)(z)| \ \ \forall z \in S({{\mathbb R}^d} \times \mathcal{B}). \end{equation} The statement is therefore a consequence of the inclusion \begin{equation}\label{eq:inclusion}
\mathcal{L}=ST{\mathbb Z}^{2d} = S(A{\mathbb Z}^d \times B{\mathbb Z}^d) \subseteq S({\mathbb R}^d \times B{\mathbb Z}^d) = S({{\mathbb R}^d} \times \mathcal{B}). \end{equation} In view of identity \eqref{eq:s1}, the spectrogram of $f$ agrees with the spectrogram of $f_\times$ on the lattice $\mathcal{L}$.
\textbf{Proof of (2).} To prove the second statement we observe that the condition $\mathfrak{p}_\mathcal{B}[\mu(S)^{-1}\mathcal{R}g] \in L^\infty(\mathcal{P}(\mathcal{B}))$ implies that $f = \sum_{\lambda \in \mathcal{B}^*} c_\lambda T_\lambda^S \mathcal{R}g$ converges unconditionally provided that $\{ c_\lambda \} \in \ell^2(\mathcal{B}^*)$. In a similar fashion as in the proof of statement (1) we conclude from Theorem \ref{thm:equalSpectrograms}(2) that $$
|V_gf(z)| = |V_g(f_\times)(z)| \ \ \forall z \in S({{\mathbb R}^d} \times \mathcal{B}), $$ thereby proving the statement by invoking the inclusion given in \eqref{eq:inclusion}. \end{proof}
The univariate case $d=1$ deserves special attention. In this case, Corollary \ref{cor:ST} covers all lattices in ${\mathbb R}^2$: starting from an arbitrary lattice $\mathcal{L} \subseteq {\mathbb R}^2$ we can construct functions $f$ and $f_\times$ so that their spectrograms agree on $\mathcal{L}$. This is a consequence of the fact that in ${\mathbb R}^2$ all lattices are symplectic, a property which holds exclusively in ${\mathbb R}^2$.
\begin{corollary}\label{cor:1d_lattice} Let $\mathcal{L} \subseteq {\mathbb R}^2$ be an arbitrary lattice generated by $L \in \mathrm{GL}_2({\mathbb R})$ and let $g \in {L^2({\mathbb R})}$ be a window function. If $\alpha \coloneqq \det L$ then $S \coloneqq \alpha^{-1} L$ is a symplectic matrix. Moreover, if $\{ c_\lambda \} \subseteq {\mathbb C}$ is the defining sequence of $f \in \mathcal{V}_{\alpha^{-1}{\mathbb Z}}^S(\mathcal{R}g)$ then the following holds: \begin{enumerate}
\item If $\{c_\lambda \} \in c_{00}(\alpha^{-1}{\mathbb Z})$ then $$
|V_gf(z)| = |V_g(f_\times)(z)| \ \ \forall z \in \mathcal{L}. $$ \item If $\mathfrak{p}_{\alpha {\mathbb Z}} [\mu(S)^{-1}\mathcal{R}g] \in L^\infty([0,\alpha])$ and if $\{ c_\lambda \} \in \ell^2(\alpha^{-1}{\mathbb Z})$ then $$
|V_gf(z)| = |V_g(f_\times)(z)| \ \ \forall z \in \mathcal{L}. $$ \end{enumerate} \end{corollary} \begin{proof} \textbf{Proof of (1).} Since $0 \neq \alpha = \det L$, the matrix $S=\alpha^{-1}L$ satisfies $\det S = 1$. Writing $$ S= \begin{pmatrix} a & b \\ c & d\end{pmatrix}, \ \ a,b,c,d \in {\mathbb R}, $$ we obtain the relation $ad-cb=1$. In view of Lemma \ref{lma:symplecticCharacterization}, this implies that $S \in \mathrm{GL}_2({\mathbb R})$ is a symplectic matrix. Now let $T=\alpha I_2 \in {\mathbb R}^{2 \times 2}$. Then $L=ST$ and the reciprocal lattice of $\alpha{\mathbb Z}$ is given by $\alpha^{-1}{\mathbb Z}$. Thus, Corollary \ref{cor:ST} implies that if $f \in \mathcal{V}_{\alpha^{-1}{\mathbb Z}}^S(\mathcal{R}g)$ has defining sequence $\{ c_\lambda \} \in c_{00}(\alpha^{-1}{\mathbb Z})$ then the spectrograms of $f$ and $f_\times$ agree on $$ ST{\mathbb Z}^2 = L{\mathbb Z}^2 = \mathcal{L}. $$
\textbf{Proof of (2).} Suppose, on the other hand, that $\{ c_\lambda \} \in \ell^2(\alpha^{-1}{\mathbb Z})$. Since $(\alpha^{-1}{\mathbb Z})^* = \alpha {\mathbb Z}$ and since $\mathcal{P}(\alpha{\mathbb Z})=[0,\alpha] \subseteq {\mathbb R}$, the condition $\mathfrak{p}_{(\alpha^{-1}{\mathbb Z})^*} [\mu(S)^{-1}\mathcal{R}g] \in L^\infty(\mathcal{P}((\alpha^{-1}{\mathbb Z})^*))$ is equivalent to the condition $\mathfrak{p}_{\alpha {\mathbb Z}}[\mu(S)^{-1}\mathcal{R}g] \in L^\infty([0,\alpha])$. Under this assumption, Corollary \ref{cor:besselCorollary} implies that the series defining $f \in \mathcal{V}_{\alpha^{-1}{\mathbb Z}}^S(\mathcal{R}g)$ converges unconditionally provided that $f$ has defining sequence $\{ c_\lambda \} \in \ell^2(\alpha^{-1}{\mathbb Z})$. The derivation of statement (2) then follows analogously to the derivation of statement (1). \end{proof}
\subsection{Characterization of equality up to a global phase}\label{sec:noneq}
Let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice, $S \in \mathrm{Sp}_{2d}({\mathbb R})$ a symplectic matrix and $\phi \in {L^2({\mathbb R}^d)}$. In this section we seek to classify those defining sequences $\{ c_\lambda \} \subseteq {\mathbb C}$ of a function $f = \sum_{\lambda \in \Lambda} c_\lambda T_\lambda^S \phi \in \mathcal{V}_\Lambda^S(\phi)$, such that $f$ and its companion $f_\times$ do not agree up to a global phase, i.e. $f \nsim f_\times$. The obtained results will serve as a machinery for the establishment of fundamental discretization barriers for the STFT phase retrieval problem. The statements derived in the present section are based on linear independence properties of systems of translates. These properties are given in Theorem \ref{thm:independence} below. Before stating and proving this result, we require an elementary uniqueness property of functions of several complex variables.
\begin{lemma}\label{lma:elementary_uniqueness} Let $\mathcal{Y} \subseteq {{\mathbb R}^d}$ be a Lebesgue-measurable set of positive $d$-dimensional Lebesgue measure. If $F : {\mathbb C}^d \to {\mathbb C}$ is a holomorphic function of $d$ complex variables which vanishes on $\mathcal{Y}$ then $F$ vanishes identically. \end{lemma} \begin{proof} See the Appendix \hyperref[appendix:B]{B}. \end{proof}
If $\Lambda \subseteq {{\mathbb R}^d}$ is a lattice then we say that a function $f : {{\mathbb R}^d} \to {\mathbb C}$ is $\Lambda$-periodic if $f(t+\lambda)=f(t)$ for every $t \in {{\mathbb R}^d}$ and every $\lambda \in \Lambda$. Similarly, we say that a set $\Omega \subseteq {{\mathbb R}^d}$ is $\Lambda$-periodic if $\lambda + \Omega = \Omega$ for every $\lambda \in \Lambda$.
\begin{theorem}\label{thm:independence} Let $0 \neq \phi \in {L^2({\mathbb R}^d)}$ and let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice. Then the following holds: \begin{enumerate}
\item The system of ${{\mathbb R}^d}$-shifts of $\phi$, i.e. the set $ \{ T_x \phi : x \in {{\mathbb R}^d} \} $ forms a (finitely) linearly independent system. \item If there exist positive constants $0<A,B<\infty$ such that $$ A \leq \mathfrak{p}_{\Lambda^*}[\phi](t) \leq B $$ for almost every $t \in \mathcal{P}(\Lambda^*)$ then $\{ T_\lambda \phi : \lambda \in \Lambda \}$ is $\ell^2(\Lambda)$-independent, i.e. a sequence $\{c_\lambda \} \in \ell^2(\Lambda)$ is the zero sequence provided that $\sum_{\lambda \in \Lambda} c_\lambda T_\lambda \phi = 0$. \end{enumerate} \end{theorem} \begin{proof} \textbf{Proof of (1).}
We start by proving the claim made in (1). To that end, let $\mathcal{X} \subset {{\mathbb R}^d}$ be a finite set, $|\mathcal{X}|<\infty$, and let $c_x \in {\mathbb C}$ with $x \in \mathcal{X}$. Suppose that \begin{equation}\label{eq:lin_indep_zero}
\sum_{x \in \mathcal{X}} c_x T_x \phi = 0. \end{equation} We have to show that $c_x = 0$ for every $x \in \mathcal{X}$. To do so, we take the Fourier transform on both sides of equation \eqref{eq:lin_indep_zero} and obtain the relation $$
\underbrace{\sum_{x \in \mathcal{X}} c_x e^{-2\pi i x \cdot t}}_{\coloneqq E(t)} {\mathcal{F}} \phi(t) = 0, $$ which holds for almost every $t \in {{\mathbb R}^d}$. Since $\phi\neq 0$ there exists a set $\mathcal{Y} \subseteq {{\mathbb R}^d}$ of positive $d$-dimensional Lebesgue measure such that ${\mathcal{F}} \phi (t) \neq 0$ for almost every $t \in \mathcal{Y}$. Hence, the map $E$ must vanish almost everywhere on $\mathcal{Y}$. But $E$ extends from ${{\mathbb R}^d}$ to a holomorphic function of $d$ complex variables. Therefore, Lemma \ref{lma:elementary_uniqueness} shows that $E$ must vanish identically. Now observe that since $\mathcal{X}$ is a set, all its elements are distinct. A general theorem on the linear independence of characters due to Artin \cite[Theorem 4.1]{lang2005algebra} shows that the complex exponentials $t \mapsto e^{-2\pi i t \cdot x}, x \in \mathcal{X}$, are linearly independent. Hence, the property that $E$ vanishes identically implies that $c_x=0$ for all $x \in \mathcal{X}$ and this concludes the proof of the claim.
\textbf{Proof of (2).} We continue with the proof of the second statement. First observe that since $\mathfrak{p}_{\Lambda^*}[\phi] \in L^\infty(\mathcal{P}(\Lambda^*))$ it follows from Proposition \ref{prop:bessel_condition} that the series $\sum_{\lambda \in \Lambda} c_\lambda T_\lambda \phi$ converges unconditionally for every $\{ c_\lambda \} \in \ell^2(\Lambda)$. Now suppose that $\{ c_\lambda \} \in \ell^2(\Lambda)$ is such that $\sum_{\lambda \in \Lambda} c_\lambda T_\lambda \phi = 0$. By taking the Fourier transform we obtain the relation \begin{equation}\label{G_zero}
\underbrace{\sum_{\lambda \in \Lambda} c_\lambda e^{-2 \pi i \lambda \cdot t}}_{\coloneqq G(t)} {\mathcal{F}} \phi(t) = 0 \end{equation} which holds for almost every $t \in {{\mathbb R}^d}$. By assumption we have that \begin{equation}\label{eq:lower_triangle_bound}
\mathfrak{p}_{\Lambda^*}[\phi](t) = \sum_{\lambda^* \in \Lambda^*} |{\mathcal{F}} \phi (t+\lambda^*)|^2 \geq A >0 \end{equation} for almost every $t \in \mathcal{P}(\Lambda^*)$. Since $\mathfrak{p}_{\Lambda^*}[\phi]$ is $\Lambda^*$-periodic it follows that the lower bound given in equation \eqref{eq:lower_triangle_bound} holds for almost every $t \in {{\mathbb R}^d}$. Moreover, it shows that there exists no measurable $\Lambda^*$-periodic set $\Omega \subseteq {{\mathbb R}^d}$ of positive Lebesgue measure such that ${\mathcal{F}} \phi$ vanishes almost everywhere on $\Omega$. Since the map $G$ is $\Lambda^*$-periodic as well, equation \eqref{G_zero} implies that $G$ must vanish almost everywhere. The uniqueness theorem for Fourier coefficients finally shows that $c_\lambda = 0$ for every $\lambda \in \Lambda$, as desired. \end{proof}
In order to characterize equivalence up to a global phase we require the following observation which was proved in \cite[Lemma 3.4]{grohsLiehrJFAA}.
\begin{lemma}\label{lemma:funcion_values_lines} Let $Y$ be an arbitrary set and $f : Y \to {\mathbb C}$ a complex-valued map. Then $f \sim \overline{f}$ if and only if there exists an $\alpha \in {\mathbb R}$ such that $f(y) \in e^{i\alpha}{\mathbb R}$ for every $y \in Y$. \end{lemma}
The previous lemma characterizes equivalence of $f$ and $\overline{f}$ in terms of function values on a line $e^{i\alpha}{\mathbb R} \subseteq {\mathbb C}, \alpha \in {\mathbb R}$. If $\Lambda \subseteq {{\mathbb R}^d}$ is a lattice then every sequence $\{ c_\lambda \} \in \ell^2(\Lambda)$ is a function from $\Lambda$ to ${\mathbb C}$ and we can consider those sequences $\{ c_\lambda \} \in \ell^2(\Lambda)$ whose values may or may not lie on a line in the complex plane passing through the origin. This motivates the following definition.
\begin{definition} Let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice. We define the class of square-summable sequences with index set $\Lambda$ whose elements do not lie on a line in the complex plane passing through the origin by $$ \ell^2_{\mathcal{O}}(\Lambda) \coloneqq \left \{ \{ c_\lambda \} \in \ell^2(\Lambda) : \nexists \, \alpha \in {\mathbb R} \ \mathrm{s.t.} \ \{ c_\lambda \} \subseteq e^{i\alpha} {\mathbb R} \right \}. $$ \end{definition}
Note that if $\{ c_\lambda \} \in \ell^2(\Lambda) \setminus \ell^2_{\mathcal{O}}(\Lambda)$ then $\{ c_\lambda \}$ takes values on a line in the complex plane passing through the origin, i.e. there exists an $\alpha \in {\mathbb R}$ such that $c_\lambda \in e^{i\alpha}{\mathbb R}$ for every $\lambda \in \Lambda$. In other words, $\{ c_\lambda \}$ is a real sequence up to a global phase.
\begin{theorem}\label{thm:nonEquivalence} Let $0 \neq \phi \in {L^2({\mathbb R}^d)}$, $\Lambda \subseteq {{\mathbb R}^d}$ a lattice and let $S \in \mathrm{Sp}_{2d}({\mathbb R})$ be a symplectic matrix. Suppose that $\{c_\lambda \} \subseteq {\mathbb C}$ is the defining sequence of $f \in \mathcal{V}_\Lambda^S(\phi)$. Then the following holds: \begin{enumerate}
\item If $\{c_\lambda \} \in c_{00}(\Lambda)$ then
$$
f \nsim f_\times \iff \{ c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda).
$$
\item If there exist positive constants $0<A,B<\infty$ such that $$ A \leq \mathfrak{p}_{\Lambda^*}[\mu(S)^{-1}\phi](t) \leq B $$ for almost every $t \in \mathcal{P}(\Lambda^*)$ then $$
f \nsim f_\times \iff \{ c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda).
$$ \end{enumerate} \end{theorem} \begin{proof} \textbf{Proof of (1).} We start by proving that $\{c_\lambda \} \in c_{00}(\Lambda) \cap \ell^2_\mathcal{O}(\Lambda)$ implies that $f \nsim f_\times$. To that end, let $\{ c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda) \cap c_{00}(\Lambda)$ and suppose by contradiction that $f \sim f_\times$. Then there exists a constant $\nu \in {\mathbb T}$ such that $$ \sum_{\lambda \in \Lambda} c_\lambda \mu(S)T_\lambda\mu(S)^{-1} \phi = \nu \sum_{\lambda \in \Lambda} \overline{c_\lambda} \mu(S)T_\lambda\mu(S)^{-1} \phi. $$ Since $\mu(S)$ is invertible it follows that $$ \sum_{\lambda \in \Lambda} (c_\lambda - \nu \overline{c_\lambda} )T_\lambda\mu(S)^{-1} \phi = 0. $$ By assumption we have $\phi \neq 0$ and therefore the invertibility of $\mu(S)$ implies that $\mu(S)^{-1} \phi \neq 0$. In view of Theorem \ref{thm:independence} the system of translates $\{ T_x \mu(S)^{-1} \phi : x \in {{\mathbb R}^d} \}$ forms a finitely linearly independent system. Since $\{ c_\lambda \} \in c_{00}(\Lambda)$ there exists a finite set $U \subseteq \Lambda$ such that $$ \sum_{\lambda \in \Lambda} (c_\lambda - \nu \overline{c_\lambda} )T_\lambda\mu(S)^{-1} \phi = \sum_{\lambda \in U} (c_\lambda - \nu \overline{c_\lambda} )T_\lambda\mu(S)^{-1} \phi = 0. $$ Linear independence yields $c_\lambda - \nu \overline{c_\lambda} = 0$ for all $\lambda \in U$. Hence, the map $U \to {\mathbb C}, \lambda \mapsto c_\lambda$, is equivalent to its complex conjugate, whence Lemma \ref{lemma:funcion_values_lines} applies and we conclude that all elements of the sequence $\{ c_\lambda \}$ lie on a line in the complex plane passing through the origin. This contradicts the assumption that $\{ c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda)$.
We continue by proving the opposite direction, i.e. $f \nsim f_\times$ implies that $\{c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda)$. Since $\{c_\lambda \} \in c_{00}(\Lambda)$ we have $\{c_\lambda \} \in \ell^2(\Lambda)$ and it suffices to show that the elements of the sequence $\{ c_\lambda \}$ do not lie on a line in the complex plane passing through the origin, provided that $f \nsim f_\times$. We prove the claim by contraposition, i.e. we assume that there exists an $\alpha \in {\mathbb R}$ such that $c_\lambda \in e^{i\alpha} {\mathbb R}$ for every $\lambda \in \Lambda$. Writing $c_\lambda = e^{i\alpha}r_\lambda$ for some $r_\lambda \in {\mathbb R}$ it follows that $f$ and $f_\times$ satisfy $$ f = \sum_{\lambda \in \Lambda} e^{i \alpha} r_\lambda T_{\lambda}^S \phi, \ \ f_\times = \sum_{\lambda \in \Lambda} e^{-i \alpha} r_\lambda T_{\lambda}^S \phi. $$
Hence, $e^{2 i \alpha} f_\times = f$ and therefore $f \sim f_\times$. This concludes the proof of the first statement.
\textbf{Proof of (2).} For the second equivalence we start by showing that $\{ c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda)$ implies that $f$ and $f_\times$ are not equivalent, $f \nsim f_\times$. To that end we assume that $\{ c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda)$ and $f \sim f_\times$ and show that this leads to a contradiction. By Corollary \ref{cor:besselCorollary}, both series $f = \sum_{\lambda \in \Lambda} c_\lambda \mu(S)T_\lambda\mu(S)^{-1} \phi$ and $f_\times = \sum_{\lambda \in \Lambda} \overline{c_\lambda } \mu(S)T_\lambda\mu(S)^{-1} \phi$ converge unconditionally since $\{c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda) \subseteq \ell^2(\Lambda)$. In particular, if $f \sim f_\times$ then there exists a constant $\nu \in {\mathbb T}$ such that $$ \sum_{\lambda \in \Lambda} (c_\lambda - \nu \overline{c_\lambda} )T_\lambda\mu(S)^{-1} \phi = 0. $$ According to Theorem \ref{thm:independence}(2), the lower boundedness of $\mathfrak{p}_{\Lambda^*}[\mu(S)^{-1}\phi]$ implies that the system $\{ T_\lambda \mu(S)^{-1} \phi : \lambda \in \Lambda \}$ is $\ell^2(\Lambda)$-independent. Hence, $c_\lambda = \nu \overline{c_\lambda}$ for every $\lambda \in \Lambda$ and Lemma \ref{lemma:funcion_values_lines} implies that the elements of the sequence $\{ c_\lambda\}$ lie on a line in the complex plane passing through the origin, contradicting the assumption that $\{ c_\lambda \} \in \ell^2_\mathcal{O}(\Lambda)$.
To prove the other direction, assume that $\{ c_\lambda \} \notin \ell^2_\mathcal{O}(\Lambda)$. Then there exists a constant $\alpha \in {\mathbb R}$ such that $c_\lambda \in e^{i\alpha}{\mathbb R}$ for every $\lambda \in \Lambda$. Therefore, for every $\lambda \in \Lambda$ there exists a constant $r_\lambda \in {\mathbb R}$ such that $c_\lambda = e^{i\alpha} r_\lambda$. This implies that $e^{2 i \alpha} f_\times = f$ and consequently $f \sim f_\times$. \end{proof}
\subsection{Implications for sampled STFT phase retrieval}\label{sec:implications}
Let $g \in {L^2({\mathbb R}^d)}$ be a window function and let $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ be a subset of the time-frequency plane. Following the notation introduced in \cite{grohsLiehrJFAA}, we make the following definition.
\begin{definition}
Let $g \in {L^2({\mathbb R}^d)}$ and let $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$. Then $(g,\mathcal{L})$ is called a uniqueness pair if every $f \in {L^2({\mathbb R}^d)}$ is determined up to a global phase by $|V_gf(\mathcal{L})|$, i.e. $$
|V_gf(z)| = |V_gh(z)| \ \forall z \in \mathcal{L} \implies f \sim h $$ whenever $f,h \in {L^2({\mathbb R}^d)}$. \end{definition}
For instance, if $g$ has an a.e.-non-vanishing Ambiguity function then $(g,{{\mathbb R}^{2d}})$ is a uniqueness pair \cite[Theorem 4.27]{GrohsKoppensteinerRathmair}. For practical applications it is crucial to discretize the continuous STFT phase retrieval problem in order to achieve unique recovery of a signal from sampling sets $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ which are discrete, most notably lattices. As outlined in the introduction, this question remains much less well-understood than its continuous analogue. Building on the results of the previous sections, we can formulate the following discretization barrier for multivariate STFT phase retrieval.
\begin{theorem}\label{thm:phaseRetrieval_discB} Let $g \in {L^2({\mathbb R}^d)}$ be an arbitrary window function and let $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$. Then $(g,\mathcal{L})$ is never a uniqueness pair, provided that $\mathcal{L}$ satisfies one of the following two conditions: \begin{enumerate}
\item $\mathcal{L}=S({{\mathbb R}^d} \times \Lambda)$ where $S \in \mathrm{Sp}_{2d}({\mathbb R})$ is a symplectic matrix and $\Lambda \subseteq {{\mathbb R}^d}$ is a lattice
\item $\mathcal{L}=L{\mathbb Z}^{2d}$ is a lattice with generating matrix $L \in \mathrm{GL}_{2d}({\mathbb R})$ which factors into $L=ST$ where $S \in \mathrm{Sp}_{2d}({\mathbb R})$ is symplectic and $T$ is a block-diagonal matrix of the form
$$ T = \begin{pmatrix} A & 0 \\ 0 & B \end{pmatrix}, \ \ A,B \in \mathrm{GL}_d({\mathbb R}). $$ \end{enumerate} \end{theorem} \begin{proof} \textbf{Proof of (1).} The case $g=0$ is trivial. Therefore, assume in the following that $g \neq 0$. For the first statement consider a function $f \in \mathcal{V}_{\Lambda^*}^S(\mathcal{R}g)$ with defining sequence $\{ c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*)$. According to Theorem \ref{thm:equalSpectrograms}, the spectrogram of $f$ coincides with the spectrogram of $f_\times$ on the set $S({{\mathbb R}^d} \times \Lambda)$. Moreover, Theorem \ref{thm:nonEquivalence}(1) shows that $f \nsim f_\times$. This proves the first claim.
\textbf{Proof of (2).} Let $\mathcal{B}=B{\mathbb Z}^d$ be the lattice generated by $B$. According to statement (1), there exist two functions in ${L^2({\mathbb R}^d)}$ which do not agree up to a global phase but whose spectrograms agree on $S({\mathbb R}^d \times \mathcal{B})$. The assertion is therefore a consequence of the inclusion $$ \mathcal{L}=L{\mathbb Z}^{2d} = S(A{\mathbb Z}^d \times B {\mathbb Z}^d) \subseteq S({\mathbb R}^d \times \mathcal{B}). $$ \end{proof}
At this juncture, it is fruitful to compare the statement given in Theorem \ref{thm:phaseRetrieval_discB} to the situation where phase information is present. To that end, consider a window function $g \in {L^2({\mathbb R}^d)}$, a lattice $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$, and the problem of reconstructing $f \in {L^2({\mathbb R}^d)}$ from the ordinary STFT samples $$ V_gf(\mathcal{L}) = \{ V_gf(z) : z \in \mathcal{L} \}. $$ Classical results in time-frequency analysis state that mild conditions on $g$ and a density assumption on $\mathcal{L}$ imply that the Gabor system $\mathcal{G}(g,\mathcal{L})$ is a frame for ${L^2({\mathbb R}^d)}$, i.e. there exist constants $0<A,B<\infty$ such that $$
A\| f \|_2^2 \leq \sum_{\substack{\lambda \in \mathcal{L} \\ \lambda = (x,\omega)}} |\langle f , M_{\omega}T_x g \rangle|^2 \leq B \| f \|_2^2 $$ for all $f \in {L^2({\mathbb R}^d)}$. Note that the frame property is a significantly stronger property than the uniqueness property since it gives additional stability guarantees. In particular, if $\mathcal{G}(g,\mathcal{L})$ is a frame for ${L^2({\mathbb R}^d)}$ then every $f \in {L^2({\mathbb R}^d)}$ is uniquely determined by $V_gf(\mathcal{L})$. Consider for example the following result on Gabor frames \cite[Theorem 6.5.2]{Groechenig}.
\begin{theorem}[Walnut] Let $g \in W({{\mathbb R}^d})$ with
$$
W({{\mathbb R}^d}) = \left \{ h \in L^\infty({{\mathbb R}^d}) : \sum_{n \in {\mathbb Z}^d} \esssup_{x \in [0,1]^d} |h(x+n)| < \infty \right \}
$$
denoting the Wiener-amalgam space. If $\alpha,a,b>0$ are such that
$$
a \leq \sum_{k \in {\mathbb Z}^d} |g(x-\alpha k)|^2 \leq b
$$
for almost every $x \in {{\mathbb R}^d}$ then there exists a constant $\beta_0 = \beta_0(\alpha) > 0$ such that $\mathcal{G}(g,\alpha{\mathbb Z}^d \times \beta {\mathbb Z}^d)$ is a Gabor frame for every $0<\beta\leq \beta_0$. \end{theorem}
Besides, Lyubarskii, Seip and Wallstén characterized all Gabor frames $\mathcal{G}(\varphi, \alpha {\mathbb Z} \times \beta {\mathbb Z})$ for the Gaussian window $\varphi(t) = 2^{1/4} e^{-\pi t^2}$ in terms of a density assumption on $\alpha {\mathbb Z} \times \beta {\mathbb Z}$, namely, $\mathcal{G}(\varphi, \alpha {\mathbb Z} \times \beta {\mathbb Z})$ is a frame if and only if $\alpha \beta <1$ \cite[Theorem 7.5.3]{Groechenig}. Moreover, Bekka showed that for every lattice $\mathcal{L} = L{\mathbb Z}^{2d} \subseteq {{\mathbb R}^{2d}}$ there exists a window function $g \in {L^2({\mathbb R}^d)}$ such that $\mathcal{G}(g,\mathcal{L})$ is a frame for ${L^2({\mathbb R}^d)}$, provided that $|\det L| \leq 1$ \cite[Theorem 11]{Heil2007}. Moreover, it is well-known that if $\mathcal{L}=\alpha S {\mathbb Z}^{2d}$ is a symplectic lattice, $S \in \mathrm{Sp}_{2d}({\mathbb R}), \alpha >0$, and if $g \in {L^2({\mathbb R}^d)}$ then $\mathcal{G}(g,\mathcal{L})$ is a frame for ${L^2({\mathbb R}^d)}$ if and only if $\mathcal{G}(h,\alpha {\mathbb Z}^{2d})$ is a frame for ${L^2({\mathbb R}^d)}$, where $h = \mu(S)^{-1}g$ \cite[Proposition 5]{Heil2007}.
The absence of phase information reveals a significant contrast to the results listed beforehand: if $\mathcal{L} = ST{\mathbb Z}^{2d}$ with $S \in \mathrm{Sp}_{2d}({\mathbb R})$ and if $T$ is a block-diagonal matrix of the form $$ T = \begin{pmatrix} A & 0 \\ 0 & B \end{pmatrix}, \ \ A,B \in \mathrm{GL}_d({\mathbb R}), $$ then according to Theorem \ref{thm:phaseRetrieval_discB}, unique recovery from phaseless STFT samples at $\mathcal{L}$ is never possible, no matter how the window function is chosen and no matter how dense the lattice is. In particular, this holds for arbitrary separable lattices of the form $\mathcal{L}=\alpha {\mathbb Z}^d \times \beta {\mathbb Z}^d$ ($\alpha,\beta>0$ arbitrary density parameters) or symplectic lattices $\mathcal{L}=\alpha S {\mathbb Z}^{2d}$ ($S \in \mathrm{Sp}_{2d}({\mathbb R}), \alpha >0$ arbitrary), which appear in the Gabor frame results mentioned before. We summarize the previous observations in a separate statement.
\begin{corollary} Let $g \in {L^2({\mathbb R}^d)}$ be an arbitrary window function and let $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$. Then $(g,\mathcal{L})$ is never a uniqueness pair, provided that $\mathcal{L}$ is a lattice of the following form: \begin{enumerate}
\item $\mathcal{L} = \alpha S{\mathbb Z}^{2d}$ with $S \in \mathrm{Sp}_{2d}({\mathbb R})$ a symplectic matrix and $\alpha>0$ an arbitrary density parameter
\item $\mathcal{L}$ is rectangular, i.e. it is generated by an invertible diagonal matrix
\item $\mathcal{L}$ is separable
\item $\mathcal{L}=\alpha {\mathbb Z}^d \times \beta {\mathbb Z}^d$ with arbitrary density parameters $\alpha,\beta \in {\mathbb R} \setminus \{ 0 \}$. \end{enumerate} \end{corollary} \begin{proof} Let $L \in \mathrm{GL}_{2d}({\mathbb R})$ be the generating matrix of $\mathcal{L}$. All of the assertions follow from Theorem \ref{thm:phaseRetrieval_discB} via a suitable decomposition of $L$. If $\mathcal{L}=\alpha S {\mathbb Z}^{2d}, S \in \mathrm{Sp}_{2d}({\mathbb R}), \alpha > 0$, is a symplectic lattice then $\mathcal{L}$ is generated by $ST$ with $T= \alpha I_{2d}$. If $\mathcal{L}$ is rectangular then $\mathcal{L}$ is generated by $ST$ with $S=I_{2d}$ and $T=\mathrm{diag}(\alpha_1, \dots, \alpha_{2d})$, $\alpha_j \in {\mathbb R} \setminus \{ 0 \}, j \in \{1, \dots, 2d \}$. If $\mathcal{L}=A{\mathbb Z}^d \times B {\mathbb Z}^d, A,B \in \mathrm{GL}_d({\mathbb R})$, is a separable lattice then $\mathcal{L}$ is generated by $ST$ with $S=I_{2d}$ and
$$ T = \begin{pmatrix} A & 0 \\ 0 & B \end{pmatrix}, \ \ A,B \in \mathrm{GL}_d({\mathbb R}). $$ In particular, $\alpha {\mathbb Z}^d \times \beta {\mathbb Z}^d$ is a separable lattice whenever $\alpha,\beta \in {\mathbb R} \setminus \{ 0 \}$. \end{proof}
\begin{remark}[Shifted lattices]
Let $g \in {L^2({\mathbb R}^d)}$ be a window function and suppose that $\mathcal{L} = L{\mathbb Z}^{2d}$ is a lattice as given in Theorem \ref{thm:phaseRetrieval_discB}, i.e. $L=ST$ with $S$ symplectic and $T$ block-diagonal. According to Theorem \ref{thm:phaseRetrieval_discB} there exist $f_1,f_2 \in {L^2({\mathbb R}^d)}$ such that \begin{equation}\label{eqqq}
|V_g(f_1)(z)| = |V_g(f_2)(z)| \ \ \forall z \in \mathcal{L} \end{equation} and $f_1 \nsim f_2$. For every $p=(a,b), z=(x,\omega) \in {{\mathbb R}^d} \times {{\mathbb R}^d}$ and every $u \in {L^2({\mathbb R}^d)}$, the covariance property of the STFT (Lemma \ref{lma:stft_properties}(2)) shows that \begin{equation}\label{covvv}
V_gu(p+z) = V_gu(a+x,b+\omega) = e^{-2 \pi i a \cdot \omega} V_g(T_{-a}M_{-b}u)(z). \end{equation} Accordingly, if $f_1,f_2$ are given as above and $h_1,h_2$ are defined by \begin{equation*}
\begin{split}
h_1 & \coloneqq M_b T_a f_1 \\
h_2 & \coloneqq M_b T_a f_2
\end{split} \end{equation*} then the equations \eqref{eqqq} and \eqref{covvv} imply that $$
|V_g(h_1)(y)| = |V_g(h_2)(y)| \ \ \forall y \in p + \mathcal{L}. $$ Further, the property of two functions being equal up to a global phase is invariant under time-frequency shifts. Hence, $f_1 \nsim f_2$ if and only if $h_1 \nsim h_2$. This shows that the conclusions of Theorem \ref{thm:phaseRetrieval_discB} hold true if the lattice $\mathcal{L}$ gets replaced by a shifted lattice $p+\mathcal{L}$ with $p \in {\mathbb R}^{2d}$ an arbitrary vector. \end{remark}
We end the present subsection with the proof of Theorem \ref{thm:complex_cone} about the size of the class of non-equivalent function pairs which produce identical spectrogram samples. To that end, recall that for a window function $g \in {L^2({\mathbb R}^d)}$ and a set $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ the set $\mathcal{N}(g,\mathcal{L})$ is defined via \begin{equation*}
\begin{split}
& \mathcal{N}(g,\mathcal{L}) \\
& \coloneqq \left \{ f \in {L^2({\mathbb R}^d)} : \exists h \in {L^2({\mathbb R}^d)} \ \text{s.t.} \ h \nsim f \ \text{and} \ |V_gf(\mathcal{L})| = |V_gh(\mathcal{L})| \right \}.
\end{split} \end{equation*} Further recall that a set $C \subseteq V$ of a real or complex vector space $V$ is called a cone if $$ \kappa C \subseteq C $$ for every $\kappa > 0$ and $C$ is said to be infinite-dimensional if it is not contained in any finite-dimensional subspace of $V$. The following theorem states that the class of non-equivalent functions which produce identical phaseless STFT samples contains an infinite-dimensional cone.
\begin{theorem} Let $g \in {L^2({\mathbb R}^d)}$ be an arbitrary window function. If $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ satisfies $\mathcal{L} \subseteq S({{\mathbb R}^d} \times \Lambda)$ for some symplectic matrix $S \in \mathrm{Sp}_{2d}({\mathbb R})$ and some lattice $\Lambda \subseteq {{\mathbb R}^d}$ then the set $$ C \coloneqq \left \{ f \in \mathcal{V}_{\Lambda^*}^S(\mathcal{R}g) : f = \sum_{\lambda \in \Lambda^*} c_\lambda T_\lambda^S\mathcal{R}g, \ \{ c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*) \right \} $$ is an infinite-dimensional cone which is contained in $\mathcal{N}(g,\mathcal{L})$. \end{theorem} \begin{proof}
If $\{ c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*)$ then for every $\kappa > 0$ we have $\{ \kappa c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*)$ which shows that $c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*)$ is a cone in $\ell^2(\Lambda^*)$. Moreover, this cone is infinite-dimensional. These observations readily imply that the set $C$ as defined above is an infinite-dimensional cone in ${L^2({\mathbb R}^d)}$. Now suppose that $f \in C$ has defining sequence $\{ c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*)$. Since both $\ell^2_\mathcal{O}(\Lambda^*)$ and $c_{00}(\Lambda^*)$ are invariant under complex conjugation, we have $\{ \overline{c_\lambda} \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*)$ which gives $f_\times \in C$. But $f_\times \nsim f$ by Theorem \ref{thm:nonEquivalence}(1) and $|V_gf(\mathcal{L})| = |V_g(f_\times)(\mathcal{L})|$ by Theorem \ref{thm:equalSpectrograms}. This shows that $C \subseteq \mathcal{N}(g,\mathcal{L})$ which yields the assertion of the theorem. \end{proof}
\subsection{Examples and visualizations}\label{sec:examples_v}
In this section, we shall specify $g$ to be the Gaussian $$ g : {\mathbb R}^2 \to {\mathbb R}, \ \ g(x,y)=e^{-x^2-y^2}, $$ which fixes the dimension to $d=2$. Let $\Lambda \subseteq {\mathbb R}^d$ be a lattice and let $S \in \mathrm{Sp}_{2d}({\mathbb R})$ be the identity matrix $S=I_{2d}$ so that $\mu(S)$ is the identity operator on ${L^2({\mathbb R}^d)}$ and $T_\lambda^S=T_\lambda$ is the ordinary shift operator. For $N \in {\mathbb N}$ let $\lambda_1, \dots, \lambda_N \in \Lambda$ be distinct points on the lattice $\Lambda$ and let $c_{\lambda_1}, \dots, c_{\lambda_N} \in {\mathbb C}$. Define the sequence $\{ c_\lambda \} \in c_{00}(\Lambda)$ by \begin{equation*}
c_\lambda = \begin{cases} c_{\lambda_j} ,& \lambda = \lambda_j, \, j \in \{1,\dots,N\} \\ 0 ,& \text{else} \end{cases} \end{equation*} and let the function $f \in \mathcal{V}^S_\Lambda(\mathcal{R}g)=\mathcal{V}_\Lambda(\mathcal{R}g)$ be defined via $$ f = \sum_{j=1}^N c_{\lambda_j} T_{\lambda_j} \mathcal{R}g. $$
According to Theorem \ref{thm:equalSpectrograms} and Theorem \ref{thm:nonEquivalence}, $|V_gf|$ agrees with $|V_g(f_\times)|$ on ${\mathbb R}^d \times \Lambda^*$ and $f \nsim f_\times$ provided that the points $c_{\lambda_1},\dots,c_{\lambda_N}$ do not lie on a line in the complex plane passing through the origin. Under these assumptions, the map $Q_xf$ defined by \begin{equation}
\begin{split}
& Q_xf : {\mathbb R}^2 \to {\mathbb R}_{\geq 0}, \\
& Q_xf(\omega_1,\omega_2)=\left ||V_gf(x,(\omega_1,\omega_2))|^2-|V_g(f_\times)(x,(\omega_1,\omega_2))|^2 \right |
\end{split} \end{equation} vanishes on $\Lambda^*$ for every fixed $x \in {{\mathbb R}^d}$. In the following we visualize the function $Q_xf$ for different choices of $x,\Lambda,N,\lambda_j,c_{\lambda_j}$.
\subsubsection{Example I}
Let $\Lambda$ be the scaled standard rectangular lattice $\Lambda=8{\mathbb Z}^2$ and define $\lambda_1,\lambda_2,\lambda_3 \in \Lambda$ and $c_{\lambda_1},c_{\lambda_2},c_{\lambda_3} \in {\mathbb C}$ via \begin{align*}
\lambda_1 &=(0,0), & \lambda_2&=(8,0), & \lambda_3&=(0,8), \\
c_{\lambda_1} &= 1, & c_{\lambda_2} &= i, & c_{\lambda_3} &= 1+i. \end{align*} The set of points $\{ 1,i,1+i \}$ does not lie on a line in the complex plane passing through the origin which implies that $\{ c_\lambda \} \in c_{00}(\Lambda) \cap \ell_\mathcal{O}^2(\Lambda)$. Consequently, if $$ f_1 = c_{\lambda_1} T_{\lambda_1} \mathcal{R}g+c_{\lambda_2} T_{\lambda_2} \mathcal{R}g+c_{\lambda_3} T_{\lambda_3} \mathcal{R}g $$ then according to Theorem \ref{thm:equalSpectrograms} and Theorem \ref{thm:nonEquivalence}, $Q_xf_1$ vanishes on $\Lambda^* = \tfrac{1}{8}{\mathbb Z}^2$ and $f_1 \nsim (f_1)_\times$. Contour plots of $Q_xf_1$ for different choices of $x$ are provided in Figure \ref{fig:rectangular}.
\begin{figure}
\caption{Contour plot of the function $Q_xf_1$ from example I for different choices of $x$. The white dots represent the scaled standard rectangular lattice $\tfrac{1}{8}{\mathbb Z}^2$. The figure shows that $Q_xf_1$ vanishes at the points on $\tfrac{1}{8}{\mathbb Z}^2$.}
\label{fig:rectangular}
\end{figure}
\subsubsection{Example II}
Suppose that $\Lambda$ is generated by the matrix $B \in \mathrm{GL}_2({\mathbb R})$ defined by $$ B = 5 \begin{pmatrix} 1 & 0 \\ -\tfrac{1}{\sqrt{3}} & \tfrac{2}{\sqrt{3}} \end{pmatrix} = (a,b), \ \ a = \begin{pmatrix} 5 \\ -\tfrac{5}{\sqrt{3}} \end{pmatrix}, \ \ b=\begin{pmatrix} 0 \\ \tfrac{10}{\sqrt{3}} \end{pmatrix}. $$ Further, define $\lambda_1,\lambda_2,\lambda_3,\lambda_4 \in \Lambda$ and $c_{\lambda_1},c_{\lambda_2},c_{\lambda_3},c_{\lambda_4} \in {\mathbb C}$ via \begin{align*}
\lambda_1 &=(0,0), & \lambda_2&=a, & \lambda_3&=b, & \lambda_4&=a+b, \\
c_{\lambda_1} &= 1, & c_{\lambda_2} &= i, & c_{\lambda_3} &= 1+i, & c_{\lambda_4}&=\tfrac{1}{2}+\tfrac{1}{2}i \end{align*} and let \begin{equation*}
\begin{split}
f_2 & = c_{\lambda_1} T_{\lambda_1} \mathcal{R}g+c_{\lambda_2} T_{\lambda_2} \mathcal{R}g+c_{\lambda_3} T_{\lambda_3} \mathcal{R}g, \\
f_3 & = c_{\lambda_1}T_{\lambda_1} \mathcal{R}g+c_{\lambda_2} T_{\lambda_2} \mathcal{R}g+ c_{\lambda_3} T_{\lambda_3} \mathcal{R}g+c_{\lambda_4} T_{\lambda_4} \mathcal{R}g.
\end{split} \end{equation*} Contour plots of $Q_xf_2$ and $Q_xf_3$ are provided in Figure \ref{fig:hexagonal}. Note that both $Q_xf_2$ and $Q_xf_3$ vanish on $\Lambda^*$ which is the lattice generated by the matrix $B^{-T}$, $$ B^{-T} = \frac{1}{5} \begin{pmatrix} 1 & \tfrac{1}{2} \\ 0 & \tfrac{\sqrt{3}}{2} \end{pmatrix}. $$ This is the generating matrix of a scaled hexagonal lattice \cite[p. 5]{conway_sphere}.
\begin{figure}
\caption{Contour plots of the functions $Q_xf_2$ and $Q_xf_3$ from example II. The white dots represent lattice points of a scaled hexagonal lattice. The figure shows that both $Q_xf_2$ and $Q_xf_3$ vanish at points on this lattice.}
\label{fig:hexagonal}
\end{figure}
\subsection{Separable lattices and real-valued window functions}\label{sec:separability}
In the previous sections, we considered the STFT phase retrieval problem for arbitrary window functions. We now pay particular attention to the setting where the window function is real-valued and the spectrogram is sampled on a separable lattice. These are the most usual assumptions made in time-frequency analysis and its applications. One of these applications, Ptychography, was discussed in detail in Section \ref{sec:ptychography}.
\subsubsection{Pauli-type non-uniqueness}
Recall that the Pauli problem is the question whether a function $f \in {L^2({\mathbb R})}$ is determined up to a global phase by its modulus $|f|$ and the modulus of its Fourier transform $|{\mathcal{F}} f|$. It is well-known that this is in general not the case, i.e. one can construct infinitely many so-called Pauli partners $f_1,f_2 \in {L^2({\mathbb R})}$ such that $|f_1|=|f_2|$ and $|{\mathcal{F}} f_1| = |{\mathcal{F}} f_2|$ \cite{corbett_hurst_1977, VOGT1978365}. A natural variation of the Pauli problem replaces the Fourier transform ${\mathcal{F}}$ by a different operator. In Section \ref{sec:implications} we saw that for every $g \in L^2({{\mathbb R}^d},{\mathbb R})$ and every separable lattice $\mathcal{L}\subseteq {{\mathbb R}^{2d}}$ there exist $f_1, f_2 \in {L^2({\mathbb R}^d)}$ such that $|V_g(f_1)(\mathcal{L})|=|V_g(f_2)(\mathcal{L})|$ and $f_1 \nsim f_2$. Consider the Pauli-type problem where in addition to identical spectrogram samples, one also has $|f_1|=|f_2|$. The next result demonstrates that the induced Pauli problem still fails to be unique: additional knowledge of the moduli of two functions does not improve the uniqueness property. Its proof highlights how the results derived in the previous sections serve as a handy machinery for providing counterexamples in other contexts appearing frequently in phase retrieval (in this case: Pauli-type uniqueness).
\begin{theorem} Let $g \in L^2({{\mathbb R}^d},{\mathbb R})$ be a real-valued window function and let $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ be a separable lattice. Then there exist $f_1,f_2 \in {L^2({\mathbb R}^d)}$ such that \begin{enumerate}
\item $|V_g(f_1)(z)|=|V_g(f_2)(z)|$ for all $z \in \mathcal{L}$
\item $|f_1(t)| = |f_2(t)|$ for all $t \in {{\mathbb R}^d}$
\item $f_1 \nsim f_2$. \end{enumerate} \end{theorem} \begin{proof} Suppose that $\mathcal{L} = \mathcal{A} \times \mathcal{B}$ such that $\mathcal{A}, \mathcal{B}$ are lattices in ${{\mathbb R}^d}$ generated by the matrices $A,B \in \mathrm{GL}_d({\mathbb R})$, respectively. For a non-zero element $0 \neq \lambda \in \mathcal{B}^*$ of the reciprocal lattice of $\mathcal{B}$, define $$ f_1 \coloneqq T_{-\lambda} \mathcal{R}g + i T_\lambda \mathcal{R}g \in \mathcal{V}_{\mathcal{B}^*}(\mathcal{R}g). $$ Further, let $f_2 \coloneqq (f_1)_\times$ be the corresponding function which has a complex-conjugate defining sequence, i.e. $$ f_2 \coloneqq T_{-\lambda} \mathcal{R}g - i T_\lambda \mathcal{R}g. $$ Since $\mathcal{V}_{\mathcal{B}^*}(\mathcal{R}g) = \mathcal{V}_{\mathcal{B}^*}^S(\mathcal{R}g)$ with $S=I_{2d}$, Theorem \ref{thm:equalSpectrograms} shows that $$
|V_g(f_1)(z)|=|V_g(f_2)(z)| $$
for every $z \in {{\mathbb R}^d} \times (\mathcal{B}^*)^* = {{\mathbb R}^d} \times \mathcal{B} \supseteq \mathcal{A} \times \mathcal{B}$ which yields property (1). Since $g$ is real-valued, we have $f_1 = \overline{f_2}$. Thus, $|f_1|=|f_2|$ which shows that property (2) is satisfied. Property (3) follows by selection of $f_1$: we have $$ f_1 \coloneqq c_{-\lambda} T_{-\lambda} \mathcal{R}g + c_\lambda T_\lambda \mathcal{R}g $$ with $c_{-\lambda}=1$ and $c_\lambda=i$. The set $\{ 1,i \}$ does not lie on a line in the complex plane passing through the origin. Hence, the defining sequence $\{ c_\lambda \} \subseteq {\mathbb C}$ of $f_1$ satisfies $\{ c_\lambda \} \in c_{00}(\mathcal{B}^*) \cap \ell^2_\mathcal{O}(\mathcal{B}^*)$. In particular, Theorem \ref{thm:nonEquivalence} shows that $f_1 \nsim (f_1)_\times = f_2$ and this concludes the proof of the statement. \end{proof}
\subsubsection{Restriction to the space $L^2({{\mathbb R}^d}, {\mathbb R})$}
Next, we investigate the situation where in addition to a real-valued window function one has the prior knowledge that the underlying signal space consists of real-valued functions, i.e. we assume that all considered input functions belong to the space $L^2({{\mathbb R}^d},{\mathbb R})$ of real-valued, square-integrable functions. The corresponding uniqueness problem asks for the validity of the implication $$
|V_gf(z)| = |V_gh(z)| \ \ \forall z \in \mathcal{L} \implies f \sim h $$ provided that $f,h \in L^2({{\mathbb R}^d},{\mathbb R})$. Notice that in this setting the condition $f \sim h$ for $f,h \in L^2({{\mathbb R}^d},{\mathbb R})$ means that there exists a constant $\nu \in \{ -1,1 \}$ such that $f=\nu h$. In other words, $f$ is equal to $h$ up to a sign factor and we have the equivalence $$ f \sim h \iff ( f = h \ \ \mathrm{or} \ \ f=-h ). $$ The corresponding phase retrieval problem is therefore also known as the sign retrieval problem. In order to study the real-valued regime, we start by introducing the space of Hermitian sequences.
\begin{definition} Let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice. We define the subspace of Hermitian sequences in $\ell^2(\Lambda)$ via $$ \ell^2_\mathcal{H}(\Lambda) \coloneqq \left \{ \{ c_\lambda \} \in \ell^2(\Lambda) : c_{-\lambda} = \overline{c_{\lambda}} \right \}. $$ \end{definition}
Suppose now that $\phi \in L^2({{\mathbb R}^d},{\mathbb R})$ is a real-valued generating function. If $\Lambda \subseteq {{\mathbb R}^d}$ is a lattice, $S \in \mathrm{Sp}_{2d}({\mathbb R})$ a symplectic matrix and $f \in \mathcal{V}_\Lambda^S(\phi)$ has a Hermitian defining sequence then the following statement shows how a suitable choice of $S$ implies real-valuedness of $f$.
\begin{proposition}\label{prop:r_valuedness_condition} Let $\phi \in L^2({{\mathbb R}^d},{\mathbb R})$ be real-valued and let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice. Suppose that $\{c_\lambda \} \subseteq {\mathbb C}$ is the defining sequence of $f \in \mathcal{V}_\Lambda^{-\mathcal{J}}(\phi)$ where $\mathcal{J} \in \mathrm{Sp}_{2d}({\mathbb R})$ denotes the standard symplectic matrix. Then the following holds: \begin{enumerate}
\item If $\{c_\lambda \} \in c_{00}(\Lambda) \cap \ell^2_\mathcal{H}(\Lambda)$ then $f$ is real-valued.
\item If $\mathfrak{p}_{\Lambda^*}[{\mathcal{F}^{-1}} \phi] \in L^\infty(\mathcal{P}(\Lambda^*))$ then $f$ is real-valued provided that $\{ c_\lambda \} \in \ell^2_\mathcal{H}(\Lambda)$. \end{enumerate} \end{proposition} \begin{proof} \textbf{Proof of (1).} If $\mathcal{J}$ denotes the standard symplectic matrix then $\mu(-\mathcal{J})={\mathcal{F}}$ is the Fourier transform \cite[Example 9.4.1]{Groechenig}. Hence, if $\{c_\lambda \} \in c_{00}(\Lambda)$ is the defining sequence of $f$ then \begin{equation}\label{rser}
f = \sum_{\lambda \in \Lambda} c_\lambda {\mathcal{F}} T_\lambda {\mathcal{F}}^{-1} \phi = \sum_{\lambda \in \Lambda} c_\lambda M_{-\lambda} \phi. \end{equation} Consider a partition of the lattice $\Lambda$ of the form $$ \Lambda = \{ 0 \} \cup I_+ \cup I_- \ \ \text{s.t.} \ \ -I_+ = I_-. $$ Using this partition, the map $f$ can be written as \begin{equation}\label{eq:real-val}
\begin{split}
f(t) &= c_0 \phi(t) + \sum_{\lambda \in I_+} c_\lambda M_{-\lambda} \phi(t) + \sum_{\lambda \in I_-} c_\lambda M_{-\lambda} \phi(t) \\
& = c_0 \phi(t) + \sum_{\lambda \in I_+} c_\lambda M_{-\lambda} \phi(t) + \sum_{\lambda \in I_+} c_{-\lambda} M_{\lambda} \phi(t) \\
& = c_0 \phi(t) + \sum_{\lambda \in I_+} \left ( c_\lambda e^{-2\pi i \lambda t} + \overline{c_\lambda e^{-2\pi i \lambda t}} \right ) \phi(t) \\
& = c_0 \phi(t) + \sum_{\lambda \in I_+} 2 {\mathrm{Re} \,} \left ( c_\lambda e^{-2\pi i \lambda t} \right ) \phi(t),
\end{split} \end{equation} where we used that $\{ c_\lambda \}$ belongs to the set $\ell^2_\mathcal{H}(\Lambda)$. This property of $\{ c_\lambda \}$ further implies that $c_0 \in {\mathbb R}$. Since $\phi$ is real-valued, the identity derived in equation \eqref{eq:real-val} shows that $f$ is real-valued.
\textbf{Proof of (2).} To prove the second statement we observe that if $\mathfrak{p}_{\Lambda^*}[{\mathcal{F}^{-1}} \phi] \in L^\infty(\mathcal{P}(\Lambda^*))$ then by Proposition \ref{prop:bessel_condition} the series given in equation \eqref{rser} converges unconditionally. By an argument analogous to that of the previous case we obtain the relation $$ f(t) = c_0 \phi(t) + \sum_{\lambda \in I_+} 2 {\mathrm{Re} \,} \left ( c_\lambda e^{-2\pi i \lambda t} \right ) \phi(t) $$ with a suitable choice of $I_+ \subseteq \Lambda$. Thus, $f$ is real-valued. \end{proof}
The previous Proposition leads to the following statement on STFT phase retrieval in a real-valued regime.
\begin{theorem}\label{thm:r_valued_counterexamples} Let $g \in L^2({{\mathbb R}^d},{\mathbb R})$ be a real-valued window function and suppose that $\mathcal{L} \subseteq {{\mathbb R}^d}$ satisfies one of the following conditions \begin{enumerate}
\item $\mathcal{L} = \Lambda \times {\mathbb R}^d$ with $\Lambda$ a lattice in ${{\mathbb R}^d}$
\item $\mathcal{L}$ is a separable lattice of the form $\mathcal{L} = \mathcal{A} \times \mathcal{B}$ where $\mathcal{A}$ and $\mathcal{B}$ are lattices in ${{\mathbb R}^d}$. \end{enumerate} Then there exist two real-valued functions $f_1,f_2 \in L^2({{\mathbb R}^d},{\mathbb R})$ such that $$
|V_g(f_1)(z)| = |V_g(f_2)(z)| \ \forall z \in \mathcal{L} \ \ \text{and} \ \ f_1 \nsim f_2. $$ \end{theorem} \begin{proof} Let $\Lambda \subseteq {{\mathbb R}^d}$ be a lattice and suppose that $f_1 \in \mathcal{V}_{\Lambda^*}^{-\mathcal{J}}(\mathcal{R}g)$ has defining sequence $\{ c_\lambda \}$ belonging to the non-empty intersection $c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*) \cap \ell^2_\mathcal{H}(\Lambda^*)$. Set $f_2 \coloneqq (f_1)_\times$. According to Theorem \ref{thm:nonEquivalence} we have $f_1 \nsim f_2$. In addition, Proposition \ref{prop:r_valuedness_condition} shows that $f_1$ is real-valued. Noting that both $c_{00}(\Lambda^*)$ and $\ell^2_\mathcal{H}(\Lambda^*)$ are invariant under complex conjugation shows that $f_2$ is real-valued as well. Finally, Theorem \ref{thm:equalSpectrograms} implies that $$
|V_g(f_1)(z)| = |V_g(f_2)(z)| $$ for every $z \in -\mathcal{J}({{\mathbb R}^d} \times \Lambda) = \Lambda \times (-{{\mathbb R}^d}) = \Lambda \times {{\mathbb R}^d}$. The second part of the statement follows from the property that every separable lattice $\mathcal{A} \times \mathcal{B}$ is contained in a set of the form $ \Lambda \times {{\mathbb R}^d}$. \end{proof}
Theorem \ref{thm:r_valued_counterexamples} proves the existence of two non-equivalent, real-valued functions $f_1,f_2 \in L^2({{\mathbb R}^d}, {\mathbb R})$ for which their spectrograms agree on a separable lattice $\mathcal{L} = \mathcal{A} \times \mathcal{B}$. Clearly, the conclusions of Theorem \ref{thm:r_valued_counterexamples} hold true for every lattice $\mathcal{D}$ which is contained in a separable lattice $\mathcal{L}$, $\mathcal{D} \subseteq \mathcal{L}$. If $\mathcal{L}$ is generated by $L$ and $\mathcal{D}$ is generated by $D$, the property of $\mathcal{D}$ being contained in $\mathcal{L}$ means that $L^{-1}D$ is an integral matrix. A class of matrices which satisfies this condition is the important class of matrices with rational entries, a property indispensable in the numerical treatment of the STFT phase retrieval problem.
\begin{corollary} Suppose that $\mathcal{L} \subseteq {\mathbb R}^{2d}$ is a lattice which is generated by an invertible matrix $L$ with rational entries, $L \in \mathrm{GL}_{2d}({\mathbb Q})$. Then for every real-valued window function $g \in L^2({{\mathbb R}^d},{\mathbb R})$ there exist two real-valued functions $f_1,f_2 \in L^2({{\mathbb R}^d},{\mathbb R})$ such that \begin{enumerate}
\item $|V_g(f_1)(z)| = |V_g(f_2)(z)|$ for every $z \in \mathcal{L}$
\item $f_1 \nsim f_2$. \end{enumerate} \end{corollary}
\begin{proof} Let $q_i = (q_{i,1}, \dots, q_{i,2d} ) \in {\mathbb Q}^{2d}, i \in \{ 1, \dots, 2d \}$, be the row vectors of $L$. Since every $q_{ij}$ is rational, we can write $ q_{ij} = \frac{a_{ij}}{b_{ij}}, a_{ij} \in {\mathbb Z}, b_{ij} \in {\mathbb Z} \setminus \{ 0 \}. $ As a consequence, we have $$ q_i = \frac{1}{\prod_{j=1}^{2d} b_{ij}} (c_{i1}, \dots, c_{i,2d}), \ \ c_{ij} = a_{ij} \prod_{k=1, k \neq j}^{2d} b_{ik}. $$ Now let $c_i \coloneqq (c_{i1}, \dots, c_{i,2d})$ and define $$ D \coloneqq \mathrm{diag} \left ( \frac{1}{\prod_{j=1}^{2d} b_{1j}}, \frac{1}{\prod_{j=1}^{2d} b_{2j}}, \dots, \frac{1}{\prod_{j=1}^{2d} b_{2d,j}} \right ). $$ If $z \in {\mathbb Z}^{2d}$ then in view of the notations above the product $Lz$ satisfies \begin{equation}\label{eq:LD}
Lz = \begin{pmatrix} q_1 \cdot z \\ \vdots \\ q_{2d} \cdot z \end{pmatrix} = \begin{pmatrix} \frac{1}{\prod_{j=1}^{2d} b_{1j}} c_1 \cdot z \\ \vdots \\ \frac{1}{\prod_{j=1}^{2d} b_{2d,j}} c_{2d} \cdot z \end{pmatrix} = D \begin{pmatrix} c_1 \cdot z \\ \vdots \\ c_{2d} \cdot z \end{pmatrix}. \end{equation}
Since $c_j \cdot z \in {\mathbb Z}$ for every $j \in \{ 1, \dots, 2d \}$ it follows from equation \eqref{eq:LD} that $\mathcal L \subseteq \mathcal{D}$ where $\mathcal{D}$ is the lattice generated by the diagonal matrix $D$. Since $\mathcal{D}$ is separable, Theorem \ref{thm:r_valued_counterexamples} implies the existence of two real-valued functions $f_1, f_2 \in L^2({{\mathbb R}^d},{\mathbb R})$ such that $|V_g(f_1)(w)| = |V_g(f_2)(w)|$ for every $w \in \mathcal{D}$ and, in addition, $f_1 \nsim f_2$. The statement follows from the inclusion $\mathcal{L} \subseteq \mathcal{D}$. \end{proof}
We finalize the present subsection with the proof of Theorem \ref{thm:real_cone} about the size of the class of real-valued, non-equivalent functions in ${L^2({\mathbb R}^d)}$ which produce identical phaseless STFT samples on sets of the form $\mathcal{L} = \Lambda \times {{\mathbb R}^d}$ with $\Lambda \subseteq {{\mathbb R}^d}$ a lattice. Recall that the set $\mathcal{N}_{\mathbb R}(g,\mathcal{L})$ is defined by \begin{equation*}
\begin{split}
& \mathcal{N}_{\mathbb R}(g,\mathcal{L}) \\ & \coloneqq \left \{ f \in L^2({{\mathbb R}^d}, {\mathbb R}) : \exists h \in L^2({{\mathbb R}^d}, {\mathbb R}) \ \text{s.t.} \ h \nsim f \ \text{and} \ |V_gf(\mathcal{L})| = |V_gh(\mathcal{L})| \right \}.
\end{split} \end{equation*}
\begin{theorem} Let $g \in {L^2({\mathbb R}^d)}$ be an arbitrary real-valued window function. If $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ satisfies $\mathcal{L} \subseteq \Lambda \times {{\mathbb R}^d}$ for some lattice $\Lambda \subseteq {{\mathbb R}^d}$ then the set $$ C \coloneqq \left \{ f \in \mathcal{V}_{\Lambda^*}^{-\mathcal{J}}(\mathcal{R}g) : f = \sum_{\lambda \in \Lambda^*} c_\lambda T_\lambda^{-\mathcal{J}}\mathcal{R}g, \ \{ c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*) \cap \ell^2_\mathcal{H}(\Lambda^*) \right \} $$ is an infinite-dimensional cone which is contained in $\mathcal{N}_{\mathbb R}(g,\mathcal{L})$. \end{theorem} \begin{proof}
If $\{ c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*) \cap \ell^2_\mathcal{H}(\Lambda^*)$ then for every $\kappa > 0$ we have $\{ \kappa c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*) \cap \ell^2_\mathcal{H}(\Lambda^*)$ which shows that $c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*) \cap \ell^2_\mathcal{H}(\Lambda^*)$ is a cone in $\ell^2(\Lambda^*)$. Moreover, this cone is infinite-dimensional. These observations readily imply that the set $C$ as defined above is an infinite-dimensional cone in ${L^2({\mathbb R}^d)}$. Proposition \ref{prop:r_valuedness_condition} shows that every element in $C$ is real-valued and therefore $C \subseteq L^2({{\mathbb R}^d},{\mathbb R})$. Now suppose that $f \in C$ has defining sequence $\{ c_\lambda \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*) \cap \ell^2_\mathcal{H}(\Lambda^*)$. Since $\ell^2_\mathcal{O}(\Lambda^*), c_{00}(\Lambda^*)$ and $\ell^2_\mathcal{H}(\Lambda^*)$ are invariant under complex conjugation, we have $\{ \overline{c_\lambda} \} \in c_{00}(\Lambda^*) \cap \ell^2_\mathcal{O}(\Lambda^*) \cap \ell^2_\mathcal{H}(\Lambda^*)$ which gives $f_\times \in C$. But $f_\times \nsim f$ by Theorem \ref{thm:nonEquivalence}(1) and $|V_gf(\mathcal{L})| = |V_g(f_\times)(\mathcal{L})|$ by Theorem \ref{thm:equalSpectrograms}. This shows that $C \subseteq \mathcal{N}_{\mathbb R}(g,\mathcal{L})$. \end{proof}
\subsection{The univariate case $d=1$ and Fock spaces}
Consider the univariate STFT phase retrieval problem, i.e. the situation where the window function $g$ is an element of ${L^2({\mathbb R})}$ and the sampling points $\mathcal{L}$ are a subset of ${\mathbb R}^2$. The main result of the previous paper \cite{grohsLiehrJFAA} states that for every window function $g \in {L^2({\mathbb R})}$ and every lattice $\mathcal{L}=L{\mathbb Z}^2, L \in \mathrm{GL}_2({\mathbb R})$, there exist $f_1,f_2 \in {L^2({\mathbb R})}$ such that $f_1 \nsim f_2$ and $|V_gf_1(z)|=|V_gf_2(z)|$ for every $z \in \mathcal{L}$. For the special case of the centered Gaussian window $g(t)=e^{-\pi t^2}$ the result was derived in \cite{alaifari2020phase}. In both papers, the construction of the functions $f_1,f_2$ with the above properties uses ideas from the theory of fractional Fourier transforms. These are metaplectic operators, namely the operators corresponding to rotation matrices $$ \begin{pmatrix} \cos \theta & - \sin \theta \\ \sin \theta & \cos \theta \end{pmatrix} \in \mathrm{Sp}_2({\mathbb R}), \ \ \theta \in {\mathbb R}. $$ We refer to an article by de Gosson and Luef for a systematic study of the correspondence between metaplectic operators and fractional Fourier transforms \cite{gossonLuef}. We now show that the statements derived in \cite{alaifari2020phase,grohsLiehrJFAA} follow as a by-product from the results derived in the present paper. At its core, it is a consequence of the fact that in ${\mathbb R}^2$ every lattice is symplectic.
\begin{corollary}[Main theorem of \cite{grohsLiehrJFAA}]\label{thm:1d_case} Let $g \in {L^2({\mathbb R}^d)}$ and let $\mathcal{L} \subseteq {{\mathbb R}^{2d}}$ be an \emph{arbitrary} lattice. If $d=1$ then $(g,\mathcal{L})$ is not a uniqueness pair. \end{corollary} \begin{proof}
Let $\alpha \coloneqq \det L, S \coloneqq \alpha^{-1}L \in \mathrm{Sp}_2({\mathbb R})$ and let $\{ c_\lambda \} \subseteq {\mathbb C}$ be the defining sequence of $f \in \mathcal{V}_{\alpha^{-1}{\mathbb Z}}^S(\mathcal{R}g)$ such that $\{ c_\lambda \} \in c_{00}(\alpha^{-1}{\mathbb Z}) \cap \ell^2_\mathcal{O}(\alpha^{-1}{\mathbb Z})$. According to Corollary \ref{cor:1d_lattice} it holds that $|V_gf(z)| = |V_g(f_\times)(z)|$ for every $z \in \mathcal{L}$ whereas $f \nsim f_\times$ by Theorem \ref{thm:nonEquivalence}. This yields the assertion. \end{proof}
\begin{remark}[Fock spaces]
Let $z = x+i\omega \in {\mathbb C}, x,\omega \in {\mathbb R}$, and let $\mu$ be the Gaussian measure $d\mu(z) = e^{-\pi |z|^2} \, dx \, d\omega$. The Bargmann-Fock space $F^2({\mathbb C})$ is the collection of all entire functions $a : {\mathbb C} \to {\mathbb C}$ for which $$
\| a \|^2_{F^2({\mathbb C})} \coloneqq \int_{\mathbb C} |a(z)|^2 \, d\mu(z) < \infty. $$ If $f \in {L^2({\mathbb R})}$ then we can define an entire function $Bf : {\mathbb C} \to {\mathbb C}$ via $$ Bf(z) = 2^{\frac{1}{4}} \int_{\mathbb R} f(t) e^{2\pi t z - \pi t^2 - \frac{\pi}{2}z^2} \, dt. $$ The map $Bf$ is called the Bargmann transform of $f$ and it can be shown that $B$ is an isometry mapping ${L^2({\mathbb R})}$ onto $F^2({\mathbb C})$ \cite[Theorem 6.8]{zhu}. The Bargmann transform is closely related to the STFT with Gaussian window. For if $\varphi(t) = 2^{\frac{1}{4}} e^{-\pi t^2}$ then for every $f \in {L^2({\mathbb R})}$ and every $z = x+i\omega \in {\mathbb C}, x,\omega \in {\mathbb R}$, we have $$
V_\varphi f(x,-\omega) = e^{\pi i x \omega - \frac{\pi}{2}|z|^2} Bf(z). $$ In other words, $V_\varphi f$ is equal to $Bf$ up to a reflection and up to a multiplicative non-zero weighting factor which is independent of $f$. In particular, if $\mathcal{L} \subseteq {\mathbb R}^2$ then according to \cite[Proposition 3.4.1]{Groechenig} for every $f,h \in {L^2({\mathbb R})}$ we have \begin{equation}\label{fock_eq}
\begin{split}
& |V_\varphi f(x,-\omega)| = |V_\varphi h(x,-\omega)| \ \ \forall (x,\omega) \in \mathcal{L} \\
\iff & |Bf(x+i\omega)| = |Bh(x+i\omega)|\ \ \forall (x,\omega) \in \mathcal{L}.
\end{split} \end{equation} Classical uniqueness theory in Bargmann-Fock spaces shows that if $\mathcal{L}=L{\mathbb Z}^2 \subseteq {\mathbb R}^2 \simeq {\mathbb C}, L \in \mathrm{GL}_2({\mathbb R})$, is a lattice such that $\det L \leq 1$ then $\mathcal{L}$ is a uniqueness set for $F^2({\mathbb C})$, i.e. two functions $a,b \in F^2({\mathbb C})$ are identical provided that $a$ and $b$ agree on $\mathcal{L}$ \cite{BARGMANN1971221,Perelomov1971,Seip+1992+91+106}. In an analogous and frequently used terminology, this means that the system of \emph{coherent states} $\{ e^{2 \pi i \omega t} \varphi(t-x) : (x,\omega) \in \mathcal{L} \}$ is complete in ${L^2({\mathbb R})}$. Now observe that the Bargmann transform is a linear bijection between ${L^2({\mathbb R})}$ and $F^2({\mathbb C})$ and further, $\mathcal{L}' \coloneqq \{ (x,-\omega) : (x,\omega) \in \mathcal{L} \}$ is a lattice provided that $\mathcal{L}$ is a lattice. Combining these observations with Corollary \ref{thm:1d_case} and the equivalence given in equation \eqref{fock_eq} shows that for every lattice $\mathcal{L} \subseteq {\mathbb R}^2 \simeq {\mathbb C}$ there exist two functions $a,b \in F^2({\mathbb C})$ such that $$
|a(z)| = |b(z)| \ \ \forall z \in \mathcal{L} \ \ \mathrm{and} \ \ a \nsim b, $$ in complete contrast to the setting where phase information is present. \end{remark}
\section{Conclusion}
Motivated by important applications in physics and imaging sciences, in this article we have derived a non-uniqueness theory in sampled STFT phase retrieval which led to the formulation of several fundamental discretization barriers. The main results highlight that the STFT phase retrieval problem fails to be unique if the samples are located on lattices. In fact, we developed an extensive machinery for the construction of non-equivalent function pairs which produce identical spectrogram samples on certain prescribed lattices. As an application, we showed that the Pauli problem which is induced via phaseless sampling of the STFT is not unique. Moreover, the problem even fails to be unique if the signal class is restricted to real functions. The established theorems emphasize a foundational difference of sampling without phase compared to ordinary sampling of the STFT: no matter how the window function is chosen there exists no critical sampling density so that uniqueness is achieved via lattice sampling (as is the case for the Nyquist rate in classical sampling theory). In addition, the results highlight a stark contrast to the case where the spectrogram of a function is sampled on a continuous domain (e.g. for Gaussian windows the STFT phase retrieval problem is unique via sampling on an arbitrary open set). The proofs made use of techniques from several areas in mathematics, in particular symplectic geometry, the theory of shift-invariant spaces as well as linear independence properties of systems of translates. Finally, the article gives rise to fruitful future research: since lattice sampling does not guarantee uniqueness one might ask whether irregular sampling or a suitable increase of the redundancy of the sampling set is beneficial.
\textbf{Acknowledgement.} The authors highly appreciate helpful discussions with Irina Shafkulovska and her useful comments on the paper.
\section*{Appendix}
\subsection*{A. Proof of Proposition \ref{prop:bessel_condition}}\label{appendix:A}
Suppose that $\{ c_\lambda \} \in c_{00}(\Lambda)$ is a sequence of finitely many non-zero components. We start by upper bounding the $L^2$-norm of the sum $\sum_{\lambda \in \Lambda} c_\lambda T_\lambda \phi$: \begin{equation*}
\begin{split}
& \left \| \sum_{\lambda \in \Lambda} c_\lambda T_\lambda \phi \right \|^2_{{L^2({\mathbb R}^d)}} = \left \| {\mathcal{F}} \sum_{\lambda \in \Lambda} c_\lambda T_\lambda \phi \right \|^2_{{L^2({\mathbb R}^d)}} = \left \| \sum_{\lambda \in \Lambda} c_\lambda M_{-\lambda} {\mathcal{F}} \phi \right \|^2_{{L^2({\mathbb R}^d)}} \\
&= \int_{{\mathbb R}^d} \left | \sum_{\lambda \in \Lambda} c_\lambda e^{-2\pi i \lambda \cdot t} {\mathcal{F}} \phi(t) \right |^2 \, dt = \int_{{{\mathbb R}^d}} |F(t)|^2|{\mathcal{F}} \phi(t)|^2 \, dt
\end{split} \end{equation*}
where $F(t) \coloneqq \sum_{\lambda \in \Lambda} c_\lambda e^{-2\pi i \lambda \cdot t}$. The function $F$ is measurable, bounded and $\Lambda^*$-periodic. Since $|F|^2,|{\mathcal{F}} \phi|^2 \geq 0$ we therefore obtain \begin{equation*}
\begin{split}
& \int_{{{\mathbb R}^d}} |F(t)|^2|{\mathcal{F}} \phi(t)|^2 \, dt = \sum_{\lambda^* \in \Lambda^*} \int_{\mathcal{P}(\Lambda^*)} |F(t + \lambda^*)|^2 |{\mathcal{F}} \phi(t+ \lambda^*)|^2 \, dt \\
& = \int_{\mathcal{P}(\Lambda^*)} |F(t)|^2 \sum_{\lambda^* \in \Lambda^*} |{\mathcal{F}} \phi(t+ \lambda^*)|^2 \, dt = \int_{\mathcal{P}(\Lambda^*)} |F(t)|^2 \mathfrak{p}_{\Lambda^*}[\phi](t) \, dt \\
& \leq \det(A^{-T}) \| \mathfrak{p}_{\Lambda^*}[\phi] \|_{L^\infty(\mathcal{P}(\Lambda^*))} \sum_{\lambda \in \Lambda} |c_\lambda|^2.
\end{split} \end{equation*}
The term $M \coloneqq \det(A^{-T}) \| \mathfrak{p}_{\Lambda^*}[\phi] \|_{L^\infty(\mathcal{P}(\Lambda^*))}$ is a constant independent of $\{ c_\lambda \} \in c_{00}(\Lambda)$. It follows from \cite[Theorem 3, p. 129]{Young} that the system $ \{ T_\lambda \phi : \lambda \in \Lambda \} $ is a Bessel sequence in ${L^2({\mathbb R}^d)}$. In particular, the series $\sum_{\lambda \in \Lambda} c_\lambda T_\lambda \phi$ converges unconditionally for every $\{ c_\lambda \} \in \ell^2(\Lambda)$ \cite[Corollary 3.2.5]{christensenBook}.
\subsection*{B. Proof of Lemma \ref{lma:elementary_uniqueness}}\label{appendix:B}
Denote by $\lambda^d$ the $d$-dimensional Lebesgue measure in ${{\mathbb R}^d}$. We prove the statement by induction over the dimension $d$. If $d=1$ then the statement holds true since every set $\mathcal{Y} \subseteq {\mathbb R}$ of positive $1$-dimensional Lebesgue measure, $\lambda^1(\mathcal{Y}) > 0$, contains a limit point and a limit point is a uniqueness set for holomorphic functions $F : {\mathbb C} \to {\mathbb C}$. This proves the base case. Suppose now that the statement holds in ${\mathbb C}^d$ and let $\mathcal{Y} \subseteq {\mathbb R}^{d+1}$ with $\lambda^{d+1}(\mathcal{Y}) > 0$. Writing $x \in {\mathbb R}^{d+1}$ as $x=(x',t), x' \in {{\mathbb R}^d}, t \in {\mathbb R}$, and using Fubini's theorem we obtain \begin{equation}\label{eq:vanishing}
\lambda^{d+1}(\mathcal{Y}) = \int_{{\mathbb R}^{d+1}} {\boldsymbol 1}_\mathcal{Y}(x) \, d\lambda^{d+1}(x) = \int_{\mathbb R}\underbrace{ \int_{{\mathbb R}^d} {\boldsymbol 1}_\mathcal{Y}(x',t) \, d\lambda^d(x')}_{\coloneqq M(t)} d\lambda^1(t) >0. \end{equation} Since $M(t) \geq 0$ for every $t \in {\mathbb R}$, equation \eqref{eq:vanishing} implies that there exists a Lebesgue measurable set $A \subseteq {\mathbb R}$ such that $\lambda^1(A) > 0$ and $M(t)>0$ for every $t \in A$. It follows by assumption on the function $F$ and by induction hypothesis that $F(\cdot, t)$ vanishes identically for every $t \in A$. Consequently, the base case shows that $F(x,\cdot)$ vanishes identically for every $x \in {{\mathbb C}^d}$, thereby proving the statement.
\end{document} | arXiv | {
"id": "2207.05628.tex",
"language_detection_score": 0.6182761192321777,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{\bf Robust semiparametric inference with missing data}
\author{Eva Cantoni$^{1}$ and Xavier de Luna$^{2}$ \\ \\ $^{1}$ Research Center for Statistics and \\ Geneva School of Economics and Management, \\University of Geneva, Geneva 4, 1211, Switzerland.\\ $^{2}$ Department of Statistics, \\Ume{\aa} School of Business, Economics and Statistics, \\
Ume{\aa} University, Ume{\aa}, SE-90187, Sweden.} \providecommand{\keywords}[1]{\textbf{\textit{Keywords: }} #1}
\maketitle
\begin{abstract}
Classical semiparametric inference with missing outcome data is not robust to contamination of the observed data and a single observation can have arbitrarily large influence on estimation of a parameter of interest. This sensitivity is exacerbated when inverse probability weighting methods are used, which may overweight contaminated observations. We introduce inverse probability weighted, double robust and outcome regression estimators of location and scale parameters, which are robust to contamination in the sense that their influence function is bounded. We give asymptotic properties and study finite sample behaviour. Our simulated experiments show that contamination can be more serious a threat to the quality of inference than model misspecification. An interesting aspect of our results is that the auxiliary outcome model used to adjust for ignorable missingness by some of the estimators, is also useful to protect against contamination. We also illustrate through a case study how both adjustment to ignorable missingness and protection against contamination are achieved through weighting schemes, which can be contrasted to gain further insights. \end{abstract}
\keywords{doubly robust estimator; influence function; inverse probability weighting; outcome regression.}
\section{Introduction}
Many data analyses are concerned with drawing inference on a parameter $\beta$ partially characterising a distribution law of interest from which data is assumed to be a random sample. However, most often the observed data deviates from this ideal random sample scenario, for instance such that some of the observations are contaminated, i.e. drawn from a nuisance distribution law. Another common deviation is that the random sample is incomplete: some data are missing, due to dropout in follow up studies, non-response in surveys, etc. Such corrupted random samples are indeed the rule rather than the exception in applications, and we give a telling example in Section \ref{bmi.sec}, where we study BMI change in a ten year follow up study. While methods are available to deal with these two different problems separately as described below, it is essential to have inferential methods able to deal with situations where both types of corruption (missingness and contamination) arise simultaneously. Indeed, while it is well known that many estimating procedures, including OLS, ML, and method of moments, lack robustness to contamination \citep[a single observation can have arbitrarily large influence, e.g.,][]{Hamp:Ronc:Rous:Stah:1986,Heri:Cant:Copt:Vict:2009}, it is seldom acknowledged that this sensitivity to contamination can be exacerbated with estimators able to deal with missing data; see, however, \citet[][]{Hulliger:1995} and \citet{Beaumont:2013}, where this increased sensitivity has been pointed out in the context of surveys of finite populations. The potentially increased sensitivity to contamination arises in particular for estimators overweighting some of the observations (those representing part of the data which are missing), and if the overweighted observations are by chance contaminated, this will have a large negative impact on the inference. Thus, while robust methods are important in general, they are even more so when missing data needs to be accounted for.
In this paper we focus on situations where, while observations may be missing for the response,
there is a set of background variables (covariates) which are observed for all units, and we can assume that outcomes are independent of missingness given the observed covariates (ignorable missingness assumption). Under the latter assumption, auxiliary (nuisance) models explaining the missingness mechanism and the outcome given the covariates can be combined in different ways to obtain semiparametric estimators of $\beta$. Classical examples include inverse probability weighted estimators (IPW), using the missingness mechanism model as weights \citep{Hor:Thom:1952}, and augmented inverse probability weighted estimators \citep[AIPW,][]{robins1994estimation} using both auxiliary models. AIPW estimators are then robust to the misspecification of one of these two auxiliary models at a time (thus the name doubly robust estimator often used); see, e.g., \cite{tsiatis2006}, \cite{RotnitzkyStijn:15}. Finally, outcome regression imputation (OR) estimators using only the model for the outcome may also be used, thereby avoiding weighting \citep[][]{kang2007,Tan:2007}.
Within this context of ignorable missing data in the outcome, we introduce and study estimators that are able to deal with situations where most of
the units in the sample are randomly drawn from the distribution of interest
while a smaller number of units is possibly drawn from another nuisance
distribution. An estimator is considered robust to such contamination if it has
bounded influence function, see \cite{Hamp:Ronc:Rous:Stah:1986}. This is because the influence function measures the asymptotic bias due to an infinitesimal contamination. A single observation can thus yield arbitrarily large bias if the influence function of the estimator is not bounded.
Classical IPW, AIPW and OR estimators have unbounded influence
function. They are not robust in this sense; AIPW does have a robustness property, but merely to misspecification of one of the auxiliary models used.
In a full data and finite parametric context, bounded influence function estimators are most
naturally introduced as M-estimators \citep{Huber:1964,Hamp:1974}. Here we take
advantage of the fact that IPW, AIPW and OR estimators are partial M-estimators
\citep{newey:mcfadden:94,Stef:Boos:2002,Zhel:Gent:Ronc:2012} to propose bounded influence function estimators. An interesting result of the introduced estimators is that the auxiliary outcome regression model used by AIPW to improve on efficiency compared to IPW, happens to also be useful in improving on the robustness properties of AIPW and OR. Robustness to contamination is typically obtained at the price of a loss in efficiency, although the latter can be controlled and set to say approximately 5\% under some conditions. On the other hand, our simulated experiments show that moderate contamination seriously affects the quality of classical semiparametric inference, more so than model misspecification. Our
approach is general and we fully spell out the case where $\beta$ is the two dimensional
location-scale parameter.
The paper is organized as follows. Section 2 presents formally the context, and introduces robust estimators for missing data situations, together with their asymptotic properties. Section 3 studies finite sample properties through simulation designs previously used by \cite{Lunceford:Davidian:2004}, to which we have added several contamination schemes. This allows us to study robustness due both to model misspecification and to contamination.
In Section 4 a longitudinal study of BMI based on electronic record linkage data is used to illustrate, e.g., how the robustification introduced can be seen as a weighting scheme which can be compared to the weighting used to correct for ignorable missingness.
The paper is concluded with a discussion in Section 5. Regularity conditions, proofs, implementation details and exhaustive results from the simulations are relegated to the Appendix.
\section{Theory and method}\label{theory.sec} \subsection{Notation and context} Let a vector variable $Z$ be partitioned as $(Z_{2}',Z_1')'$, and consider the ideal situation when $(Z_{2i},Z_{1i}$, $i=1,\ldots,n)$ are independently drawn from a probability law with density $p(Z_{2i},Z_{1i};\beta,\eta)=p(Z_{2i};\beta,\eta)p(Z_{1i}\mid Z_{2i};\eta)$ for unknown values $\beta=\beta_0$ and $\eta=\eta_0$, where $\beta$, of finite dimension, is the parameter of interest describing some aspects of the distribution, $\eta$ is a nuisance parameter possibly of infinite dimension, and $\beta$ and $\eta$ are variationally independent \cite[semiparametric model; see][Chap.~4]{tsiatis2006}. We consider simultaneously two types of deviation from the above ideal random sample setting.
First, situations where atypical observations can occur in $Z_{2i}$ (and possibly $Z_{1i}$), i.e. where the majority of the data is generated as described above, but some of the observations may be issued from a different, but unknown, distribution. The final goal is to draw inference about $\beta$, even in the presence of a small fraction of spurious data points.
Further, we also want to allow for incomplete data situations, where we observe only $(R_iZ_{2i},Z_{1i},R_{i}$, $i=1,\ldots,n)$, with $R_i$ a binary variable indicating the observation status of $Z_{2i}$: $R_i=1$ if observed and $R_i=0$ if missing. We make throughout the missing at random assumption (also called ignorable missingness), i.e. $\Pr(R_i=1\mid Z_{2i},Z_{1i})=\pi(Z_{1i})$, with $\pi(Z_{1i})>0$ on the support of $Z_{1i}$.
The missing assignment mechanism is modelled up to a parameter $\gamma$, $ \pi(Z_{1i};\gamma)$, and we distinguish cases where this model is correctly specified, i.e. $\Pr(R_i=1\mid Z_{1i})=\pi(Z_{1i};\gamma_0)$ for a given but unknown $\gamma_0$, and cases where it is misspecified, i.e. an incorrect model for $\Pr(R_i=1\mid Z_{1i})$ is used.
\subsection{Full data case: robust M-estimators} Let us first consider an estimating function $m(Z_2;\beta)$, which would be used if we had no missing data ($R_i=1$ for all $i$): \begin{equation}\label{mestimator.eq} \sum_{i=1}^n m(Z_{2i};\beta)=0. \end{equation} The choice of $m(Z_{2i};\beta)$ may be done based on desired properties for the resulting M-estimator for $\beta$ (in the complete data case); e.g., such that $E(m(Z_{2i};\beta_0))=0$ for consistency. The study of robustness properties to contamination was formalised in Hampel (1974). The influence function plays a central role because it can be interpreted as measuring the asymptotic bias due to an infinitesimal contamination. Here, the influence function for the resulting estimator $\hat\beta$ solution of (\ref{mestimator.eq}) is \begin{align} E\left( - \frac{\partial m(Z_{2i};\beta)}{\partial \beta} \right)^{-1} m(Z_{2i};\beta) \end{align}
under suitable regularity conditions \citep{Stef:Boos:2002}.
In the sequel we focus on the location-scale parameter $\beta=(\mu=E(Z_{2i}),\sigma^2=Var(Z_{2i}))'$. A commonly used choice is $m(Z_{2i};\beta)=(Z_{2i}-\mu,(Z_{2i}-\mu)^2-\sigma^2)'$, because the resulting estimator is efficient in the Gaussian case. For this choice of $m$ estimating function, the influence function will not be bounded in $Z_{2i}$ and therefore not robust to contamination; see, e.g., \citet[Chap. 2]{Maro:Mart:Yoha:2006}. A general class of M-estimators for $\mu$ and $\sigma^2$ are solution of (\ref{mestimator.eq}) for \begin{align}\label{psi.eq} m_\psi(Z_{2i};\beta)=& \left(\begin{array}{c}
\psi_{c_{\mu}} \left( \frac{Z_{2i}- \mu}{\sigma}\right)-A \\
\psi_{c_{\sigma}}^2\left( \frac{Z_{2i}- \mu}{\sigma} \right)- B \end{array}\right), \end{align} where $\psi_{c}(\cdot)$ is an odd function, and where $A= E\left\{\psi_{c_{\mu}}\left( \sigma_0^{-1} (Z_{2i}- \mu_0) \right)\right\}$ and $B= E\left\{\psi_{c_{\sigma}}^2\left(\sigma_0^{-1} (Z_{2i}- \mu_0)\right)\right\}$ in order to ensure that $E(m_\psi(Z_{2i};\beta))=0$ at $\beta_0=(\mu_0,\sigma_0)$, the true unknown value for $(\mu,\sigma)$. Bounded influence function estimators are obtained by using bounded $\psi_{c}(\cdot)$ functions, e.g., the Huber function $\psi_c(t) = \min\{ c,\max\{ t,-c\}\} $, and the Tukey biweight function
$\psi_c(t) = ((t/c)^2-1)^2 t $ if $|t|<c$ and $0$ otherwise; see \cite{Heri:Cant:Copt:Vict:2009} for further details. The value for $c$ can be chosen appropriately to control efficiency under the non-contaminated Gaussian case. Equations (\ref{mestimator.eq}) using (\ref{psi.eq}) need to be solved simultaneously for $\mu$ and $\sigma$.
\subsection{Robust estimation with missing data} Semiparametric estimation with missing data has been reviewed for instance in \citet{tsiatis2006}. We introduce below novel bounded influence function estimators.
Let $ \pi(Z_{1i};\gamma)$ be a well specified parametric model, i.e. such that $\Pr(R_i=1\mid Z_{1i})=\pi(Z_{1i};\gamma_0)$ for an unknown value $\gamma_0$. Assume that we have an estimator $\hat\gamma$ of $\gamma$ solution of estimating equations \begin{align} \label{estim.gamma} &\sum_{i=1}^n m_\gamma (R_i,Z_{1i};\gamma)=0, \end{align}
such that $\plim_{n\rightarrow\infty}\hat\gamma = \gamma_0$.
\begin{definition} A robust inverse probability weighted (RIPW) estimator \\ $(\hat\mu_{RIPW},\hat\sigma_{RIPW})'$ of $(\mu,\sigma)'$ is solution of the estimating equation: \begin{align}\label{robustmu.ipw.eq} \sum_{i=1}^n \varphi_{RIPW}(Z_{i},R_i;\beta,\hat\gamma)=0, \end{align}
where \begin{align}\nonumber \varphi_{RIPW}(Z_{i},R_i;\beta,\gamma)=\left( \begin{array}{c} \frac{R_i \big( \psi_{c_{\mu}}\big( \sigma^{-1} (Z_{2i}- \mu)\big)-A\big)}{\pi(Z_{1i};\gamma)} \\
\frac{R_i \big( \psi_{c_{\sigma}}^2\big( \sigma^{-1} (Z_{2i}- \mu)\big)-B\big)}{\pi(Z_{1i};\gamma)} \end{array}\right), \end{align} with $A=E\left\{ \psi_{c_{\mu}}\big( \sigma_0^{-1} (Z_{2i}- \mu_0)\big)\right\}$ and $ B = E\left\{ \psi_{c_{\sigma}}^2\big( \sigma_0^{-1} (Z_{2i}- \mu_0)\big)\right\}. $ \end{definition}
A similar estimator was proposed and studied in \cite{Hulliger:1995} in the context, however, of finite populations and surveys. Note that letting $\psi_c(t) = t$, the identity function, yields a classical inverse probability weighted estimator \citep{Hor:Thom:1952}.
\begin{remark}\label{weight.rem}
RIPW estimation can be interpreted as a double weighting scheme estimator, where $Z_{2i}$ observations are weighted with inverse propensity scores $1/\pi(Z_{1i};\gamma)$ (i.e., observations lying on the covariate support where the probability of dropout is higher are overweighted) and with $\psi$ weights $\psi_{c_{\mu}}\big( \sigma^{-1} (Z_{2i}- \mu)\big) / (\sigma^{-1} (Z_{2i}- \mu))$ (i.e., outlying observations are downweighted). These weights as well as the compound weights $1/\pi(Z_{1i};\gamma) \times \psi_{c_{\mu}}\big( \sigma^{-1} (Z_{2i}- \mu)\big) / (\sigma^{-1} (Z_{2i}- \mu))$ may be looked at in applications to gain insight in how the two weighting schemes interact; see Section \ref{bmi.sec} for an illustration. \end{remark}
\begin{proposition}\label{ripw.prop} Let $\pi(Z_{1i};\gamma)$ be correctly specified with (\ref{estim.gamma}) such that $\plim_{n\rightarrow\infty}\hat\gamma= \gamma_0$. Then, under regularity conditions given in Appendix~\ref{prop1and3.sec}, $(\hat\mu_{RIPW},\hat\sigma_{RIPW})'$ is consistent for $(\mu_0,\sigma_0)'$ and has the following asymptotic multivariate normal distribution as $n\rightarrow\infty$ $$\sqrt n \Big( (\hat\mu_{RIPW},\hat\sigma_{RIPW})'-(\mu_0,\sigma_0)'\Big )\overset{d}\rightarrow N\big(0,E\big\{IF_{RIPW}(IF_{RIPW})'\big\}\big),$$ where $IF_{RIPW}$ is the influence function: \begin{align} \lefteqn{IF_{RIPW}(Z_{i},R_i; \beta)= -\left\{ E \left[ \frac{\partial m_\psi(Z_{2i};\beta)}{\partial \beta'} \right] \right\}^{-1} \Bigg\{ \varphi_{RIPW}(Z_{i},R_i;\beta,\gamma_0)} \nonumber \\ &- E\left[ \frac{\partial \varphi_{RIPW}(Z_{i},R_i; \beta,\gamma_0) }{\partial \gamma'}\right]\left\{E\left[ \frac{\partial m_\gamma (R_i,Z_{1i};\gamma_0)}{\partial\gamma'}\right]\right\}^{-1}m_\gamma (R_i,Z_{1i};\gamma_0) \Bigg\}. \label{ifipw.eq} \end{align} \end{proposition} Thus, from (\ref{ifipw.eq}) we see that the influence function of RIPW is bounded in $Z_{2i}$ if the function $\psi_c(\cdot)$ is bounded. This is not the case for the classical IPW, corresponding to $\psi_c(t) = t$.
The implementation of RIPW requires the computation of $A$ and $B$. If the standardized quantity $\sigma_0^{-1} (Z_{2i}- \mu_0)$ is satisfactorily approximated by a ${\mathcal N}(0,1)$ variate, then $A=0$ (since $\psi_c$ is odd) and $B$ can be approximated by Monte Carlo simulations.
In an attempt to improve efficiency one may consider $h(Z_{1i};\beta,\xi)$ a working model (parametrised with $\xi$) for $E(m(Z_{2i};\beta)\mid Z_{1i})$. This model is correctly specified for $E(m(Z_{2i};\beta)\mid Z_{1i})$ if $h(Z_{1i};\beta,\xi_0)=E(m(Z_{2i};\beta)\mid Z_{1i})$ for a value $\xi_0$. However, we call it working model because we will also consider situations where it is not necessarily correctly specified.
Assume we have estimators $\hat\gamma$ of $\gamma$ and $\hat\xi$ of $\xi$, respectively solutions of (\ref{estim.gamma}) and \begin{align} \label{estim.xi} &\sum_{i=1}^n R_i m_\xi (Z_{i};\xi)=0, \end{align} such that $\plim_{n\rightarrow\infty}\hat\xi = \xi^*$ and $\plim_{n\rightarrow\infty}\hat\gamma = \gamma^*$, for some fixed values $\xi^*$ and $\gamma^*$. In the correctly specified cases $\xi^*=\xi_0$ and $\gamma^*=\gamma_0$. \begin{definition} A robust augmented IPW (RAIPW) estimator $(\hat\mu_{RAIPW},\hat\sigma_{RAIPW})'$ of $(\mu,\sigma)'$ is solution of the estimating equation: \begin{align} \sum_{i=1}^n \varphi_{RAIPW}(Z_{i},R_i;\beta,\hat\gamma,\hat\xi) = 0, \label{robusts.aipw.eq} \end{align} where \begin{align}\nonumber \varphi_{RAIPW}(Z_{i},R_i;\beta,\gamma,\xi)=\left( \begin{array}{c} \frac{R_i \big( \psi_{c_{\mu}}\big( \sigma^{-1} (Z_{2i}- \mu)\big)-A\big)}{\pi(Z_{1i};\gamma)}-\left[ \frac{R_i-\pi(Z_{1i};\gamma)}{\pi(Z_{1i};\gamma)} h_1(Z_{1i};\beta,\xi) \right] \\
\frac{R_i \big( \psi_{c_{\sigma}}^2\big( \sigma^{-1} (Z_{2i}- \mu)\big)-B\big)}{\pi(Z_{1i};\gamma)}- \left[ \frac{R_i-\pi(Z_{1i};\gamma)}{\pi(Z_{1i};\gamma)} h_2(Z_{1i};\beta,\xi) \right] \end{array}\right), \end{align} $h_1(Z_{1i};\beta,\xi)$ is a working model for $E\Big( \psi_{c_{\mu}}( \sigma^{-1}
(Z_{2i}- \mu)\big) -A | Z_{1i} \Big)$ and $h_2(Z_{1i};\beta,\xi)$ for $E\Big( \psi_{c_{\sigma}}^2( \sigma^{-1}
(Z_{2i}- \mu)\big) -B | Z_{1i} \Big)$, and $A= E\left\{ \psi_{c_{\mu}}\big( \sigma_0^{-1} (Z_{2i}- \mu_0)\big)\right\}$ and $ B = E\left\{ \psi_{c_{\sigma}}^2\big( \sigma_0^{-1} (Z_{2i}- \mu_0)\big)\right\}. $ \end{definition}
Using the identity function for $\psi_c$ yields a classical augmented inverse probability weighting (AIPW) estimator \citep{robins1994estimation}.
\begin{proposition}\label{raipw.prop} Let $\pi(Z_{1i};\gamma)$ be correctly specified with (\ref{estim.gamma}) such that \\ $\plim_{n\rightarrow\infty}\hat\gamma=\gamma_0$ and/or let $h(Z_{1i};\beta,\xi)=(h_1(Z_{1i};\beta,\xi),h_2(Z_{1i};\beta,\xi))'$ be correctly specified with (\ref{estim.xi}) such that $\plim_{n\rightarrow\infty}\hat\xi=\xi_0$. Then, under regularity conditions given in Appendix~\ref{proof.section},
$(\hat\mu_{RAIPW},\hat\sigma_{RAIPW})$ is consistent for $(\mu_0,\sigma_0)'$ and has the following asymptotic multivariate normal distribution as $n\rightarrow\infty$ $$\sqrt n \Big( (\hat\mu_{RAIPW},\hat\sigma_{RAIPW})'-(\mu_0,\sigma_0)'\Big )\overset{d}\rightarrow N\big(0,E\big\{IF_{RAIPW}(IF_{RAIPW})'\big\}\big),$$ where \begin{align} \lefteqn{IF_{RAIPW}(Z_{i},R_i; \beta)= -\left\{ E \left[ \frac{\partial m_\psi(Z_{2i},\beta)}{\partial \beta'} \right] \right\}^{-1}\Bigg\{ \varphi_{RAIPW}(Z_{i},R_i; \beta,\gamma^*,\xi^*)} \nonumber \\ &- E\left[ \frac{\partial \varphi_{RAIPW}(Z_{i},R_i; \beta,\gamma^*,\xi^*)}{\partial \gamma'} \right]\left\{E\left[ \frac{\partial m_\gamma (R_i,Z_{1i};\gamma^*)}{\partial\gamma'}\right]\right\}^{-1}m_\gamma (R_i,Z_{1i};\gamma^*) \nonumber \\ &- E\left[ \frac{\partial \varphi_{RAIPW}(Z_{i},R_i;\beta,\gamma^*,\xi^*) }{\partial \xi'}\right]\left\{E\left[ \frac{\partial m_\xi (Z_{i};\xi^*)}{\partial\xi'}\right]\right\}^{-1}m_\xi (Z_{i},\xi^*)\Bigg \}. \label{ifaipw.eq} \end{align}
\end{proposition} Thus, RAIPW is, like AIPW, doubly robust in the sense that only one of the two auxiliary models used must be correctly specified in order to obtain consistent and asymptotically normal estimators. Moreover, the influence function of RAIPW is bounded in $Z_{2i}$ if the function $\psi_c(\cdot)$ is bounded (assuming the estimating equation \eqref{estim.xi} of the auxiliary model also has a bounded influence function in $Z_{2i}$; see Example \ref{example.sls}), while this is not the case for the classical AIPW.
\begin{example}[RAIPW estimator of location and scale]\label{example.sls} Let us specify a working model parametrized by $\xi=(\xi_1',\xi_2)'$ as \begin{align}\label{auxi.mod}
Z_{2i}=\tilde h(Z_{1i};\xi_1)+\xi_2\nu, \end{align} with $\nu\sim N(0,1$). Note that this does not constrain $Z_{2i}$ to have a symmetric distribution as was the case for RIPW. The corresponding working model for $E(m(Z_{2i};\beta)\mid Z_{1i})$ is such that $h_1(Z_{1i};\beta,\xi)=\tilde h(Z_{1i};\xi_1)-\mu$ and $h_2(Z_{1i};\beta,\xi)=(\tilde h(Z_{1i};\xi_1)-\mu)^2+\xi_2^2-\sigma^2$. Estimators of $\xi$ with bounded influence function in this context are described in Appendix~\ref{robreg.section}. Then, \begin{align}
h_1(Z_{1i};\beta,\xi)&=E\Big( \psi_{c_{\mu}}( \sigma^{-1}
(\tilde h(Z_{1i};\xi_1)+\xi_2\nu- \mu)\big) | Z_{1i} \Big)-A, \label{workmu.eq} \\ h_2(Z_{1i};\beta,\xi)&=E\Big( \psi_{c_{\sigma}}^2( \sigma^{-1}
(\tilde h(Z_{1i};\xi_1)+\xi_2\nu- \mu)\big) | Z_{1i} \Big)-B, \label{works.eq} \end{align} may be computed using numerical integration for the conditional expectations, where $E(\cdot\mid Z_{1i})$ is the expectation under model~\eqref{auxi.mod}. Under the latter model, $A=0$ and Monte Carlo simulations can be used to obtain B. Both (\ref{workmu.eq}) and (\ref{works.eq}) can be used to obtain RAIPW estimators through \eqref{robusts.aipw.eq}. See Appendix~\ref{implementation.section} for more implementation details. \end{example}
Finally, when the outcome model is correctly specified, yet another robust estimator can be introduced. \begin{definition} A robust outcome regression estimator (ROR) $(\hat\mu_{ROR},\hat\sigma_{ROR})'$ of $(\mu,\sigma)'$ is solution of
\begin{align}\label{imp.eq} &\sum_{i=1}^n h(Z_{1i};\beta,\hat\xi) =\sum_{i=1}^n \varphi_{ROR}(Z_{1i};\beta,\hat\xi)=0 \end{align} by using a correctly specified working model $h(Z_{1i};\beta,\xi_0)=E(m(Z_{2i};\beta)\mid Z_{1i})$ together with $\hat\xi$, an M-estimator (\ref{estim.xi}) of $\xi$ with bounded influence function. \end{definition}
\begin{proposition}\label{ror.prop} Let $h(Z_{1i};\beta,\xi)=(h_1(Z_{1i};\beta,\xi),h_2(Z_{1i};\beta,\xi))'$ be correctly specified with (\ref{estim.xi}) such that $\plim_{n\rightarrow\infty}\hat\xi=\xi_0$. Then, under regularity conditions given in Appendix~\ref{prop1and3.sec},
$(\hat\mu_{ROR},\hat\sigma_{ROR})'$ is consistent for $(\mu_0,\sigma_0)'$ and has the following asymptotic multivariate normal distribution as $n\rightarrow\infty$ $$\sqrt n \Big( (\hat\mu_{ROR},\hat\sigma_{ROR})'-(\mu_0,\sigma_0)'\Big )\overset{d}\rightarrow N\big(0,E\big\{IF_{ROR}(IF_{ROR})'\big\}\big),$$ where \begin{align} IF_{ROR}(Z_{i},R_i; \beta)=&-\left\{ E \left[ \frac{\partial \varphi_{ROR}(Z_{1i}; \beta,\xi_0)}{\partial \beta'} \right] \right\}^{-1}\Bigg\{ \varphi_{ROR}(Z_{1i}; \beta,\xi_0) \nonumber \\ &- E\left[ \frac{\partial \varphi_{ROR}(Z_{1i};\beta,\xi_0) }{\partial \xi'}\right]\left\{E\left[ \frac{\partial m_\xi (Z_{i};\xi_0)}{\partial\xi'}\right]\right\}^{-1}m_\xi (Z_{i},\xi_0)\Bigg \}. \label{ifor.eq} \end{align} \end{proposition}
\begin{example}[ROR estimator of location and scale]\label{example.ror} Within the context of Example~\ref{example.sls}, assume that model (\ref{auxi.mod}) holds. Then, $h(Z_{1i};\beta,\xi)=(\tilde h(Z_{1i};\beta,\xi)-\mu,(\tilde h(Z_{1i};\xi_1)-\mu)^2+\xi_2^2-\sigma^2)'$, and $\xi$ is estimated with a bounded influence function estimator; see Appendix~\ref{robreg.section} for details. \end{example}
Unlike for RAIPW, the regularity conditions apply to the working model $h(Z_{1i};\beta,\xi)$ only. For instance, to characterise the influence function one needs to be more specific about the working model (which needs to be correctly specified). On the other hand, the results of Propositions \ref{ripw.prop} and \ref{raipw.prop} for RIPW and RAIPW respectively give specifically the regularity conditions that must apply to the $\psi_c$ functions used, and the resulting influence functions.
We have focused on robustness properties to contamination in the outcome $Z_{2i}$. Contamination in the covariates $Z_{1i}$ may also happen. This is typically tackled by using Tukey's redescending $\psi$ function, which protects against high leverage points, i.e. outlying values in the design space; see, e.g., \citet[Chap. 4 and 5]{Maro:Mart:Yoha:2006} and \citet{Cant:Ronc:2001}.
\section{Simulation experiments} \label{sim.section} We present a large simulation exercise to assess several aspects of our procedure for the joint estimation of location and scale: behaviour for clean data, robustness to the presence of contamination, and sensitivity to model misspecification.
\subsection{Simulation setting} \label{simsetting.section}
We implement the same simulation design as \cite{Lunceford:Davidian:2004}. We consider the covariates $X = (X_1,X_2,X_3)'$ associated with both the missingness mechanism and the outcome, and the covariates $V= (V_1,V_2,V_3)'$ which are associated only with the outcome. The variables $(X_{1},X_{2}, X_{3}, V_{1}, V_{2}, V_{3})^\prime$ are realizations of the joint distribution of $(X',V')'$ built by first taking $X_3 \sim \mbox{Bernoulli}(0.2)$. Then, conditionally on $X_3$, $V_3$ is generated as Bernoulli with $\Pr(V_3=1 \mid X_3) = 0.75 X_3 + 0.25 (1-X_3)$ and finally $(X_1, V_1, X_2 ,V_2)' \mid X_3$ is taken from a multivariate normal distribution ${\mathcal N}(\tau_{X_3},\Sigma_{X_3})$, where $\tau_1 = (1,1,-1,-1)'$, $\tau_0 = (-1,-1,1,1)'$ and $$\Sigma_1 = \Sigma_0 = \left( \begin{array}{cccc} 1 & 0.5 & -0.5 & -0.5 \\ 0.5 & 1 & -0.5 & -0.5\\ -0.5 & -0.5 & 1 & 0.5 \\ -0.5 & -0.5 & 0.5 & 1 \end{array} \right).$$
For each individual $i=1,\ldots,n$, the missingness mechanism indicator $R_i$ is generated as a Bernoulli variable with probability of missingness ($R_i=0$) defined by \begin{equation*} \Pr(R_i =0\mid X,V) = \frac{\exp( \gamma_{1} +\gamma_{2} X_{1i} + \gamma_{3} X_{2i} +\gamma_{4} X_{3i}) }{1+\exp( \gamma_{1} +\gamma_{2} X_{1i} + \gamma_{3} X_{2i} +\gamma_{4} X_{3i})}, \end{equation*} which corresponds to the control group in \cite{Lunceford:Davidian:2004}.
The response $Z_{2i}$ is generated according to the model \begin{equation} \label{sim.model} Z_{2i} = \xi_{10} +\xi_{11} X_{1i} + \xi_{12} X_{2i} +\xi_{13} X_{3i} + \xi_{14} V_{1i} + \xi_{15} V_{2i} + \xi_{16} V_{3i} + \epsilon_i, \end{equation} where $\epsilon_i \sim {\mathcal N}(0,\xi_2^2 =1)$ and in our notation $\xi_1=(\xi_{10},\xi_{11},\cdots,\xi_{16})$.
The parameter values $(\xi_{10},\xi_{11}, \xi_{12},\xi_{13} )' = (0,-1,1,-1)'$ are kept fixed throughout, whereas different scenarios are considered for $(\xi_{14},\xi_{15},\xi_{16})'$ and $\gamma$, namely \begin{equation} \label{csi.value} (\xi_{14},\xi_{15},\xi_{16})'= \left\{ \begin{array}{ll} (-1,1,1)' & \mbox{strong association}\\ (-0.5,0.5,0.5)' & \mbox{moderate association}\\ (0,0,0)' & \mbox{no association} \end{array} \right. \end{equation} and \begin{equation} \label{gamma.value}
\gamma = (\gamma_1,\gamma_2,\gamma_3,\gamma_4)' = \left\{ \begin{array}{ll} (0,0.6,-0.6,0.6)' & \mbox{strong association}\\ (0,0.3,-0.3,0.3)' & \mbox{moderate association.} \end{array} \right. \end{equation}
Notice that when $(\xi_{14},\xi_{15},\xi_{16})' =(0,0,0)'$, $V$ is associated with neither the outcome nor the missingness mechanism.
The values of $\xi$ and $\gamma$ are such that lower response values and lower probabilities of missingness are obtained when $X_3=1$, and conversely when $X_3=0$.
We generate $1000$ realisations of size $n=1000$ and $5000$, called clean datasets, i.e. free of contamination. We present results for $n=1000$; the results for the larger sample size, which confirmed these findings, are omitted. Starting from the clean datasets, we obtain corresponding contaminated datasets according to different schemes as we describe in Section~\ref{simcontam.section}.
The combination of parameters in \eqref{csi.value} and \eqref{gamma.value} gives six designs. For each design, we fit a total of 20 estimators of $\beta = (\mu, \sigma)'$. They differ in the choice of estimation strategy (IPW, AIPW, OR), whether they are in their classical or robust versions, and whether the auxiliary models are misspecified or not. Thus, we consider \\ \vskip-3mm \indent IPW($X$), AIPW($X,X$), AIPW($X,XV$), OR($X$) and OR($XV$), \\ \vskip-3mm \noindent and their robust versions \\ \vskip-3mm \indent RIPW($X$), RAIPW($X,X$), RAIPW($X,XV$), ROR($X$) and ROR($XV$), \\ \vskip-3mm \noindent where the covariate sets used in the auxiliary models are given within parentheses, and, e.g., AIPW($X,XV$), means that the first set $X$ is used to explain $R_i$ and the second set $XV:= (X,V)$ is used to explain $Z_{2i}$. All these estimators use well specified auxiliary models. We, moreover, consider estimators using misspecified auxiliary models as follows: \\ \vskip-3mm \indent IPW($X_{\_}$), AIPW($X_{\_},XV$), AIPW($X,X_{\_}V$), AIPW($X_{\_},X_{\_}V$), and OR($X_{\_}V$), \\ \vskip-3mm \noindent and their robust versions \\ \vskip-3mm \indent RIPW($X_{\_}$), RAIPW($X_{\_},XV$), RAIPW($X,X_{\_}V$), RAIPW($X_{\_},X_{\_}V$), \\ \indent and ROR($X_{\_}V$), \\ \vskip-3mm \noindent where $X_{\_}:=X\setminus X_1$ and $X_{\_}V:=(X_{\_},V)$. Auxiliary models explaining $R_i$ and $Z_{2i}$ are fitted using, respectively, logistic regression and ordinary least squares for the classical versions, and robust logistic regression and robust linear regression for the robust versions. For RIPW and RAIPW estimators Tukey's $\psi$ function is used in \eqref{robusts.aipw.eq}. Tukey's $\psi$ function is usually preferred over Huber's with asymmetric contamination. The robust estimators are tuned to have approximately $95\%$ efficiency at the correctly specified models for clean data. The values of the corresponding tuning constants are given in Appendix~\ref{tuning.section}. 
For details on the computation see Appendix~\ref{implementation.section}.
\subsection{Results for clean data}
The top half of Figure~\ref{CleanContam2XiModerateGammaModerateFig} summarises with boxplots the estimates of $\mu$ (left) and $\sigma$ (right) for clean data, i.e.\ when the 1000 replicates are generated from the design introduced in Section~\ref{simsetting.section}, with $\gamma$ moderate and $\xi$ moderate. The first row of panels show that for both $\mu$ and $\sigma$ all the estimators (classical and robust) except RIPW are, as expected, unbiased. The bias of RIPW is due to the correction terms ($A$ and $B$ in \eqref{robustmu.ipw.eq}) which are in this setting badly approximated based on the assumption that $Z_{2i}$ is normally distributed. This is improved for RAIPW, because the use of the outcome model allows for a better approximation of the correction terms. Also as expected, (R)IPW is more variable than (R)AIPW.
\begin{figure}
\caption{Estimates of $\mu$ (left) and $\sigma$ (right) for the $\gamma$ moderate-$\xi$ moderate scenario for clean data and under the C-asym contamination. The vertical lines represent the true underlying values.}
\label{CleanContam2XiModerateGammaModerateFig}
\end{figure}
The second row of panels confirms some other well known properties of the (A)IPW estimators: the bias due to misspecification of the missingness mechanism for IPW, the double robustness property of AIPW (i.e.,\ unbiasedness if only one of the auxiliary models is misspecified) and the sensitivity of the OR estimator to the misspecification of the outcome regression model. Essentially, these properties are preserved for the robust versions introduced herein. These results are also summarised numerically in Table~\ref{CleanModerateModerateTable} (Appendix~\ref{simulationmoderatemoderate.section}), where bias, standard deviations and root mean squared error of the estimators are reported. The results for the other five combinations of parameters (\eqref{csi.value} - \eqref{gamma.value}) deliver a similar general message, with different magnitudes. The corresponding figures and tables supporting this claim are provided in Appendix~\ref{simulationothers.section}.
\subsection{Results under contamination} \label{simcontam.section} With the result above that expected behaviours are obtained with clean data, we study now the effect on estimation of deviations from the data generating mechanism of interest. To generate a contaminated sample, $5\%$ of the observed responses (i.e.\ data points for which $R_i=1$) issued from model~\eqref{sim.model} were randomly chosen and changed to the realization of: \begin{description} \item[C-asym] $U \sim {\mathcal U}(-20,-12)$, \item[C-sym] $W=BU-(1-B)U$, where $B$ is Bernoulli(probability=0.5), \item[C-hidden] $N \sim {\mathcal N}(-10,0.4)$. \end{description} For the C-asym case, the range of the uniform distribution has been set such that it falls approximately outside the observed range of clean $Z_{2i}$. C-sym is the symmetric version of C-asym, and the C-hidden case is such that the contamination is not clearly visible when looking at the observed $Z_{2i}$ marginally. Figure~\ref{ContamSchemesFig} displays a realization of each scheme for the scenario with $\gamma$ moderate and $\xi$ moderate: the values of $Z_{2i}$ are plotted against $E(Z_{2i} \mid Z_{1i})$, the linear predictor of the outcome model~\eqref{sim.model}, with a histogram of the marginal distribution of $Z_{2i}$.
\begin{center} \begin{figure}
\caption{One realization of size 1000 for the three contamination schemes considered: black circles denote observed outcomes ($R_i=1$) and open circles unobserved outcomes ($R_i=0$). The histograms are over the observed outcomes $Z_{2i}$.}
\label{ContamSchemesFig}
\end{figure} \end{center}
We present the results for the $\gamma$ moderate and $\xi$ moderate design and contamination C-asym in the bottom half of Figure~\ref{CleanContam2XiModerateGammaModerateFig}. The results for the other $\gamma$-$\xi$ combinations are given in Appendix~\ref{simulationothers.section}. The third row of panels of Figure~\ref{CleanContam2XiModerateGammaModerateFig} displays the results for the correctly specified models. We can see that for $\mu$ all the classical methods suffer a negative bias (underestimation) due to the presence of contamination, and these biases are of similar magnitude. For $\sigma$, the biases of the classical methods are positive (overestimation), with OR estimators being even more affected than (A)IPW estimators. On the other hand, RAIPW and ROR perform well, producing estimates on target with the true underlying values. A slight negative bias remains for RAIPW(X,XV) for $\sigma$. However, the size of this bias is negligible compared to the bias induced by contamination on the classical estimators.
The fourth row of panels of Figure~\ref{CleanContam2XiModerateGammaModerateFig} shows the effects of both misspecification and contamination. We observe that the bias due to contamination is more severe than bias due to misspecification in the setting simulated (compare with the second row of the same figure, i.e. clean data).
Root mean squared errors (RMSE) are given in Table \ref{Contam2ModerateModerateTable}, yielding further insights. In particular, while AIPW and OR with correct model specification were comparable in terms of empirical RMSE in the clean data designs (see RMSE tables in Appendix~\ref{simulationmoderatemoderate.section}), we observe that ROR outperforms RAIPW in this contamination case, particularly so when estimating $\sigma$ (upper half of Table \ref{Contam2ModerateModerateTable}). In fact we see that ROR has both lower bias and variance in this setting. When model misspecification occurs (lower half of Table \ref{Contam2ModerateModerateTable}), ROR also outperforms RAIPW if the latter misspecifies both auxiliary models, otherwise RAIPW has lowest RMSE for estimation of $\mu$ when one of the model used is correct.
Results for the other parameter combinations under C-asym contamination carry similar messages, with different magnitudes; see figures and tables in Appendix~\ref{simulationothers.section}.
\begin{table}
\caption{\label{Contam2ModerateModerateTable} Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ moderate-$\xi$ moderate scenario under C-asym contamination.} \centering
\begin{tabular}{lrrrrrr}\hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\
\hline IPW(X) & -8.835 & 1.506 & 8.962 & 15.903 & 1.511 & 15.974 \\
AIPW(X,X) & -8.864 & 1.266 & 8.954 & 15.941 & 1.308 & 15.994 \\
AIPW(X,XV) & -8.858 & 1.254 & 8.946 & 15.947 & 1.297 & 15.999 \\
OR(X) & -8.873 & 1.292 & 8.967 & 17.684 & 1.023 & 17.714 \\
OR(XV) & -8.866 & 1.278 & 8.958 & 17.774 & 1.008 & 17.803 \\
RIPW(X) & 3.583 & 1.702 & 3.966 & -2.370 & 2.051 & 3.134 \\
RAIPW(X,X) & 0.182 & 1.477 & 1.487 & -1.233 & 1.392 & 1.859 \\
RAIPW(X,XV) & 0.165 & 1.452 & 1.460 & -1.207 & 1.357 & 1.815 \\
ROR(X) & 0.017 & 1.298 & 1.297 & 0.295 & 1.021 & 1.062 \\
ROR(XV) & 0.026 & 1.274 & 1.274 & 0.194 & 0.980 & 0.999 \\
\hline
IPW($X_{\_}$) & -7.618 & 1.443 & 7.754 & 15.815 & 1.451 & 15.881 \\
AIPW($X_{\_}, XV$) & -8.865 & 1.261 & 8.954 & 16.153 & 1.292 & 16.204 \\
AIPW($X,X_{\_}V$) & -8.845 & 1.261 & 8.934 & 15.939 & 1.297 & 15.991 \\
AIPW($X_{\_},X_{\_}V$) & -8.088 & 1.247 & 8.184 & 15.947 & 1.283 & 15.999 \\
OR($X_{\_}V$) & -8.091 & 1.264 & 8.189 & 17.559 & 1.003 & 17.588 \\
RIPW($X_{\_}$) & 4.834 & 1.656 & 5.109 & -2.911 & 1.898 & 3.474 \\
RAIPW($X_{\_}, XV$) & 0.113 & 1.447 & 1.451 & -1.193 & 1.330 & 1.786 \\
RAIPW($X,X_{\_}V$) & 0.244 & 1.480 & 1.499 & -1.256 & 1.459 & 1.924 \\
RAIPW($X_{\_},X_{\_}V$) & 1.034 & 1.452 & 1.782 & -1.616 & 1.386 & 2.128 \\
ROR($X_{\_}V$) & 0.853 & 1.291 & 1.547 & 0.041 & 1.040 & 1.041 \\ \hline \end{tabular} \end{table}
\begin{figure}
\caption{Estimates of $\mu$ (left) and $\sigma$ (right) for the $\gamma$ moderate-$\xi$ moderate scenario under the C-asym and C-hidden contamination. The vertical lines represent the true underlying values.}
\label{Contam3ContamXiModerateGammaModerateFig}
\end{figure}
Results for the C-sym contamination are summarized in the top half of Figure~\ref{Contam3ContamXiModerateGammaModerateFig}, which shows the same patterns as C-asym in Figure~\ref{CleanContam2XiModerateGammaModerateFig}, with the major difference that the biases of RAIPW(X,X) and RAIPW(X,XV) in the estimation of $\sigma$ have now disappeared. Finally, the C-hidden configuration, whose results are summarized in the bottom half of Figure~\ref{Contam3ContamXiModerateGammaModerateFig}, confirms the expectation that this contamination scheme is most challenging. Here RAIPW is clearly biased for both $\mu$ and $\sigma$, and ROR behaves best both in terms of bias and efficiency. This is due to the fact that ROR only considers the conditional distribution of $Z_{2i}$ given $Z_{1i}$, through the outcome regression model, where the contamination is most visible, while RAIPW considers this conditional distribution but also the marginal of $Z_{2i}$ where the contamination is hidden. RMSE tables provided in Appendix~\ref{simulationmoderatemoderate.section} confirm the visual impression of the figures.
\section{Application: BMI change}\label{bmi.sec}
\begin{figure}
\caption{BMI change after ten years versus BMI measured at 40 years of age for 5553 men living in the county of V\"asterbotten in Sweden and born 1950--58. In red, baseline BMI observed for the 2002 men not returning at follow up.}
\label{BMI.fig}
\end{figure}
We illustrate the methods presented in this paper with a population based 10 year follow up study of body mass index (kg/m$^2$, BMI).
The analysis is performed on data from the Ume\aa\ SIMSAM Lab database
\citep{simsam:2016}, which makes available record linkage information from
several population based registers. In particular the database includes BMI
data from an intervention program where all individuals living in the county of
V\"asterbotten in Sweden and turning 40 and 50 years of age are called for
a health examination. Thanks to the Swedish individual identification number, this
collected health data can be record linked to population wide health and
administrative registers, which allows us to retrieve useful auxiliary
information on individuals hospitalisation and socio-economy adding to self
reported variables available from the intervention program.
We consider men born between 1950 and 1958 and observe their BMI when they turn 40 years of age as well as at a 10 year follow up. Figure \ref{BMI.fig} displays a scatter plot of BMI change versus BMI at baseline for the 5553 men who came back at the follow up examination when they turned 50 out of the 7555 that were measured at baseline (40 years of age). Extreme BMI values are observed (both at baseline and changes at follow up), giving an interesting case to illustrate the robust estimators introduced herein.
The set of baseline covariates used in the auxiliary models fitted are: (from the health examinations) measured BMI, self reported health, and tobacco use; (from Statistics Sweden registers) education level, number of children under 3 years of age, log earnings, parental benefits, sick leave benefits, unemployment benefits, urban living; (from the hospitalisation register) annual hospitalisation days. These variables are used to explain dropout ($R_i$) using a logistic regression and BMI change ($Z_2$) using linear regression. Estimation of these auxiliary models is performed using maximum likelihood or robust regression methods depending on the estimators used. More details on baseline covariates and results on the estimation of the auxiliary models are provided and discussed in Appendix~\ref{applicationsupp.section}.
\begin{table}
\caption{\label{results.tab} Estimates and their standard errors (s.e.) for $\mu$ and $\sigma$ using the different estimators defined in the article, and where ``CC'' stands for complete case sample moments.} \centering
\begin{tabular}{cccccccc}\hline
& CC & IPW & AIPW & OR & RIPW & RAIPW & ROR \\ \hline
$\hat\mu$ & 1.43 & 1.36 & 1.38 & 1.41 & 1.34 & 1.34 & 1.34 \\
s.e. & 0.039 & 0.073 & 0.062 & 0.044 & 0.024 & 0.024 & 0.026 \\
$\hat\sigma$ & 2.87 & 3.67 & 3.64 & 2.88 & 1.56 & 1.58 & 1.66 \\
s.e. & 1.524 & 0.700 & 0.665 & 0.264 & 0.025 & 0.025 & 0.024 \\ \hline
\end{tabular} \end{table}
Table \ref{results.tab} displays estimated mean ($\hat\mu$) and standard deviation ($\hat\sigma$) in BMI change. The estimators used are the naive complete case (CC) sample mean and standard deviation (i.e., not taking into account selective dropout and outlier contamination);
IPW, OR, and AIPW taking into account selective dropout but not outlier contamination; and RIPW, ROR and RAIPW, taking into account both selective dropout and contamination.
\begin{figure}
\caption{Tukey's weights versus inverse of fitted propensity scores and compound weights (product of Tukey's weights and inverse propensity scores) used in RIPW.}
\label{BMIweights.fig}
\end{figure}
All three introduced robust versions (RIPW, RAIPW, ROR) give similar estimates of the mean and variance of BMI change. Robust estimation seems to be most relevant here for the scale parameter $\sigma$, where the robust versions are two BMI units smaller than the non-robust versions. This also has consequences for the estimation of standard errors for $\hat\mu$, with IPW, OR and AIPW having standard errors 50\% larger than their robust counterparts. In summary, while robust estimation does not yield notably different results in mean BMI change, the results indicate a clear overestimation of the variability in BMI change if the non-robust estimators are used. Furthermore, correcting for selective dropout without taking into account contamination yields even larger overestimation of the variability of BMI change than using the naive estimator.
Taking into account selective dropout and contamination can both be seen as weighting schemes as described in Remark \ref{weight.rem}. Thus, the propensity score weighting and the $\psi$ Tukey weighting, as well as the compound weights, are plotted in Figure \ref{BMIweights.fig} against each other. This plot highlights which individuals are downweighted due to outlying BMI change and overweighted due to selective dropout.
We observe that one observation has a very high inverse propensity score and zero Tukey weight. This observation corresponds to the outlying individual with BMI at baseline close to 100 and a large BMI decrease (see Figure \ref{BMI.fig}). Thus, while IPW and AIPW give it a large weight because it lies in a region with high probability of missingness, its outlying nature is noticed by the robust estimators, which discard it from estimation. Its large weight in IPW and AIPW estimation is a contributing factor to their seeming overestimation of $\sigma$ noticed above.
\section{Discussion} In this paper we have studied semiparametric inference when outcome data is missing at random (ignorable given observed covariates) and the observed data is possibly contaminated by a nuisance process. We have proposed estimators which have bounded bias for an arbitrary large contamination. Many alternative estimators have been proposed in the literature \cite[for a review]{RotnitzkyStijn:15}. In order to obtain bounded influence function versions of those, the approach presented here can be followed. In particular, an interesting family of AIPW estimators are those which are bounded in the sense that they cannot produce an estimate outside the range of the observed outcomes \citep{Tan:2010,Grub:vand:2010}, in order to avoid the inverse probability weighting to have too drastic consequences. Such estimators are still not robust to contamination (unbounded influence function) and may therefore be robustified as proposed herein. Moreover, while we have focused on the location-scale parameter of the marginal distribution of the outcome of interest, the framework can readily be extended to other parameters, e.g., parameters of the conditional distribution of the outcome given some covariates, and to causal parameters defined using the potential outcome framework \citep{RR:74}.
Finally, an interesting aspect of our results is that the auxiliary outcome regression model is not only useful to improve efficiency over IPW estimation but it is also useful for the robustness properties of the RAIPW and ROR estimators. This is the case in two ways, first because it allows us to relax assumptions (otherwise commonly made in robust estimation of location and scale) on the marginal distribution of the outcome, and second because it allows us to deal with contaminations which may be hidden when looking at the marginal distribution of the outcome, but become apparent in the conditional distribution of the outcome given the covariates.
\begin{thebibliography}{}
\bibitem[\protect\citeauthoryear{Beaumont, Haziza, and Ruiz-Gazen}{Beaumont
et~al.}{2013}]{Beaumont:2013} Beaumont, J.-F., Haziza, D., and Ruiz-Gazen, A. (2013). \newblock A unified approach to robust estimation in finite population
sampling. \newblock {\em Biometrika\/},~{\em 100\/},(3), 555--569.
\bibitem[\protect\citeauthoryear{Cantoni and Ronchetti}{Cantoni and
Ronchetti}{2001}]{Cant:Ronc:2001} Cantoni, E. and Ronchetti, E. (2001). \newblock Robust inference for generalized linear models. \newblock {\em Journal of the American Statistical Association\/},~{\em
96\/},(455), 1022--1030.
\bibitem[\protect\citeauthoryear{Gruber and van~der Laan}{Gruber and van~der
Laan}{2010}]{Grub:vand:2010} Gruber, S. and van~der Laan, M.~J. (2010). \newblock An application of collaborative targeted maximum likelihood
estimation in causal inference and genomics. \newblock {\em The International Journal of Biostatistics\/},~{\em 6\/},(1).
\bibitem[\protect\citeauthoryear{Hampel}{Hampel}{1974}]{Hamp:1974} Hampel, F.~R. (1974). \newblock {The} influence curve and its role in robust estimation. \newblock {\em Journal of the American Statistical Association\/},~{\em
69\/},(346), 383--393.
\bibitem[\protect\citeauthoryear{Hampel, Ronchetti, Rousseeuw, and
Stahel}{Hampel et~al.}{1986}]{Hamp:Ronc:Rous:Stah:1986} Hampel, F.~R., Ronchetti, E.~M., Rousseeuw, P.~J., and Stahel, W.~A. (1986). \newblock {\em {Robust} Statistics: {The} Approach Based on Influence
Functions}. \newblock New York: Wiley.
\bibitem[\protect\citeauthoryear{Heritier, Cantoni, Copt, and
Victoria-Feser}{Heritier et~al.}{2009}]{Heri:Cant:Copt:Vict:2009} Heritier, S., Cantoni, E., Copt, S., and Victoria-Feser, M.-P. (2009). \newblock {\em Robust Methods in Biostatistics}. \newblock Wiley-Interscience.
\bibitem[\protect\citeauthoryear{Horvitz and Thompson}{Horvitz and
Thompson}{1952}]{Hor:Thom:1952} Horvitz, D.~G. and Thompson, D.~J. (1952). \newblock A generalization of sampling without replacement from a finite
universe. \newblock {\em Journal of the American Statistical Association\/},~{\em
47\/},(260), 663--685.
\bibitem[\protect\citeauthoryear{Huber}{Huber}{1964}]{Huber:1964} Huber, P. (1964). \newblock Robust estimation of a location parameter. \newblock {\em Annals of Mathematical Statistics\/},~{\em 35\/},(1),
73--101.
\bibitem[\protect\citeauthoryear{Hulliger}{Hulliger}{1995}]{Hulliger:1995} Hulliger, B. (1995). \newblock Outlier robust {H}orvitz-{T}hompson estimators. \newblock {\em Survey Methodology\/},~{\em 21\/},(1), 79--87.
\bibitem[\protect\citeauthoryear{Kang and Schafer}{Kang and
Schafer}{2007}]{kang2007} Kang, J. D.~Y. and Schafer, J.~L. (2007). \newblock Demystifying double robustness: A comparison of alternative
strategies for estimating a population mean from incomplete data. \newblock {\em Statistical Science\/},~{\em 22\/},(4), 523--539.
\bibitem[\protect\citeauthoryear{Lindgren, Nilsson, de~Luna, and
Ivarsson}{Lindgren et~al.}{2016}]{simsam:2016} Lindgren, U., Nilsson, K., de~Luna, X., and Ivarsson, A. (2016). \newblock Data resource profile: Swedish microdata research from childhood into
lifelong health and welfare ({U}me\aa\ {SIMSAM} {L}ab). \newblock {\em International Journal of Epidemiology\/},~{\em 45\/},(4),
1075--1075.
\bibitem[\protect\citeauthoryear{Lunceford and Davidian}{Lunceford and
Davidian}{2004}]{Lunceford:Davidian:2004} Lunceford, J.~K. and Davidian, M. (2004). \newblock Stratification and weighting via the propensity score in estimation
of causal treatment effects: a comparative study. \newblock {\em Statistics in medicine\/},~{\em 23\/},(19), 2937--2960.
\bibitem[\protect\citeauthoryear{Maronna, Martin, and Yohai}{Maronna
et~al.}{2006}]{Maro:Mart:Yoha:2006} Maronna, R., Martin, R., and Yohai, V. (2006). \newblock {\em {Robust statistics}}. \newblock Wiley New York.
\bibitem[\protect\citeauthoryear{Newey and McFadden}{Newey and
McFadden}{1994}]{newey:mcfadden:94} Newey, W.~K. and McFadden, D.~L. (1994). \newblock Large sample estimation and hypothesis testing. \newblock In R.~F. Engle and D.~L. McFadden (Eds.), {\em Handbook of
Econometrics, Volume IV}, Chapter~36, pp.\ 2111--2245. Amsterdam: Elsevier
Science.
\bibitem[\protect\citeauthoryear{Robins, Rotnitzky, and Zhao}{Robins
et~al.}{1994}]{robins1994estimation} Robins, J.~M., Rotnitzky, A., and Zhao, L.~P. (1994). \newblock Estimation of regression coefficients when some regressors are not
always observed. \newblock {\em Journal of the American Statistical Association\/},~{\em
89\/},(427), 846--866.
\bibitem[\protect\citeauthoryear{Rotnitzky and Vansteelandt}{Rotnitzky and
Vansteelandt}{2015}]{RotnitzkyStijn:15} Rotnitzky, A. and Vansteelandt, S. (2015). \newblock Double-robust methods. \newblock In G.~Molenberghs, G.~Fitzmaurice, M.~G. Kenward, A.~Tsiatis, and
G.~Verbeke (Eds.), {\em Handbook of Missing Data Methodology}, Chapter~9,
pp.\ 185--212. London: Chapman and Hall/CRC.
\bibitem[\protect\citeauthoryear{Rubin}{Rubin}{1974}]{RR:74} Rubin, D.~B. (1974). \newblock Estimating causal effects of treatments in randomized and
nonrandomized studies. \newblock {\em Journal of Educational Psychology\/},~{\em 66\/},(5), 688--701.
\bibitem[\protect\citeauthoryear{Stefanski and Boos}{Stefanski and
Boos}{2002}]{Stef:Boos:2002} Stefanski, L.~A. and Boos, D.~D. (2002). \newblock The calculus of {M}-estimation. \newblock {\em The American Statistician\/},~{\em 56\/},(1), 29--38.
\bibitem[\protect\citeauthoryear{Tan}{Tan}{2007}]{Tan:2007} Tan, Z. (2007). \newblock Comment: Understanding {OR}, {PS} and {DR}. \newblock {\em Statistical Science\/},~{\em 22\/},(4), 560--568.
\bibitem[\protect\citeauthoryear{Tan}{Tan}{2010}]{Tan:2010} Tan, Z. (2010). \newblock Bounded, efficient and doubly robust estimation with inverse
weighting. \newblock {\em Biometrika\/},~{\em 97\/},(3), 661--682.
\bibitem[\protect\citeauthoryear{Tsiatis}{Tsiatis}{2006}]{tsiatis2006} Tsiatis, A. (2006). \newblock {\em Semiparametric theory and missing data}. \newblock Springer Science \& Business Media.
\bibitem[\protect\citeauthoryear{Zhelonkin, Genton, and Ronchetti}{Zhelonkin
et~al.}{2012}]{Zhel:Gent:Ronc:2012} Zhelonkin, M., Genton, M.~G., and Ronchetti, E. (2012). \newblock On the robustness of two-stage estimators. \newblock {\em Statistics \& Probability Letters\/},~{\em 82\/},(4), 726--732.
\end{thebibliography}
\appendix
\section{Proposition~2} \label{proof.section} We give below proofs of consistency and asymptotic normality for the RAIPW estimator. Assumptions made in Section \ref{theory.sec} on the data generating mechanism hold throughout.
Let us use the simpler notation $\varphi_{i}(\mu,\sigma;\gamma,\xi)$ for \\ $\varphi_{RAIPW}(Z_{i},R_i;\beta,\gamma,\xi)$ given in (\ref{robusts.aipw.eq}),
where the dependence on the data is shown merely by the index $i$. For convenience, the estimator $(\hat\mu_{RAIPW},\hat\sigma_{RAIPW})'$ defined as solving (\ref{robusts.aipw.eq}) is instead (and equivalently when such a solution exists) defined as a minimum distance estimator (between the empirical moment and zero): \begin{equation} \label{min.problem} (\hat\mu_{RAIPW},\hat\sigma_{RAIPW})'=\arg\min_{\mu,\sigma} \hat Q(\mu,\sigma;\hat\gamma,\hat\xi), \end{equation} \noindent where $\hat Q(\mu,\sigma;\hat\gamma,\hat\xi)=n^{-2}(\sum_{i=1}^{n}\varphi_{i}(\mu,\sigma;\hat\gamma,\hat\xi))'\sum_{i=1}^{n}\varphi_{i}(\mu,\sigma;\hat\gamma,\hat\xi)$. This allows us to utilize some general asymptotic results given in \cite{newey:mcfadden:94} as specified below. Proposition \ref{raipw.prop} given earlier is a summary of the two following propositions (Proposition 2 (consistency) and Proposition 2 (asymptotic normality)).
\subsection{Consistency}
\noindent\textbf{Regularity conditions} \begin{itemize} \item [A.1)] i) $\plim_{n\rightarrow\infty}\hat\gamma=\gamma^*$, and ii) $\plim_{n\rightarrow\infty}\hat\xi=\xi^*$; \item [A.2)] \begin{itemize} \item[i)] $\pi(Z_{1i};\gamma)$ is differentiable with respect to $\gamma$ on the open interval with its derivative continuous on the closed interval between $\tilde\gamma$ and $\gamma$, where $\tilde\gamma\in[\gamma^*,\hat\gamma]$; \item[ii)] $h_1(Z_{1i};\beta,\xi)$ and $h_2(Z_{1i};\beta,\xi)$ are differentiable with respect to $\xi$ on the open interval with their derivatives continuous on the closed interval between $\tilde\xi$ and $\xi$, where $\tilde\xi\in[\xi^*,\hat\xi]$; \end{itemize} \item [A.3)] $\beta=(\mu,\sigma)'\in \cal B$ where $\cal B$ is compact, and the equality \begin{align*} &E \left\{\begin{array}{c}
\psi_{c_{\mu}}\left( \frac{Z_{2i}- \mu}{\sigma}\right)-A \\
\psi_{c_{\sigma}}^2\left( \frac{Z_{2i}- \mu}{\sigma} \right)- B \end{array}\right\}=0 \end{align*} holds only for $(\mu,\sigma)'=(\mu_0,\sigma_0)'\in\cal B$; \item[A.4)] $1\geq \pi(Z_{1i};\gamma^*)>\varepsilon$ for $\varepsilon>0$ with probability (wp) 1;
\item[A.5)] $\psi_c\big( \sigma^{-1} (Z_{2i}- \mu)\big)$ is continuous at each $(\mu,\sigma)'\in \cal B$ wp 1; \item[A.6)] $h_1(Z_{1i};\beta,\xi^*)$ and $h_2(Z_{1i};\beta,\xi^*)$ are continuous at each $\beta\in \cal B$ wp 1; \item [A.7)]
$$\hspace*{-1cm} E\big\{\sup_{(\mu,\sigma)'\in \cal B} \big( \big|
\psi_{c_{\mu}}\big( \sigma^{-1} (Z_{2i}- \mu)\big)\big|^2\big)\big\} +
E\big\{\sup_{(\mu,\sigma)'\in \cal B}\big(\big| \psi_{c_{\sigma}}^2\big( \sigma^{-1}
(Z_{2i}- \mu)\big)\big|^2\big)\big\} <\infty ;$$
\item[A.8)]
\hspace*{-1cm} $$E\big\{\sup_{(\mu,\sigma)'\in \cal B}\big(\big( h_1(Z_{1i};\beta,\xi^*)\big)^2 \big)\big\} + E\big\{\sup_{(\mu,\sigma)'\in \cal B}\big(\big( h_2(Z_{1i};\beta,\xi^*)\big)^2\big)\big\}<\infty.$$
\end{itemize}
Condition A.3) is an identification condition. Compactness of $\cal B$ may be considered restrictive and can be relaxed at the cost of other assumptions \citep{newey:mcfadden:94}. For Huber's $\psi$ function, compactness is, for instance, not necessary \citep{Huber:1964}, while for Tukey's $\psi$ function identification holds only locally.
\setcounter{proposition}{1} \begin{proposition}[consistency] Let either
$\pi(Z_{1i};\gamma)$ be correctly specified with $\gamma^*=\gamma_0$ and/or $h(Z_{1i};\beta,\xi)=(h_1(Z_{1i};\beta,\xi),h_2(Z_{1i};\beta,\xi))'$ be correctly specified with $\xi^*=\xi_0$.
Then, under regularity conditions A.1) to A.8) given above,
$$\plim_{n\rightarrow\infty} (\hat\mu_{RAIPW},\hat\sigma_{RAIPW})'=(\mu_0,\sigma_0)'.$$ \end{proposition} \begin{proof}
This consistency result holds if the general Theorem 2.1 in \cite{newey:mcfadden:94} can be applied. A central assumption of this theorem is the following uniform convergence $$\plim_{n\rightarrow\infty} \sup_{(\mu,\sigma)'\in \cal B}||\hat Q(\hat\gamma,\hat\xi)-Q_0(\gamma^*,\xi^*)||=0,$$ where we use the simplified notation $\hat Q(\hat\gamma,\hat\xi)$ for $\hat Q(\mu,\sigma;\hat\gamma,\hat\xi)$, and where
$Q_0(\gamma^*,\xi^*)=E(\varphi_{i}(\mu,\sigma;\gamma^*,\xi^*)'\varphi_{i}(\mu,\sigma;\gamma^*,\xi^*))$ and $||\cdot||$ denotes the Euclidean norm.
In order to show this uniform convergence result, consider the Taylor expansion (using Assumption A.2)) of $\hat Q(\mu,\sigma;\hat\gamma,\hat\xi)$ as a function of $(\hat\gamma,\hat\xi)$ around $(\gamma^*,\xi^*)$: \begin{align*} \hat Q(\hat\gamma,\hat\xi)=\hat Q(\gamma^*,\xi^*)+ (\hat\lambda-\tilde\lambda)'D(\tilde\lambda), \end{align*} where $D(\tilde\lambda)=\frac{\partial\hat Q}{\partial\lambda}(\tilde\lambda)$, $\lambda=(\gamma,\xi)'$, and $\tilde\lambda\in[\lambda^*,\hat\lambda]$. We can then write: \begin{align*} \hat Q(\hat\gamma,\hat\xi)-Q_0(\gamma^*,\xi^*)=\hat Q(\gamma^*,\xi^*)-Q_0(\gamma^*,\xi^*)+ (\hat\lambda-\tilde\lambda)'D(\tilde\lambda). \end{align*} Thus, we have, \begin{eqnarray*}
\lefteqn{\sup_{(\mu,\sigma)'\in \cal B}||\hat Q(\hat\gamma,\hat\xi)-Q_0(\gamma^*,\xi^*)||} \\
& \leq & \sup_{(\mu,\sigma)'\in \cal B}||\hat Q(\gamma^*,\xi^*)-Q_0(\gamma^*,\xi^*)||+
\sup_{(\mu,\sigma)'\in \cal B}||(\hat\lambda-\tilde\lambda)'D(\tilde\lambda)|| \\
& \leq & \sup_{(\mu,\sigma)'\in \cal B}||\hat Q(\gamma^*,\xi^*)-Q_0(\gamma^*,\xi^*)||+
\bar D\sup_{(\mu,\sigma)'\in \cal B}||(\hat\lambda-\tilde\lambda)'||, \end{eqnarray*}
where $\bar D<\infty$ is the supremum over ${(\mu,\sigma)' \in \cal B}$ of all the elements of the vector $ D(\tilde\lambda)$. By Assumption A.1), $\plim ||\hat\lambda-\tilde\lambda||=0$. It remains to show that
$\plim \sup_{(\mu,\sigma)'\in \cal B}||\hat Q(\gamma^*,\xi^*)-Q_0(\gamma^*,\xi^*)||=0$. This is a consequence of Theorem 2.6 in \cite{newey:mcfadden:94}, whose assumptions, referred to below as (NMi)--(NMiv), are now verified.
Assumption (NMi) says that $E(\varphi_{i}(\mu,\sigma;\gamma,\xi))=0$ should hold only for $(\mu,\sigma)'=(\mu_0,\sigma_0)'$. This is the case by Assumption A.3), and because either $\gamma^*=\gamma_0$ and/or $\xi^*=\xi_0$, so that either $E \left\{ \frac{R_i} {\pi(Z_{1i};\gamma^*)}\mid Z_{1i} \right\}=1$, when $\gamma^*=\gamma_0$, or $h(Z_{1i};\beta,\xi^*)=E(m_{\psi}(Z_{2i};\mu,\sigma)\mid Z_{1i})$, when $\xi^*=\xi_0$.
Assumption (NMii) holds by Assumption A.3).
Condition (NMiii) says that $\varphi_{i}(\mu,\sigma;\gamma^*,\xi^*)$ is continuous at each $(\mu,\sigma)'\in \cal B$ with probability one. This holds by Assumptions A.4), A.5) and A.6).
We now show that condition (NMiv) holds: \begin{align*} E&\left\{\sup_{(\mu,\sigma)'\in \cal B} \left(\left\Vert\varphi_i(\mu,\sigma;\gamma,\xi)\right\Vert\right) \right\}\\
&=E\left\{\sup_{(\mu,\sigma)'\in \cal B}\left(\left| \frac{R_i}{\pi(Z_{1i};\gamma^*)}\big( \psi_{c_{\mu}}\big( \sigma^{-1}
(Z_{2i}- \mu)\big)-A\big)- \frac{R_i-\pi(Z_{1i};\gamma^*)}{\pi(Z_{1i};\gamma^*)}h_1(Z_{1i};\beta,\xi^*) \right|^2 \right.\right.\\
&\ \ \ \ \ +\left.\left.\left| \frac{R_i}{\pi(Z_{1i};\gamma^*)}(\psi_{c_{\sigma}}^2\big( \sigma^{-1}
(Z_{2i}- \mu)\big)- B)- \frac{R_i-\pi(Z_{1i};\gamma^*)}{\pi(Z_{1i};\gamma^*)}h_2(Z_{1i};\beta,\xi^*) \right|^2\right)\right\} \\
&\leq 2E\left\{\frac{R_i}{\pi(Z_{1i};\gamma^*)^2}\left(\sup_{(\mu,\sigma)'\in \cal B}\left(\left|\big( \psi_{c_{\mu}}\big( \sigma^{-1}
(Z_{2i}- \mu)\big)-A\big)\right|^2\right) \right.\right. \\
&\ \ \ \ \ \left.\left. +\sup_{(\mu,\sigma)'\in \cal B}\left(\left|\big( \psi_{c_{\sigma}}^2\big( \sigma^{-1}
(Z_{2i}- \mu)\big)-B\big)\right|^2 \right)\right) \right\} \\ &\ \ \ \ \ + 2E\left\{ \left(\frac{R_i-\pi(Z_{1i};\gamma^*)}{\pi(Z_{1i};\gamma^*)}\right)^2 \left(\sup_{(\mu,\sigma)'\in \cal B}\left(\big( h_1(Z_{1i};\beta,\xi^*)\big)^2\right) \right. \right. \\ & \ \ \ \ \ + \left. \left. \sup_{(\mu,\sigma)'\in \cal B} \left(\big( h_2(Z_{1i};\beta,\xi^*)\big)^2 \right) \right) \right\} \\
&=2E\left\{\frac{\pi(Z_{1i};\gamma_0)}{\pi(Z_{1i};\gamma^*)^2}\right\}\left(E\left\{\sup_{(\mu,\sigma)'\in \cal B}\left(\left|\big( \psi_{c_{\mu}}\big( \sigma^{-1}
(Z_{2i}- \mu)\big)-A\big)\right|^2\right)\right\} \right. \\
&\ \ \ \ \ \left.+E\left\{\sup_{(\mu,\sigma)'\in \cal B}\left(\left|\big( \psi_{c_{\sigma}}^2\big( \sigma^{-1}
(Z_{2i}- \mu)\big)-B\big)\right|^2 \right) \right\} \right) \\ &\ \ \ \ \ + 2E\left\{\frac{(\pi(Z_{1i};\gamma_0)-\pi(Z_{1i};\gamma^*))^2}{\pi(Z_{1i};\gamma^*)^2}\right\}\left(E\left\{\sup_{(\mu,\sigma)'\in \cal B}\left(\big( h_1(Z_{1i};\beta,\xi^*)\big)^2\right)\right\} \right.\\ &\ \ \ \ \ \left.+ E\left\{\sup_{(\mu,\sigma)'\in \cal B} \left(\big( h_2(Z_{1i};\beta,\xi^*)\big)^2\right)\right\}\right) <\infty, \end{align*} where we have used $(a+b)^2\leq 2(a^2+b^2)$, $\sup (f+g)\leq \sup f + \sup g$ for $f,g$ positive, and that $Z_{2i}$ and $R_i$ are independent conditionally on $Z_{1i}$. The last inequality holds by Assumptions A.4), A.7) and A.8). Finally, noting that (NMi)--(NMiv) also ensure that the remaining conditions of Theorem 2.1 hold \citep[Theorem 2.6]{newey:mcfadden:94} completes the proof. \end{proof}
\subsection{Asymptotic normality} \noindent\textbf{Regularity conditions}
Let $\tilde\varphi_i(\theta)$ be the vector stacking $\varphi_i(\mu,\sigma)$, $m_\gamma(R_i,Z_{1i};\gamma)$ and $m_\xi(Z_i;\xi)$, with $\theta=(\mu,\sigma,\gamma',\xi')'$. \begin{itemize}
\item[A.9)]
$\theta_0\in \mbox{interior of }\cal T$, where $\theta_0=(\mu_0,\sigma_0,{\gamma^*}',{\xi^*}')'$ ;
\item[A.10)] $E\big\{\tilde\varphi_i(\theta_0)\big\}=0$ and $E\big\{||\tilde\varphi_i(\theta_0)||^2\big\}$ is finite;
\item[A.11)]
$\tilde\varphi_i(\theta)$ is continuously differentiable in a neighborhood ${\cal N}$ of $\theta_0$;
\item[A.12)]
$E\{\sup_{\theta\in {\cal N}} ||\nabla_{\theta}\tilde\varphi_i(\theta)||\}<\infty$;
\item[A.13)] $E\big\{\nabla_{\theta} \tilde\varphi_i(\theta)\big\}'E\big\{\nabla_{\theta} \tilde\varphi_i(\theta)\big\}$ is nonsingular.
\end{itemize}
\setcounter{proposition}{1} \begin{proposition}[asymptotic normality] Let $\plim (\hat\mu_{RAIPW},\hat\sigma_{RAIPW})'=(\mu_0,\sigma_0)'$ and A.1) hold. Then, under regularity conditions A.9)-A.13) given above, $(\hat\mu_{RAIPW},\newline \hat\sigma_{RAIPW})'$ has the following asymptotic multivariate normal distribution as $n\rightarrow\infty$ $$\sqrt n \Big( (\hat\mu_{RAIPW},\hat\sigma_{RAIPW})'-(\mu_0,\sigma_0)'\Big )\overset{d}\rightarrow N\big(0,E\big\{IF_{RAIPW}(IF_{RAIPW})'\}\big),$$ where $IF_{RAIPW}(Z_{i},R_i; \beta)$ is given by (\ref{ifaipw.eq}).
\end{proposition} \begin{proof} This result is obtained by a direct application of Theorem 6.1 in \cite{newey:mcfadden:94}. \end{proof} The latter result assumes that $\gamma$ and $\xi$ are estimated through M-estimators, and regularity conditions on the moment conditions $m_\gamma$ and $m_\xi$ are made. As pointed out in \citet[p.~2178]{newey:mcfadden:94}, the same result may be obtained for the more general situations where we have asymptotically linear estimators for $\gamma$ and $\xi$.
\section{Propositions 1 and 3}\label{prop1and3.sec}
Regularity conditions for Proposition \ref{ripw.prop} are as follows: for consistency, A.1)i), A.2)i), A.3), A.4), A.5) and A.7); for asymptotic normality, A.9)-A.13) where $\tilde\varphi_i(\theta)$ is the vector stacking $\varphi_i(\mu,\sigma)$ and $m_\gamma(R_i,Z_{1i};\gamma)$, and with $\theta=(\mu,\sigma,\gamma')'$.
Regularity conditions for Proposition \ref{ror.prop} are as follows: for consistency, A.1)ii), A.2)ii), A.3), A.6), and A.8); for asymptotic normality, A.9)-A.13) where $\tilde\varphi_i(\theta)$ is the vector stacking $\varphi_i(\mu,\sigma)$ and $m_\xi(Z_i;\xi)$, and with $\theta=(\mu,\sigma,\xi')'$.
Proofs are similar to the above and are omitted.
\section{Implementation details} \label{implementation.section}
We use the free software R for our implementation. To obtain RAIPW (given $\hat{\gamma}$ and $\hat{\xi}$), instead of solving the system of equations (\ref{robusts.aipw.eq}), we minimize \eqref{min.problem} numerically with the function \texttt{optim}. We proceed similarly for RIPW.
We use the set up of Example \ref{example.sls}, and the conditional expectations
$$E\Big( \psi_{c_{\mu}}\big(\sigma^{-1} (\tilde h(Z_{1i};\xi_1)+\xi_2\nu- \mu)\big) | Z_{1i} \Big) \mbox{ and } E\Big( \psi_{c_{\sigma}}^2\big(\sigma^{-1}
(\tilde h(Z_{1i};\xi_1)+\xi_2\nu- \mu)\big) | Z_{1i} \Big)$$ defining the working model for RAIPW are obtained by numerical integration (function \texttt{integrate}), and $A$ and $B$ by Monte Carlo simulations.
\subsection{Robust logistic regression}\label{roblogistic.sec}
The robust logistic regression estimator we consider to fit model~\eqref{estim.gamma} is the proposal by \cite{Cant:Ronc:2001} for generalized linear model (GLM).
In the case of a logistic model $$ \log\left(\frac{\pi(Z_{1i},\gamma)}{1-\pi(Z_{1i},\gamma)} \right) = Z_{1i}' \gamma $$ it solves \begin{equation} \label{robGLM.estim} \sum_{i=1}^n \Big[ \psi_c^L(r_i) w(Z_{1i}) \frac{1}{\sqrt{v_{\mu_i}}} \mu_i^{\prime} - a(\gamma) \Big] = 0, \end{equation}
where $\mu_i = E(R_i | Z_{1i}) = \pi(Z_{1i},\gamma)$, $Var(R_i | Z_{1i}) = v_{\mu_i} = \pi(Z_{1i},\gamma) (1-\pi(Z_{1i},\gamma))$, $r_i= \frac{(R_i-\mu_i)}{\sqrt{v_{\mu_i}}}$ and $\mu_i^{\prime} = \partial \mu_i / \partial \gamma$. The constant $$a(\gamma) = \frac{1}{n} \sum_{i=1}^n E[\psi_c^L(r_i)] w(Z_{1i}) / \sqrt{v_{\mu_i}} \ \mu_i^{\prime}$$ is a correction term to ensure Fisher consistency.
Estimator~\eqref{robGLM.estim} is implemented in the \texttt{R} function \texttt{glmrob} in package \texttt{robustbase}, where $\psi_c^L(r_i)$ is the Huber function. In our simulations and application we use the default settings, namely $c=1.35$ and equal weights $w(Z_{1i})=1$ on the design.
\subsection{Robust regression} \label{robreg.section}
For the robust fit of the auxiliary regression model, we consider $m_{\xi}(Z_i; \xi)$ in the estimating equations~\eqref{estim.xi} as the joint M-estimator of regression and scale \cite[Sec.~4.4.3]{Maro:Mart:Yoha:2006} defined by solving \begin{equation} \label{Mestim} \sum_{i=1}^n \left( \begin{array}{c} \xi_2 \ \psi_{c_1}^R\left( \frac{Z_{2i} - Z_{1i}^T \xi_{1}}{\xi_2} \right) Z_{1i} \\ \xi_2^2 \left( \psi_{c_2}^{HP2} \left( \frac{Z_{2i} - Z_{1i}^T \xi_{1}}{\xi_2} \right) \right)^2 - \xi_2^2 \ a(\xi) \end{array} \right)= 0. \end{equation}
In this work, we consider the Tukey function for $\psi_{c_1}^R$, because of its improved breakdown properties over the Huber version. In addition, the redescending nature of the Tukey function also protects against leverage points (outliers in the design space). The price to pay when using a redescending estimator is the fact that the resulting estimating equations admit more than one minimum. Careful implementation is therefore required, in particular regarding the starting point of the algorithm. The function $ \psi_{c_2}^{HP2}$ is the Huber function, and $a(\xi) = E \left( \left( \psi_{c_2}^{HP2} \left( \frac{Z_{2i}- Z_{1i}^T \xi_{1}}{\xi_2} \right) \right)^2 \right)$ is a consistency correction term. It is calibrated under the Gaussian assumption, in which case it equals $2 \Phi(c_2) -1 - 2 c_2 \phi(c_2) + 2 c_2^2(1-\Phi(c_2))$, where $\phi$ and $\Phi$ are the density and cumulative distribution function of a ${\mathcal N}(0,1)$ distribution, respectively. The default value for $c_2$ (aiming at $95\%$ efficiency for the estimator of the scale parameter) is $1.345$.
\subsection{Standard errors of $\hat{\beta}$} In what follows, we give the expression of the standard errors of $\hat{\beta}$ for the RAIPW defined by \eqref{robusts.aipw.eq}, based on $\hat{\gamma}$ and $\hat{\xi}$. The standard errors of the RIPW and ROR estimators can be derived by straightforward simplifications and are therefore omitted.
Let $\theta = (\beta^\prime,\gamma^\prime,\xi^\prime)^\prime$ be the vector of all the parameters. Our proposal jointly solves \begin{equation} \sum_{i=1}^n \Psi(Z_i, R_i, \theta) = \sum_{i=1}^n \left( \begin{array}{ccc}
\varphi_{RAIPW}(Z_i,R_i; \beta, \gamma, \xi) \\
m_\gamma (R_i,Z_{1i};\gamma)\\
R_i m_\xi (Z_{i};\xi) \end{array} \right) =0. \end{equation}
An alternative expression for the asymptotic variance \citep{Stef:Boos:2002} is given by $V_n(\theta) = A_n^{-1}(\theta) B_n(\theta) A_n^{-1}(\theta)$, where $$A_n(\theta) = - \frac{1}{n} \sum_{i=1}^n \frac{\partial \Psi(Z_i, R_i, \theta)}{\partial \theta} \ \ \mbox{and} \ \ B_n(\theta) = \frac{1}{n} \sum_{i=1}^n \Psi(Z_i, R_i, \theta) \Psi(Z_i, R_i, \theta)^\prime,$$ with \begin{equation} \label{An} A_n(\theta) = \left(
\begin{array}{c|cc} \frac{1}{n} \sum_{i=1}^n \frac{\partial \varphi_{RAIPW,i}}{\partial \beta} & \frac{1}{n} \sum_{i=1}^n\frac{\partial \varphi_{RAIPW,i}}{\partial \gamma} & \frac{1}{n} \sum_{i=1}^n \frac{\partial \varphi_{RAIPW,i}}{\partial \xi} \\ \hline 0_{p \times 2} & \frac{1}{n} \sum_{i=1}^n\frac{\partial m_{\gamma,i}}{\partial \gamma} & 0_{p \times dim(\xi)} \\ 0_{p \times 2} & 0_{p \times p} & \frac{1}{n} \sum_{i=1}^n R_i \frac{\partial m_{\xi,i}}{\partial \xi} \\ \end{array} \right) \end{equation}
\begin{eqnarray*} \lefteqn{B_n(\theta) =} \\ & & \left(
\begin{array}{c|cc}
\frac{1}{n} \sum_{i=1}^n \varphi_{RAIPW,i} \varphi_{RAIPW,i}^\prime & \frac{1}{n} \sum_{i=1}^n \varphi_{RAIPW,i} m_{\gamma,i}^\prime & \frac{1}{n} \sum_{i=1}^n R_i \varphi_{RAIPW,i} m_{\xi,i}^\prime \\ \hline
\frac{1}{n} \sum_{i=1}^n m_{\gamma,i} \varphi_{RAIPW,i}^\prime & \frac{1}{n} \sum_{i=1}^n m_{\gamma,i} m_{\gamma,i}^\prime & \frac{1}{n} \sum_{i=1}^n m_{\gamma,i} m_{\xi,i}^\prime \\
\frac{1}{n} \sum_{i=1}^n R_i m_{\xi,i} \varphi_{RAIPW,i}^\prime & \frac{1}{n} \sum_{i=1}^n R_i m_{\xi,i} m_{\gamma,i}^\prime & \frac{1}{n} \sum_{i=1}^n R_i m_{\xi,i} m_{\xi,i}^\prime \end{array} \right) \end{eqnarray*} where $\varphi_{RAIPW,i} = \varphi_{RAIPW}(Z_i,R_i; \beta, \gamma, \xi)$, $ m_{\gamma,i} = m_\gamma (R_i,Z_{1i};\gamma) $ and $m_{\xi,i} = m_\xi (Z_{i};\xi)$.
We estimate each matrix by plugging-in $\hat{\theta}$. We use this sandwich estimator (rather than the formula involving the influence function, see Proposition~\ref{raipw.prop}) as it has been shown to be more stable in finite samples.
\section{Simulation complements} We give in this Section some additional details pertaining to our simulation setting of Section~\ref{simsetting.section}.
\subsection{Tuning constant details for robust methods} \label{tuning.section}
To make them comparable, we have tuned the robust methods to have approximately $95\%$ efficiency at the correctly specified models for clean data across simulations. This amounts to \begin{description} \item[for RAIPW] choosing $c_{\mu}$ and $c_{\sigma}$ in Equation~\eqref{robusts.aipw.eq} (with $c= 4.685$ and $c_2 = 1.345$ in \eqref{Mestim}, and $c=1.345$ in \eqref{robGLM.estim} kept fixed). \item[for ROR] choosing separately two values of $c_1$ in Equation \eqref{Mestim} (with $c_2 = 1.345$) to produce two estimates $\hat{\xi}^{\mu}$ and $\hat{\xi}^{\sigma}$ of $\xi$, to be plugged into the solution of \eqref{imp.eq}. \end{description}
Direct tuning of $c_{\mu}$ and $c_{\sigma}$ in Equation~\eqref{robustmu.ipw.eq} for the robust IPW estimator is impaired by the large bias, compared to variance, observed for the estimator due to the difficulty in computing the Fisher consistency correction term. We have therefore taken the same values of $c_{\mu}$ and $c_{\sigma}$ as for the robust AIPW (with $c=1.345$ in \eqref{robGLM.estim} kept fixed). Table~\ref{tuningconstants.table} gives the values of the tuning constants obtained for the six considered designs and for each method.
\begin{table}
\caption{ \label{tuningconstants.table} Tuning constants used in the simulation study. The values of $\xi$ and
$\gamma$ under each scenario are given in \eqref{csi.value} and \eqref{gamma.value}. }
\centering
\begin{tabular}{lcccc}\hline
& \multicolumn{2}{c}{RAIPW} &
\multicolumn{2}{c}{ROR} \\
& $c_{\mu}$ & $c_{\sigma}$ & $c_{\mu}$ & $c_{\sigma}$ \\ \hline $\gamma$ strong, $\xi$ strong & 3.7 & 4.5 & 3.3 & 3.7 \\ $\gamma$ strong, $\xi$ moderate & 3.9 & 4.5 & 3.4 & 3.7 \\ $\gamma$ strong, $\xi$ no & 4 & 4.5 & 3.6 & 4.2\\ \hline $\gamma$ moderate, $\xi$ strong & 3.2 & 5.3 & 2.6 & 2.8 \\ $\gamma$ moderate, $\xi$ moderate & 3.9 & 5.4 & 3 & 3.1\\ $\gamma$ moderate, $\xi$ no & 4.2 & 5.3 & 3.4 & 3.6\\ \hline \end{tabular} \end{table}
\subsection{Computation of $\beta = (\mu,\sigma)'$} We need to deduce the values of $\beta = (\mu,\sigma)'$ for the designs simulated in Section~\ref{sim.section}. To ease the computation of $\mu$ and $\sigma^2$, we rearrange model~\eqref{sim.model} as follows: $$Z_{2i} = \xi_{10} + \tau^\prime \tilde{\xi} +\xi_{13}X_{3i}+ \xi_{16} V_{3i} + \epsilon_i ,$$ where $\tau = (X_{1}, V_{1}, X_{2}, V_{2})'$ and $\tilde{\xi} = (\xi_{11},\xi_{14},\xi_{12},\xi_{15} )'$.
The derivation proceeds by conditioning on $X_{3}$ and using the law of total expectation and variance. We have \begin{align*}
\mu & = \xi_{10} + E_{X_3}\left( E_{\tau|X_3}(\tau^\prime \tilde{\xi})\right) +
E_{X_3}(\xi_{13} X_{3i}) + E_{X_3}\left( E_{V_3 | X_3} \left( \xi_{16} V_{3i} \right) \right) \\
& = \xi_{10} + E_{X_3}\left(\tau_{X_3}^\prime \tilde{\xi} \right) + \xi_{13}
E_{X_3}(X_{3}) + \xi_{16} E_{X_3}\left( 0.75 X_{3i} + 0.25 (1-X_{3i})\right) \\
& = \xi_{10} + 0.2 \tau_1^\prime \tilde{\xi} + 0.8 \tau_0^\prime \tilde{\xi}
+ 0.2 (\xi_{13}+ 0.5 \xi_{16} )+ 0.25 \xi_{16}, \end{align*} and \begin{eqnarray*} \sigma^2 & = & Var\left( \tau^\prime \tilde{\xi} + \xi_{13} X_{3i} + \xi_{16} V_{3i} \right) + \xi_2^2 \\
& = & Var_{X_3}\left(E_{(\tau,V)|X_3}( \tau^\prime \tilde{\xi} + \xi_{13} X_{3i} + \xi_{16} V_{3i})\right) + \\
& & E_{X_3}\left( Var_{(\tau,V)|X_{3i}} \left( \tau^\prime \tilde{\xi} + \xi_{13} X_{3i} + \xi_{16} V_{3i}\right) \right) + \xi_2^2 \\ & =& Var_{X_3}\left( \tau_{X_3}^\prime \tilde{\xi} + \xi_{13} X_{3i} + 0.5 \xi_{16} X_{3i} \right) + \\
& & E_{X_3}\left( \tilde{\xi}^\prime \Sigma_{X_3} \tilde{\xi} + \xi_{16}^2 (0.5 X_{3i} + 0.25) (0.75 - 0.5 X_{3i} ) \right) + \xi_2^2 \\ & = & 0.8 \left( \tau_0^\prime \tilde{\xi} - E_{X_3}(\tau_{X_3}^\prime \tilde{\xi} + \xi_{13} X_{3i} + 0.5 \xi_{16} X_{3i} ) \right)^2 + \\ & & 0.2 \left( \tau_1^\prime \tilde{\xi} + \xi_{13} +0.5 \xi_{16} - E_{X_3}(\tau_{X_3}^\prime \tilde{\xi} + \xi_{13} X_{3i} + 0.5 \xi_{16} X_{3i} ) \right)^2 +\\ & & 0.8\left( \tilde{\xi}^\prime \Sigma_0 \tilde{\xi} + 0.25 \cdot 0.75 \xi_{16}^2 \right) + 0.2\left( \tilde{\xi}^\prime \Sigma_1 \tilde{\xi} + 0.25 \cdot 0.75 \xi_{16}^2 \right) + \xi_2^2 \\ & = & 0.8 \left( \tau_0^\prime \tilde{\xi} - 0.8 \tau_{0}^\prime \tilde{\xi} - 0.2(\tau_{1}^\prime \tilde{\xi} + \xi_{13} + 0.5 \xi_{16} ) \right)^2 + \\ & & 0.2 \left( \tau_1^\prime \tilde{\xi} + \xi_{13} +0.5 \xi_{16} - 0.8 \tau_{0}^\prime \tilde{\xi} - 0.2(\tau_{1}^\prime \tilde{\xi} + \xi_{13} + 0.5 \xi_{16} )\right)^2 +\\ & & 0.8\left( \tilde{\xi}^\prime \Sigma_0 \tilde{\xi} + 0.25\cdot 0.75 \xi_{16}^2 \right) + 0.2\left( \tilde{\xi}^\prime \Sigma_1 \tilde{\xi} + 0.25 \cdot 0.75 \xi_{16}^2 \right) + \xi_2^2. \end{eqnarray*}
\subsection{Supplementary results for the $\gamma$ moderate-$\xi$ moderate design} \label{simulationmoderatemoderate.section}
\begin{table} \caption{\label{CleanModerateModerateTable} Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ moderate-$\xi$ moderate scenario for clean data. }
\centering
\begin{tabular}{lrrrrrr}\hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\
\hline IPW(X) & -0.116 & 1.513 & 1.516 & -0.043 & 1.725 & 1.725 \\
AIPW(X,X) & -0.119 & 1.243 & 1.248 & -0.015 & 1.079 & 1.079 \\
AIPW(X,XV) & -0.113 & 1.217 & 1.222 & -0.020 & 1.014 & 1.014 \\
OR(X) & -0.119 & 1.238 & 1.243 & 0.006 & 1.024 & 1.023 \\
OR(XV) & -0.113 & 1.214 & 1.219 & 0.003 & 0.983 & 0.982 \\
RIPW(X) & 3.149 & 1.500 & 3.488 & -0.904 & 1.794 & 2.008 \\
RAIPW(X,X) & -0.118 & 1.284 & 1.289 & -0.021 & 1.111 & 1.111 \\
RAIPW(X,XV) & -0.103 & 1.258 & 1.262 & -0.027 & 1.058 & 1.058 \\
ROR(X) & -0.113 & 1.253 & 1.258 & -0.010 & 1.060 & 1.059 \\
ROR(XV) & -0.109 & 1.230 & 1.235 & -0.007 & 1.005 & 1.005 \\
\hline IPW($X_{\_}$) & 1.169 & 1.468 & 1.876 & -0.514 & 1.595 & 1.675 \\
AIPW($X_{\_}, XV$) & -0.114 & 1.217 & 1.221 & -0.016 & 1.001 & 1.001 \\
AIPW($X,X_{\_} V$) & -0.105 & 1.227 & 1.231 & -0.027 & 1.081 & 1.081 \\
AIPW($X_{\_},X_{\_}V$) & 0.692 & 1.232 & 1.412 & -0.334 & 1.053 & 1.105 \\
OR($X_{\_}$) & 0.692 & 1.230 & 1.411 & -0.299 & 1.020 & 1.063 \\
RIPW($X_{\_}$) & 4.381 & 1.510 & 4.634 & -1.389 & 1.725 & 2.214 \\
RAIPW($X_{\_}, XV$) & -0.102 & 1.258 & 1.262 & -0.026 & 1.053 & 1.053 \\
RAIPW($X,X_{\_}V$) & -0.084 & 1.286 & 1.289 & -0.056 & 1.137 & 1.137 \\
RAIPW($X_{\_},X_{\_}V$) & 0.698 & 1.292 & 1.468 & -0.354 & 1.123 & 1.177 \\
ROR($X_{\_}V$) & 0.681 & 1.268 & 1.439 & -0.290 & 1.055 & 1.094 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{\label{Contam3ModerateModerateTable} Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ moderate-$\xi$ moderate scenario under C-sym contamination. }
\centering
\begin{tabular}{lrrrrrr}\hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\
\hline IPW(X) & -0.806 & 2.153 & 2.298 & 13.808 & 1.659 & 13.907 \\
AIPW(X,X) & -0.880 & 2.011 & 2.194 & 13.902 & 1.390 & 13.972 \\
AIPW(X,XV) & -0.878 & 1.997 & 2.181 & 13.898 & 1.375 & 13.966 \\
OR(X) & -0.854 & 1.989 & 2.164 & 14.100 & 1.340 & 14.164 \\
OR(XV) & -0.859 & 1.975 & 2.153 & 14.181 & 1.327 & 14.243 \\
RIPW(X) & 3.567 & 1.619 & 3.917 & -1.209 & 2.191 & 2.501 \\
RAIPW(X,X) & 0.217 & 1.407 & 1.423 & -0.208 & 1.478 & 1.492 \\
RAIPW(X,XV) & 0.165 & 1.375 & 1.384 & -0.178 & 1.427 & 1.437 \\
ROR(X) & 0.032 & 1.266 & 1.265 & 0.239 & 1.017 & 1.044 \\
ROR(XV) & 0.008 & 1.253 & 1.253 & 0.188 & 0.975 & 0.993 \\
\hline
IPW($X_{\_}$) & 0.379 & 2.091 & 2.124 & 13.521 & 1.576 & 13.612 \\
AIPW($X_{\_}, XV$) & -0.872 & 1.983 & 2.166 & 13.900 & 1.354 & 13.966 \\
AIPW($X,X_{\_}V$) & -0.863 & 2.002 & 2.179 & 13.871 & 1.385 & 13.940 \\
AIPW($X_{\_},X_{\_}V$) & -0.113 & 1.985 & 1.987 & 13.678 & 1.358 & 13.745 \\
OR($X_{\_}$) & -0.103 & 1.976 & 1.978 & 13.954 & 1.329 & 14.017 \\
RIPW($X_{\_}$) & 4.766 & 1.592 & 5.025 & -1.732 & 2.047 & 2.681 \\
RAIPW($X_{\_}, XV$) & 0.111 & 1.368 & 1.371 & -0.161 & 1.403 & 1.412 \\
RAIPW($X,X_{\_}V$) & 0.225 & 1.385 & 1.402 & -0.236 & 1.469 & 1.487 \\
RAIPW($X_{\_},X_{\_}V$) & 0.989 & 1.374 & 1.692 & -0.577 & 1.442 & 1.553 \\
ROR($X_{\_}V$) & 0.799 & 1.274 & 1.504 & 0.023 & 1.015 & 1.014 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{\label{ContamModerateModerateTable} Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ moderate-$\xi$ moderate scenario under C-hidden contamination. }
\centering
\begin{tabular}{lrrrrrr}\hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\
\hline IPW & -5.799 & 1.461 & 5.980 & 7.004 & 1.292 & 7.122 \\
AIPW-X & -5.868 & 1.192 & 5.987 & 7.101 & 0.878 & 7.155 \\
AIPW-XV & -5.873 & 1.170 & 5.989 & 7.102 & 0.847 & 7.153 \\
OR-X & -5.864 & 1.204 & 5.986 & 8.390 & 0.817 & 8.430 \\
OR-XV & -5.875 & 1.184 & 5.993 & 8.451 & 0.798 & 8.489 \\
RIPW & 1.053 & 2.116 & 2.363 & 2.786 & 2.617 & 3.821 \\
RAIPW-X & -3.031 & 1.559 & 3.408 & 4.581 & 1.488 & 4.816 \\
RAIPW-XV & -3.089 & 1.519 & 3.442 & 4.615 & 1.440 & 4.835 \\
ROR-X & 0.025 & 1.268 & 1.267 & 0.254 & 1.023 & 1.053 \\
ROR-XV & 0.004 & 1.255 & 1.254 & 0.197 & 0.979 & 0.999 \\
\hline
IPW($X_{\_}$) & -4.617 & 1.405 & 4.826 & 6.808 & 1.185 & 6.910 \\
AIPW($X_{\_}, XV$) & -5.868 & 1.174 & 5.985 & 7.238 & 0.832 & 7.285 \\
AIPW($X,X_{\_}V$) & -5.858 & 1.177 & 5.975 & 7.073 & 0.860 & 7.126 \\
AIPW($X_{\_},X_{\_}V$) & -5.110 & 1.179 & 5.244 & 6.985 & 0.839 & 7.035 \\
OR($X_{\_}$) & -5.118 & 1.186 & 5.254 & 8.207 & 0.801 & 8.246 \\
RIPW($X_{\_}$) & 2.548 & 2.078 & 3.287 & 1.968 & 2.573 & 3.238 \\
RAIPW($X_{\_}, XV$) & -3.157 & 1.522 & 3.504 & 4.643 & 1.426 & 4.857 \\
RAIPW($X,X_{\_}V$) & -3.013 & 1.531 & 3.379 & 4.544 & 1.493 & 4.783 \\
RAIPW($X_{\_},X_{\_}V$) & -2.088 & 1.560 & 2.607 & 4.084 & 1.522 & 4.358 \\
ROR($X_{\_}V$) & 0.789 & 1.277 & 1.500 & 0.042 & 1.021 & 1.021 \\ \hline \end{tabular} \end{table}
\subsection{Results for the other $\gamma$--$\xi$ combinations} \label{simulationothers.section} \begin{center} \begin{figure}
\caption{Estimates of $\mu$ (left) and $\sigma$ (right) for the $\gamma$ strong-$\xi$ moderate scenario for clean data and under the C-asym contamination. The vertical lines represent the true underlying values.}
\label{CleanContam2XiModerateGammaStrongFig}
\end{figure} \end{center}
\begin{center} \begin{figure}
\caption{Estimates of $\mu$ (left) and $\sigma$ (right) for the $\gamma$ moderate-$\xi$ strong scenario for clean data and under the C-asym contamination. The vertical lines represent the true underlying values.}
\label{CleanContam2XiStrongGammaModerateFig}
\end{figure} \end{center}
\begin{center} \begin{figure}
\caption{Estimates of $\mu$ (left) and $\sigma$ (right) for the $\gamma$ strong-$\xi$ strong scenario for clean data and under the C-asym contamination. The vertical lines represent the true underlying values.}
\label{CleanContam2XiStrongGammaStrongFig}
\end{figure} \end{center}
\begin{center} \begin{figure}
\caption{Estimates of $\mu$ (left) and $\sigma$ (right) for the $\gamma$ moderate-$\xi$ no scenario for clean data and under the C-asym contamination. The vertical lines represent the true underlying values.}
\label{CleanContam2XiNoGammaModerateFig}
\end{figure} \end{center}
\begin{center} \begin{figure}
\caption{Estimates of $\mu$ (left) and $\sigma$ (right) for the $\gamma$ strong-$\xi$ no scenario for clean data and under the C-asym contamination. The vertical lines represent the true underlying values.}
\label{CleanContam2XiNoGammaStrongFig}
\end{figure} \end{center}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ strong-$\xi$ moderate scenario for clean data.} \centering
\begin{tabular}{lrrrrrr} \hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -0.045 & 3.245 & 3.243 & -0.354 & 3.540 & 3.556 \\
AIPW(X,X) & -0.101 & 1.315 & 1.318 & -0.052 & 1.514 & 1.514 \\
AIPW(X,XV) & -0.085 & 1.269 & 1.271 & -0.076 & 1.348 & 1.349 \\
OR(X) & -0.106 & 1.259 & 1.263 & 0.000 & 1.107 & 1.106 \\
OR(XV) & -0.097 & 1.227 & 1.230 & -0.007 & 1.048 & 1.047 \\
RIPW(X) & 2.856 & 3.179 & 4.272 & -1.170 & 3.870 & 4.041 \\
RAIPW(X,X) & -0.090 & 1.415 & 1.417 & -0.059 & 1.545 & 1.546 \\
RAIPW(X,XV) & -0.086 & 1.343 & 1.345 & -0.048 & 1.396 & 1.396 \\
ROR(X) & -0.104 & 1.271 & 1.274 & -0.013 & 1.141 & 1.140 \\
ROR(XV) & -0.092 & 1.235 & 1.238 & -0.014 & 1.069 & 1.069 \\
\hline
IPW($X_{\_}$) & 2.162 & 2.550 & 3.343 & -1.702 & 2.832 & 3.303 \\
AIPW($X_{\_}, XV$) & -0.094 & 1.249 & 1.252 & -0.049 & 1.216 & 1.216 \\
AIPW($X,X_{\_}V$) & -0.053 & 1.336 & 1.336 & -0.126 & 1.590 & 1.594 \\
AIPW($X_{\_},X_{\_}V$) & 1.296 & 1.277 & 1.819 & -1.020 & 1.359 & 1.699 \\
OR($X_{\_}$) & 1.280 & 1.239 & 1.781 & -0.924 & 1.103 & 1.439 \\
RIPW($X_{\_}$) & 5.081 & 2.636 & 5.723 & -2.942 & 3.209 & 4.352 \\
RAIPW($X_{\_}, XV$) & -0.088 & 1.315 & 1.317 & -0.041 & 1.296 & 1.296 \\
RAIPW($X,X_{\_}V$) & 0.009 & 1.461 & 1.460 & -0.145 & 1.762 & 1.767 \\
RAIPW($X_{\_},X_{\_}V$) & 1.305 & 1.361 & 1.885 & -1.091 & 1.478 & 1.836 \\
ROR($X_{\_}V$) & 1.274 & 1.267 & 1.796 & -0.921 & 1.141 & 1.465 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ moderate-$\xi$ strong scenario for clean data.} \centering
\begin{tabular}{lrrrrrr}\hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -0.141 & 1.930 & 1.934 & -0.041 & 2.179 & 2.178 \\
AIPW(X,X) & -0.145 & 1.596 & 1.602 & -0.007 & 1.401 & 1.400 \\
AIPW(X,XV) & -0.133 & 1.520 & 1.525 & -0.016 & 1.212 & 1.211 \\
OR(X) & -0.145 & 1.587 & 1.593 & 0.023 & 1.313 & 1.312 \\
OR(XV) & -0.134 & 1.518 & 1.523 & 0.003 & 1.188 & 1.187 \\
RIPW(X) & 6.032 & 1.938 & 6.336 & -1.411 & 2.234 & 2.641 \\
RAIPW(X,X) & -0.147 & 1.715 & 1.720 & -0.024 & 1.455 & 1.454 \\
RAIPW(X,XV) & -0.120 & 1.586 & 1.589 & -0.026 & 1.279 & 1.279 \\
ROR(X) & -0.169 & 1.635 & 1.643 & -0.021 & 1.392 & 1.392 \\
ROR(XV) & -0.128 & 1.538 & 1.543 & -0.013 & 1.215 & 1.215 \\
\hline
IPW($X_{\_}$) & 1.464 & 1.876 & 2.379 & -0.622 & 2.017 & 2.109 \\
AIPW($X_{\_}, XV$) & -0.135 & 1.520 & 1.525 & -0.012 & 1.201 & 1.200 \\
AIPW($X,X_{\_}V$) & -0.125 & 1.528 & 1.532 & -0.022 & 1.271 & 1.271 \\
AIPW($X_{\_},X_{\_}V$) & 0.672 & 1.530 & 1.670 & -0.331 & 1.247 & 1.289 \\
OR($X_{\_}$) & 0.672 & 1.528 & 1.669 & -0.299 & 1.219 & 1.255 \\
RIPW($X_{\_}$) & 7.509 & 1.976 & 7.764 & -1.993 & 2.161 & 2.939 \\
RAIPW($X_{\_}, XV$) & -0.118 & 1.587 & 1.590 & -0.025 & 1.273 & 1.273 \\
RAIPW($X,X_{\_}V$) & -0.101 & 1.625 & 1.627 & -0.060 & 1.352 & 1.352 \\
RAIPW($X_{\_},X_{\_}V$) & 0.670 & 1.628 & 1.760 & -0.353 & 1.337 & 1.382 \\
ROR($X_{\_}V$) & 0.658 & 1.581 & 1.712 & -0.293 & 1.263 & 1.296 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ strong-$\xi$ strong scenario for clean data.} \centering
\begin{tabular}{lrrrrrr}\hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -0.064 & 4.109 & 4.107 & -0.408 & 4.464 & 4.480 \\
AIPW(X,X) & -0.138 & 1.713 & 1.718 & -0.029 & 2.032 & 2.032 \\
AIPW(X,XV) & -0.105 & 1.561 & 1.564 & -0.069 & 1.495 & 1.496 \\
OR(X) & -0.136 & 1.624 & 1.629 & 0.024 & 1.425 & 1.424 \\
OR(XV) & -0.118 & 1.527 & 1.531 & -0.007 & 1.241 & 1.241 \\
RIPW(X) & 4.024 & 3.920 & 5.617 & -1.556 & 4.858 & 5.099 \\
RAIPW(X,X) & -0.106 & 1.894 & 1.896 & -0.099 & 2.085 & 2.086 \\
RAIPW(X,XV) & -0.112 & 1.639 & 1.642 & -0.043 & 1.589 & 1.588 \\
ROR(X) & -0.154 & 1.652 & 1.659 & -0.006 & 1.484 & 1.483 \\
ROR(XV) & -0.112 & 1.531 & 1.534 & -0.015 & 1.259 & 1.258 \\
\hline
IPW($X_{\_}$) & 2.695 & 3.233 & 4.208 & -2.076 & 3.559 & 4.119 \\
AIPW($X_{\_}, XV$) & -0.114 & 1.545 & 1.548 & -0.043 & 1.382 & 1.382 \\
AIPW($X,X_{\_}V$) & -0.074 & 1.616 & 1.617 & -0.117 & 1.717 & 1.720 \\
AIPW($X_{\_},X_{\_}V$) & 1.276 & 1.563 & 2.017 & -1.018 & 1.504 & 1.815 \\
OR($X_{\_}$) & 1.259 & 1.532 & 1.982 & -0.920 & 1.283 & 1.579 \\
RIPW($X_{\_}$) & 6.730 & 3.317 & 7.503 & -3.679 & 4.015 & 5.445 \\
RAIPW($X_{\_}, XV$) & -0.112 & 1.614 & 1.618 & -0.035 & 1.489 & 1.488 \\
RAIPW($X,X_{\_}V$) & -0.011 & 1.752 & 1.751 & -0.148 & 1.957 & 1.961 \\
RAIPW($X_{\_},X_{\_}V$) & 1.274 & 1.653 & 2.086 & -1.078 & 1.662 & 1.980 \\
ROR($X_{\_}V$) & 1.254 & 1.555 & 1.996 & -0.920 & 1.314 & 1.604 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ moderate-$\xi$ no scenario for clean data.} \centering
\begin{tabular}{lrrrrrr}\hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -0.090 & 1.143 & 1.146 & -0.046 & 1.295 & 1.295 \\
AIPW(X,X) & -0.092 & 0.947 & 0.951 & -0.026 & 0.837 & 0.837 \\
AIPW(X,XV) & -0.092 & 0.947 & 0.951 & -0.025 & 0.837 & 0.837 \\
OR(X) & -0.093 & 0.944 & 0.948 & -0.006 & 0.796 & 0.796 \\
OR(XV) & -0.093 & 0.944 & 0.948 & 0.004 & 0.797 & 0.797 \\
RIPW(X) & 1.867 & 1.138 & 2.187 & -0.603 & 1.356 & 1.483 \\
RAIPW(X,X) & -0.093 & 0.982 & 0.986 & -0.026 & 0.866 & 0.866 \\
RAIPW(X,XV) & -0.083 & 0.983 & 0.986 & -0.033 & 0.870 & 0.870 \\
ROR(X) & -0.091 & 0.958 & 0.962 & -0.013 & 0.815 & 0.814 \\
ROR(XV) & -0.090 & 0.957 & 0.961 & 0.003 & 0.814 & 0.813 \\
\hline
IPW($X_{\_}$) & 0.874 & 1.108 & 1.411 & -0.396 & 1.200 & 1.263 \\
AIPW($X_{\_}, XV$) & -0.094 & 0.946 & 0.950 & -0.021 & 0.822 & 0.822 \\
AIPW($X,X_{\_}V$) & -0.085 & 0.960 & 0.963 & -0.033 & 0.908 & 0.909 \\
AIPW($X_{\_},X_{\_}V$) & 0.712 & 0.969 & 1.202 & -0.327 & 0.878 & 0.936 \\
OR($X_{\_}$) & 0.713 & 0.967 & 1.201 & -0.289 & 0.839 & 0.888 \\
RIPW($X_{\_}$) & 2.802 & 1.138 & 3.024 & -0.967 & 1.304 & 1.623 \\
RAIPW($X_{\_}, XV$) & -0.082 & 0.983 & 0.986 & -0.033 & 0.866 & 0.866 \\
RAIPW($X,X_{\_}V$) & -0.064 & 1.010 & 1.012 & -0.054 & 0.952 & 0.953 \\
RAIPW($X_{\_},X_{\_}V$) & 0.720 & 1.022 & 1.249 & -0.347 & 0.939 & 1.001 \\
ROR($X_{\_}V$) & 0.704 & 0.997 & 1.220 & -0.274 & 0.864 & 0.906 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ strong-$\xi$ no scenario for clean data.} \centering
\begin{tabular}{lrrrrrr}\hline ($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -0.025 & 2.420 & 2.419 & -0.293 & 2.623 & 2.638 \\
AIPW(X,X) & -0.064 & 1.012 & 1.014 & -0.085 & 1.216 & 1.219 \\
AIPW(X,XV) & -0.065 & 1.013 & 1.014 & -0.084 & 1.215 & 1.217 \\
OR(X) & -0.077 & 0.961 & 0.964 & -0.018 & 0.873 & 0.873 \\
OR(XV) & -0.077 & 0.961 & 0.964 & -0.008 & 0.873 & 0.873 \\
RIPW(X) & 1.870 & 2.382 & 3.027 & -0.737 & 2.873 & 2.965 \\
RAIPW(X,X) & -0.067 & 1.077 & 1.079 & -0.055 & 1.206 & 1.207 \\
RAIPW(X,XV) & -0.062 & 1.080 & 1.081 & -0.058 & 1.200 & 1.201 \\
ROR(X) & -0.072 & 0.975 & 0.978 & -0.021 & 0.891 & 0.891 \\
ROR(XV) & -0.073 & 0.974 & 0.977 & -0.005 & 0.891 & 0.890 \\
\hline
IPW($X_{\_}$) & 1.630 & 1.909 & 2.51 & -1.289 & 2.115 & 2.476 \\
AIPW($X_{\_}, XV$) & -0.073 & 0.989 & 0.992 & -0.057 & 1.067 & 1.068 \\
AIPW($X,X_{\_}V$) & -0.033 & 1.097 & 1.097 & -0.135 & 1.461 & 1.467 \\
AIPW($X_{\_},X_{\_}V$) & 1.316 & 1.032 & 1.672 & -0.988 & 1.224 & 1.573 \\
OR($X_{\_}$) & 1.300 & 0.985 & 1.631 & -0.898 & 0.939 & 1.299 \\
RIPW($X_{\_}$) & 3.525 & 1.968 & 4.036 & -2.010 & 2.352 & 3.093 \\
RAIPW($X_{\_}, XV$) & -0.067 & 1.049 & 1.050 & -0.051 & 1.110 & 1.111 \\
RAIPW($X,X_{\_}V$) & 0.019 & 1.179 & 1.178 & -0.109 & 1.507 & 1.510 \\
RAIPW($X_{\_},X_{\_}V$) & 1.325 & 1.104 & 1.724 & -1.056 & 1.276 & 1.656 \\
ROR($X_{\_}V$) & 1.295 & 1.015 & 1.645 & -0.887 & 0.968 & 1.312 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ strong-$\xi$ moderate under C-sym contamination.} \centering
\begin{tabular}{lrrrrrr} \hline
($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -8.659 & 3.197 & 9.230 & 15.638 & 2.824 & 15.891 \\
AIPW(X,X) & -8.839 & 1.576 & 8.978 & 15.866 & 2.355 & 16.039 \\
AIPW(X,XV) & -8.831 & 1.551 & 8.966 & 15.864 & 2.331 & 16.035 \\
OR(X) & -8.848 & 1.470 & 8.969 & 18.357 & 1.244 & 18.399 \\
OR(XV) & -8.844 & 1.458 & 8.964 & 18.44 & 1.222 & 18.481 \\
RIPW(X) & 3.505 & 3.877 & 5.226 & -3.540 & 5.056 & 6.170 \\
RAIPW(X,X) & 0.326 & 2.077 & 2.101 & -2.011 & 3.159 & 3.744 \\
RAIPW(X,XV) & 0.315 & 2.095 & 2.117 & -2.050 & 3.503 & 4.058 \\
ROR(X) & 0.015 & 1.280 & 1.280 & 0.328 & 1.097 & 1.145 \\
ROR(XV) & 0.015 & 1.251 & 1.250 & 0.231 & 1.015 & 1.040 \\
\hline
IPW($X_{\_}$) & -6.586 & 2.466 & 7.032 & 15.127 & 2.494 & 15.331 \\
AIPW($X_{\_}, XV$) & -8.865 & 1.528 & 8.996 & 16.217 & 2.212 & 16.367 \\
AIPW($X,X_{\_}V$) & -8.806 & 1.617 & 8.953 & 15.846 & 2.317 & 16.014 \\
AIPW($X_{\_},X_{\_}V$) & -7.570 & 1.53 & 7.723 & 15.636 & 2.235 & 15.795 \\
OR($X_{\_}$) & -7.559 & 1.441 & 7.695 & 17.888 & 1.216 & 17.929 \\
RIPW($X_{\_}$) & 5.760 & 2.793 & 6.401 & -5.254 & 3.817 & 6.493 \\
RAIPW($X_{\_}, XV$) & 0.164 & 1.921 & 1.927 & -1.863 & 2.864 & 3.415 \\
RAIPW($X,X_{\_}V$) & 0.417 & 2.096 & 2.136 & -2.052 & 3.172 & 3.776 \\
RAIPW($X_{\_},X_{\_}V$) & 1.618 & 2.046 & 2.607 & -2.860 & 3.260 & 4.335 \\
ROR($X_{\_}V$) & 1.359 & 1.262 & 1.854 & -0.493 & 1.059 & 1.168 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ moderate-$\xi$ strong under C-sym contamination.} \centering
\begin{tabular}{lrrrrrr} \hline
($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -9.203 & 1.891 & 9.395 & 14.010 & 1.716 & 14.114 \\
AIPW(X,X) & -9.240 & 1.594 & 9.376 & 14.064 & 1.358 & 14.13 \\
AIPW(X,XV) & -9.226 & 1.544 & 9.354 & 14.077 & 1.299 & 14.137 \\
OR(X) & -9.247 & 1.619 & 9.388 & 15.988 & 1.184 & 16.032 \\
OR(XV) & -9.233 & 1.567 & 9.365 & 16.074 & 1.134 & 16.114 \\
RIPW(X) & 6.327 & 2.284 & 6.726 & -2.057 & 3.164 & 3.773 \\
RAIPW(X,X) & -0.233 & 2.023 & 2.035 & 0.451 & 2.245 & 2.289 \\
RAIPW(X,XV) & -0.359 & 1.899 & 1.932 & 0.574 & 2.079 & 2.156 \\
ROR(X) & 0.001 & 1.691 & 1.690 & 0.442 & 1.331 & 1.402 \\
ROR(XV) & 0.049 & 1.594 & 1.594 & 0.127 & 1.185 & 1.191 \\
\hline
IPW($X_{\_}$) & -7.678 & 1.819 & 7.890 & 13.834 & 1.626 & 13.929 \\
AIPW($X_{\_}, XV$) & -9.233 & 1.550 & 9.362 & 14.301 & 1.308 & 14.361 \\
AIPW($X,X_{\_}V$) & -9.213 & 1.548 & 9.342 & 14.069 & 1.300 & 14.129 \\
AIPW($X_{\_},X_{\_}V$) & -8.456 & 1.534 & 8.594 & 14.074 & 1.298 & 14.134 \\
OR($X_{\_}$) & -8.457 & 1.551 & 8.598 & 15.841 & 1.129 & 15.881 \\
RIPW($X_{\_}$) & 7.900 & 2.238 & 8.211 & -2.929 & 2.902 & 4.122 \\
RAIPW($X_{\_}, XV$) & -0.428 & 1.898 & 1.945 & 0.610 & 2.054 & 2.141 \\
RAIPW($X,X_{\_}V$) & -0.254 & 1.942 & 1.957 & 0.496 & 2.216 & 2.270 \\
RAIPW($X_{\_},X_{\_}V$) & 0.559 & 1.907 & 1.986 & 0.001 & 2.132 & 2.130 \\
ROR($X_{\_}V$) & 0.878 & 1.614 & 1.836 & -0.052 & 1.245 & 1.245 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ strong-$\xi$ strong under C-sym contamination.} \centering
\begin{tabular}{lrrrrrr}\hline
($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -9.009 & 3.966 & 9.842 & 13.714 & 3.312 & 14.108 \\
AIPW(X,X) & -9.236 & 1.859 & 9.421 & 14.043 & 2.359 & 14.240 \\
AIPW(X,XV) & -9.218 & 1.751 & 9.383 & 14.033 & 2.203 & 14.205 \\
OR(X) & -9.230 & 1.765 & 9.397 & 16.753 & 1.474 & 16.817 \\
OR(XV) & -9.226 & 1.704 & 9.382 & 16.829 & 1.382 & 16.886 \\
RIPW(X) & 4.446 & 5.061 & 6.734 & -3.718 & 6.399 & 7.398 \\
RAIPW(X,X) & 0.337 & 2.369 & 2.392 & -2.219 & 3.233 & 3.920 \\
RAIPW(X,XV) & 0.161 & 2.101 & 2.106 & -1.978 & 2.717 & 3.360 \\
ROR(X) & 0.007 & 1.663 & 1.663 & 0.494 & 1.447 & 1.528 \\
ROR(XV) & 0.022 & 1.561 & 1.560 & 0.177 & 1.202 & 1.215 \\
\hline
IPW($X_{\_}$) & -6.404 & 3.051 & 7.094 & 12.913 & 2.78 & 13.208 \\
AIPW($X_{\_}, XV$) & -9.250 & 1.742 & 9.413 & 14.407 & 2.127 & 14.563 \\
AIPW($X,X_{\_}V$) & -9.193 & 1.808 & 9.369 & 14.015 & 2.183 & 14.184 \\
AIPW($X_{\_},X_{\_}V$) & -7.955 & 1.735 & 8.141 & 13.766 & 2.150 & 13.932 \\
OR($X_{\_}$) & -7.940 & 1.678 & 8.115 & 16.224 & 1.373 & 16.282 \\
RIPW($X_{\_}$) & 7.559 & 3.654 & 8.395 & -6.501 & 4.926 & 8.155 \\
RAIPW($X_{\_}, XV$) & 0.050 & 2.018 & 2.018 & -1.921 & 2.374 & 3.053 \\
RAIPW($X,X_{\_}V$) & 0.280 & 2.292 & 2.308 & -2.051 & 3.222 & 3.818 \\
RAIPW($X_{\_},X_{\_}V$) & 1.637 & 1.942 & 2.539 & -3.196 & 2.310 & 3.943 \\
ROR($X_{\_}V$) & 1.366 & 1.564 & 2.076 & -0.574 & 1.233 & 1.360 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ moderate-$\xi$ no under C-sym contamination.} \centering
\begin{tabular}{lrrrrrr}\hline
($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -8.468 & 1.178 & 8.549 & 18.104 & 1.437 & 18.161 \\
AIPW(X,X) & -8.489 & 1.007 & 8.548 & 18.126 & 1.358 & 18.177 \\
AIPW(X,XV) & -8.489 & 1.013 & 8.550 & 18.129 & 1.357 & 18.179 \\
OR(X) & -8.499 & 1.029 & 8.561 & 19.621 & 0.938 & 19.643 \\
OR(XV) & -8.499 & 1.034 & 8.562 & 19.713 & 0.935 & 19.736 \\
RIPW(X) & 2.265 & 1.537 & 2.737 & -2.104 & 2.567 & 3.318 \\
RAIPW(X,X) & 0.187 & 1.098 & 1.113 & -1.274 & 0.954 & 1.591 \\
RAIPW(X,XV) & 0.192 & 1.105 & 1.121 & -1.276 & 0.956 & 1.594 \\
ROR(X) & 0.003 & 0.987 & 0.986 & 0.287 & 0.792 & 0.842 \\
ROR(XV) & 0.003 & 0.990 & 0.990 & 0.295 & 0.794 & 0.847 \\
\hline
IPW($X_{\_}$) & -7.559 & 1.125 & 7.642 & 18.094 & 1.391 & 18.148 \\
AIPW($X_{\_}, XV$) & -8.498 & 1.019 & 8.559 & 18.308 & 1.334 & 18.357 \\
AIPW($X,X_{\_}V$) & -8.476 & 1.022 & 8.538 & 18.121 & 1.356 & 18.171 \\
AIPW($X_{\_},X_{\_}V$) & -7.721 & 1.010 & 7.787 & 18.137 & 1.326 & 18.185 \\
OR($X_{\_}$) & -7.725 & 1.024 & 7.792 & 19.527 & 0.931 & 19.550 \\
RIPW($X_{\_}$) & 3.180 & 1.408 & 3.478 & -2.412 & 2.322 & 3.348 \\
RAIPW($X_{\_}, XV$) & 0.157 & 1.100 & 1.111 & -1.271 & 0.936 & 1.578 \\
RAIPW($X,X_{\_}V$) & 0.268 & 1.126 & 1.157 & -1.319 & 1.036 & 1.677 \\
RAIPW($X_{\_},X_{\_}V$) & 1.052 & 1.108 & 1.527 & -1.607 & 0.995 & 1.890 \\
ROR($X_{\_}V$) & 0.828 & 1.009 & 1.305 & 0.192 & 0.857 & 0.877 \\ \hline \end{tabular} \end{table}
\begin{table} \caption{Bias, standard deviation and root mean squared error (times 10) of the estimates across simulations for the $\gamma$ strong-$\xi$ no under C-sym contamination.} \centering
\begin{tabular}{lrrrrrr}\hline
($\times 10$) & bias($\hat{\mu}$) & sd($\hat{\mu})$ & $\sqrt{\mbox{mse}(\hat{\mu})}$ & bias($\hat{\sigma}$) & sd($\hat{\sigma})$ & $\sqrt{\mbox{mse}(\hat{\sigma})}$ \\ \hline IPW(X) & -8.295 & 2.476 & 8.656 & 17.833 & 2.532 & 18.012 \\
AIPW(X,X) & -8.427 & 1.396 & 8.542 & 17.967 & 2.432 & 18.131 \\
AIPW(X,XV) & -8.430 & 1.406 & 8.546 & 17.977 & 2.432 & 18.141 \\
OR(X) & -8.464 & 1.255 & 8.556 & 20.175 & 1.078 & 20.204 \\
OR(XV) & -8.461 & 1.265 & 8.555 & 20.261 & 1.082 & 20.290 \\
RIPW(X) & 2.102 & 3.607 & 4.173 & -2.039 & 6.092 & 6.421 \\
RAIPW(X,X) & -0.162 & 3.504 & 3.506 & -0.974 & 6.999 & 7.063 \\
RAIPW(X,XV) & -0.137 & 3.527 & 3.528 & -1.079 & 7.302 & 7.378 \\
ROR(X) & 0.010 & 0.977 & 0.977 & 0.311 & 0.839 & 0.894 \\
ROR(XV) & 0.011 & 0.977 & 0.976 & 0.318 & 0.840 & 0.898 \\
\hline
IPW($X_{\_}$) & -6.758 & 1.948 & 7.033 & 17.606 & 2.367 & 17.764 \\
AIPW($X_{\_}, XV$) & -8.469 & 1.370 & 8.580 & 18.303 & 2.298 & 18.446 \\
AIPW($X,X_{\_}V$) & -8.405 & 1.477 & 8.533 & 17.958 & 2.415 & 18.119 \\
AIPW($X_{\_},X_{\_}V$) & -7.177 & 1.384 & 7.309 & 17.819 & 2.316 & 17.968 \\
OR($X_{\_}$) & -7.177 & 1.262 & 7.287 & 19.791 & 1.079 & 19.821 \\
RIPW($X_{\_}$) & 3.716 & 3.021 & 4.788 & -3.129 & 5.575 & 6.391 \\
RAIPW($X_{\_}, XV$) & 0.108 & 2.945 & 2.946 & -1.724 & 6.497 & 6.719 \\
RAIPW($X,X_{\_}V$) & -0.031 & 3.472 & 3.471 & -1.088 & 7.157 & 7.236 \\
RAIPW($X_{\_},X_{\_}V$) & 1.321 & 2.923 & 3.206 & -1.913 & 5.474 & 5.796 \\
ROR($X_{\_}V$) & 1.354 & 0.999 & 1.683 & -0.332 & 0.893 & 0.953 \\ \hline \end{tabular} \end{table}
\section{Application: supplementary results} \label{applicationsupp.section}
Table~\ref{auxmodel.tab} describes the fitted auxiliary models explaining dropout ($R_i$) and BMI change (outcome $Z_{2i}$) with covariates: (from the health examinations) measured BMI (bbmi, kg/m$^2$), self reported health (srh, 1 if positive self reported health, zero otherwise) and tobacco use (tob, 1 if cigarettes and/or snus user, 0 otherwise); (from Statistics Sweden registers) education level (educ, 1 if more than 9 years education, 0 otherwise), number of children under 3 years of age (nrchild3), log annual earnings (logearn), annual parental benefits (parbenef), annual sick leave benefits (sickbenef), annual unemployment benefits (unempbenef), urban living (urban, 1 if urban living area, 0 otherwise); (from the hospitalisation register) hospitalisation days (no hospitalisation is reference, hosp13 for 1 to 3 days hospitalisation during baseline year, hosp4M for more than 3 days hospitalisation).
Both OLS/maximum likelihood (using a logit link for the binary indicator $R_i$) and robust estimators are used; see Sections~\ref{roblogistic.sec} and \ref{robreg.section}. The logistic regression fit and its robust GLM version give similar results. On the other hand, there are clear differences between the OLS fit and its robust version. In particular, ``self reported health'' is significant at the 1\% level in the OLS fit explaining BMI change, while it is no longer significant (at the 10\% level) with the robust fit. Conversely, ``sick benefits'' becomes clearly significant (1\% level) with the robust estimation. BMI at baseline, while significant in both cases, has a 10 times smaller explanatory effect in the robust fit.
The implementations of RIPW, RAIPW and ROR are based on these robust fits of the auxiliary models. We use $c_\mu=4$ and $c_\sigma=5$ for the results of Table~\ref{results.tab}. These correspond to the values tuned in the simulation study, see Table~\ref{tuningconstants.table}; varying $c_\mu$ within $[2,6]$ and $c_\sigma$ within $[3,7]$ did not change the results of Table~\ref{results.tab} notably.
\begin{table} \caption{ \label{auxmodel.tab} Estimated auxiliary models explaining dropout and change in BMI using the covariates listed in Section~\ref{bmi.sec} of the main paper (s.e. in parentheses).}
\centering
\begin{tabular}{@{\extracolsep{5pt}}lcccc} \\[-1.8ex] \hline
& \multicolumn{4}{c}{{Dependent variable}} \\ \cline{2-5} \\[-2.8ex] & \multicolumn{2}{c}{$R_i=0$: dropout} & \multicolumn{2}{c}{$Z_{2i}$: BMI change} \\ \\[-2.8ex] & \multicolumn{1}{c}{{logistic}} & \multicolumn{1}{c}{{robust}} & \multicolumn{1}{c}{{OLS}} & \multicolumn{1}{c}{{robust}} \\
\hline \\[-2.8ex]
bbmi & -0.033$^{***}$ & -0.033$^{***}$ & -0.203$^{***}$ & -0.029$^{***}$ \\
& (0.007) & (0.007) & (0.010) & (0.007) \\
srh & 0.146$^{**}$ & 0.153$^{**}$ & -0.271$^{***}$ & -0.045 \\
& (0.064) & (0.064) & (0.093) & (0.058) \\
tob & -0.119$^{**}$ & -0.120$^{**}$ & 0.310$^{***}$ & 0.233$^{***}$ \\
& (0.053) & (0.054) & (0.075) & (0.047) \\
educ & 0.026 & 0.028 & -0.188$^{*}$ & -0.040 \\
& (0.069) & (0.070) & (0.101) & (0.063) \\
nrchild3 & -0.156$^{**}$ & -0.168$^{**}$ & -0.046 & 0.047 \\
& (0.072) & (0.073) & (0.105) & (0.065) \\
logearn & 0.050$^{***}$ & 0.049$^{***}$ & -0.027 & 0.007 \\
& (0.013) & (0.013) & (0.020) & (0.013) \\
parbenef & 0.226$^{**}$ & 0.237$^{**}$ & -0.192 & -0.104 \\
& (0.096) & (0.098) & (0.129) & (0.081) \\
sickbenef & 0.366$^{***}$ & 0.356$^{***}$ & 0.167$^{*}$ & 0.258$^{***}$ \\
& (0.068) & (0.069) & (0.091) & (0.057) \\
unempbenef & -0.300$^{***}$ & -0.298$^{***}$ & 0.017 & -0.062 \\
& (0.073) & (0.073) & (0.115) & (0.072) \\
urban & -0.042 & -0.038 & 0.050 & 0.037 \\
& (0.053) & (0.054) & (0.076) & (0.047) \\
hosp13 & -0.433$^{***}$ & -0.436$^{***}$ & 0.179 & 0.036 \\
& (0.138) & (0.139) & (0.220) & (0.137) \\
hosp4M & -0.427$^{**}$ & -0.400$^{**}$ & 0.569$^{**}$ & -0.137 \\
& (0.181) & (0.182) & (0.282) & (0.176) \\
Constant & 1.466$^{***}$ & 1.454$^{***}$ & 6.860$^{***}$ & 1.923$^{***}$ \\
& (0.227) & (0.229) & (0.334) & (0.209) \\
\hline \\[-2.8ex] Observations & \multicolumn{1}{c}{7,555} & \multicolumn{1}{c}{7,555} & \multicolumn{1}{c}{5,553} & \multicolumn{1}{c}{5,553} \\ \hline \\[-2.8ex] \textit{Note:} & \multicolumn{4}{r}{$^{*}$p$<$0.1; $^{**}$p$<$0.05; $^{***}$p$<$0.01} \\ \end{tabular}
\end{table}
\end{document}
\begin{document}


\title[Spectrahedral representation of polar orbitopes]{Spectrahedral representation of polar orbitopes}

\begin{abstract} Let $K$ be a compact Lie group and $V$ a finite-dimensional representation of $K$. The orbitope of a vector $x\in V$ is the convex hull ${\mathscr{O}}_x$ of the orbit $Kx$ in $V$. We show that if $V$ is polar then ${\mathscr{O}}_x$ is a spectrahedron, and we produce an explicit linear matrix inequality
representation. We also consider the coorbitope ${\mathscr{O}}_x^o$, which is the convex set polar to ${\mathscr{O}}_x$. We prove that ${\mathscr{O}}_x^o$ is the convex hull of finitely many $K$-orbits, and we identify the cases in which ${\mathscr{O}}_x^o$ is itself an orbitope. In these cases one has
${\mathscr{O}}_x^o=c\cdot{\mathscr{O}}_x$ with $c>0$. Moreover we show that if $x$ has ``rational coefficients'' then ${\mathscr{O}}_x^o$ is again a spectrahedron. This provides many new families of doubly spectrahedral orbitopes. All polar orbitopes that are derived from classical semisimple Lie algebras can be described in terms of conditions on singular values and Ky Fan matrix
norms. \end{abstract}

\author{Tim Kobert, Claus Scheiderer} \address
  {Fachbereich Mathematik und Statistik \\
  Universit\"at Konstanz \\
  78457 Konstanz \\
  Germany} \email
  {tim.kobert@uni-konstanz.de, claus.scheiderer@uni-konstanz.de}







\thanks{This work was partially supported by DFG grants SCHE281/10-1 and SCHE281/10-2}

\date{\today} \maketitle



\section*{Introduction}

Let $K$ be a compact Lie group, and let $V$ be a finite-dimensional real representation of $K$. The orbitope of a vector $x\in V$, denoted ${\mathscr{O}}_x$, is the convex hull of the orbit $Kx$ in $V$. Orbitopes are highly symmetric objects that are interesting from many perspectives, like convex geometry, algebraic geometry, Lie theory, symplectic geometry, combinatorial
geometry or optimization. We refer to \cite{sss} for a broad overview with plenty of explicit examples.

Here our focus will be on properties of orbitopes that are particularly relevant to optimization, and more specifically, to semidefinite programming. We are interested in existence and explicit construction of spectrahedral representations for orbitopes and related convex bodies. For this we consider a particular class of group representations, namely polar representations of connected compact Lie groups. As far as the orbit structure is concerned, all such representations arise from Riemannian symmetric spaces
$M=G/K$ as actions of the isotropy group on the tangent space at a point. In other words, each polar representation comes from a Cartan decomposition ${\mathfrak{g}}={\mathfrak{k}}\oplus{\mathfrak{p}}$ of a real semisimple Lie algebra ${\mathfrak{g}}$, as the adjoint representation of $K$ on ${\mathfrak{p}}$. Dadok \cite{da} showed that these representations have
very particular properties. At the same time they comprise all the familiar actions of the classical unitary, orthogonal or symplectic groups on (skew-) hermitian or symmetric matrices.

Our main results are as follows. We prove that every orbitope ${\mathscr{O}}_x$ in a polar representation is a spectrahedron, i.e.\ an affine-linear slice of the psd matrix cone. In fact we produce an explicit linear matrix inequality representation for any such orbitope (Theorem \ref{polorbspekt}). So far, this result was known only for a few scattered classes of examples. We also consider the dual convex body
${\mathscr{O}}_x^o$, called the coorbitope of~$x$. We prove that ${\mathscr{O}}_x^o$ always is the convex hull of finitely many $K$-orbits, and we identify those orbits explicitly (Corollary \ref{orbitsoxo}). In particular, we isolate the cases when ${\mathscr{O}}_x$ is a biorbitope (Theorem \ref{biorbitop}), meaning that
${\mathscr{O}}_x^o$ is an orbitope as well. Remarkably, ${\mathscr{O}}_x$ is always self-polar up to positive scaling when it is a biorbitope (Theorem \ref{oxovsox}). Moreover, we show that whenever the orbitope ${\mathscr{O}}_x$ can be ``defined over the rational numbers~${\mathbb{Q}}$'', the coorbitope
${\mathscr{O}}_x^o$ is again a spectrahedron, and we find an explicit linear matrix inequality for it (Theorem \ref{doublspect}). So far, only very few examples of doubly spectrahedral sets were known \cite{spw}. Our result provides many new series of sets with this property.

The main and all-important tool for our results is Kostant's convexity theorem \cite{ko}. It allows us to reduce most questions considered here to a Cartan subspace, and even to a Weyl chamber. In this way the questions become polyhedral in nature.

The paper is organized as follows. Polar representations and orbitopes are recalled in Section~1. The general background on semisimple real Lie algebras and their restricted root systems is summarized in Section~2, as far as it will be needed here. Kostant's theorem is stated in Section~3, together with a few immediate consequences. Spectrahedral representations for polar orbitopes are constructed in Section~4. In Sections 5 and~6 we relate the facial structure of
${\mathscr{O}}_x$ to the momentum polytope $P_x$, with an emphasis on maximal faces of ${\mathscr{O}}_x$ resp.\ facets of $P_x$. Since the maximal faces of ${\mathscr{O}}_x$ correspond to the extreme points of the polar set ${\mathscr{O}}_x^o$, this allows us to identify biorbitopes. Doubly spectrahedral orbitopes are considered in
Section~7. Finally, in Section~8 we list all polar orbitopes that are derived from semisimple Lie algebras of classical type. All these orbitopes have descriptions in terms of singular values of matrices over ${\mathbb{R}}$, ${\mathbb{C}}$ or ${\mathbb{H}}$. Typically, they consist of intersections of balls of various radii with respect to different Ky Fan matrix norms.

Some of the results presented here are taken from the 2018 doctoral thesis \cite{kob} of the first author, written under the guidance of the second.



\section{Polar representations and orbitopes}
\marginnote{\bf??}
\begin{lab}\label{polar}
We recall the notion of polar representation, following Dadok \cite{da}.
Let $K$ be a Lie group with Lie algebra ${\mathfrak{k}}$, and let $\rho\colon K\to O(V)$ be a linear representation of $K$ on a (finite-dimensional) real vector space $V$, preserving a fixed inner product.
For every $x\in V$, the linear subspace ${\mathfrak{a}}_x=({\mathfrak{k}} x)^\bot$ of $V$ meets every $K$-orbit \cite[Lemma~1]{da}.
A vector $x\in V$ is said to be regular if the orbit $Kx$ has maximal dimension.
The subspaces ${\mathfrak{a}}_x$, for $x$ regular, are called the \emph{Cartan subspaces} of the representation.
The Cartan subspaces are all $K$-conjugate if, and only if, they are orthogonal to the $K$-orbits passing through them \cite[Prop.~2]{da}.
The representation $\rho$ is said to be \emph{polar} if these equivalent conditions are satisfied.

Every Riemannian symmetric space $M$ gives rise to a polar representation, namely the action of the isotropy group on the tangent space $T_e(M)$ at a point $e\in M$.
In other words, let $G$ be a connected real semisimple Lie group with Lie algebra ${\mathfrak{g}}$, let ${\mathfrak{g}}={\mathfrak{k}}\oplus{\mathfrak{p}}$ be a Cartan decomposition, and let $K\subseteq G$ be the analytic subgroup corresponding to ${\mathfrak{k}}$.
Then $K$ is a maximal compact subgroup of~$G$, and the adjoint action of $K$ on ${\mathfrak{p}}$ is an example of a polar representation.
This representation is irreducible if and only if ${\mathfrak{g}}$ is simple as a Lie algebra.
Conversely, as far as the orbit structure is concerned, these are the only examples of polar representations of connected Lie groups:
\end{lab}

\begin{prop}\label{dadokprop6}
\emph{(Dadok \cite[Proposition~6]{da})}
Let $V$ be a polar representation of a connected Lie group $H$.
There is a real semisimple Lie algebra ${\mathfrak{g}}$ with Cartan decomposition ${\mathfrak{g}}={\mathfrak{k}}\oplus{\mathfrak{p}}$, together with a vector space isomorphism $f\colon V\to{\mathfrak{p}}$, such that $f(H\cdot x)=\Ad(K)\cdot f(x)$ for every $x\in V$, where $K\subseteq\Aut({\mathfrak{g}})$ is the analytic subgroup with Lie algebra~${\mathfrak{k}}$.
\end{prop}

\begin{rem}\label{dirprod}
Let $V=\bigoplus_{i=1}^nV_i$ be the irreducible decomposition of an arbitrary polar representation of $K$.
Then each irreducible summand $V_i$ is again a polar representation of $K$ (Dadok \cite[Theorem~4]{da}).
Moreover, if $x=\sum_{i=1}^nx_i\in V$ with $x_i\in V_i$ for all~$i$, then ${\mathscr{O}}_x={\mathscr{O}}_{x_1}\times\cdots\times{\mathscr{O}}_{x_n}$ with ${\mathscr{O}}_{x_i}=\conv_{V_i}(Kx_i)$.
Indeed, such a direct product decomposition holds for the $K$-orbit of~$x$ by Dadok's theorem, and hence it holds for the convex hulls as well.
In other words, every polar orbitope is a Cartesian direct product of irreducible polar orbitopes.
\end{rem}

\begin{lab}
Recall that a \emph{spectrahedron} in ${\mathbb{R}}^n$ is the solution set of a linear matrix inequality (LMI).
So $S\subseteq{\mathbb{R}}^n$ is a spectrahedron if there exist complex hermitian matrices $A_0,\dots,A_n$ of some size $d\times d$ such that
$$S=\Bigl\{x\in{\mathbb{R}}^n\colon A_0+\sum_{i=1}^nx_iA_i\succeq0\Bigr\},$$
where $A\succeq0$ means that $A$ is positive semidefinite (all eigenvalues are nonnegative).
Note that an LMI with complex hermitian $d\times d$ matrices may be converted into an equivalent LMI with real symmetric $2d\times2d$ matrices, which is why spectrahedra are often defined via real symmetric LMIs.
\end{lab}

\begin{lab}\label{orbitopsasum}
Let $V$ be a linear representation (real and finite-dimensional) of a compact Lie group $K$.
Given $x\in V$, the convex hull $\conv(Kx)$ of the orbit of $x$ in $V$ is called the ($K$-)\emph{orbitope} of $x$.
We usually denote it by ${\mathscr{O}}_x=\conv(Kx)$, assuming that $K$ and $V$ are understood.
The orbitope ${\mathscr{O}}_x$ is a compact convex set on which $K$ acts, and whose set of extreme points coincides with the orbit~$Kx$.

We will study orbitopes ${\mathscr{O}}_x=\conv(Kx)$ in polar representations $V$ of compact connected Lie groups $K$.
Using Proposition~\ref{dadokprop6}, we can and will always assume that $V={\mathfrak{p}}$, where ${\mathfrak{g}}={\mathfrak{k}}\oplus{\mathfrak{p}}$ is a Cartan decomposition of a real semisimple Lie algebra ${\mathfrak{g}}$, and that the action is the adjoint action of the analytic subgroup $K$ of $\Aut({\mathfrak{g}})$.
\end{lab}

\begin{lab}
If $V$ is a vector space over ${\mathbb{R}}$, the dual vector space is denoted by $V^{\scriptscriptstyle\vee}=\Hom(V,{\mathbb{R}})$.
The convex hull of a set $M\subseteq V$ is written $\conv(M)$.
Our notation for matrix groups and matrix Lie algebras tries to follow the conventions in \cite{kn}.
In particular, $SU(n)$, $SO(n)$, $Sp(n)$ are the classical compact Lie groups, and $su(n)$, $so(n)$, $sp(n)$ are their Lie algebras, etc.
The diagonal $n\times n$ matrix with diagonal entries $a_1,\dots,a_n$ is denoted $\mathrm{diag}(a_1,\dots,a_n)$.
\end{lab}



\section{Background on semisimple real Lie algebras}
\label{lienot}

We use standard notation and terminology for semisimple Lie groups and Lie algebras, and we recall it briefly here.
As a general reference we refer to Knapp's monograph \cite{kn}, in particular to Chapter~6.

\begin{lab}
Let ${\mathfrak{g}}$ be a semisimple Lie algebra over ${\mathbb{R}}$, hence a finite direct sum of simple (nonabelian) Lie algebras over ${\mathbb{R}}$.
Recall that if ${\mathfrak{g}}$ is simple, then either ${\mathfrak{g}}$ has a structure as a (simple) Lie algebra over~${\mathbb{C}}$, or else ${\mathfrak{g}}_{\mathbb{C}}:={\mathfrak{g}}\otimes{\mathbb{C}}$ is a simple Lie algebra over~${\mathbb{C}}$.

Let\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}\theta\marginnote{\bf??}$\marginnote{\bf??} be\marginnote{\bf??} a\marginnote{\bf??} Cartan\marginnote{\bf??} involution\marginnote{\bf??} on\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{g\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} and\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{g\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}=\marginnote{\bf??}{\mathfrak{k}}\marginnote{\bf??}\oplus\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{p\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} the\marginnote{\bf??} corresponding\marginnote{\bf??} Cartan\marginnote{\bf??} decomposition\marginnote{\bf??}.\marginnote{\bf??} With\marginnote{\bf??} respect\marginnote{\bf??} to\marginnote{\bf??} the\marginnote{\bf??} Killing\marginnote{\bf??} form\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}\bil\marginnote{\bf??}{\marginnote{\bf??}-\marginnote{\bf??}}\marginnote{\bf??}{\marginnote{\bf??}-\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} of\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{g\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??},\marginnote{\bf??} the\marginnote{\bf??} decomposition\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\mathfrak{k}}\marginnote{\bf??}\oplus\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{p\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} is\marginnote{\bf??} orthogonal\marginnote{\bf??},\marginnote{\bf??} and\marginnote{\bf??} the\marginnote{\bf??} restriction\marginnote{\bf??} of\marginnote{\bf??} 
\marginnote{\bf??}$\marginnote{\bf??}\bil\marginnote{\bf??}{\marginnote{\bf??}-\marginnote{\bf??}}\marginnote{\bf??}{\marginnote{\bf??}-\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} to\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\mathfrak{k}}\marginnote{\bf??}$\marginnote{\bf??} \marginnote{\bf??}(resp\marginnote{\bf??}.\marginnote{\bf??}~\marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{p\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??})\marginnote{\bf??} is\marginnote{\bf??} negative\marginnote{\bf??} \marginnote{\bf??}(resp\marginnote{\bf??}.\marginnote{\bf??}\marginnote{\bf??} positive\marginnote{\bf??})\marginnote{\bf??} definite\marginnote{\bf??}.\marginnote{\bf??} \marginnote{\bf??}\end\marginnote{\bf??}{lab\marginnote{\bf??}}\marginnote{\bf??}

\begin{lab} Choose a maximal commutative subspace ${\mathfrak{a}}$ of ${\mathfrak{p}}$, and let $\Sigma\subseteq{\mathfrak{a}}^{\scriptscriptstyle\vee}=\Hom({\mathfrak{a}},{\mathbb{R}})$ be the system of restricted roots of $({\mathfrak{g}},{\mathfrak{a}})$. This is an abstract root system, possibly non-reduced. Fixing an ordering on
${\mathfrak{a}}^{\scriptscriptstyle\vee}$ we have the sets $\Sigma_{\scriptscriptstyle+}\subseteq\Sigma$ of positive restricted roots and $\Gamma=\{\beta_1,\dots,\beta_n\}\subseteq\Sigma_{\scriptscriptstyle+}$ of simple restricted roots. \end{lab}

\begin{lab}\label{comporderings} Let ${\mathfrak{t}}$ be a maximal commutative subalgebra of the centralizer $Z_{\mathfrak{k}}({\mathfrak{a}})$ of ${\mathfrak{a}}$ in ${\mathfrak{k}}$. Then ${\mathfrak{h}}:=i{\mathfrak{t}}\oplus{\mathfrak{a}}$ is a commutative subalgebra of ${\mathfrak{g}}_{\mathbb{C}}$ (the complexification of ${\mathfrak{g}}$) for which
${\mathfrak{h}}_{\mathbb{C}}$ is a Cartan algebra of ${\mathfrak{g}}_{\mathbb{C}}$. The rank of ${\mathfrak{g}}_{\mathbb{C}}$ will be denoted by $l=\dim({\mathfrak{h}})$, the real rank of ${\mathfrak{g}}$ by $n=\dim({\mathfrak{a}})$.

The Killing form of ${\mathfrak{g}}_{\mathbb{C}}$ restricted to ${\mathfrak{h}}$ is a euclidean inner product on ${\mathfrak{h}}$, denoted by $\bil{-}{-}$. All roots of $({\mathfrak{g}}_{\mathbb{C}},{\mathfrak{h}}_{\mathbb{C}})$ are real-valued on
${\mathfrak{h}}$. We let $\Delta\subseteq{\mathfrak{h}}^{\scriptscriptstyle\vee}=\Hom({\mathfrak{h}},{\mathbb{R}})$ be the root system of $({\mathfrak{g}}_{\mathbb{C}},{\mathfrak{h}}_{\mathbb{C}})$. Extending the ordering on ${\mathfrak{a}}^{\scriptscriptstyle\vee}$ suitably to
${\mathfrak{h}}^{\scriptscriptstyle\vee}$ (e.g.\ take the lexicographic order on ${\mathfrak{h}}^{\scriptscriptstyle\vee}=({\mathfrak{a}}+i{\mathfrak{t}})^{\scriptscriptstyle\vee}$ with ${\mathfrak{a}}$ before $i{\mathfrak{t}}$ \cite[p.~377]{kn}), we let $\Delta_{\scriptscriptstyle+}\subseteq\Delta$ be the positive roots and
$\Pi=\{\alpha_1,\dots,\alpha_l\}\subseteq\Delta_{\scriptscriptstyle+}$ the simple roots. \end{lab}

\begin{lab} Let $G=\Aut({\mathfrak{g}})_0$, the identity component of the automorphism group of ${\mathfrak{g}}$. The Lie algebra of $G$ is naturally identified with ${\mathfrak{g}}$. The analytic subgroup $K\subseteq G$ with Lie algebra ${\mathfrak{k}}$ is a maximal compact subgroup of~$G$, and $K$ acts on (${\mathfrak{k}}$
and) ${\mathfrak{p}}$ via the adjoint action. We denote this action simply by $gx:=\Ad(g)x$, for $g\in K$ and $x\in{\mathfrak{p}}$. Every element of ${\mathfrak{p}}$ is $K$-conjugate to an element of~${\mathfrak{a}}$. Note that the $K$-action preserves the quadratic (Killing) form on
${\mathfrak{p}}$. This action is a polar representation of $K$, and as far as the orbit structure is concerned, every polar representation of a connected Lie group arises in this way (Proposition \ref{dadokprop6}). \end{lab}

\begin{lab} Note that the direct sum decomposition ${\mathfrak{h}}=i{\mathfrak{t}}\oplus{\mathfrak{a}}$ is orthogonal with respect to the Killing form. The restriction map $r\colon{\mathfrak{h}}^{\scriptscriptstyle\vee}\to{\mathfrak{a}}^{\scriptscriptstyle\vee}$ satisfies $r(\Delta_{\scriptscriptstyle+})\subseteq \Sigma_{\scriptscriptstyle+}\cup\{0\}$ and $r(\Pi)\subseteq
\Gamma\cup\{0\}$. Conversely, there exists an involution $\alpha\mapsto\alpha'$ on $\Pi$ such that for every $\beta\in\Gamma$, the set $\{\alpha\in\Pi\colon r(\alpha)=\beta\}$ has the form $\{\alpha,\alpha'\}$ with $\alpha\in\Pi$. \end{lab}
\marginnote{\bf??}
\marginnote{\bf??}\begin\marginnote{\bf??}{lab\marginnote{\bf??}}\marginnote{\bf??}\label\marginnote{\bf??}{weyl\marginnote{\bf??}}\marginnote{\bf??} From\marginnote{\bf??} the\marginnote{\bf??} inner\marginnote{\bf??} product\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}\bil\marginnote{\bf??}{\marginnote{\bf??}-\marginnote{\bf??}}\marginnote{\bf??}{\marginnote{\bf??}-\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} on\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{h\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} we\marginnote{\bf??} get\marginnote{\bf??} linear\marginnote{\bf??} isomorphisms\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{h\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}\isoto\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{h\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}^\marginnote{\bf??}{\marginnote{\bf??}\scriptscriptstyle\marginnote{\bf??}\vee\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} and\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\mathfrak{a}}\marginnote{\bf??}\isoto\marginnote{\bf??}{\mathfrak{a}}\marginnote{\bf??}^\marginnote{\bf??}{\marginnote{\bf??}\scriptscriptstyle\marginnote{\bf??}\vee\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??}.\marginnote{\bf??} We\marginnote{\bf??} use\marginnote{\bf??} them\marginnote{\bf??} to\marginnote{\bf??} transfer\marginnote{\bf??} the\marginnote{\bf??} inner\marginnote{\bf??} product\marginnote{\bf??} from\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{h\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} to\marginnote{\bf??} 
\marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathfrak\marginnote{\bf??}{h\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}^\marginnote{\bf??}{\marginnote{\bf??}\scriptscriptstyle\marginnote{\bf??}\vee\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} and\marginnote{\bf??} from\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\mathfrak{a}}\marginnote{\bf??}$\marginnote{\bf??} to\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\mathfrak{a}}\marginnote{\bf??}^\marginnote{\bf??}{\marginnote{\bf??}\scriptscriptstyle\marginnote{\bf??}\vee\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??}.\marginnote{\bf??}
\marginnote{\bf??}
For\marginnote{\bf??} every\marginnote{\bf??} restricted\marginnote{\bf??} root\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}\beta\marginnote{\bf??}\in\marginnote{\bf??}\Sigma\marginnote{\bf??}$\marginnote{\bf??} let\marginnote{\bf??} \marginnote{\bf??}$s\marginnote{\bf??}_\marginnote{\bf??}\beta\marginnote{\bf??}\colon\marginnote{\bf??} \marginnote{\bf??}{\mathfrak{a}}\marginnote{\bf??}^\marginnote{\bf??}{\marginnote{\bf??}\scriptscriptstyle\marginnote{\bf??}\vee\marginnote{\bf??}}\marginnote{\bf??}\to\marginnote{\bf??}{\mathfrak{a}}\marginnote{\bf??}^\marginnote{\bf??}{\marginnote{\bf??}\scriptscriptstyle\marginnote{\bf??}\vee\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??} denote\marginnote{\bf??} the\marginnote{\bf??} root\marginnote{\bf??} reflection\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}\gamma\marginnote{\bf??}\mapsto\marginnote{\bf??}
\gamma-\frac{2\bil\beta\gamma}{|\beta|^2}\beta$. We always write $W=\langle s_\beta\colon\beta\in\Sigma\rangle$ for the (restricted) Weyl group of $({\mathfrak{g}},{\mathfrak{a}})$. Via the identification ${\mathfrak{a}}\isoto{\mathfrak{a}}^{\scriptscriptstyle\vee}$ we consider $W$ as a reflection group on ${\mathfrak{a}}$ as well: For
$\beta\in\Sigma$, if $h_\beta\in{\mathfrak{a}}$ is the element with $\bil{h_\beta}x=\beta(x)$ for all $x\in{\mathfrak{a}}$, then $s_\beta$ acts on ${\mathfrak{a}}$ by
$s_\beta(x)=x-\frac{2\beta(x)}{|h_\beta|^2}h_\beta$. It is well known that $W$ is naturally isomorphic to $N_K({\mathfrak{a}})/Z_K({\mathfrak{a}})$ \cite[6.57]{kn}.

We always denote by $$C\>=\>\bigl\{x\in{\mathfrak{a}}\colon\beta_1(x)\ge0,\dots,\beta_n(x)\ge0 \bigr\}$$ the (closed) Weyl chamber for the action of $W$. So $C$ is a polyhedral convex cone, and every element of ${\mathfrak{a}}$ is $W$-conjugate to a unique element of $C$. \end{lab}

\begin{lab}\label{dualbas} Let $\lambda_1,\dots,\lambda_l$ be the basis of ${\mathfrak{h}}^{\scriptscriptstyle\vee}$ that is dual to $\Pi=\{\alpha_1,\dots,\alpha_l\}$, so $\bil{\alpha_i}{\lambda_k}= \delta_{ik}$ for $i,k=1,\dots,l$. Similarly, let
$\mu_1,\dots,\mu_n \in{\mathfrak{a}}^{\scriptscriptstyle\vee}$ be defined by $\bil{\beta_j}{\mu_k}=\delta_{jk}$ for $j,k=1,\dots,n$. The fundamental weights of $({\mathfrak{g}}_{\mathbb{C}},{\mathfrak{h}}_{\mathbb{C}})$ are the linear forms
$\omega_i\in{\mathfrak{h}}^{\scriptscriptstyle\vee}$ defined by $\omega_i=\frac12|\alpha_i|^2 \lambda_i$ ($i=1,\dots,l$), so $\frac{2\bil{\omega_i}{\alpha_k}}
{|\alpha_k|^2}=\delta_{ik}$ for $i,k=1,\dots,l$. For each linear combination $\omega=\sum_{i=1}^lm_i\omega_i$ with integer coefficients $m_i\ge0$, there exists a unique (up to isomorphism) irreducible representation of ${\mathfrak{g}}_{\mathbb{C}}$ with highest weight $\omega$. The irreducible representation of
${\mathfrak{g}}_{\mathbb{C}}$ with highest weight $\omega_i$ is called the $i$-th fundamental representation of ${\mathfrak{g}}_{\mathbb{C}}$, we'll denote it by~$\rho_i$ ($i=1,\dots,l$). \end{lab}

\begin{lab} As before, let $\Pi\subseteq\Delta_{\scriptscriptstyle+}\subseteq{\mathfrak{h}}^{\scriptscriptstyle\vee}$ resp.\ $\Gamma\subseteq\Sigma_{\scriptscriptstyle+}\subseteq{\mathfrak{a}}^{\scriptscriptstyle\vee}$ be the systems of simple resp.\ simple restricted roots. We need to relate the dual bases of $\Pi$ and $\Gamma$ to each other. Given
$\alpha\in\Pi$, let $\lambda_\alpha\in{\mathfrak{h}}^{\scriptscriptstyle\vee}$ be defined by $\bil{\lambda_\alpha} {\alpha'}=\delta_{\alpha,\alpha'}$ for each $\alpha'\in\Pi$. Given $\beta\in\Gamma$, let $\mu_\beta\in{\mathfrak{a}}^{\scriptscriptstyle\vee}$ be defined by
$\bil{\mu_\beta}{\beta'}=\delta_{\beta,\beta'}$ for each $\beta'\in\Gamma$. (So if $\alpha=\alpha_i$ then $\lambda_\alpha=\lambda_i$, and similarly if $\beta=\beta_j$ then $\mu_\beta=\mu_j$.) The following fact is certainly well-known, but we haven't been able to find a suitable reference
for it: \end{lab}

\begin{lem}\label{restrfundgew} Let $\alpha\in\Pi$. If $\beta:=r(\alpha)\ne0$ then $r(\lambda_\alpha)=q\mu_\beta$ for some rational number $q>0$. If $r(\alpha)=0$ then $r(\lambda_\alpha)=0$. \end{lem}

In fact the argument shows that $r(\lambda_\alpha)=\mu_\beta$ if $\alpha$ is the only element of $\Pi$ that restricts to $\beta$, and $r(\lambda_\alpha)=\frac12\mu_\beta$ if there are two such elements.

\begin{proof} Let ${\mathfrak{a}}^{\scriptscriptstyle\vee}\to{\mathfrak{h}}^{\scriptscriptstyle\vee}$, $\mu\mapsto\wt\mu$ denote the linear map that is adjoint to the restriction map $r\colon{\mathfrak{h}}^{\scriptscriptstyle\vee}\to{\mathfrak{a}}^{\scriptscriptstyle\vee}$, so $\bil{\lambda}{\wt\mu}=\bil{r(\lambda)}\mu$ for
$\lambda\in{\mathfrak{h}}^{\scriptscriptstyle\vee}$ and $\mu\in{\mathfrak{a}}^{\scriptscriptstyle\vee}$. Let $\Pi_0=\{\alpha\in\Pi\colon r(\alpha)=0\}$ and $\Pi_1=\Pi\smallsetminus\Pi_0$. The Cartan involution $\theta$ of ${\mathfrak{g}}$ induces $\mathrm{id}$ on $i{\mathfrak{t}}$ and
$-\mathrm{id}$ on ${\mathfrak{a}}$. The dual involution $\theta^{\scriptscriptstyle\vee}$ on ${\mathfrak{h}}^{\scriptscriptstyle\vee}$ satisfies $-\theta^{\scriptscriptstyle\vee}(\Pi)\subseteq\Pi$. For $\alpha\in\Pi$ we abbreviate $\alpha':=-\theta^{\scriptscriptstyle\vee}(\alpha)$. If $\alpha\in\Pi_1$, then
the only elements of $\Pi$ that restrict to $\beta=r(\alpha)$ are $\alpha$ and $\alpha'$ (see \cite{bu} Prop.\ 29.9 for these facts).

Let $\beta\in\Gamma$ be a restricted simple root, and let $\alpha\in\Pi$ with $r(\alpha)=\beta$. From $\alpha'=-\theta^{\scriptscriptstyle\vee}(\alpha)$ we see $\alpha+\alpha'=2\wt\beta$. If $\lambda\in{\mathfrak{h}}^{\scriptscriptstyle\vee}$ is dominant, i.e.\ satisfies $\bil\lambda\gamma\ge0$ for all
$\gamma\in\Pi$, we conclude $\bil{r(\lambda)}\beta=\bil\lambda{\wt\beta}=\frac12 \bil\lambda{\alpha+\alpha'}\ge0$. This shows \begin{equation}\label{inclus} \mathrm{cone}(r(\lambda_1),\dots,r(\lambda_l))\>\subseteq\> \mathrm{cone}(\mu_1,\dots,\mu_n) \end{equation} in
${\mathfrak{a}}^{\scriptscriptstyle\vee}$.

On the other hand, for $\gamma\in\Pi$ the inner product $\bil{\wt{\mu_\beta}}\gamma=\bil{\mu_\beta}{r(\gamma)}$ is $1$ if $r(\gamma)=\beta$, and $0$ otherwise. Therefore $\wt{\mu_\beta}=\frac12(\lambda_\alpha+\lambda_{\alpha'})$. In particular,
$\mu_\beta=r(\wt\mu_\beta)=\frac12r(\lambda_\alpha+ \lambda_{\alpha'})$, which proves the reverse inclusion of~\eqref{inclus}.

If $\alpha'=\alpha$ then $r(\lambda_\alpha)=\mu_\beta$. It remains to consider the case $\alpha'\ne\alpha$. From $\alpha+\alpha'=2\wt\beta$ we see $\bil{r(\lambda_\alpha)}\beta=\bil{\lambda_\alpha}{\wt\beta}=\frac12$, and similarly
$\bil{r(\lambda_{\alpha'})}\beta=\frac12$. On the other hand, $r(\lambda_\alpha+\lambda_{\alpha'})=2\mu_\beta$, together with \eqref{inclus}, implies that $r(\lambda_\alpha)$, $r(\lambda_{\alpha'})$ are positive scalar multiples of $\mu_\beta$, since $\mu_\beta$ generates an extreme ray
of $\mathrm{cone}(\mu_1,\dots,\mu_m)$. Altogether it follows that $r(\lambda_\alpha)=r(\lambda_{\alpha'})=\frac12\mu_\beta$.

Finally assume $r(\alpha)=0$, and let $\gamma\in\Pi$ be arbitrary with $\beta=r(\gamma)\ne0$. Then $\bil{r(\lambda_\alpha)}\beta=\bil{\lambda_\alpha}{\wt\beta}=\frac12\bil{\lambda_\alpha}{\gamma+\gamma'}$ since $2\wt\beta=\gamma+\gamma'$, see above. But
$\alpha\notin\{\gamma,\gamma'\}$ since $r(\gamma)=r(\gamma')=\beta\ne0$. Hence $\bil{r(\lambda_\alpha)}\beta=0$. This for all $\beta\in\Gamma$ shows $r(\lambda_\alpha)=0$. \end{proof}



\section{Kostant's convexity theorem}

We assume the setup of Section~\ref{lienot}. So ${\mathfrak{g}}$ is a semisimple real Lie algebra with Cartan decomposition ${\mathfrak{g}}={\mathfrak{k}}\oplus{\mathfrak{p}}$ and maximal abelian subspace ${\mathfrak{a}}$ of ${\mathfrak{p}}$. This gives us the system $\Sigma\subseteq{\mathfrak{a}}^{\scriptscriptstyle\vee}$ of reduced roots of
$({\mathfrak{g}},{\mathfrak{a}})$, on which the reduced Weyl group $W$ acts. After fixing an ordering we have the simple positive roots $\Gamma$ and the Weyl chamber $C\subseteq{\mathfrak{a}}$.

The key technical tool for this paper is Kostant's convexity theorem, together with its consequences. To a large extent, it allows one to reduce the study of the polar orbits $Kx$ and their orbitopes ${\mathscr{O}}_x$ to a Weyl chamber, whereby the $K$-action on ${\mathfrak{p}}$ gets replaced by the $W$-action on~${\mathfrak{a}}$. We now recall this theorem.

\begin{lab}\label{mompolytop} Let $T\subseteq{\mathfrak{a}}$ be the cone that is dual to $C$ (with respect to the $W$-invariant inner product). If $\Gamma=\{\beta_1,\dots,\beta_n\}$ then $T=\{x\in{\mathfrak{a}}\colon \mu_1(x)\ge0,\dots,\mu_n(x)\ge0\}$ where
$\mu_1,\dots,\mu_n\in{\mathfrak{a}}^{\scriptscriptstyle\vee}$ is the dual basis of $\Gamma$ as in \ref{dualbas}. For $x\in{\mathfrak{a}}$, the convex hull $P_x:=\conv(Wx)$ of the (restricted) Weyl group orbit of $x$ will play a central role. In Hamiltonian geometry, $P_x$ is called the \emph{momentum polytope} associated to $x$
\cite{or}, a term that we will adopt. According to Kostant, $P_x$ is characterized as follows: \end{lab}

\begin{prop}\label{kost33} \cite[Lemma 3.3]{ko} Let $x\in C$ and $y\in{\mathfrak{a}}$.
\begin{itemize}
\item[(a)] $y\in P_x$ if and only if $x-wy\in T$ for every $w\in W$.
\item[(b)] If $y\in C$ then $y\in P_x$ if and only if $x-y\in T$.
\end{itemize}
In particular, $P_x\cap C=\{y\in C\colon\mu_j(y)\le\mu_j(x)$ for $j=1,\dots,n\}$. \end{prop}

Recall that the Killing form of ${\mathfrak{g}}$ restricts to a Euclidean inner product on ${\mathfrak{p}}$. Let $\pi\colon{\mathfrak{p}}\to{\mathfrak{a}}$ denote the orthogonal projection from ${\mathfrak{p}}$ to~${\mathfrak{a}}$. Kostant's convexity theorem says:

\begin{thm}\label{kostant} \cite[Theorem 8.2]{ko} If $x\in{\mathfrak{a}}$ then $\pi(Kx)=P_x$. \end{thm}

We record a few immediate consequences. Recall that ${\mathscr{O}}_x=\conv(Kx)$ denotes the convex hull of the $K$-orbit of $x$ in~${\mathfrak{p}}$.

\begin{cor}\label{cor2kost} If $x\in{\mathfrak{a}}$ then ${\mathscr{O}}_x\cap{\mathfrak{a}}=P_x$. Hence $\pi({\mathscr{O}})={\mathscr{O}}\cap{\mathfrak{a}}$ holds for every $K$-invariant convex subset ${\mathscr{O}}$ of~${\mathfrak{p}}$. \end{cor}

\begin{proof} Theorem \ref{kostant} implies $\pi({\mathscr{O}}_x)=P_x$, and hence ${\mathscr{O}}_x\cap{\mathfrak{a}}\subseteq P_x$. The reverse inclusion is obvious since $Wx\subseteq Kx$. The second assertion follows from the first, since every $K$-orbit in ${\mathfrak{p}}$ meets ${\mathfrak{a}}$. \end{proof}

\begin{cor}\label{cor3kost} If $x,y\in{\mathfrak{a}}$ are $K$-conjugate then they are $W$-conjugate. Every $K$-orbit in ${\mathfrak{p}}$ intersects $C$ in a unique element. \end{cor}

\begin{proof} Both statements are equivalent, so it suffices to prove the second. Let $x,y\in C$ be $K$-conjugate. Then $P_x=P_y$ by \ref{kostant}, so \ref{kost33}(b) implies $\pm(x-y)\in T$, whence $x-y=0$. \end{proof}

\begin{example}\label{exsymsh} Let $n\ge2$ and ${\mathfrak{g}}=sl(n,{\mathbb{R}})$, the real $n\times n$ matrices of trace zero. The resulting polar representation is the natural action of the special orthogonal group $K=SO(n)$ on ${\mathfrak{p}}=sym_0(n,{\mathbb{R}})$, the trace zero symmetric real matrices. The standard choice
for ${\mathfrak{a}}$ is to take all diagonal matrices in ${\mathfrak{p}}$. The (restricted) Weyl group is $W=S_n$, the symmetric group, acting by permutation of the diagonal elements of $x\in{\mathfrak{a}}$. So the momentum polytope $P_x$ is the \emph{permutahedron} of~$x$, namely the convex hull of all permutations of $x$. Kostant's theorem \ref{kostant}
specializes to the (symmetric) Schur-Horn theorem (see \cite{hj} 4.3.45 and 4.3.48, \cite{lrt}, \cite{sss}). Likewise, the hermitian version of the Schur-Horn theorem arises from \ref{kostant} if we take ${\mathfrak{k}}=su(n,{\mathbb{C}})$ and ${\mathfrak{g}}={\mathfrak{k}}_{\mathbb{C}}$, so we get the adjoint action of the
special\marginnote{\bf??} unitary\marginnote{\bf??} group\marginnote{\bf??} \marginnote{\bf??}$K\marginnote{\bf??}=SU\marginnote{\bf??}(n\marginnote{\bf??})\marginnote{\bf??}$\marginnote{\bf??} on\marginnote{\bf??} \marginnote{\bf??}(traceless\marginnote{\bf??})\marginnote{\bf??} hermitian\marginnote{\bf??} \marginnote{\bf??}$n\marginnote{\bf??}\times\marginnote{\bf??} n\marginnote{\bf??}$\marginnote{\bf??} matrices\marginnote{\bf??},\marginnote{\bf??} with\marginnote{\bf??} the\marginnote{\bf??} analogous\marginnote{\bf??} theorem\marginnote{\bf??}.\marginnote{\bf??}
\marginnote{\bf??}
In\marginnote{\bf??} Section\marginnote{\bf??} \marginnote{\bf??}\ref\marginnote{\bf??}{sect\marginnote{\bf??}:exs\marginnote{\bf??}}\marginnote{\bf??} we\marginnote{\bf??} will\marginnote{\bf??} discuss\marginnote{\bf??} examples\marginnote{\bf??} of\marginnote{\bf??} polar\marginnote{\bf??} orbitopes\marginnote{\bf??} in\marginnote{\bf??} a\marginnote{\bf??} systematic\marginnote{\bf??} way\marginnote{\bf??}.\marginnote{\bf??} \marginnote{\bf??}\end\marginnote{\bf??}{example\marginnote{\bf??}}\marginnote{\bf??}



\section{Polar orbitopes as spectrahedra}

\begin{lab}\label{cptorbtps} In general, orbitopes under compact connected linear groups $K$ cannot be expected to be spectrahedra. Examples are suitable $SO(2)$-orbitopes like the $4$-dimensional Barvinok--Novik orbitope \cite{si}, or the Grassmann orbitope $G_{3,6}$ of dimension~$20$ \cite[Theorem 7.6]{sss}, where the group $K=SO(6)$ is even semisimple. Using results
from \cite{sch:hn} it is easy to construct orbitopes under the $2$-torus $K=SO(2)\times SO(2)$ which are not even linear projections of spectrahedra, for example the convex hull of
$\{(s,s^2,s^3,t,t^2,st,st^{-1})\colon s,t\in{\mathbb{C}},\ |s|=|t|=1\}$ in ${\mathbb{C}}^7$. In fact it can be shown that in sufficiently high dimension, ``most'' (in a suitable sense) orbitopes under $SO(2)\times SO(2)$ fail to be projected spectrahedra \cite{kob}.
\end{lab}

Given this general situation, we find the following theorem all the more remarkable:

\begin{thm}\label{connpolorbsp} Any orbitope in a polar representation $V$ of a connected Lie group $K$ is a spectrahedron. \end{thm}

\begin{lab} By Proposition \ref{dadokprop6} we may assume that ${\mathfrak{g}}={\mathfrak{k}}\oplus{\mathfrak{p}}$ is a Cartan decomposition of a real semisimple Lie algebra, and that $K$ has Lie algebra ${\mathfrak{k}}$ and acts on $V={\mathfrak{p}}$ via the adjoint representation. We provide an explicit linear matrix inequality description for any such orbitope.

Fix ${\mathfrak{a}}$, ${\mathfrak{h}}$ together with compatible orderings (\ref{comporderings}), and let otherwise notation be as in Section~\ref{lienot}. In particular, $\Pi=\{\alpha_1,\dots,\alpha_l\}$ is the system of simple roots of $({\mathfrak{g}}_{\mathbb{C}},{\mathfrak{h}}_{\mathbb{C}})$, and
$\Gamma=\{\beta_1,\dots,\beta_n\}$ is the system of simple restricted roots of $({\mathfrak{g}},{\mathfrak{a}})$. For each $j=1,\dots,n$ choose an index $i=i(j)\in\{1,\dots,l\}$ with $r(\alpha_{i(j)})=\beta_j$, and let
$\rho^j\colon{\mathfrak{g}}_{\mathbb{C}}\to\End(V_j)$ be the (complex) irreducible representation of ${\mathfrak{g}}_{\mathbb{C}}$ with highest weight $\omega_{i(j)}$. So $\rho^j$ is the $i(j)$-th fundamental representation of ${\mathfrak{g}}_{\mathbb{C}}$, see \ref{dualbas}.
There exists an hermitian inner product on $V_j$ making $\rho^j(x)$ self-adjoint for all $x\in{\mathfrak{p}}$ (Lemma \ref{ewprelmaxev}(a) below). In particular, $\rho^j(x)$ has real eigenvalues for every $x\in{\mathfrak{p}}$. A more precise version of Theorem \ref{connpolorbsp} is: \end{lab}

\begin{thm}\label{polorbspekt} Given $x\in{\mathfrak{p}}$, the orbitope ${\mathscr{O}}_x=\conv(Kx)$ consists of all $y\in{\mathfrak{p}}$ such that for each $j=1,\dots,n$, all eigenvalues of $\rho^j(y)$ are less than or equal to the largest eigenvalue of $\rho^j(x)$. In other words,
\[ {\mathscr{O}}_x\>=\>\bigl\{y\in{\mathfrak{p}}\colon\rho^j(y)\preceq c_j\cdot\mathrm{id},\ j=1,\dots,n\bigr\} \]
where $c_j$ is the maximal eigenvalue of $\rho^j(x)$. \end{thm}

Upon choosing orthogonal bases of the representation spaces $V_j$, this is an explicit description of ${\mathscr{O}}_x$ by linear matrix inequalities, involving hermitian matrices in general. We remark that the fundamental representations $\rho^j$ of ${\mathfrak{g}}_{\mathbb{C}}$ are very well known and understood \cite{ti}, in particular so for the classical Lie algebras.

\begin{rem} Theorem \ref{polorbspekt} implies in particular that all faces in a polar orbitope are exposed, since this is true in every spectrahedron. This fact was proved before by Biliotti, Ghigi and Heinzner \cite{bgh1}. Some spectrahedral representations contained in, or closely related to, Theorem \ref{polorbspekt} were constructed by Sanyal, Sottile and Sturmfels \cite{sss}, namely for symmetric Schur--Horn orbitopes (see
\ref{exsymsh}), and also for skew-symmetric Schur--Horn orbitopes (see \ref{unitskewherm}) and Fan orbitopes (see \ref{sonn} and \ref{ovsso}). These latter orbitopes, as considered in \cite{sss}, do not directly fall under the assumptions of \ref{polorbspekt}, since the groups acting there are not connected (full instead of special orthogonal groups). It is not hard, however, to recover the
results from \cite{sss} in our setup, see Remark \ref{ovsso}. In Saunderson--Parrilo--Willsky \cite{spw}, a spectrahedral representation for the convex hull of the special orthogonal group $SO(n)$ (and for its dual convex body) was found, see also Remarks \ref{sonrem} and \ref{spwrem} below. Otherwise we believe that our result is new. \end{rem}

Before we give the proof of Theorem \ref{polorbspekt}, recall the following well-known facts.

\begin{lem}\label{ewprelmaxev}
Let $\rho\colon{\mathfrak{g}}_{\mathbb{C}}\to\End(V)$ be a (complex) representation of ${\mathfrak{g}}_{\mathbb{C}}$.
\begin{itemize}
\item[(a)] There exists a Hermitian inner product on $V$ that makes $\rho(x)$ self-adjoint for every $x\in{\mathfrak{p}}$.
\item[(b)] If $\rho$ is irreducible with highest weight $\omega$, and if $x\in C$, then $\omega(x)$ is the largest eigenvalue of $\rho(x)$. (Recall that $C$ denotes the Weyl chamber, see \ref{weyl}.)
\end{itemize}
\end{lem}
\marginnote{\bf??}
\begin{proof}
(a) Since ${\mathfrak{g}}_1:={\mathfrak{k}}\oplus i{\mathfrak{p}}\subseteq{\mathfrak{g}}_{\mathbb{C}}$ is a compact real form of ${\mathfrak{g}}_{\mathbb{C}}$, there is a Hermitian inner product on $V$ that is invariant under this Lie algebra, i.e.\ $\rho(y)$ is anti-self-adjoint for every
$y\in{\mathfrak{g}}_1$. In particular, $\rho(x)$ is self-adjoint for every $x\in{\mathfrak{p}}$.
\marginnote{\bf??}
(b) Let $\chi_1,\dots,\chi_r\in{\mathfrak{h}}^{\scriptscriptstyle\vee}$ be the weights of $\rho$, with $\chi_1=\omega$. For $x\in{\mathfrak{a}}$, the eigenvalues of $\rho(x)$ are $\chi_1(x),\dots,\chi_r(x)$. Every $\chi_i$ has the form
$\omega-\sum_{i=1}^l k_i\alpha_i$ with integer coefficients $k_i\ge0$. Since $x\in C$ we have $\alpha_i(x)\ge0$ for each index~$i$, from which the claim is obvious.
\end{proof}
\marginnote{\bf??}
\begin{lab}
\textsc{Proof} of Theorem \ref{polorbspekt}. Let $x\in{\mathfrak{p}}$, let $c_j=\omega_{i(j)}(x)$ be the largest eigenvalue of $\rho^j(x)$, and write $O(x):=\{y\in{\mathfrak{p}}\colon\rho^j(y)\preceq c_j\cdot\mathrm{id}\text{ for }j=1,\dots,n\}$.
Both sets ${\mathscr{O}}_x$ and $O(x)$ are $K$-invariant. To prove the equality ${\mathscr{O}}_x=O(x)$, it therefore suffices to show ${\mathscr{O}}_x\cap C=O(x)\cap C$, since every $K$-orbit meets the Weyl chamber $C$.
\marginnote{\bf??}
So let $x,\,y\in C$. See \ref{dualbas} to \ref{restrfundgew} for notation in the following discussion. Since $\rho^j$ has highest weight $\omega_{i(j)}$, the largest eigenvalue of $\rho^j(y)$ is $\omega_{i(j)}(y)$ (Lemma \ref{ewprelmaxev}(b)). Hence $y\in O(x)$ if and only if
$\omega_{i(j)}(y)\le\omega_{i(j)}(x)=c_j$ for $j=1,\dots,n$. By Lemma \ref{restrfundgew}, the restriction $r(\omega_{i(j)})\in{\mathfrak{a}}^{\scriptscriptstyle\vee}$ is a positive scalar multiple of $\mu_j$
(recall that $\omega_{i(j)}=\frac12|\alpha_{i(j)}|^2\lambda_{i(j)}$, $j=1,\dots,n$). So $y\in O(x)$ if and only if $\mu_j(y)\le\mu_j(x)$ for $j=1,\dots,n$. By Proposition \ref{kost33}(b) this is equivalent to $y\in P_x$. On the other hand,
$y\in P_x$ is equivalent to $y\in{\mathscr{O}}_x$ by Corollary \ref{cor2kost}. \qed
\end{lab}
\marginnote{\bf??}
\begin{example}\label{sonn}
We illustrate the statement of Theorem \ref{polorbspekt}. Let $n\ge3$, and consider the action of $(g,h)\in K=SO(n)\times SO(n)$ on $x\in M_n({\mathbb{R}})$ by $gxh^t$. This is a polar representation of $K$ that arises from the split real form of $D_n$, i.e.\ from the simple Lie
algebra
\begin{equation}\label{natrep}
{\mathfrak{g}}\>=\>so(n,n)\>=\>\bigl\{x\in M_{2n}({\mathbb{R}})\colon jx+x^tj=0\bigr\},\quad j=\begin{pmatrix}I_n&0\\0&-I_n\end{pmatrix}.
\end{equation}
Note that ${\mathfrak{g}}$ consists of all block matrices
\begin{equation}\label{xuvw}
x\>=\>\begin{pmatrix}u&w\\w^t&v\end{pmatrix}
\end{equation}
with $u,\,v,\,w\in M_n({\mathbb{R}})$ and $u,\,v$ skew-symmetric, and ${\mathfrak{p}}\subseteq{\mathfrak{g}}$ is the subspace of all symmetric such matrices, i.e.\ with $u=v=0$. As maximal commutative subspace of
${\mathfrak{p}}$ we take the space ${\mathfrak{a}}$ of all matrices \eqref{xuvw} with $u=v=0$ and $w=\mathrm{diag}(x_1,\dots,x_n)$ diagonal. Denote such a matrix by $x=(x_1,\dots,x_n)$. The simple roots $\beta_i=\alpha_i$ act on $x$ as $\alpha_i(x)=x_i-x_{i+1}$
($1\le i<n$) and $\alpha_n(x)=x_{n-1}+x_n$. Hence the Weyl chamber
$C$ consists of all $x\in{\mathfrak{a}}$ with $x_1\ge\cdots\ge x_{n-1}\ge|x_n|$. The fundamental weights $\mu_i=\lambda_i$ are $\lambda_i(x)=x_1+\cdots+x_i$ ($i\le n-2$) and
\[
\lambda_{n-1}(x)\>=\>\frac12\bigl(x_1+\cdots+x_{n-1}-x_n\bigr),\quad
\lambda_n(x)\>=\>\frac12\bigl(x_1+\cdots+x_{n-1}+x_n\bigr).
\]
By Lemma \ref{kost33}, the momentum polytope for $x\in C$ is described by $P_x\cap C=\{y\in C\colon\lambda_i(y)\le\lambda_i(x)\text{ for }i=1,\dots,n\}$. The first fundamental representation $\rho_1$ of
${\mathfrak{g}}$ is the natural representation \eqref{natrep}, the higher ones are the exterior powers $\rho_i=\mathsf{\Lambda}^i\rho_1$ ($1\le i\le n-2$). Moreover, $\rho_{n-1}$ and $\rho_n$ are the two half-spin representations. So $\dim(\rho_i)=\binom{2n}{i}$ for $i\le n-2$, and
$\dim(\rho_{n-1})=\dim(\rho_n)=2^{n-1}$. Expressing the $\rho_i$ by matrices one arrives at explicit spectrahedral representations of the $K$-orbitopes ${\mathscr{O}}_x$, for $x\in M_n({\mathbb{R}})$. These representations are closely related to \cite[Theorem 4.7]{sss}, where the group acting is
$O(n)\times O(n)$ instead of our~$K$.
\end{example}
\marginnote{\bf??}
\begin{rem}\label{sonrem} For general $x\in{\mathfrak{p}}$, none of the $n$ linear matrix inequalities describing ${\mathscr{O}}_x$ in Theorem \ref{polorbspekt} can be left out. For special $x$ this may be different. We illustrate this remark with just one example, deferring a detailed discussion to a later occasion.

Consider again the action of $K=SO(n)\times SO(n)$ on $M_n({\mathbb{R}})$, as in \ref{sonn}, and take $x=I_n\in M_n({\mathbb{R}})$, the identity matrix, so $x=(1,\dots,1)$ in notation of \ref{sonn}. The orbitope is ${\mathscr{O}}_x=\conv SO(n)$, the convex hull
of the group $SO(n)$. Due to the special choice of $x$, the description of the momentum polytope simplifies. For $y\in C$, the condition $y_1\le1$ implies $\sum_{i=1}^ky_i\le k$ for every $k=1,\dots,n$. So $P_x\cap C$ is already described by the two inequalities $\lambda_1(y)\le1$ and
$\lambda_{n-1}(y)\le\frac{n-2}2$. We conclude that ${\mathscr{O}}_x=\conv SO(n)$ satisfies $$\conv SO(n)\ =\ \Bigl\{y\in M_n({\mathbb{R}})\colon \begin{pmatrix}0&y\\ y^t&0\end{pmatrix}\preceq I,\
\rho_{n-1}\begin{pmatrix}0&y\\ y^t&0\end{pmatrix}\preceq \frac{n-2}2I\Bigr\}$$ since both sets agree when intersected with $C$. This recovers one of the main results of Saunderson, Parrilo and Willsky \cite[Theorem 1.3]{spw}. (In the notation of \emph{loc.\,cit.}, given a matrix $y=(y_{ij})\in
M_n({\mathbb{R}})$, the $2^{n-1}\times 2^{n-1}$ matrix $\sum_{i,j=1}^ny_{ij}A^{(ij)}$ constructed there corresponds to the endomorphism $\rho_n\begin{pmatrix}0&2y\\ 2y^t&0 \end{pmatrix}$. The extra factor $2$ accounts for the apparent difference between their result and ours.) See \ref{spwrem} below for a
spectrahedral representation of the polar convex set $SO(n)^o$. \end{rem}



\section{Face correspondence}

\begin{lab}\label{facecorrdfn} As before let ${\mathfrak{g}}={\mathfrak{k}}\oplus{\mathfrak{p}}$ be a Cartan decomposition of a semisimple real Lie algebra ${\mathfrak{g}}$. For general setup and notation see Section \ref{lienot}. We continue to denote the orthogonal projection ${\mathfrak{p}}\to{\mathfrak{a}}$ by~$\pi$. Let $x\in{\mathfrak{p}}$,
and let $P_x$ be the momentum polytope of~$x$ (\ref{mompolytop}). If $Q$ is any face of $P_x$, then $F_Q:={\mathscr{O}}_x\cap\pi^{-1}(Q)$ is a face of ${\mathscr{O}}_x$. For any $w\in W$ there exists $g\in N_K({\mathfrak{a}})$ with
$w=gZ_K({\mathfrak{a}})$. The projection $\pi\colon{\mathfrak{p}}\to{\mathfrak{a}}$ is easily seen to commute with the action of $N_K({\mathfrak{a}})$, and therefore $F_{wQ}=gF_Q$ holds. Hence the assignment $Q\mapsto F_Q$ induces a map from $W$-orbits of faces of $P_x$ to $K$-orbits of faces of
${\mathscr{O}}_x$.

The following theorem asserts, in particular, that this map is bijective. This fact was originally proved by Biliotti, Ghigi and Heinzner \cite[Theorem~1.1]{bgh1}. We give a new proof that we think is considerably easier. Note however that \cite{bgh1} proves a more precise result, implying in particular that the faces $F_Q$ of ${\mathscr{O}}_x$ are themselves orbitopes under suitable groups.
\end{lab}

\begin{thm}\label{orbitcorr} Let $x\in{\mathfrak{p}}$, let $F$ be a face of the orbitope ${\mathscr{O}}_x$. \begin{itemize} \item[(a)] There exists a face $Q$ of $P_x$ and an element $g\in K$ such that $F=gF_Q$. \item[(b)] If $Q'$ is another face of $P_x$ with
$F\subseteq g'F_{Q'}$ for some $g'\in K$, then there exists $w\in W$ such that $wQ\subseteq Q'$. \end{itemize} In particular, $Q\mapsto F_Q$ induces a bijective correspondence between $W$-orbits of faces of $P_x$ and $K$-orbits of faces of ${\mathscr{O}}_x$, compatible with inclusion of faces. \end{thm}

For the proof observe the following lemma:

\begin{lem}\label{keylem} Let $x\in{\mathfrak{p}}$, let $Q$ be a face of $P_x$, and let $y\in P_x$ with $P_y\cap Q\ne\varnothing$. Then $wy\in Q$ for some $w\in W$. \end{lem}

\begin{proof} We can assume $Q\ne P_x$, so there is a supporting hyperplane $H\subseteq{\mathfrak{a}}$ of $P_x$ with $Q=H\cap P_x$. Since $P_y\subseteq P_x$ and $H\cap P_y$ is not empty, the hyperplane $H$ is a supporting hyperplane of $P_y$ as well. In particular, $Q'=H\cap P_y$ is a face of
$P_y$, and therefore contains an extreme point $y'$ of $P_y$. Thus $y'\in Q'\subseteq H\cap P_x=Q$, and $y'=wy$ for some $w\in W$ since $P_y=\conv(Wy)$. \end{proof}

\begin{lab} \textsc{Proof} of Theorem \ref{orbitcorr}.


(a) Let $F$ be a face of ${\mathscr{O}}_x$. By Theorem \ref{connpolorbsp}, ${\mathscr{O}}_x$ is a spectrahedron, so all faces are exposed. Hence there exist $z\in{\mathfrak{p}}$ and $c\in{\mathbb{R}}$ such that $H=\{y\in{\mathfrak{p}}\colon\bil yz=c\}$ is a supporting hyperplane of
${\mathscr{O}}_x$ with $H\cap{\mathscr{O}}_x=F$. Upon replacing $F$ with $gF$ for some $g\in K$ we can assume $z\in{\mathfrak{a}}$, since $z$ is $K$-conjugate to an element of ${\mathfrak{a}}$. Then $H\cap{\mathfrak{a}}$ is a supporting hyperplane of $P_x$, and so $Q:=H\cap P_x$ is a face
of $P_x$. Clearly $F=F_Q$.


(b) By (a) it suffices to show: If $Q,\,Q'$ are faces of $P_x$, and if $gF_Q\subseteq F_{Q'}$ for some $g\in K$, then there exists $w\in W$ with $wQ\subseteq Q'$. Let $y\in\mathrm{relint}(Q)$. Since $Q\subseteq F_Q$ we have $gy\in F_{Q'}$, and therefore
$\pi(gy)\in Q'$. On the other hand, $\pi(gy)\in\pi(Ky)=P_y$. So Lemma \ref{keylem} applies and shows $wy\in Q'$ for some $w\in W$. Since $y\in\mathrm{relint}(Q)$, this implies $wQ\subseteq Q'$.

In particular, if $Q,\,Q'$ are faces of $P_x$ for which $F_Q$ and $F_{Q'}$ are $K$-conjugate, then $Q$ and $Q'$ are $W$-conjugate. \end{lab}

Recall that a face $Q$ of a polytope $P$ is called a \emph{facet} if $\dim(Q)=\dim(P)-1$.

\begin{cor}\label{maxpropfac} The maximal proper faces of ${\mathscr{O}}_x$ are precisely the $K$-conjugates of the faces $F_Q$, where $Q$ is a facet of~$P_x$. \qed \end{cor}

\begin{lab}\label{polarset} We apply this result to the study of the coorbitope ${\mathscr{O}}_x^o$. First recall the definition of the polar of a convex set. Let $V$ be a real vector space, $\dim(V)<\infty$. For any set $M\subseteq V$ let $M^o=\{l\in V^{\scriptscriptstyle\vee}\colon\forall\, x\in M$
$l(x)\le1\}$, the \emph{polar set} of~$M$. Usually a Euclidean inner product $\bil{-}{-}$ on $V$ will be fixed; then we identify $M^o$ with the set $\{y\in V\colon\forall\, x\in M$ $\bil xy\le1\}$. If $M$ is compact and $0$ is an interior point of $M$, the
same holds for~$M^o$.

If the compact group $K$ acts on $V$ and $x\in V$, the polar set of the orbitope ${\mathscr{O}}_x=\conv(Kx)$ is called the \emph{coorbitope} of~$x$ \cite{sss}. Clearly the group $K$ acts on ${\mathscr{O}}_x^o$, but in general ${\mathscr{O}}_x^o$ won't be a $K$-orbitope.
Below (\ref{facetsconj} and \ref{biorbitop}) we'll identify those cases when this happens. \end{lab}

\begin{lab}\label{fulldim} For any irreducible abstract root system $(V,\Sigma)$ (possibly non-reduced), the Weyl group $W$ acts irreducibly on~$V$. For any $0\ne x\in V$, the polytope $P_x=\conv(Wx)$ therefore contains an open neighborhood of the origin. If $(V,\Sigma)$ is not necessarily irreducible and $x\in V$, it follows that the
polytope $P_x$ is full-dimensional if and only if every irreducible component of $\Sigma$ contains a root $\alpha$ with $\alpha(x)\ne0$. Moreover in this case, $0$ is an interior point of $P_x$.

Let $K\to SO(V)$ be a polar representation, and let $x\in V$. The previous discussion implies that when $V$ is irreducible, the origin is an interior point of ${\mathscr{O}}_x$ as soon as $x\ne0$. When $V$ is an arbitrary polar representation of $K$, let $V=\bigoplus V_i$ be the decomposition into irreducible $K$-modules as in
\ref{dirprod}, and let $x=\sum_ix_i\in V$ with $x_i\in V_i$. Then ${\mathscr{O}}_x={\mathscr{O}}_{x_1}\times\cdots\times{\mathscr{O}}_{x_n}$ by \ref{dirprod}. Therefore ${\mathscr{O}}_x$ is full-dimensional in $V$ iff $0$ is an interior point of
${\mathscr{O}}_x$, and both are equivalent to $x_i\ne0$ for each index~$i$. It is also equivalent that $\pi(Kx)={\mathscr{O}}_x\cap{\mathfrak{a}}$ (\ref{cor2kost}) is full-dimensional in ${\mathfrak{a}}$. \end{lab}

When studying the orbitope ${\mathscr{O}}_x$, we can obviously assume that ${\mathscr{O}}_x$ is full-dimen\-sional (or equivalently, $0$ is an interior point of ${\mathscr{O}}_x$), by the previous discussion.

\begin{prop}\label{oxoorbits} Let $x\in{\mathfrak{p}}$ be such that ${\mathscr{O}}_x$ is full-dimensional, and let ${\mathscr{O}}_x^o\subseteq{\mathfrak{p}}$ be the associated coorbitope. The $K$-orbits of extreme points of ${\mathscr{O}}_x^o$ are in natural bijective correspondence with the $W$-orbits of facets of the polytope
$P_x$. In particular, ${\mathscr{O}}_x^o$ is the convex hull of finitely many $K$-orbits in~${\mathfrak{p}}$. \end{prop}
\marginnote{\bf??}
\marginnote{\bf??}\begin\marginnote{\bf??}{proof\marginnote{\bf??}}\marginnote{\bf??} First\marginnote{\bf??} recall\marginnote{\bf??} the\marginnote{\bf??} following\marginnote{\bf??} general\marginnote{\bf??} and\marginnote{\bf??} easy\marginnote{\bf??} fact\marginnote{\bf??} \marginnote{\bf??}(see\marginnote{\bf??} \marginnote{\bf??}\cite\marginnote{\bf??}[2\marginnote{\bf??}.1\marginnote{\bf??}.4\marginnote{\bf??}]\marginnote{\bf??}{sn\marginnote{\bf??}}\marginnote{\bf??},\marginnote{\bf??} for\marginnote{\bf??} example\marginnote{\bf??})\marginnote{\bf??}.\marginnote{\bf??} Let\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathscr\marginnote{\bf??}{O\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}\subseteq\marginnote{\bf??}{\marginnote{\bf??}\mathbb\marginnote{\bf??}{R\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}^n\marginnote{\bf??}$\marginnote{\bf??} be\marginnote{\bf??} any\marginnote{\bf??} compact\marginnote{\bf??} convex\marginnote{\bf??} body\marginnote{\bf??} which\marginnote{\bf??} contains\marginnote{\bf??} a\marginnote{\bf??} neighborhood\marginnote{\bf??} of\marginnote{\bf??}~\marginnote{\bf??}$0\marginnote{\bf??}$\marginnote{\bf??},\marginnote{\bf??} and\marginnote{\bf??} let\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathscr\marginnote{\bf??}{O\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}^o\marginnote{\bf??}\subseteq\marginnote{\bf??}{\marginnote{\bf??}\mathbb\marginnote{\bf??}{R\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}^n\marginnote{\bf??}$\marginnote{\bf??} be\marginnote{\bf??} the\marginnote{\bf??} convex\marginnote{\bf??} body\marginnote{\bf??} polar\marginnote{\bf??} to\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}{\marginnote{\bf??}\mathscr\marginnote{\bf??}{O\marginnote{\bf??}}\marginnote{\bf??}}\marginnote{\bf??}$\marginnote{\bf??}.\marginnote{\bf??} For\marginnote{\bf??} any\marginnote{\bf??} face\marginnote{\bf??} 
$F$ of ${\mathscr{O}}$ let $\wh F=\{y\in{\mathscr{O}}^o\colon\forall\, x\in F$ $\bil xy=1\}$. Then $\wh F$ is an exposed face of ${\mathscr{O}}^o$, and $F\mapsto\wh F$ restricts to an inclusion-reversing bijection between exposed faces of ${\mathscr{O}}$ and exposed faces of
${\mathscr{O}}^o$, with inverse map $G\mapsto\wh G$.

To prove the proposition we can assume $x\in C$. Let $Fac(x)$ be a set of representatives of the $W$-orbits of facets of $P_x$. Let $z$ be an exposed extreme point of ${\mathscr{O}}_x^o=\{y\in{\mathfrak{p}}\colon\forall\, g\in K$ $\bil{gx}y\le1\}$, and write $G_z:=\wh z=\{y\in{\mathscr{O}}_x\colon \bil yz=1\}$. By the fact just recalled, $G_z$ is a maximal face of ${\mathscr{O}}_x$, and so $G_z=gF_Q$ for some $Q\in Fac(x)$ and some $g\in K$ (\ref{orbitcorr}, \ref{maxpropfac}). It is easily checked that $G_{hz}=hG_z$ for any
$h\in K$. If $u\in{\mathscr{O}}_x^o$ is another exposed extreme point, and if $G_u=hF_Q$ for some $h\in K$, then $G_{gh^{-1}u}=gh^{-1}G_u=G_z$, whence $gh^{-1}u=z$, so $u$ and $z$ are $K$-conjugate. This shows that the exposed extreme points of
${\mathscr{O}}_x^o$ consist of finitely many $K$-orbits, each of them corresponding to a different $W$-orbit of facets of $P_x$. Since exposed extreme points are dense within all extreme points (Straszewicz' theorem, e.g.\ \cite{sn} 1.4.7), we conclude that all extreme points of ${\mathscr{O}}_x^o$ are exposed.

For each facet $Q$ of $P_x$ we claim conversely that $\wh{F_Q}$ is an (exposed) extreme point of ${\mathscr{O}}_x^o$. Indeed, otherwise $\wh{F_Q}$ would be a minimal exposed face of ${\mathscr{O}}_x^o$ of dimension $\ge1$. But such a face cannot exist, since all extreme points of
${\mathscr{O}}_x^o$ are exposed. Altogether we have proved the bijection between $K$-orbits of extreme points of ${\mathscr{O}}_x^o$ and $W$-orbits of facets of $P_x$.
\end{proof}

\begin{cor}\label{facetsconj}
Let $x\in{\mathfrak{p}}$. The coorbitope ${\mathscr{O}}_x^o$ is a $K$-orbitope itself if, and only if, all facets of the polytope $P_x$ are $W$-conjugate. \qed
\end{cor}

We will determine these cases explicitly in the next section, after having discussed the facets of $P_x$ in more detail.

Note that under the equivalent conditions of \ref{facetsconj}, the coorbitope ${\mathscr{O}}_x^o$ is a spectrahedron itself, by Theorem~\ref{connpolorbsp}. In Sect.~\ref{sec:doubly} we will uncover many more cases where this holds.


\section{Facets of the momentum polytope}

In the previous section, a close relation was established between the faces of the orbitope ${\mathscr{O}}_x$ and the faces of the momentum polytope $P_x$. The faces of the latter can be described in terms of root data. We start by recalling this description.

\begin{lab}
Let $(V,\Sigma)$ be an abstract root system (which may be non-reduced), fix an ordering $\le$ on $V$, and let $\Gamma=\{\beta_1,\dots,\beta_n\}$ be the corresponding system of simple positive roots. Let $\mu_1,\dots,\mu_n$ be the dual basis of $\Gamma$ in $V$, so
$\bil{\beta_i}{\mu_j}=\delta_{ij}$ for $1\le i,\,j\le n$. Let $W$ be the Weyl group, and let $C=\{x\in V\colon\bil{\beta_i}x\ge0$, $1\le i\le n\}$, the closed Weyl chamber associated to $\Gamma$.

Let $x\in C$ be a given point and write $P_x=\conv(Wx)$. A subset $I\subseteq\Gamma$ is said to be \emph{$x$-connected} if every connected component of $I$ contains a root $\beta$ with $\bil\beta x\ne0$. (Of course, connectedness notions refer to the Dynkin graph.) Let $W_I$ be the subgroup of $W$ generated by the root
reflections $s_\beta$ where $\beta\in I$. The following result is quoted from Casselman \cite[Theorem 3.1]{ca}, where it is proved in the more general context of arbitrary finite Coxeter groups. As Casselman remarks, the result is already implicit in much older work of Satake \cite{sat} and Borel--Tits \cite{boti}. A related discussion can also be found in \cite[\S6]{bgh2} and
\cite[\S4]{bgh1}.
\end{lab}

\begin{thm}\label{facmompol}
Let $x\in C$. The map $I\mapsto\conv(W_Ix)$ induces a bijection between the $x$-connected subsets $I$ of\/ $\Gamma$ and the $W$-orbits of faces of $P_x=\conv(Wx)$. For any such $I$ one has
$\dim\conv(W_Ix)=|I|$.
\end{thm}

Here we are mainly interested in the facets of $P_x$. Assume that $P_x$ is full-dimensional in $V$, or equivalently, that every connected component of $\Gamma$ contains a root $\beta$ with $\bil\beta x\ne0$ (see \ref{fulldim}). For facets the theorem gives:

\begin{cor}\label{facetsmompol}
Let $x\in C$ such that $P_x$ is full-dimensional, and let $I(x)$ denote the set of indices $i\in\{1,\dots,n\}$ for which $\Pi\smallsetminus\{\beta_i\}$ is $x$-connected. For $i\in I(x)$ let
\[
P_x(i):=\{y\in P_x\colon\mu_i(y)=\mu_i(x)\}.
\]
Then $P_x(i)$ is a facet of $P_x$. Conversely, every facet of $P_x$ is $W$-conjugate to $P_x(i)$ for a unique index $i\in I(x)$.
\end{cor}

\begin{proof}
Let $i\in I(x)$. Clearly, $P_x(i)$ is a face of $P_x$, and is proper since $P_x$ is full-dimensional. Let $W':=W_{\Pi\smallsetminus\beta_i}=\langle s_\beta\colon\beta\in\Pi$, $\beta\ne\beta_i\rangle$. Then $\conv(W'x)\subseteq P_x(i)$ holds since
$\mu_i(s_{\beta_j}(y))=\mu_i(y)$ for every $j\ne i$. By Theorem \ref{facmompol}, $\conv(W'x)$ is a facet of $P_x$, so we have equality. The remaining assertion follows directly from Theorem \ref{facmompol} as well.
\end{proof}

We will also use the following (well-known) fact:

\begin{lem}\label{stripos} Let $x\in C$ such that $P_x$ is full-dimensional. Then $\mu_i(x)>0$ for every $i=1,\dots,n$. \end{lem}

\begin{proof} It is enough to prove this in the case where the root system is simple and $0\ne x\in C$. Since $\beta_j(x)\ge0$ for all $j=1,\dots,n$ and $\beta_j(x)>0$ for at least one~$j$, the lemma follows from the fact that the inverse of the Cartan matrix has strictly positive coefficients \cite{luti}. \end{proof}

\begin{lab}\label{polarpoints} Now again consider the adjoint representation of $K$ on~${\mathfrak{p}}$. We apply Corollary \ref{facetsmompol} to the system $\Sigma$ of restricted roots of $({\mathfrak{g}},{\mathfrak{a}})$. In this way we are going to identify explicitly the $K$-orbits of extreme points of the coorbitope ${\mathscr{O}}_x^o$ (see Proposition
\ref{oxoorbits}).

Let $x\in C\subseteq{\mathfrak{a}}$ such that $P_x$ is full-dimensional, and let $I(x)=\{i\in\{1,\dots,n\}\colon\Gamma\smallsetminus\{\beta_i\}$ is $x$-connected$\}$ (as in \ref{facetsmompol}). For every $i\in I(x)$ we have the facet $P_x(i)=\{y\in
P_x\colon\mu_i(y)=\mu_i(x)\}$ of $P_x$. For easier notation, let us write $F_i$ instead of $F_{P_x(i)}=\{y\in{\mathscr{O}}_x\colon\pi(y)\in P_x(i)\}$. By Theorem \ref{orbitcorr}, $F_i$ is a maximal proper face of ${\mathscr{O}}_x$, and every maximal proper
face is $K$-conjugate to $F_i$ for a unique index $i\in I(x)$ (using also \ref{facetsmompol}). Given $i\in I(x)$, there is a unique extreme point $z_i$ of ${\mathscr{O}}_x^o$ that corresponds to $F_i$ under polarity, characterized by $F_i=\wh{z_i}=\{y\in{\mathscr{O}}_x\colon\bil
y{z_i}=1\}$ (see proof of \ref{oxoorbits}). The points $z_i$, for $i\in I(x)$, represent the pairwise different $K$-orbits of extreme points in ${\mathscr{O}}_x^o$. Using \ref{facetsmompol} we identify these points as follows.

Let $i\in I(x)$, let $h_{\mu_i}\in{\mathfrak{a}}$ be the element satisfying $\bil{h_{\mu_i}}y=\mu_i(y)$ for all $y\in{\mathfrak{a}}$. Note that $C$ is the cone generated by $h_{\mu_1},\dots,h_{\mu_n}$. By Lemma \ref{stripos} we have $\mu_i(x)>0$. We claim that
$z_i=h_{\mu_i}/\mu_i(x)$.

Indeed, the element $z:=h_{\mu_i}/\mu_i(x)$ satisfies $\bil yz=\bil{\pi(y)}z=\mu_i(\pi(y))/\mu_i(x)$ for all $y\in{\mathscr{O}}_x$. Since $\pi(y)\in P_x$ (\ref{cor2kost}), this shows $\bil yz\le1$, with equality if and only if $\pi(y)\in
P_x(i)$. So we have proved: \end{lab}

\begin{cor}\label{orbitsoxo} Let $x\in C$ such that $P_x$ is full-dimensional. The coorbitope ${\mathscr{O}}_x^o\subseteq{\mathfrak{p}}$ is the convex hull of the union of the $K$-orbits of the elements $z_i=h_{\mu_i}/\mu_i(x)\in C$, for $i$ running through $I(x)$. \qed \end{cor}

A particularly interesting case arises when $|I(x)|=1$, i.e.\ the polytope $P_x$ has only one $W$-orbit of facets. By \ref{facetsconj} it is equivalent that the coorbitope ${\mathscr{O}}_x^o$ is a $K$-orbitope itself. We will say that ${\mathscr{O}}_x$ is a ($K$-) \emph{biorbitope} in this case, and
we can characterize it as follows:

\begin{thm}\label{biorbitop} Let $0\ne x\in C$ such that $P_x$ is full-dimensional. Then ${\mathscr{O}}_x$ is a $K$-biorbitope if, and only if, the Lie algebra ${\mathfrak{g}}$ is simple, the restricted root system $\Gamma$ is not of type $D_n$ ($n\ge4$) or $E_n$
($n=6,7,8$), and the following holds: There is a simple restricted root $\beta\in\Gamma$ with $\gamma(x)=0$ for all $\gamma\in\Gamma\smallsetminus\{\beta\}$, and such that $\Gamma\smallsetminus\{\beta\}$ is connected. \end{thm}

In other words, the condition is saying that $\beta(x)\ne0$ for only one simple restricted root $\beta$, that $\beta$ sits at an end of the restricted Dynkin graph $\Gamma$, and that $\Gamma$ has at most one other end. In \ref{biorb1} and \ref{biorb2} we'll make all biorbitopes explicit for the classical Lie algebras.

\begin{proof} By Corollary \ref{orbitsoxo}, ${\mathscr{O}}_x$ is a biorbitope if and only if $P_x$ has only one $W$-orbit of facets. One sees immediately that this can hold only when the restricted root system $\Sigma$ is irreducible. Therefore we may assume that the Lie algebra ${\mathfrak{g}}$ is simple.

Let $\Gamma_1=\{\beta\in\Gamma\colon\beta(x)\ne0\}$. We say that $\beta\in\Gamma$ is a boundary root if $\Gamma\smallsetminus\{\beta\}$ is connected. If $\Gamma_1$ contains a non-boundary root then $P_x$ has two non-conjugate facets. Indeed, choose two different boundary roots $\beta_i$ and $\beta_j$. Then
$P_x(i)$ and $P_x(j)$ are both facets of $P_x$, and are not $W$-conjugate, according to Corollary \ref{facetsmompol}. Exactly the same argument works if $\Gamma_1$ contains two different boundary roots $\beta_i$, $\beta_j$.

So\marginnote{\bf??} all\marginnote{\bf??} facets\marginnote{\bf??} of\marginnote{\bf??} \marginnote{\bf??}$P\marginnote{\bf??}_x\marginnote{\bf??}$\marginnote{\bf??} can\marginnote{\bf??} only\marginnote{\bf??} be\marginnote{\bf??} \marginnote{\bf??}$W\marginnote{\bf??}$\marginnote{\bf??}-conjugate\marginnote{\bf??} if\marginnote{\bf??} \marginnote{\bf??}$\marginnote{\bf??}\Gamma\marginnote{\bf??}_1\marginnote{\bf??}$\marginnote{\bf??} consists\marginnote{\bf??} of\marginnote{\bf??} just\marginnote{\bf??} one\marginnote{\bf??} single\marginnote{\bf??} boundary\marginnote{\bf??} root\marginnote{\bf??}.\marginnote{\bf??} Conversely\marginnote{\bf??},\marginnote{\bf??} if\marginnote{\bf??} this\marginnote{\bf??} is\marginnote{\bf??} the\marginnote{\bf??} case\marginnote{\bf??} then\marginnote{\bf??} the\marginnote{\bf??} conjugacy\marginnote{\bf??} classes\marginnote{\bf??} of\marginnote{\bf??} facets\marginnote{\bf??} of\marginnote{\bf??} \marginnote{\bf??}$P\marginnote{\bf??}_x\marginnote{\bf??}$\marginnote{\bf??} correspond\marginnote{\bf??} precisely\marginnote{\bf??} to\marginnote{\bf??} the\marginnote{\bf??} remaining\marginnote{\bf??} boundary\marginnote{\bf??} roots\marginnote{\bf??}.\marginnote{\bf??} This\marginnote{\bf??} proves\marginnote{\bf??} the\marginnote{\bf??} equivalence\marginnote{\bf??} in\marginnote{\bf??} the\marginnote{\bf??} theorem\marginnote{\bf??},\marginnote{\bf??} since\marginnote{\bf??} \marginnote{\bf??}$D\marginnote{\bf??}_n\marginnote{\bf??}$\marginnote{\bf??} \marginnote{\bf??}(\marginnote{\bf??}$n\marginnote{\bf??}\ge4\marginnote{\bf??}$\marginnote{\bf??})\marginnote{\bf??} and\marginnote{\bf??} \marginnote{\bf??}$E\marginnote{\bf??}_n\marginnote{\bf??}$\marginnote{\bf??} \marginnote{\bf??}(\marginnote{\bf??}$n\marginnote{\bf??}=6\marginnote{\bf??},7\marginnote{\bf??},8\marginnote{\bf??}$\marginnote{\bf??})\marginnote{\bf??} are\marginnote{\bf??} precisely\marginnote{\bf??} the\marginnote{\bf??} simple\marginnote{\bf??} 
root\marginnote{\bf??} systems\marginnote{\bf??} with\marginnote{\bf??} more\marginnote{\bf??} than\marginnote{\bf??} two\marginnote{\bf??} boundary\marginnote{\bf??} roots\marginnote{\bf??}.\marginnote{\bf??} \marginnote{\bf??}\end\marginnote{\bf??}{proof\marginnote{\bf??}}\marginnote{\bf??}

\begin{example}\label{dn2orb}
Let $n\ge4$ and ${\mathfrak{g}}=so(n,n)$. For the description of ${\mathfrak{a}}$, $C$, the $\alpha_i$ and $\lambda_i$ see \ref{sonn}. The restricted root system is of type $D_n$. If we take $x=(1,\dots,1)$ as in \ref{sonrem}, we have
$\alpha_i(x)=0$ for all $i\ne n$, so $I(x)=\{n\}$ is a singleton set. Yet $P_x$ has two $W$-orbits of facets, represented by the facets $P_x(1)=\{y\in P_x\colon y_1=1\}$ and $P_x(n-1)=\{y\in P_x\colon
y_1+\cdots+y_{n-1}-y_n=n-2\}$. Hence the orbitope ${\mathscr{O}}_x=\conv SO(n)$ has two $K$-orbits of maximal dimensional faces, a fact already proved in \cite[Theorem 4.11]{sss}. This means that the coorbitope ${\mathscr{O}}_x^o=SO(n)^o$ is not an orbitope, rather
$$SO(n)^o\>=\>\conv(Kz_1\cup Kz_{n-1})$$ by Corollary \ref{orbitsoxo}, where $z_1=(1,0,\dots,0)$ and $z_{n-1}=\frac1{n-2}(1,\dots,1,-1)$ (notation as in \ref{sonn} and \ref{sonrem}). A similar remark applies when $x=(1,0,\dots,0)$ (here
${\mathscr{O}}_x$ is the unit ball of the nuclear norm on $M_n({\mathbb{R}})$, see \ref{expqexcept}) and when $x=(1,\dots,1,-1)$ (here ${\mathscr{O}}_x$ is the convex hull of $O^{\scriptscriptstyle-}(n)$, which is of course linearly isomorphic to $\conv SO(n)$).
\end{example}

Remarkably, whenever ${\mathscr{O}}_x$ is a biorbitope, the coorbitope ${\mathscr{O}}_x^o$ is a positive scaling of ${\mathscr{O}}_x$:

\begin{thm}\label{oxovsox}
Let ${\mathfrak{g}}$ be simple and $0\ne x\in C$, and assume that ${\mathscr{O}}_x$ is a biorbitope, i.e.\ $|I(x)|=1$. Then there is a real number $c>0$ such that ${\mathscr{O}}_x^o=c\cdot{\mathscr{O}}_x$.
\end{thm}

\begin{proof}
We have $I(x)=\{i\}$ where $\beta_i\in\Gamma$ is a boundary root (Theorem \ref{biorbitop}). By Corollary \ref{orbitsoxo}, ${\mathscr{O}}_x^o$ is the convex hull of $Kz_i$ where $z_i=h_{\mu_i}/\mu_i(x)$. The element $x$ itself is a scalar multiple of
$h_{\mu_i}$ since $\beta_j(x)=0$ for all $\beta_j\in\Gamma\smallsetminus\{\beta_i\}$. More precisely $x=\beta_i(x)h_{\mu_i}$, since both elements give the same value under every $\beta_j$. This implies $\mu_i(x)=\beta_i(x)\mu_i(h_{\mu_i})=
\beta_i(x)\,|\mu_i|^2$. So $z_i=h_{\mu_i}/\mu_i(x)=
x/\beta_i(x)^2|\mu_i|^2$, and therefore
$${\mathscr{O}}_x^o\>=\>\frac1{\beta_i(x)^2\cdot|\mu_i|^2}\,{\mathscr{O}}_x.$$
\end{proof}



\section{Doubly spectrahedral orbitopes}\label{sec:doubly}

Following Saunderson, Parrilo and Willsky \cite{spw} we use the term \emph{doubly spectrahedral convex sets} to refer to convex sets $S$ in ${\mathbb{R}}^n$ for which both $S$ and the polar convex set $S^o$ are spectrahedra. As remarked in \cite{spw}, it is a very special phenomenon that the polar set of a spectrahedron is again a spectrahedron. Apart from polyhedra (which have
this property for obvious reasons) it seems that only one other distinct family of doubly spectrahedral convex sets is known, namely the homogeneous convex cones (Vinberg \cite{vi} and Chua \cite{ch}, see \cite[6.1]{spw}). In addition, the convex hull of the matrix group $SO(n)$ is doubly spectrahedral for every $n\ge1$, by the main theorem of \cite{spw}. In fact, explicit
spectrahedral representations for both $\conv SO(n)$ and $SO(n)^o$ were constructed in \cite{spw}.

Below we show that all polar orbitopes ${\mathscr{O}}_x$ with ``rational coordinates'' are doubly spectrahedral as well. Moreover we will give explicit linear matrix inequality representations for those orbitopes and their polars. As a particular case, we recover the results from \cite{spw}, see Remark \ref{spwrem} below.

Let ${\mathfrak{g}}={\mathfrak{k}}\oplus{\mathfrak{p}}$ be a real semisimple Lie algebra with Cartan decomposition, and consider the adjoint representation of $K$ on ${\mathfrak{p}}$ as before. We use notation and conventions from Section \ref{lienot}. In particular, ${\mathfrak{a}}$ is a maximal abelian subspace of ${\mathfrak{p}}$, and $C\subseteq{\mathfrak{a}}$ is the
Weyl chamber with respect to the fixed ordering on~${\mathfrak{a}}$. As before, let $\Gamma=\{\beta_1,\dots,\beta_n\}\subseteq{\mathfrak{a}}^{\scriptscriptstyle\vee}$ be the simple positive restricted roots.

\begin{dfn}
Given $x\in{\mathfrak{a}}$, we say that the $K$-orbitope ${\mathscr{O}}_x=\conv(Kx)$ has \emph{rational coordinates} if there is $b\in{\mathbb{R}}$ such that $\beta_j(x)\in{\mathbb{Q}} b$ for $j=1,\dots,n$.
\end{dfn}

Since any two choices of ${\mathfrak{a}}\subseteq{\mathfrak{p}}$ are conjugate under $K$ \cite[6.51]{kn}, and since every $K$-orbit in ${\mathfrak{p}}$ intersects ${\mathfrak{a}}$ in one full $W$-orbit (Corollary \ref{cor3kost}), the property of having rational coordinates depends only on the orbit $Kx$, and neither on the choice of ${\mathfrak{a}}$ nor
on the particular choice of a representative of $Kx$ in~${\mathfrak{a}}$.

\begin{thm}\label{doublspect}
Let ${\mathscr{O}}_x$ be a polar orbitope with rational coordinates. Then both ${\mathscr{O}}_x$ and ${\mathscr{O}}_x^o$ are spectrahedra.
\end{thm}
\marginnote{\bf??}
For ${\mathscr{O}}_x$, a spectrahedral representation has been given in Theorem \ref{polorbspekt}. In \ref{lmioxo} below we explain how to find one for ${\mathscr{O}}_x^o$. Explicit descriptions of these orbitopes are contained in the next section, c.f.\ Remark \ref{doublspectex}.


Given $x\in{\mathfrak{a}}$, let $P_x=\conv(Wx)$ be the momentum polytope of $x$ as before, and let $P_x^o$ be the polar set of $P_x$ in ${\mathfrak{a}}$, i.e. $$P_x^o\>=\>\{y\in{\mathfrak{a}}\colon\forall\, w\in W\ \bil{wx}{y}\le1\}.$$ We have the following lemma:

\begin{lem}\label{lab7} If $x\in C$ then $P_x^o\cap C=\{y\in C\colon\bil xy\le1\}$. \end{lem}

\begin{proof} If $y\in C$ then $\bil{wx}y\le\bil xy$ for every $w\in W$ \cite[Lemma 3.2]{ko}. Therefore, if $\bil xy\le1$ then $y\in P_x^o$. The opposite inclusion is trivial from the definition. \end{proof}

Recall that $\pi\colon{\mathfrak{p}}\to{\mathfrak{a}}$ denotes orthogonal projection to ${\mathfrak{a}}$. Forming the polar convex body commutes with projection to (or intersection with)~${\mathfrak{a}}$:

\begin{lem}\label{lab3} Let ${\mathscr{O}}\subseteq{\mathfrak{p}}$ be a $K$-invariant convex set, and let $Q:={\mathscr{O}}\cap{\mathfrak{a}}=\pi({\mathscr{O}})$ (\ref{cor2kost}). Then $\pi({\mathscr{O}}^o)={\mathscr{O}}^o\cap{\mathfrak{a}}=Q^o$ (the polar set of $Q$ in
${\mathfrak{a}}$). \end{lem}

\begin{proof} For $y\in{\mathfrak{a}}$ and $z\in{\mathfrak{p}}$ we have $\bil y{\pi(z)}=\bil yz$. From this the lemma follows immediately. \end{proof}

\begin{lab} \textsc{Proof} of Theorem \ref{doublspect}. We can assume $x\in C$. Since $x$ has rational coordinates we can assume $\beta_j(x)\in{\mathbb{Q}}$ for $j=1,\dots,n$, after scaling $x$ with a suitable positive real number. So there exist rational numbers $c_j\ge0$ such that $\bil
xy=\sum_{j=1}^nc_j\mu_j(y)$ for all $y\in{\mathfrak{a}}$ (namely $c_j=\beta_j(x)$). Hence, and by Lemma \ref{restrfundgew}, there are an integer $k\ge1$ and an integral dominant weight $\omega\in{\mathfrak{h}}^{\scriptscriptstyle\vee}$ of
$({\mathfrak{g}}_{\mathbb{C}},{\mathfrak{h}}_{\mathbb{C}})$ such that $k\cdot\bil xy=\omega(y)$ for all $y\in{\mathfrak{a}}$. By the highest weight theorem, there is an irreducible representation $\rho$ of ${\mathfrak{g}}_{\mathbb{C}}$ with highest weight $\omega$. From Lemmas \ref{lab7} and
\ref{lab3} we get \begin{equation}\label{polarcapc} {\mathscr{O}}_x^o\cap C\>=\>P_x^o\cap C\>=\>\{y\in C\colon\bil xy\le1\} \end{equation} We claim that \eqref{polarcapc} implies \begin{equation}\label{claim} {\mathscr{O}}_x^o\>=\>\{y\in{\mathfrak{p}}\colon\rho(y)\preceq
k\cdot\mathrm{id}\}. \end{equation} Indeed, both sets in \eqref{claim} are $K$-invariant, so it suffices to check that their intersections with $C$ coincide. For $y\in C$ the largest eigenvalue of $\rho(y)$ is $\omega(y)=k\bil xy$ (Lemma \ref{ewprelmaxev}(b)). So \eqref{claim} follows indeed from \eqref{polarcapc}, and the
theorem is proved. \end{lab}

\begin{rem}\label{lmioxo} The highest weights of irreducible representations of $({\mathfrak{g}}_{\mathbb{C}},{\mathfrak{h}}_{\mathbb{C}})$
are the nonnegative integral linear combinations of the fundamental weights $\omega_i=\frac12|\alpha_i|^2\lambda_i$ ($i=1,\dots,l$). The restriction of $\omega_i\in{\mathfrak{h}}^{\scriptscriptstyle\vee}$ to ${\mathfrak{a}}$ is $0$ if $r(\alpha_i)=0$, and is $\frac1{2m}|\alpha_i|^2\mu_j$ if
$r(\alpha_i)=\beta_j\ne0$, where $m\in\{1,2\}$ is the number of simple roots in $\Pi$ that restrict to $\beta$ (Lemma \ref{restrfundgew}). Since the
$|\alpha_i|^2$ are explicit rational numbers, we see how to find, for given $x\in{\mathfrak{a}}$ with rational coordinates, a real number $c>0$ and an integral dominant weight $\omega$ of $({\mathfrak{g}}_{\mathbb{C}},{\mathfrak{h}}_{\mathbb{C}})$ such that $\bil{cx}y=\omega(y)$ for all
$y\in{\mathfrak{a}}$. \end{rem}

\begin{example}\label{spwrem} We illustrate the previous remark with the example already studied in \ref{sonn}, so consider the action of $K=SO(n)\times SO(n)$ on $M_n({\mathbb{R}})$ for $n\ge3$. We take the identity matrix $x=I_n$ as in \ref{sonrem} and are looking for a linear matrix inequality description of the coorbitope
${\mathscr{O}}_x^o=(\conv SO(n))^o$. The orbitope ${\mathscr{O}}_x$ has rational coordinates since $\alpha_i(x)=0$ for $1\le i<n$ and $\alpha_n(x)=2$ (see \ref{sonn}). Since $\bil xy=\sum_{i=1}^ny_i=2\lambda_n(y)$ for $y\in{\mathfrak{a}}$, the procedure in
\ref{lmioxo} leads to the spectrahedral representation $$\bigl(\conv SO(n)\bigr)^o\>=\>\Bigl\{y\in M_n({\mathbb{R}})\colon\rho_n \begin{pmatrix}0&y\\ y^t&0\end{pmatrix}\preceq\frac12\,\mathrm{id}\Bigr\}$$ where $\rho_n$ is the $n$-th fundamental representation. This is in accordance with Saunderson, Parrilo
and Willsky \cite[Theorem 1.1]{spw}, c.f.\ the remark in \ref{sonrem}.

For $n=3$, ${\mathscr{O}}_x^o$ is a $K$-orbitope itself. For $n\ge4$, ${\mathscr{O}}_x^o$ is the convex hull of two $K$-orbits, but not of one (Example \ref{dn2orb}). \end{example}



\section{Examples}\label{sect:exs}

We describe all irreducible polar representations that arise from semisimple Lie algebras of classical type. Roughly, these are the well-known unitary group actions on rectangular matrices, and on (skew-) hermitian resp.\ (skew-) symmetric square matrices, over ${\mathbb{K}}={\mathbb{R}},\,{\mathbb{C}}$ or ${\mathbb{H}}$, where ${\mathbb{H}}$ is
the skew-field of Hamilton quaternions. (For ${\mathbb{K}}={\mathbb{H}}$ there is no action on (skew-) symmetric matrices.) In each case we mention a standard choice of a maximal abelian subspace ${\mathfrak{a}}$ and of a Weyl chamber $C$. Using Kostant's results, in particular Proposition \ref{kost33} and Corollary \ref{cor2kost}, this allows us to give explicit descriptions of
the respective orbitopes in all cases. Naturally, this uses the description of the (reduced) root systems and of the fundamental weights, for which there are many references (e.g.\ \cite{kn}, \cite{ti}, \cite{lie3}). We will see that the corresponding orbitopes can be described in terms of Ky Fan norm balls, which in turn are defined using singular values of matrices.

\begin{lab} First recall the singular value decomposition. Let always ${\mathbb{K}}$ be one of ${\mathbb{R}}$, ${\mathbb{C}}$ or ${\mathbb{H}}$, and let $U(n,{\mathbb{K}})$ denote the unitary group over ${\mathbb{K}}$, i.e.\ $U(n,{\mathbb{K}})=\{g\in
M_n({\mathbb{K}})\colon gg^*=I_n\}$ where $g^*=\overline g^t$. So $U(n,{\mathbb{R}})=O(n)$ is the real orthogonal group, $U(n,{\mathbb{C}})=U(n)$ is the usual (complex) unitary group and $U(n,{\mathbb{H}})=Sp(n)$ is the symplectic group. Given a
(rectangular) matrix $x\in M_{m\times n}({\mathbb{K}})$ where $m\ge n$, there exist unitary matrices $u\in U(m,{\mathbb{K}})$ and $v\in U(n,{\mathbb{K}})$ such that $uxv^*=\mathrm{diag}(a_1,\dots,a_n)$ with real numbers $a_1\ge\cdots\ge a_n\ge0$. The $a_i$
are uniquely determined by $x$, they are called the \emph{singular values} of~$x$ and denoted $\sigma_i(x):=a_i$ ($1\le i\le n$). For ${\mathbb{K}}={\mathbb{R}}$ or ${\mathbb{C}}$ this is classical (e.g.\ \cite[2.6]{hj}), here $a_1^2,\dots,a_n^2$ are the
eigenvalues of the psd hermitian matrix $x^*x$. For ${\mathbb{K}}={\mathbb{H}}$, essentially the same is true (with eigenvalues replaced by right eigenvalues), but less well-known; see \cite[7.2]{zh} and \cite[5.7]{fp} for details.
\end{lab}

\begin{lab}\label{kyfanorm} Let ${\mathbb{K}}={\mathbb{R}},\,{\mathbb{C}}$ or ${\mathbb{H}}$, and let $V=M_{m\times n}({\mathbb{K}})$ with $m\ge n$. For $k=1,\dots,n$ and $x\in V$ let
$$\Vert x\Vert_k\>:=\>\sigma_1(x)+\cdots+\sigma_k(x),$$
sum of the $k$ largest singular values of $x$. This defines a matrix norm on the space of matrices, the \emph{$k$-th Ky Fan norm} (\cite{fa} and \cite{hj}, 7.4.8 and 7.4.10). In particular, all balls with respect to any of these norms are convex.
Note that the first Ky Fan norm $\Vert x\Vert_1=\sigma_1(x)$ is the operator norm of $x$. The last one $\Vert x\Vert_n=\sum_{i=1}^n \sigma_i(x)$ is called the nuclear norm and often denoted $\Vert x\Vert_*$.
\end{lab}

In view of Remark \ref{dirprod}, we restrict our discussion of classical polar orbitopes to orbitopes that arise from simple real Lie algebras ${\mathfrak{g}}$ of classical type.

\begin{example}\label{expqnonexcept} Let $m\ge n\ge1$. Consider $K=SO(m)\times SO(n)$ (case ${\mathbb{K}}={\mathbb{R}}$) resp.\ $K=S(U(m)\times U(n))$ (case ${\mathbb{K}}={\mathbb{C}}$) resp.\ $K=Sp(m)\times Sp(n)$ (case
${\mathbb{K}}={\mathbb{H}}$), together with the action of $(u,v)\in K$ on $x\in V=M_{m\times n}({\mathbb{K}})$ by $uxv^*$. This is a polar representation, arising from the simple Lie algebra ${\mathfrak{g}}=so(m,n)$ resp.\ ${\mathfrak{g}}=su(m,n)$ resp.\
${\mathfrak{g}}=sp(m,n)$ (assume $m+n\ne2,\,4$ if ${\mathbb{K}}={\mathbb{R}}$). For ${\mathfrak{a}}\cong{\mathbb{R}}^n$ we can take the space of real matrices that are diagonal in the upper $n$ rows and zero below. If $(a_1,\dots,a_n)$ is the diagonal part of such a matrix
$x$, let us write $x_i:=a_i$. The case ${\mathbb{K}}={\mathbb{R}}$ and ${\mathfrak{g}}=so(m,n)$ with $m=n$ is exceptional (see \ref{expqexcept} below), so let us first discard it. In all other cases the Weyl chamber $C$ consists of all $x\in{\mathfrak{a}}$ with $x_1\ge\cdots\ge
x_n\ge0$. Moreover, using Lemma \ref{kost33} we see that
$$P_x\cap C\>=\>\bigl\{y\in C\colon y_1+\cdots+y_k\le x_1+\cdots+x_k\ (k=1,\dots,n)\bigr\}.$$
If $x\in C$ then clearly $x_i=\sigma_i(x)$, the $i$-th singular value of~$x$. It follows for arbitrary $x\in V$ that
$${\mathscr{O}}_x\>=\>\bigl\{y\in V\colon\Vert y\Vert_k\le\Vert x\Vert_k\ (k=1,\dots,n)\bigr\}$$
since both sets are $K$-invariant and their intersections with $C$ coincide. So ${\mathscr{O}}_x$ is an intersection of balls with center $0$ with respect to the Ky Fan norms
$\Vert\cdot\Vert_k$ ($k=1,\dots,n$), the radii of the balls being the norms of~$x$. Note that the $K$-orbit $Kx$ consists of all matrices with the same singular values as~$x$.
\end{example}

\begin{example}\label{expqexcept} Now consider the exceptional case ${\mathbb{K}}={\mathbb{R}}$ and $m=n$ of the previous example, so we have the natural action of $K=SO(n)\times SO(n)$ on $V=M_n({\mathbb{R}})$. Here the Weyl chamber $C$ consists of all $x=\mathrm{diag}(x_1,\dots,x_n)\in{\mathbb{R}}^n$ with $x_1\ge\cdots\ge x_{n-1}\ge|x_n|$, and
$$P_x\cap C\>=\>\Bigl\{y\in C\colon\sum_{i=1}^k(x_i-y_i)\ge0\ (k=1,\dots,n-1),\ |x_n-y_n|\le\sum_{i=1}^{n-1}(x_i-y_i)\Bigr\}$$
Now the last diagonal entry $x_n$ of $x\in C$ coincides with the smallest singular value $\sigma_n(x)$ only up to sign. With similar reasoning as in \ref{expqnonexcept} we conclude
$${\mathscr{O}}_x\>=\>\bigcap_{k=1}^n\bigl\{y\colon\Vert y\Vert_k\le\Vert x\Vert_k\bigr\}\cap\Bigl\{y\colon\sigma_n(x)-\sigma_n(y)\le\Vert x\Vert_{n-1}-\Vert y\Vert_{n-1}\Bigr\}.$$
\end{example}

\begin{rem}\label{ovsso} (Orthogonal vs.\ special orthogonal group) Let $m\ge n$, let $K=SO(m)\times SO(n)$ and $K'=O(m)\times O(n)$. If $m>n$, it is easy to see that the $K'$-orbit of an arbitrary matrix $x\in M_{m\times n}({\mathbb{R}})$ coincides with the $K$-orbit of~$x$. For $m=n$ this is true if $\det(x)=0$, but otherwise $K'x$ is the union of two distinct $K$-orbits, as one sees from the determinant. A spectrahedral representation of the $K'$-orbitope ${\mathscr{O}}'_x:=\conv(K'x)$ was given in \cite[Theorem 4.7]{sss} in the case $m=n$ (and ${\mathscr{O}}'_x$ was called a \emph{Fan orbitope} there). Note that ${\mathscr{O}}'_x=\conv O(n)$ if $x=I_n$. Moreover, a spectrahedral representation of the coorbitope $(\conv O(n))^o$ was provided in \cite{sss} (Corollary 4.9). Both representations can easily be derived from our discussion of $K$-(co)orbitopes.
\end{rem}

\begin{example}\label{hermact} Next consider the actions of the classical compact simple Lie groups on hermitian matrices. Let $n\ge2$, and let $K=SO(n)$ (case ${\mathbb{K}}={\mathbb{R}}$) resp.\ $K=SU(n)$ (case ${\mathbb{K}}={\mathbb{C}}$) resp.\ $K=Sp(n)$ (case ${\mathbb{K}}={\mathbb{H}}$). Let $V=\{x\in M_n({\mathbb{K}})\colon x=x^*\}$, the space of hermitian $n\times n$-matrices over ${\mathbb{K}}$, and let $V_0=\{x\in V\colon\tr(x)=0\}$, where for ${\mathbb{K}}={\mathbb{H}}$ the trace condition has to be replaced by $\trd(x)=0$ (reduced trace). We let $g\in K$ act on $x\in V$ by $gxg^*$. Clearly $V_0$ is $K$-invariant, and $V=V_0\oplus{\mathbb{R}}$ as $K$-modules. The action of $K$ on $V_0$ is an irreducible polar representation, resulting from the simple Lie algebra ${\mathfrak{g}}=sl(n,{\mathbb{R}})$ resp.\ ${\mathfrak{g}}=su(n,{\mathbb{C}})$ resp.\ ${\mathfrak{g}}=sl(n,{\mathbb{H}})$. We let ${\mathfrak{a}}\cong{\mathbb{R}}^{n-1}$ be the space of all real diagonal matrices $x=(x_1,\dots,x_n)$ with trace zero.
The Weyl chamber is $C=\{x\in{\mathfrak{a}}\colon x_1\ge\cdots\ge x_n\}$, and for $x=(x_1,\dots,x_n)\in{\mathfrak{a}}$ we have
$$P_x\cap C\>=\>\Bigl\{y\in C\colon\sum_{i=1}^ky_i\le\sum_{i=1}^kx_i\ (k=1,\dots,n)\Bigr\}$$
The $K$-orbit $Kx$ consists of all hermitian matrices with the same eigenvalues as~$x$ (for ${\mathbb{K}}={\mathbb{H}}$ one has to speak of right eigenvalues instead of eigenvalues \cite{fp}).

In order to describe the $K$-orbitope ${\mathscr{O}}_x$ we replace $x\in V_0$ by $x'=x+cI\in V$, where $c\ge0$ is chosen such that $x'\succeq0$, i.e.\ $x'$ is positive semidefinite (\emph{psd}). Of course this doesn't change the orbitope up to an affine-linear isomorphism. So let $x\in V$ be a psd hermitian matrix. Then clearly $y\succeq0$ holds for every $y\in{\mathscr{O}}_x$, and the sequence $\sigma(y)=(\sigma_1(y),\dots,\sigma_n(y))$ of singular values coincides with the sequence of (right) eigenvalues for these~$y$. So we see that
$${\mathscr{O}}_x\>=\>\bigl\{y\in V\colon y\succeq0,\ \sigma(y)\trianglelefteq\sigma(x)\bigr\},$$
where for nonincreasing sequences $a,\,b\in{\mathbb{R}}^n$ the majorization relation $\trianglelefteq$ is defined by
$$(b_1,\dots,b_n)\>\trianglelefteq\>(a_1,\dots,a_n)\ :\Leftrightarrow\ \sum_{i=1}^kb_i\>\le\>\sum_{i=1}^ka_i$$
for $k=1,\dots,n$, with equality for $k=n$ \cite[4.3.41]{hj}. In terms of Ky Fan norms this says that ${\mathscr{O}}_x$ is the set of all $y\in V$ with $y\succeq0$ and $\Vert y\Vert_k\le\Vert x\Vert_k$ for $k=1,\dots,n-1$ and $\Vert y\Vert_n=\Vert x\Vert_n$ (provided that $x\succeq0$).
\end{example}

\begin{example}\label{unitskewherm}
Next let the unitary group over ${\mathbb{K}}$ act on skew-hermitian matrices over ${\mathbb{K}}$ by $gxg^*$. For ${\mathbb{K}}={\mathbb{C}}$ this is essentially the action of $SU(n)$ on hermitian matrices, already considered in \ref{hermact}, since a complex matrix $x$ is skew-hermitian if and
only if $ix$ is hermitian. For the remaining two cases we have ${\mathbb{K}}={\mathbb{R}}$ and $K=SO(n)$ (with $n\ge3$), or ${\mathbb{K}}={\mathbb{H}}$ and $K=Sp(n)$ (with $n\ge1$), and $V$ is the space of skew-hermitian ($x+x^*=0$) matrices over
${\mathbb{K}}$ of size~$n$. This is an irreducible polar representation of $K$, namely the adjoint action of $K$ on its Lie algebra $V=Lie(K)$ (so ${\mathfrak{g}}=Lie(K)_{\mathbb{C}}$ here.) A maximal abelian subspace can be described as follows. If ${\mathbb{K}}={\mathbb{R}}$ and
$K=SO(n)$, put $m:=\lfloor\frac n2\rfloor$ and let ${\mathfrak{a}}$ consist of all real block matrices $x=\begin{pmatrix}0&\tilde x\\ -\tilde x&0 \end{pmatrix}$ where $\tilde x=\mathrm{diag}(x_1,\dots,x_m)$; if $n$ is odd, an extra row (at the bottom) and column (at the right) of zeros has to be
added. If ${\mathbb{K}}={\mathbb{H}}$ and $K=Sp(n)$, let ${\mathfrak{a}}$ consist of all diagonal matrices $x=(ix_1,\dots,ix_n)$ with $x_1,\dots,x_n\in{\mathbb{R}}$.

To describe Weyl chamber and orbitopes, let first ${\mathbb{K}}={\mathbb{R}}$ and $K=SO(n)$. The Weyl chamber $C$ consists of all $x\in{\mathfrak{a}}$ with
$x_1\ge\cdots\ge x_m\ge0$ (case $n$ odd), resp.\ $x_1\ge\cdots\ge x_{m-1}\ge|x_m|$ (case $n$ even). The description of $P_x\cap C$, for $x\in C$, is analogous to \ref{expqnonexcept} resp.\ \ref{expqexcept}. The singular values of $x\in C$ are
$x_1,x_1,\dots,x_m,x_m$, with an extra zero if $n$ is odd. So we get ${\mathscr{O}}_x=\bigcap_{k=1}^m\bigl\{y\in V\colon \Vert y\Vert_{2k}\le\Vert x\Vert_{2k}\bigr\}$ for $n=2m+1$ odd, and
$${\mathscr{O}}_x=\bigcap_{k=1}^m\bigl\{y\colon\Vert y\Vert_{2k}\le \Vert x\Vert_{2k}\bigr\}\cap\Bigl\{y\colon\sigma_n(x)-\sigma_n(y)\le \Vert x\Vert_{n-2}-\Vert y\Vert_{n-2}\Bigr\}$$ for $n=2m$ even. Note that in either case, only the even Ky Fan norms are needed.

If ${\mathbb{K}}={\mathbb{H}}$ and $K=Sp(n)$, the Weyl chamber $C$ consists of all $x\in{\mathfrak{a}}$ with $x_1\ge\cdots\ge x_n\ge0$, and we find again ${\mathscr{O}}_x=\bigcap_{k=1}^n\bigl\{y\in V\colon\Vert y\Vert_k\le \Vert x\Vert_k\bigr\}$ for $x\in V$.
\end{example}

\begin{example}\label{unitcongr}
There remains the action of the complex unitary group $K=U(n)$ on $V=sym(n,{\mathbb{C}})$ resp.\ $V=so(n,{\mathbb{C}})$ (symmetric resp.\ skew-symmetric complex matrices) by $gxg^t$ ($g\in K$, $x\in V$). Again this is an irreducible polar representation that arises from the simple Lie algebra
${\mathfrak{g}}=sp(n,{\mathbb{R}})$ (for $V=sym(n,{\mathbb{C}})$) resp.\ ${\mathfrak{g}}=so^*(2n)$ (for $V=so(n,{\mathbb{C}})$).

First let $V=sym(n,{\mathbb{C}})$. A maximal abelian subspace ${\mathfrak{a}}$ consists of all real diagonal matrices $x=\mathrm{diag}(x_1,\dots,x_n)$, and the Weyl chamber is $C=\{x\in{\mathfrak{a}}\colon x_1\ge\cdots\ge x_n\ge0\}$. For $x\in C$ we have $P_x\cap C=\{y\in C\colon
y_1+\cdots+y_k\le x_1+\cdots+x_k$ $(k=1,\dots,n)\}$. Since $x_i=\sigma_i(x)$ for $x\in{\mathfrak{a}}$, we get $${\mathscr{O}}_x=\bigl\{y\in sym(n,{\mathbb{C}})\colon\Vert y\Vert_k\le \Vert x\Vert_k\ (k=1,\dots,n)\bigr\}.$$ In the
skew-symmetric case $V=so(n,{\mathbb{C}})$ let $m=\lfloor\frac n2 \rfloor$. For $n$ even, a maximal abelian subspace ${\mathfrak{a}}$ consists of all block matrices $x=\begin{pmatrix}0&\tilde x\\ -\tilde x&0 \end{pmatrix}$ with $\tilde x=\mathrm{diag}(x_1,\dots,x_m)$ a real diagonal matrix. For $n$ odd the
description is the same, except that one row (at the bottom) and one column (at the right) of zeros has to be added. In either case the Weyl chamber $C$ consists of all $x\in{\mathfrak{a}}$ with $x_1\ge\cdots\ge x_m\ge0$. For $x\in{\mathfrak{a}}$ the singular values of $x$ are $x_1,x_1,\dots,x_m,x_m$, together with an extra zero if
$n$ is odd. Once more we therefore find $${\mathscr{O}}_x=\bigl\{y\in so(n,{\mathbb{C}})\colon\Vert y\Vert_k\le \Vert x\Vert_k\ (k=1,\dots,n)\bigr\}.$$ The fact that any (skew-)symmetric complex matrix is unitarily congruent to a real matrix in ${\mathfrak{a}}$ as above is known as Youla's theorem
(see e.g.\ \cite[Theorem 4.4.9]{hj}).
\end{example}

\begin{rem}\label{doublspectex}
For all the examples from \ref{expqnonexcept} to \ref{unitcongr}, the following is true: The orbitope ${\mathscr{O}}_x$ is doubly spectrahedral, provided that all singular values of $x$ are rational numbers. This follows from Theorem \ref{doublspect}.
\end{rem}

\begin{rem}
As we have seen, most of the classical irreducible polar orbitopes are intersections of Ky Fan balls of matrices, possibly intersected with suitable linear spaces of matrices (like (skew-) symmetric or (skew-) hermitian). It is easy to see that every Ky Fan ball is a spectrahedron. This gives a second proof of Theorem \ref{connpolorbsp} in those cases where ${\mathscr{O}}_x$ is an intersection of such balls:
\end{rem}

\begin{prop}\label{kyfanballspectr}
Let ${\mathbb{K}}\in\{{\mathbb{R}},\,{\mathbb{C}},\,{\mathbb{H}}\}$ and $m\ge n$, let $1\le k\le n$, and let $\Vert\cdot\Vert_k$ denote the $k$-th Ky Fan norm on $M_{m\times n}({\mathbb{K}})$ (\ref{kyfanorm}). Then the unit ball
$$B_k\>:=\>\bigl\{x\in M_{m\times n}({\mathbb{K}})\colon\Vert x\Vert_k\le1\bigr\}$$
is a spectrahedron.
\end{prop}

\begin{proof}
Let $A\in M_N({\mathbb{C}})$ be a complex matrix with eigenvalues $\theta_1,\dots,\theta_N$. The $k$-th exterior power $\mathsf{\Lambda}^kA$ of $A$ is a square matrix of size $\binom{N}{k}$ that depends linearly on $A$, and whose eigenvalues are the sums $\theta_{i_1}+\cdots+\theta_{i_k}$ with $1\le i_1<\cdots<i_k\le N$.

If ${\mathbb{K}}={\mathbb{H}}$, we replace quaternions with complex $2\times2$ matrices, to avoid the problem of defining exterior powers of quaternion matrices. Let $A\in M_N({\mathbb{H}})$ be hermitian ($A=A^*$), with (real) right eigenvalues $\theta_1,\dots,\theta_N$. Write $A=A_1+jA_2$ with $A_1,\,A_2\in M_N({\mathbb{C}})$, and let
$$\tilde A\>=\>\begin{pmatrix}A_1&-\overline A_2\\ A_2&\overline A_1\end{pmatrix}$$
Then $\tilde A$ is a complex hermitian matrix of size $2N$ with eigenvalues $\theta_1,\theta_1,\dots,\theta_N,\theta_N$. Let us, for this purpose, define $\mathsf{\Lambda}^kA$ to be the complex matrix $\mathsf{\Lambda}^k\tilde A$ (of size $\binom{2N}{k}$).

Now let ${\mathbb{K}}$ be any of ${\mathbb{R}},\,{\mathbb{C}},\,{\mathbb{H}}$, let $x\in M_{m\times n}({\mathbb{K}})$, and let $\wh x\in M_{m+n}({\mathbb{K}})$ be the hermitian (block) matrix
$$\wh x\ =\ \begin{pmatrix}0&x\\ x^*&0\end{pmatrix}$$
The (right) eigenvalues of $\wh x$ are $\pm\sigma_i(x)$, $i=1,\dots,n$, together with $m-n$ additional zeros. It follows that $x\in B_k$, i.e.\ $\Vert x\Vert_k\le1$, if and only if all eigenvalues of $\mathsf{\Lambda}^k\wh x$ are $\le1$. In other words, this shows that $B_k$ is described by the linear matrix inequality
$$B_k\>=\>\bigl\{x\in M_{m\times n}({\mathbb{K}})\colon\mathsf{\Lambda}^k\wh x\preceq I\bigr\}.$$
\end{proof}

\begin{lab}\label{biorb1}
Finally, we record the cases when the orbitope ${\mathscr{O}}_x$ is a $K$-biorbitope. First consider the action \ref{hermact} on hermitian matrices, for ${\mathbb{K}}={\mathbb{R}},\,{\mathbb{C}},\,{\mathbb{H}}$. Up to scaling and translation there is exactly one biorbitope ${\mathscr{O}}_x$ of this type, namely for $x=\mathrm{diag}(1,0,\dots,0)$. The orbit $Kx$ consists of all psd rank one matrices of (reduced) trace~$1$. Its convex hull has the rank condition removed: ${\mathscr{O}}_x=\{y\colon y\succeq0$, $\tr(y)=1\}$.
\end{lab}

\begin{lab}\label{biorb2}
For the remaining actions \ref{expqnonexcept}, \ref{unitskewherm} and \ref{unitcongr} there exist two essentially different biorbitopes. When ${\mathbb{K}}={\mathbb{R}}$, we have to exclude the case $m=n$ in \ref{expqnonexcept} and the case $n$ even in \ref{unitskewherm}. Indeed, these are the cases when the restricted root system is of type~$D$ (see Theorem \ref{biorbitop}). Otherwise, the two biorbitopes are:
\begin{itemize}
\item[(a)] $x=(1,0,\dots,0)$ and ${\mathscr{O}}_x=\{y\colon\Vert y\Vert_*\le1\}$, the unit ball in the nuclear norm;
\item[(b)] $x=(1,\dots,1)$ and ${\mathscr{O}}_x=\{y\colon\Vert y\Vert_1\le1\}$, the unit ball in the operator norm.
\end{itemize}
In case (b) of the action \ref{expqnonexcept}, the $K$-orbit $Kx$ is the Stiefel manifold $V_n({\mathbb{K}}^m)$ of orthonormal $n$-frames in ${\mathbb{K}}^m$, at least for $n<m$. We therefore call these orbitopes the \emph{Stiefel orbitopes}. For $m=n$ and ${\mathbb{K}}\ne{\mathbb{R}}$, we get tautological orbitopes: ${\mathscr{O}}_x$ is the convex hull of $SU(n)$ (case ${\mathbb{K}}={\mathbb{C}}$), resp.\ of $Sp(n)$ (case ${\mathbb{K}}={\mathbb{H}}$).
\end{lab}


\begin{thebibliography}{28}

\bibitem{bgh1} L.~Biliotti, A.~Ghigi, P.~Heinzner: Polar orbitopes. Comm.\ Analysis Geometry \textbf{21}, 579--606 (2013).

\bibitem{bgh2} L.~Biliotti, A.~Ghigi, P.~Heinzner: Coadjoint orbitopes. Osaka J.~Math.\ \textbf{51}, 935--968 (2014).

\bibitem{boti} A.~Borel, J.~Tits: Groupes r\'eductifs. Publ.\ Math.\ I.\,H.\,E.\,S.\ \textbf{27}, 55--151 (1965).

\bibitem{bu} D.~Bump: \emph{Lie Groups}. Second Edition. Grad. Texts Math. \textbf{225}, Springer, New York, 2013.

\bibitem{ca} W.A.~Casselman: Geometric rationality of Satake compactifications. In: \emph{Algebraic groups and Lie groups}, Austral. Math. Soc. Lect. Ser.~\textbf{9}, Cambridge, 1997, pp.~81--103.

\bibitem{ch} C.B.~Chua: Relating homogeneous cones and positive definite cones via $T$-algebras. SIAM J.~Optim. \textbf{14}, 500--506 (2003).

\bibitem{da} J.~Dadok: Polar coordinates induced by actions of compact Lie groups. Trans. Am. Math. Soc. \textbf{288}, 125--137 (1985).

\bibitem{fa} K.~Fan: Maximum properties and inequalities for the eigenvalues of completely continuous operators. Proc. Nat. Acad. Sci. USA \textbf{37}, 760--766 (1951).

\bibitem{fp} D.R.~Farenick, B.A.F.~Pidkowich: The spectral theorem in quaternions. Linear Algebra Appl. \textbf{371}, 75--102 (2003).

\bibitem{hj} R.A.~Horn, Ch.R.~Johnson: \emph{Matrix Analysis}. Second edition, Cambridge, 2013.

\bibitem{kn} A.W.~Knapp: \emph{Lie Groups Beyond an Introduction}. Second Edition. Prog. Math. \textbf{140}, Birkh\"auser, Boston, 2005.

\bibitem{kob} T.~Kobert: Spectrahedral and semidefinite representability of orbitopes. Ph.D.~thesis, Univ. Konstanz, 2018.

\bibitem{ko} B.~Kostant: On convexity, the Weyl group and the Iwasawa decomposition. Ann. sci. E.N.S. (4) \textbf{6}, 413--455 (1973).

\bibitem{lrt} R.S.~Leite, T.R.W.~Richa, C.~Tomei: Geometric proofs of some theorems of Schur-Horn type. Linear Algebra Appl. \textbf{286}, 149--173 (1999).

\bibitem{luti} G.~Lusztig, J.~Tits: The inverse of a Cartan matrix. Ann. Univ. Timisoara \textbf{30}, 17--23 (1992).

\bibitem{lie3} A.L.~Onishchik, E.B.~Vinberg: \emph{Lie Groups and Lie Algebras III}. Structure of Lie Groups and Lie Algebras. Encycl. Math. Sciences \textbf{41}, Springer, Berlin, 1994.

\bibitem{or} J.-P.~Ortega, T.S.~Ratiu: \emph{Momentum Maps and Hamiltonian Reduction}. Prog. Math. \textbf{222}, Birkh\"auser, Boston, 2004.

\bibitem{sss} R.~Sanyal, F.~Sottile, B.~Sturmfels: Orbitopes. Mathematika \textbf{57}, 275--314 (2011).

\bibitem{sat} I.~Satake: On representations and compactifications of symmetric Riemannian spaces. Ann. Math. \textbf{71}, 77--110 (1960).

\bibitem{spw} J.~Saunderson, P.A.~Parrilo, A.S.~Willsky: Semidefinite descriptions of the convex hull of rotation matrices. SIAM J.~Optim. \textbf{25}, 1314--1343 (2015).

\bibitem{sch:hn} C.~Scheiderer: Spectrahedral shadows. SIAM J.~Appl. Algebra Geometry \textbf{2}, 26--44 (2018).

\bibitem{sn} R.~Schneider: \emph{Convex Bodies: The Brunn-Minkowski Theory}. Second expanded edition, Cambridge, 2014.

\bibitem{si} R.~Sinn: Algebraic boundaries of $SO(2)$-orbitopes. Discrete Comput. Geom. \textbf{50}, 219--235 (2013).

\bibitem{ti} J.~Tits: \emph{Tabellen zu den einfachen Lie Gruppen und ihren Darstellungen}. Lect. Notes Math. \textbf{40}, Springer, Berlin, 1967.

\bibitem{vi} E.B.~Vinberg: The theory of homogeneous convex cones. (English translation) Trans. Moscow Math. Soc. \textbf{12}, 340--403 (1965).

\bibitem{zh} F.~Zhang: Quaternions and matrices of quaternions. Linear Algebra Appl. \textbf{251}, 21--57 (1997).

\end{thebibliography}


\end{document}
"id": "2010.02045.tex",
"language_detection_score": 0.4767707884311676,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{A Multiplication Rule for\\ the Descent Algebra of Type $D$} \author{N. Bergeron and S.J. van Willigenburg\\Department of Mathematics and Statistics,\\York University, 4700 Keele St,\\North York, ON, M3J 1P3, CANADA.} \maketitle \begin{abstract} Here we give an interpretation of Solomon's rule for multiplication in the descent algebra of Coxeter groups of type $D$, $\Sigma D_n$. We describe an ideal ${\cal I}$ such that $\Sigma D_n /{\cal I}$ is isomorphic to the descent algebra of the hyperoctahedral group, $\Sigma B_{n-2}$.\end{abstract}
\section{Introduction}
Given a Coxeter group, $W$, we can construct an algebra---\textit{the descent algebra}---which is a sub-algebra of the group algebra $\mathbb{Q}[W]$. These were introduced in 1976 by Louis Solomon \cite{solomon-mackey}. A revival of interest in this area began in the 1980s when applications were found for an interpretation of the rule for multiplying together basis elements of the descent algebra of the symmetric group, for example \cite{garsia-reutenauer}, \cite{garsia-remmel}. Since this interpretation involved matrices,
we shall call it the ``matrix interpretation'' from here on. This matrix interpretation provided the key to many advances in the subject (for instance \cite{atkinson-solomon}, \cite{bergeron-gr}, \cite{bergberg-ht}) including an analogous matrix interpretation by Fran\c{c}ois and Nantel Bergeron for the descent algebra of the hyperoctahedral group, \cite{bergeron-bergeron}.
Until now, there has been little success in developing such an interpretation for the Coxeter groups of type $D$. However, in this paper we shall give the matrix interpretation for this remaining Coxeter family, after defining the Coxeter groups of type $D$, and their corresponding descent algebra.
The $n$-th Coxeter group of type $D$, $D_n$, is the group acting on the set $$\{-n, \ldots ,-1,1,\ldots ,n\}$$ whose Coxeter generators are the set $S=\{s_{1'},s_1,s_2,\ldots ,s_{n-1}\}$, where $s_i$ is the product of transpositions $(-i\! -\! 1,\, -i)(i,i+1)$ for $i=1,2,\ldots ,n-1$, and $s_{1'}$ is the product of transpositions $(-2,1)(-1,2)$. The relations are given by the following diagram:
where an edge between distinct nodes $i$ and $j$ gives us the relation $(s_is_j)^3=1$, and no edge gives $(s_is_j)^2=1$, and $(s_i)^2=1$.
Solomon proved that if $J$ is a subset of $S$, $W_J$ is the subgroup generated by $J$, $X_J$ ($X_J^{-1}$) is the unique set of minimal length left (right) coset representatives of $W_J$, and ${\cal X}_J$ is the formal sum of the elements in $X_J$ then for $J,K,L\subseteq S$
\[{\cal X}_J{\cal X}_K=\sum _L a_{JKL}{\cal X}_L\]
where $a_{JKL}$ is the number of elements $x\in X_J^{-1}\cap X_K$ such that $x^{-1}Jx\cap K=L$. Hence, the set of all ${\cal X}_J$'s forms a basis for an algebra---the descent algebra of $D_n$, $\Sigma D_n$. Our interpretation of this multiplication rule uses this basis, but for ease of computation, we use a different notation.
We define a composition, $q$, of an integer, $n$, to be an ordered list $[q_1,q_2,\ldots ,q_k]$ of positive integers whose sum is $n$, and shall write $q\vDash n$ to denote this. We shall call the integers $q_1,q_2,\ldots ,q_k$ the \textit{components} of $q$.
There exists a natural bijection between the subsets of $S$ and the disjoint union,
$C(n)$, of the sets \( C_{<n}=\{q|q\vDash m,m\leq
n-2\}\), \( C_1=\{q|q\vDash n,q_1=1 \}\),
\( C_n=\{q|q\vDash n,q_1\geq 2 \}\) and
\( C_n'=\{q|q\vDash n,q_1\geq 2\}\). Note that $C_n$ and $C_n'$ are two copies of the same set. Let $q\in C(n)$ such that $q\vDash m\leq n$, then the subset corresponding to $q$ is
\begin{enumerate} \item \(\{s_{q_0}, s_{q_0+q_1},\ldots , s_{q_0+\ldots +q_{(k-1)}}\}\) if \(q\in C_{<n}\), \item \(\{s_{1'}, s_{1}, s_{1+q_2},\ldots , s_{1+q_2+\ldots +q_{(k-1)}}\}\) if \(q\in C_1\), \item \(\{s_{1'},s_{ q_1},\ldots , s_{q_1+\ldots +q_{(k-1)}}\}\) if \(q\in C_n\), \item \(\{s_{1}, s_{q_1},\ldots , s_{q_1+\ldots +q_{(k-1)}}\}\) if \(q\in C_n'\),
\end{enumerate} where \(q_0=n-m.\)
\begin{remark} The step of corresponding a set, $J$, containing $s_{1'}$ ($s_1$) with a composition, $q$, in $C_n$ ($C_n'$) is because we shall later relate $q$ to the complement of $J$. \end{remark}
\section{The Matrix Interpretation, and Results}
If $J^c$ is the complement of $J$ in $S$, then we let $B_q={\cal X}_{ J^c}$ where $q$ is the composition in $C(n)$ that corresponds to $J$ by the
above bijection. The matrix interpretation of Solomon's multiplication rule can now be described as follows.
Consider the template with the following form \[\begin{pmatrix} z_{00}&z_{01}&z_{02}&\ldots &z_{0v}\\ &y_{11}&y_{12}&\ldots &y_{1v}\\ z_{10}&z_{11}&z_{12}&\ldots &z_{1v}\\ \vdots &\vdots &\vdots &\ddots &\vdots\\ &y_{u1}&y_{u2}&\ldots &y_{uv}\\ z_{u0}&z_{u1}&z_{u2}&\ldots &z_{uv}\\ \end{pmatrix}\] Note that the $y$-lines do not have entries in column 0. We say a template is a ``filled template'' if all entries in a template are non-negative integers.
\begin{dfn} Let $\boldsymbol{t}$ be a filled template. We define the \emph{border-sum}, ${\cal B}(\boldsymbol{t})$, of $\boldsymbol{t}$ to be the sum \[z_{00}+\sum _{i=1}^{u}z_{i0}+\sum _{j=1}^{v}z_{0j}\] and the \emph{y-sum}, ${\cal Y}(\boldsymbol{t})$, to be \(\sum_{i, j} y_{ij}\). The \emph{reading word} of $\boldsymbol{t}$, $r(\boldsymbol{t})$, is given by {\small \[[z_{01},z_{02},\ldots ,z_{0v},y_{1v},\ldots ,y_{12},y_{11},z_{10},z_{11},z_{12},\ldots ,z_{1v},\ldots ,z_{u0},z_{u1},z_{u2}, \ldots ,z_{uv}]\]} with zero entries omitted, unless $z_{00}=1$, in which case $r(\boldsymbol{t})$ is given by
{\small \[[1,z_{01},z_{02},\ldots ,z_{0v},y_{1v},\ldots ,y_{12},y_{11},z_{10},z_{11},z_{12},\ldots ,z_{1v},\ldots ,z_{u0},z_{u1},z_{u2}, \ldots ,z_{uv}]\]}with zero entries omitted.\end{dfn}
If \(p\), and \(q\) are compositions in $C (n)$ such that $p\vDash l\leq n$, and $q\vDash m\leq n$, then let \(Z(p,q)\) be the set of filled templates, $\boldsymbol{t}$, such that \begin{enumerate} \item \(z_{0j}+\sum _{i\neq 0}(y_{ij}+z_{ij})=p _{j},\ j\neq 0\), \item $\sum _i z_{i0}=n-l$, \item \(z_{i0}+\sum _{j\neq 0}(y_{ij}+z_{ij})=q _{i},\ i\neq 0\), \item $\sum _{j} z_{0j}=n-m$, \item If ${\cal B}(\boldsymbol{t})$=0, ${\cal Y}(\boldsymbol{t})$ is odd if
\begin{enumerate}\item $p\in C _1\cup C _n$ and $q\in C _n'$, or \item $p\in C _n'$ and $q\in C _1\cup C _n$.\end{enumerate}
Otherwise ${\cal Y}(\boldsymbol{t})$ is even. \end{enumerate}
We are now ready to state our matrix interpretation. To distinguish between those compositions belonging to $C_n$ and those belonging to $C_n'$, we shall write $q'$ when $q\vDash n$ and $q\in C_n'$.
\begin{theorem} Let $p,q\in C (n)$. For any filled template $\boldsymbol{t}$, let $r(\boldsymbol{t})=[r_1(\boldsymbol{t}), r_2(\boldsymbol{t}), \ldots ]$.
Then, \[B_p B_q=\sum _{\boldsymbol{t}\in Z(p,q)} \tilde{B}_{r(\boldsymbol{t})}\] where $\tilde{B}_{r(\boldsymbol{t})}$ satisfies the following.
\begin{enumerate} \item If $q\in C _1$, then $\tilde{B}_{r(\boldsymbol{t})}=B_{r(\boldsymbol{t})} $.\label{ono-p} \item If $q\in C _n$, then $\tilde{B}_{r(\boldsymbol{t})}=B_{r(\boldsymbol{t})} $.\label{no-p} \item If $q\in C _n'$, then if $r_1(\boldsymbol{t})=1$ then $\tilde{B}_{r(\boldsymbol{t})}=B_{r(\boldsymbol{t})} $, otherwise $\tilde{B}_{r(\boldsymbol{t})}=B_{r(\boldsymbol{t})'}$. \label{rt-up} \item If $q\in C _{<n}$, then \label{ty-pe1} \begin{enumerate} \item If $r_1(\boldsymbol{t})\geq 2$, $p\in C _1\cup C _n$ and ${\cal Y}(\boldsymbol{t})$ is odd, or $r_1(\boldsymbol{t})\geq 2$,
$p\in C _n'$ and ${\cal Y}(\boldsymbol{t})$ is even, then $\tilde{B}_{r(\boldsymbol{t})}=B_{r(\boldsymbol{t})'}$. \label{on-ep} \item If $p\in C _{<n}$ and $z_{00}=0$, then if $r_1(\boldsymbol{t}) =1$, then $\tilde{B}_{r(\boldsymbol{t})}=2B_{r(\boldsymbol{t})} $, otherwise $\tilde{B}_{r(\boldsymbol{t})}=B_{r(\boldsymbol{t})} +B_{r(\boldsymbol{t}) '}$. \label{two-u} \item Otherwise $\tilde{B}_{r(\boldsymbol{t})}=B_{r(\boldsymbol{t})}$. \end{enumerate} \end{enumerate} \label{d-mult} \end{theorem}
A rigorous proof of this theorem can be obtained through a variety of methods. One is to use shuffle products in a way similar to that seen in \cite{garsia-remmel}, or sketched in \cite{bergeron-bergeron}, to prove the analogous interpretations for the descent algebras of the Coxeter groups of types $A$ and $B$, respectively. Alternatively, this theorem can be proved using the general framework suggested in \cite{vanwilli-justgar}. Indeed this framework inspired Theorem~\ref{d-mult}, and a proof in this vein can be found in \cite{vanwilli-thesis}.
Here, however, we wish to emphasize that it is the formulation of Theorem~\ref{d-mult} that is the most difficult stage. Once this has been achieved, a proof can be derived by the diligent reader, with or without the use of the above references, or found in \cite{vanwilli-thesis}. Therefore we feel it would be more beneficial to replace the proof with a collection of illuminating examples.
\begin{example} To illustrate our rule we shall work in $\Sigma D_4$. Each example, $B_pB_q$, shall consist of $Z(p,q)$, and the resulting summands it generates according to the rule. \begin{enumerate} \item $B_{[4]}B_{[1,3]}$. \[\begin{pmatrix} 0&0\\ &1\\ 0&0\\ &3\\ 0&0 \end{pmatrix} \begin{pmatrix} 0&0\\ &0\\ 0&1\\ &0\\ 0&3 \end{pmatrix} \begin{pmatrix} 0&0\\ &0\\ 0&1\\ &2\\ 0&1 \end{pmatrix} \begin{pmatrix} 0&0\\ &1\\ 0&0\\ &1\\ 0&2 \end{pmatrix}\] \[B_{[4]}B_{[1,3]}=2B_{[1,3]}+B_{[1,2,1]}+B_{[1,1,2]}\]\label{b31-b4} \item $B_{[3,1]'}B_{[4]}$. \[\begin{pmatrix} 0&0&0\\ &3&0\\ 0&0&1 \end{pmatrix} \begin{pmatrix} 0&0&0\\ &0&1\\ 0&3&0 \end{pmatrix} \begin{pmatrix} 0&0&0\\ &2&1\\ 0&1&0 \end{pmatrix} \begin{pmatrix} 0&0&0\\ &1&0\\ 0&2&1 \end{pmatrix}\] \[B_{[3,1]'}B_{[4]}=B_{[3,1]}+B_{[1,3]}+2B_{[1,2,1]}\]\label{b31p-b4} \item $B_{[2,2]'}B_{[4]'}$. \[\begin{pmatrix} 0&0&0\\ &2&2\\ 0&0&0 \end{pmatrix} \begin{pmatrix} 0&0&0\\ &2&0\\ 0&0&2 \end{pmatrix} \begin{pmatrix} 0&0&0\\ &0&2\\ 0&2&0 \end{pmatrix} \begin{pmatrix} 0&0&0\\ &0&0\\ 0&2&2 \end{pmatrix} \begin{pmatrix} 0&0&0\\ &1&1\\ 0&1&1 \end{pmatrix} \] \[B_{[2,2]'}B_{[4]'}=4B_{[2,2]'}+B_{[1,3]}+B_{[1,1,1,1]}\]\label{b22p-b4p} \item $B_{[4]}B_{[2]}$. \[\begin{pmatrix} 0&2\\ &2\\ 0&0 \end{pmatrix}\quad \begin{pmatrix} 0&2\\ &0\\ 0&2 \end{pmatrix}\quad \begin{pmatrix} 0&2\\ &1\\ 0&1 \end{pmatrix}\] \[B_{[4]}B_{[2]}=2B_{[2,2]}+B_{[2,1,1]'}\]\label{b4-b2} \item $B_{[2]}B_{[2]}$. 
\[ \begin{pmatrix} 2&0\\ &2\\ 0&0 \end{pmatrix} \begin{pmatrix} 2&0\\ &0\\ 0&2 \end{pmatrix} \begin{pmatrix} 2&0\\ &1\\ 0&1 \end{pmatrix} \begin{pmatrix} 0&2\\ &0\\ 2&0 \end{pmatrix} \begin{pmatrix} 1&1\\ &1\\ 1&0 \end{pmatrix} \begin{pmatrix} 1&1\\ &0\\ 1&1 \end{pmatrix}\] \[B_{[2]}B_{[2]}=2B_{[2]}+B_{[1,1]}+B_{[2,2]}+B_{[2,2]'}+2B_{[1,1,1,1]}\]\label{b2-b2} \item $B_{[1,1]}B_{[2]}$ \begin{gather*}\begin{pmatrix} 2&0&0\\ &1&1\\ 0&0&0 \end{pmatrix} \begin{pmatrix} 2&0&0\\ &1&0\\ 0&0&1 \end{pmatrix} \begin{pmatrix} 2&0&0\\ &0&1\\ 0&1&0 \end{pmatrix} \begin{pmatrix} 2&0&0\\ &0&0\\ 0&1&1 \end{pmatrix} \begin{pmatrix} 0&1&1\\ &0&0\\ 2&0&0 \end{pmatrix}\\ \begin{pmatrix} 1&1&0\\ &0&1\\ 1&0&0 \end{pmatrix} \begin{pmatrix} 1&1&0\\ &0&0\\ 1&0&1 \end{pmatrix} \begin{pmatrix} 1&0&1\\ &1&0\\ 1&0&0 \end{pmatrix} \begin{pmatrix} 1&0&1\\ &0&0\\ 1&1&0 \end{pmatrix}\end{gather*} \[B_{[1,1]}B_{[2]}=4B_{[1,1]}+2B_{[1,1,2]}+4B_{[1,1,1,1]}\]\label{b11-b2} \end{enumerate}
\begin{remark} Note, in particular, that these examples illustrate the various conditions given in Theorem~\ref{d-mult}. Examples~\ref{b31-b4} and~\ref{b31p-b4} illustrate conditions~\ref{ono-p} and~\ref{no-p} respectively, and the influence of ${\cal B}(\boldsymbol{t})=0$ on possible filled templates belonging to $Z(p,q)$. Example~\ref{b22p-b4p} illustrates condition~\ref{rt-up}, and examples~\ref{b4-b2}, \ref{b2-b2} and~\ref{b11-b2} illustrate condition~\ref{ty-pe1}. More specifically, examples~\ref{b4-b2}, \ref{b2-b2} and~\ref{b11-b2} illustrate respectively conditions~\ref{on-ep}, \ref{two-u} when $r_1(\boldsymbol{t})\geq 2$, and~\ref{two-u} when $r_1(\boldsymbol{t})=1$. \end{remark} \end{example}
\begin{cor}
${\cal I}=\langle B_q\mid q\in C_1\cup C_n\cup C_n'\rangle$ is an ideal. \end{cor} \begin{prf} Let $B_p$ be a basis element of $\Sigma D_n$, and $B_q\in {\cal I}$. From our matrix interpretation it follows that any filled template, $T$, in $Z(p,q)$ or $Z(q,p)$ will be such that $z_{00}=0$. Therefore $r(T)\vDash n$, that is $B_pB_q,B_qB_p\in {\cal I}$. The corollary follows immediately by linearity. \end{prf}
Moreover, we have the following.
\begin{theorem} Let $B_n$ be the Coxeter group of type $B$, whose Dynkin diagram is on $n$ vertices, and let $\Sigma B_n$ be its associated descent algebra. Then \[\Sigma B_{n-2}\cong \Sigma D_n/{\cal I}\] \end{theorem} \begin{prf} For clarity, for $q\in C_{<n}$, let $B_q^D$ be a basis element of $\Sigma D_n$, and let $B_q^B$ be a basis element of $\Sigma B_{n-2}$.
Note that the set $\{B_q^D\}_{q\in C_{<n}}$ is a basis for $ \Sigma D_n/{\cal I}$. Hence, let $p\vDash m_1$, $q\vDash m_2$, $m_1,m_2\leq n-2$.
By Theorem~\ref{d-mult}, it follows that in $\Sigma D_n/{\cal I}$, the only non-zero terms in the product $B_p^DB_q^D$ are those corresponding to filled templates in $Z(p,q)$ with $z_{00}\geq 2$. We denote this set of filled templates by $I(p,q)$. Note that if we subtract 2 from the $z_{00}$ of any filled template, $T\in I(p,q)$, the reading word, row sum, and column sum of $T$ are unaffected. Moreover, if this is performed on all $T\in I(p,q)$ the resulting filled templates are precisely those that arise if we calculate the product $B_p^BB_q^B$ in $\Sigma B_{n-2}$ (\cite{bergeron-bergeron}). Since this argument is reversible, the result follows. \end{prf}
\end{ack}
\end{document} | arXiv | {
"id": "0706.2711.tex",
"language_detection_score": 0.5655297040939331,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Lower bounds for the principal genus]{Lower bounds for the principal genus\\ of definite binary quadratic forms} \author{Kimberly Hopkins} \address{Department of Mathematics\\University of Texas at Austin\\Austin, TX 78712-0257} \email{khopkins@math.utexas.edu} \author{Jeffrey Stopple} \address{Department of Mathematics\\University of California, Santa Barbara\\Santa Barbara, CA 93106-3080} \email{stopple@math.ucsb.edu} \subjclass{11M20,11R29} \begin{abstract} We apply Tatuzawa's version of Siegel's theorem to derive two lower bounds on the size of the principal genus of positive definite binary quadratic forms.
\end{abstract}
\maketitle
\subsection*{Introduction} Suppose $-D<0$ is a fundamental discriminant. By genus theory we have an exact sequence for the class group $\mathcal C(-D)$ of positive definite binary quadratic forms: \[
\mathcal P(-D)\overset{\text{def.}}=\mathcal C(-D)^2 \hookrightarrow \mathcal C(-D) \twoheadrightarrow \mathcal C(-D)/ \mathcal C(-D)^2 \simeq (\mathbb Z/2)^{g-1}, \] where $D$ is divisible by $g$ primary discriminants (i.e., $D$ has $g$ distinct prime factors). Let $p(-D)$ denote the cardinality of the principal genus $\mathcal P(-D)$. The genera of forms are the cosets of $\mathcal C(-D)$ modulo the principal genus, and thus $p(-D)$ is the number of classes of forms in each genus. The study of this invariant of the class group is as old as the study of the class number $h(-D)$ itself. Indeed, Gauss wrote in \cite[Art. 303]{Gauss} \begin{quote}. . . Further, the series of [discriminants] corresponding to the same given classification (i.e. the given number of both genera and classes) always seems to terminate with a finite number . . . However, \emph{rigorous} proofs of these observations seem to be very difficult. \end{quote}
Theorems about $h(-D)$ have usually been closely followed with an analogous result for $p(-D)$. When Heilbronn \cite{He} showed that $h(-D)\to\infty$ as $D\to\infty$, Chowla \cite{C} showed that $p(-D)\to\infty$ as $D\to\infty$.
An elegant proof of Chowla's theorem is given by Narkiewicz in \cite[Prop 8.8 p. 458]{N}.
Similarly, the Heilbronn-Linfoot result \cite{HL} that $h(-D)>1$ if $D>163$, with at most one possible exception, was matched by Weinberger's result \cite{W} that $p(-D)>1$ if $D>5460$, with at most one possible exception. On the other hand, Oesterl\'{e}'s \cite{Oes} exposition of the Goldfeld-Gross-Zagier bound for $h(-D)$ already contains the observation that the result was not strong enough to give any information about $p(-D)$.
In \cite{T} Tatuzawa proved a version of Siegel's theorem: for every $\varepsilon$ there is an explicit constant $C(\varepsilon)$ so that \[ h(-D)>C(\varepsilon)D^{1/2-\varepsilon} \] with at most one exceptional discriminant $-D$. This result has never been adapted to the study of the principal genus. It is easily done; the proofs are not difficult, so it is worthwhile filling this gap in the literature. We present two versions. The first version contains a transcendental function (the Lambert $W$ function discussed below). The second version gives, for each $n\ge4$, a bound which involves only elementary functions. For each fixed $n$ the second version is stronger on an interval $I=I(n)$ of $D$, but the first is stronger as $D\to\infty$. The second version has the added advantage that it is easily computable.
(N.B. The constants in Tatuzawa's result have been improved in \cite{Ho} and \cite{JL}; these could be applied at the expense of slightly more complicated statements.)
\subsection*{Notation} We will always assume that $g\ge 2$, for if $g=1$ then $-D=-4,-8$, or $-q$ with $q\equiv 3\bmod 4$ a prime. In this last case $p(-q)=h(-q)$ and Tatuzawa's theorem \cite{T} applies directly.
\section*{First version} \begin{lemma}\label{Lemma1} If $g\ge 2$, \[ \log(D)>g\log(g). \] \end{lemma} \begin{proof} Factor $D$ as $q_1\cdots q_g$ where the $q_i$ are (absolute values) of primary discriminants, i.e. $4$, $8$, or odd primes. Let $p_i$ denote the $i$th prime number, so we have \begin{equation}\label{Eq:inequality} \log(D)=\sum_{i=1}^g \log(q_i)\ge \sum_{i=1}^g \log(p_i)\overset{\text{def.}}=\theta(p_g). \end{equation} By \cite[(3.16) and (3.11)]{RS}, we know that Chebyshev's function $\theta$ satisfies $\theta(x)>x(1-1/\log(x))$ if $x>41$, and that \[ p_g>g(\log(g)+\log(\log(g))-3/2). \] After substituting $x=p_g$ and a little calculation, this gives $\theta(p_g)>g\log(g)$ as long as $p_g>41$, i.e. $g>13$. For $g=2,\ldots, 13$, one can easily verify the inequality directly. \end{proof}
Let $W(x)$ denote the Lambert $W$-function, that is, the inverse function of $f(w)=w\exp(w)$ (see \cite{E}, \cite[p. 146 and p. 348, ex 209]{PS}). For $x\ge0$ it is positive, increasing, and concave down. The Lambert $W$-function is also sometimes called the product log, and is implemented as \texttt{ProductLog} in \emph{Mathematica}.
\begin{theorem}\label{Theorem2} If $0<\varepsilon<1/2$ and $D>\max(\exp(1/\varepsilon),\exp(11.2))$, then with at most one exception \[ p(-D)>\frac{1.31}{\pi}\varepsilon D^{1/2-\varepsilon-\log(2)/W(\log(D))}. \] \end{theorem} \begin{proof} Tatuzawa's theorem \cite{T}, says that with at most one exception \begin{equation}\label{Eq:tatuzawa} \frac{\pi\cdot h(-D)}{ \sqrt{D}}=L(1,\chi_{-D})>.655\varepsilon D^{-\varepsilon}, \end{equation} thus \[ p(-D)=\frac{2h(-D)}{2^g}>\frac{1.31\varepsilon\cdot D^{1/2-\varepsilon}}{\pi \cdot 2^g}. \]
The relation $\log(D)>g\log(g)$ is equivalent to \begin{gather*} \log(D)>\exp(\log(g))\log(g),\\ \intertext{thus applying the increasing function $W$ gives, by definition of $W$,} W(\log(D))>\log(g),\\ \intertext{and applying the exponential gives} \exp(W(\log(D)))>g. \end{gather*} The left hand side above is equal to $\log(D)/W(\log(D))$ by the definition of $W$. Thus \begin{gather*} -\log(D)/W(\log(D)) < -g,\\ D^{-\log(2)/W(\log(D))}=2^{-\log(D)/W(\log(D))} < 2^{-g}, \end{gather*} and the Theorem follows. \end{proof} \begin{remark}
Our estimate arises from the bound $\log(D)>g\log(g)$, which is nearly optimal. That is, for every $g$, there exists a fundamental discriminant (although not necessarily negative) of the form \[ D_g\overset{\text{def.}}=\pm 3\cdot4\cdot5\cdot7\dots p_g, \] and \[
\log|D_g| = \theta(p_g)+\log(2). \] From the Prime Number Theorem we know $\theta(p_g)\sim p_g$, so \[
\log|D_g| \sim p_g+\log(2) \] while \cite[(3.13)]{RS} shows $p_g<g(\log(g)+\log(\log(g)))$ for $g\ge6$. \end{remark}
\section*{Second version}
\begin{theorem}\label{Theorem1} Let $n\ge 4$ be any natural number. If $0<\varepsilon<1/2$ and $D>\max(\exp(1/\varepsilon),\exp(11.2))$, then with at most one exception \[ p(-D)>\frac{1.31\varepsilon}{\pi}\cdot \frac{D^{1/2-\varepsilon-1/n}}{f(n)}, \] where \[f(n) = \exp\big[ ( \pi(2^n) - 1/n ) \log 2 - \theta(2^n)/n\big]; \] here $\pi$ is the prime counting function and $\theta$ is the Chebyshev function.
\end{theorem} \begin{proof} First observe \[ f(n) = \frac{2^{\pi(2^n)} }{2^{1/n}\prod_{\text{primes }p<2^n}p^{1/n}}. \]
From Tatuzawa's Theorem (\ref{Eq:tatuzawa}), it suffices to show $2^g \leq f(n)D^{1/n}$. Suppose first that $D$ is not $\equiv0\pmod8$.
Let $S = \{ 4, \: \text{odd primes} < 2^n\}$, so $|S| = \pi(2^n)$. Factor $D$ as $q_1\cdots q_g$ where $q_i$ are (absolute values) of coprime primary discriminants, that is, $4$ or odd primes, and satisfy $q_i<q_j$ for $i<j$. Then, for some $0\leq m\leq g$, we have $q_1,\dots, q_m \in S$ and $q_{m+1},\dots, q_g\not\in S$, and thus $2^n<q_i$ for $i=m+1,\dots, g$. This implies \begin{align*}
2^{gn} &= \underbrace{2^n\cdots 2^n}_{m} \cdot \underbrace{2^n\cdots 2^n}_{g-m}
\le 2^{mn}\ q_{m+1} q_{m+2} \ldots q_g \\
&= \frac{ 2^{mn} }{ q_1\cdots q_m} D
\leq \frac{2^{|S|\cdot n}}{ \prod_{q\in S} q}\cdot D \\
\intertext{ as we have included in the denominator the remaining elements of $S$ (each of which is $\le2^n$). The above is}
&= \frac{2^{\pi(2^n) \cdot n}}{ 2 \prod_{\text{primes }p<2^n}p} \cdot D
= f(n)^n\cdot D. \end{align*} This proves the theorem when $D$ is not $\equiv 0\bmod 8$. In the remaining case, apply the above argument to $D^\prime=D/2$; so \[ 2^{gn}\le f(n)^nD^\prime <f(n)^n D. \] \end{proof} \begin{examples}If $0<\varepsilon<1/2$ and $D>\max(\exp(1/\varepsilon),\exp(11.2))$, then with at most one exception, Theorem \ref{Theorem1} implies \begin{gather*} p(-D)>0.10199\cdot\varepsilon\cdot D^{1/4-\varepsilon}\quad (n=4)\\ p(-D)>0.0426\cdot\varepsilon\cdot D^{3/10-\varepsilon}\quad (n=5)\\ p(-D)>0.01249\cdot\varepsilon\cdot D^{1/3-\varepsilon}\quad (n=6)\\ p(-D)>0.00188\cdot\varepsilon\cdot D^{5/14-\varepsilon}\quad (n=7)\\ \end{gather*} \end{examples}
\section*{Comparison of the two theorems} How do the two theorems compare? Canceling the terms which are the same in both, we seek inequalities relating \[ D^{-\log 2/W(\log D)} \quad\text{v.}\quad \frac{D^{-1/n}}{f(n)}. \] \begin{theorem}\label{Theorem3} For every $n$, there is a range of $D$ where the bound from Theorem \ref{Theorem1} is better than the bound from Theorem \ref{Theorem2}. However, for any fixed $n$ the bound from Theorem \ref{Theorem2} is eventually better as $D$ increases. \end{theorem} For fixed $n$, the first statement of Theorem \ref{Theorem3} is equivalent to proving \[ D^{\log(2)/W(\log(D)) - 1/n}\ge f(n) \] on a non-empty compact interval of the $D$ axis. Taking logarithms, it suffices to show, \begin{lemma}\label{T:compare} Let $n\ge 4$. Then \[
x\bigg( \frac{\log 2}{W(x)} -\frac{1}{n}\bigg) \geq \log f(n) \] on some non-empty compact interval of positive real numbers $x$. \end{lemma} \begin{proof} Let $g(n,x) = x\, ( \log 2/W(x) - 1/n )$. Then \[ \frac{\partial g}{\partial x} = \frac{\log 2}{W(x)+1} - \frac1n \qquad \text{and}\qquad \frac{\partial^2 g}{\partial x^2} = \frac{-\log 2 \cdot W(x)}{ x( W(x)+1)^3}. \] This shows $g$ is concave down on the positive real numbers and has a maximum at \[ x = 2^n (n\log 2-1)/e. \] Because of the concavity, all we need to do is show that $g(n,x)>\log f(n)$ at \emph{some} $x$. The maximum point is slightly ugly so instead we let $x_0 = 2^n n\log 2/e$.
Using $W(x) \sim \log x - \log\log x$, a short calculation shows \[
g(n,x_0) \sim \frac{1}{e}\cdot\frac{2^n}{n}. \]
By \cite[(5.7)]{RS2}, a lower bound on Chebyshev's function is \[
\theta(t)> t\bigg( 1- \frac{1}{40 \log t}\bigg) , \quad t>678407. \] (Since we will take $t=2^n$ this requires $n>19$ which is not much of a restriction.)\ \ By \cite[(3.4)]{RS}, an upper bound on the prime counting function is \[ \pi(t) < \frac{t}{\log t -3/2}, \quad t>e^{3/2}. \]
Hence $-\theta(2^n)< 2^n\, (1/(40 n\log 2) -1)$ and so \begin{align*} \log f(n) &= \bigg(\pi(2^n)-\frac1n\bigg)\log 2 - \frac{\theta(2^n)}{n} \\
&< \bigg( \frac{2^n}{n\log 2 - 3/2} -\frac1n\bigg)\log2 + \frac{2^n}{n} \bigg(\frac{1}{40 n\log2} -1\bigg)\\
&\sim \frac{61}{40\log2}\cdot\frac{2^n}{n^2}. \end{align*}
Comparing the two asymptotic bounds for $g$ and $\log f$ respectively we see that \[
\frac{1}{e}\cdot\frac{2^n}{n} > \frac{61}{40\log2}\cdot\frac{2^n}{n^2}, \]
for $n\ge 6$; small $n$ are treated by direct computation.\footnote{The details of the asymptotics have been omitted for conciseness.} \end{proof}
\begin{figure}
\caption{$\log$-$\log$ plots of the bounds from Theorems \ref{Theorem2} and \ref{Theorem1}}
\label{F:pic}
\end{figure}
Figure \ref{F:pic} shows a $\log$-$\log$ plot of the two lower bounds, omitting the contribution of the constants which are the same in both and the terms involving $\varepsilon$. That is, Theorem \ref{Theorem1} gives for each $n$ a lower bound $b(D)$ of the form \begin{gather*} b(D)=C(n) \varepsilon D^{1/2-1/n-\varepsilon},\quad\text{so}\\ \log(b(D)) = (1/2-1/n-\varepsilon)\log(D)+\log(C(n))+\log(\varepsilon). \end{gather*} Observe that for fixed $n$ and $\varepsilon$, this is linear in $\log(D)$, with the slope an increasing function of the parameter $n$. What is plotted is actually $(1/2-1/n)\log(D)+\log(C(n))$ as a function of $\log(D)$, and analogously for Theorem \ref{Theorem2}.\ \ In red, green, and blue are plotted the lower bounds from Theorem \ref{Theorem1} for $n=4$, $5$, and $6$ respectively. In black is plotted the lower bound from Theorem \ref{Theorem2}.
\begin{examples}The choice $\varepsilon=1/\log(5.6\cdot 10^{10})$ in Theorem \ref{Theorem2} shows that $p(-D)>1$ for $D>5.6\cdot 10^{10}$ with at most one exception. (For comparison, Weinberger \cite[Lemma 4]{W} needed $D>2\cdot 10^{11}$ to get this lower bound.) And, $\varepsilon=1/\log(3.5\cdot 10^{14})$ in Theorem \ref{Theorem2} gives $p(-D)>10$ for $D>3.5\cdot 10^{14}$ with at most one exception. Finally, $n=6$ and $\varepsilon=1/\log(4.8\cdot 10^{17})$ in Theorem \ref{Theorem1} gives $p(-D)>100$ for $D>4.8\cdot 10^{17}$ with at most one exception. \end{examples}
\end{document} | arXiv | {
"id": "0811.0358.tex",
"language_detection_score": 0.7126477360725403,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{A Rolling Horizon Approach for a Bilevel Stochastic Pricing Problem for Demand-Side Management}
\begin{abstract} To guarantee the well-functioning of electricity distribution networks, it is crucial to constantly ensure the demand-supply balance. To do this, one can control the means of production, but also influence the demand: \emph{demand-side management} becomes more and more popular as the demand keeps increasing and getting more chaotic. In this work, we propose a bilevel model involving an energy supplier and a smart grid operator (SGO): the supplier induces shifts of the load controlled by the SGO by offering time-dependent prices. We assume that the SGO has contracts with consumers and decides their consumption schedule, guaranteeing that the inconvenience induced by the load shifts will not overcome the related financial benefits. Furthermore, we assume that the SGO manages a source of renewable energy (RE), which leads us to consider a stochastic bilevel model, as the generation of RE is by nature highly unpredictable. To cope with the issue of large problem sizes, we design a rolling horizon algorithm that can be applied in a real context. \end{abstract}
\section{Introduction}\label{sect_intro}
The efficient production, distribution, and consumption of energy are some of the most important challenges of our time. With the growing significance of distributed generation (DG) in the global energy mix, the production levels are less predictable than ever. To avoid both losses or blackouts, new solutions must be found: adapting the demand to the production instead of the opposite has become increasingly popular to improve the efficiency of the electricity grid's functioning. The notion of giving the desired shape to the demand curve is known as \emph{demand-side management} (DSM) \citep{Kreith_Energy_2016}, which can be implemented through several techniques. Among those techniques, \emph{load shifting} consists in shifting a part of the demand, either by moving forward or by postponing the consumption of electricity \citep{Wang_Load_2016}. In this work, load shifting is the only DSM technique that is considered.
Naturally, applying DSM techniques can only be done in a \emph{smart grid} context, where extensive means of communication ensure the transmission of data among the various actors of the electricity distribution network, specifically between energy suppliers and consumers \citep{Farhangi_Path_2010,Kabalci_Survey_2016}.
In \cite{Alekseeva_Bilevel_2018}, a power supplier aims to maximize its profit, knowing that its clients are going to optimize their consumption accordingly to the prices that the supplier offers. A similar pattern is found in \cite{Afsar_Achieving_2016}, where the supplier's objective consists in minimizing the peak load. Finally, in \cite{Aussel_Trilevel_2020}, a model involving an energy supplier in a best response situation and various types of clients is considered. Common elements of those last three works are twofold. First, they all feature an energy supplier that induces a load shifting from its clients through price incentives. Second, they all rely on bilevel optimization to reach their objectives. \begin{itemize}
\item Pricing can be a strong tool to incentivize the buyers to adopt a certain behavior, as in general augmenting the prices will lead to a decrease in demand, and vice versa. Numerous applications can be contemplated: from peer-to-peer networks \citep{Park_Pricing_2010} to freight delivery \citep{Holguin-Veras_Selfsupported_2015} via communication networks \citep{Ozdaglar_Incentives_2007}. In the electricity domain, a personalized real-time pricing mechanism that aims to optimize the system's functioning while preserving the users' welfare is proposed in \citet{Tsaousoglou_Personalized_2019}. Closer to this work, in \citet{Liu_PricingBased_2019}, pricing-based demand response is implemented for a smart home with various types of household appliances, together with energy storage units and DG, and taking into account the consumer's welfare, but without a bilevel structure.
\item Bilevel optimization originates in the seminal work of Stackelberg \citep{vonStackelberg_Marktform_1934}, and is used to model all kinds of hierarchical interactions between a \emph{leader} making decisions first and a \emph{follower} reacting optimally to the leader's decisions (general references to bilevel programming include \citet{Bard_Practical_2010,Dempe_Foundations_2002}). In the three works mentioned above, the electricity supplier always acts as the leader: it offers time-dependent prices, to which the clients and/or customers react in an optimal way by changing their load demand. Although proven to be NP-hard even in the simplest cases where everything is linear \citep{Ben-Ayed_Computational_1990,Labbe_Bilevel_1998}, bilevel programs are appropriate tools, as they allow one to explicitly take into account the response of the follower to the leader's decisions. \end{itemize}
Besides the three works mentioned above that combine demand response and bilevel optimization, several researchers have investigated this specific setting \citep{Besancon_Bilevel_2018, Kovacs_Bilevel_2019, Yuan_Realtime_2020a, Shomalzadeh_Energy_2020}. Next to those works, in \citet{Grimm_Optimal_2020}, several pricing schemes (time-of-use, critical-peak-pricing, real-time-pricing, and fixed-price) are compared in a retailer-prosumer interaction. Two bilevel models are presented in \citet{Alves_Optimizing_2020} in the aim to optimize time-of-use prices. In the first model, the periods where modified prices apply are predetermined, whereas in the second model, they are variables as well. Finally, in \citet{Tang_Game_2019}, an interaction between a power grid and individual buildings is shown, where the grid aims to optimize its profit and reduce the demand fluctuation, while the buildings minimize their bill while modifying their demand as less as possible. A more detailed overview of demand response problems modeled through bilevel programs consists in \citet{HenggelerAntunes_Bilevel_2020}.
In this article, we present the problem (SBPP), which is strongly inspired by the problems studied in \citet{Afsar_Achieving_2016}: an energy supplier aims to maximize its profit (its sales minus its costs) by selling electricity to a smart grid operator (SGO) that manages the consumption schedule of a set of clients. The SGO aims to minimize a utility function that consists in the sum of its clients' electricity bills plus a so-called \emph{inconvenience cost}. We assume indeed that shifting the clients' loads comes at a certain cost: if the use of a client's device has to be postponed, the client will undergo some inconvenience. To satisfy its clients' demand, we finally hypothesize that the SGO manages a source of DG in the form of photovoltaic panels, and some storage capacities - a configuration is considered in \citet{Xu_Demand_2020} without the bilevel setting. Specifically, managing the DG induces some difficulties, as it is impossible to know with complete precision the energy that will be produced. This is why (SBPP){} involves stochasticity, under the form of scenario trees. The models resulting from this method becoming exponentially large as the size of the scenario trees grows, we present a \emph{rolling horizon} algorithm that does not solve a problem with a large scenario tree, but can be applied in a real setting to decide the supplier's prices on the go. Although rolling horizons have been known and used for decades in the framework of stochastic optimization \citep{Sethi_Theory_1991}, with various applications e.g., in scheduling \citep{Sama_Rolling_2013} or in vehicle routing \citep{Crama_Vehicle_2019}, they have not been used yet in the framework of bilevel optimization, to the notable exception of \citet{Kallabis_Strategic_2019}. In the latter, the leader is an investor who decides on investments in power generation assets, and the follower is a market operator maximizing welfare given consumer demand and installed generation assets.
This article is organized as follows: in Section \ref{sect_sto-prob}, the problem (SBPP){} is first defined in its deterministic form, that is without scenario trees, for pedagogical reasons. In Section \ref{sect_scen-tree}, we introduce the stochasticity by exposing the modifications implied on the deterministic form of (SBPP){}, and present a one-level formulation of the problem in order to solve it computationally. Section \ref{sect_rh-approach} is dedicated to the rolling horizon algorithm. In Section \ref{sect_num-res}, numerical results support our theoretical work: first, an analysis of the deterministic form of (SBPP){} justifies our approach, showing that pricing incentives can have interesting outcomes for an energy supplier, then the rolling horizon algorithm is tested and proves to be applicable in a real setting. Finally, some conclusions are drawn in Section \ref{sect_conc}.
\section{Deterministic Form of (SBPP){}}\label{sect_sto-prob}
\subsection{Follower's Problem}
The follower is a smart grid operator (SGO) that has contracts with a set of clients $N$. Each of the clients $n\inN$ has available a set of devices $A_n$ that must be powered during an associated time window $T_{\na}$. The device $(n,a)$ ($a\inA_n$ for $n\inN$) must receive a quantity of energy $E_{\na}$ during its associated time window, but cannot receive more than $\beta^{max}_{\na}$ energy units. To power the devices, the SGO can take energy from four sources: \begin{itemize}
\item $x^h_{\na}$: energy purchased from the leader,
\item $\bar{x}^h_{\na}$: energy purchased from the competitor,
\item $\lambda^h_{\na}$: energy taken from the DG,
\item $s^h_{\na}$: energy taken from the storage. \end{itemize} Therefore, for each device $(n,a)$, the SGO must satisfy the constraints: \begin{align}
&\displaystyle\sum_{h\inT_{\na}} \left(x^h_{\na}+\bar{x}^h_{\na}+\lambda^h_{\na}+s^h_{\na}\right) \geq E_{\na} \label{fol_con1} \\
&x^h_{\na}+\bar{x}^h_{\na}+\lambda^h_{\na}+s^h_{\na}\leq \beta^{max}_{\na} && \forall h\inT_{\na}. \label{fol_con2} \end{align} Next, the SGO manages a storage capacity in the form of a battery. The battery state $S^h$ must be actualized at each hour, and naturally cannot exceed the battery capacity, or be lower than the minimal required amount of stored energy. The battery state being known at time $0$, the follower must ensure that the following constraints are satisfied for all $h\in\mathcal{H}$: \begin{align}
&\displaystyle S^{0}= S^{start} \label{fol_con3} \\
&\displaystyle S^{h+1}= \rho^d S^h-\sum_{\begin{subarray}{c} n\inN \\ a\inA_n\end{subarray}} s^h_{\na} + \rho^c \left(\lambda^h_s+x^h_s+\bar{x}^h_s\right) \label{fol_con4} \\
&\displaystyle S^{\min}\leq S^h\leq S^{\max}. \label{fol_con5} \end{align} Here, $\rho^c$ and $\rho^d$ are respectively the charging and the discharging coefficients of the battery, and $S^{\min}$ and $S^{\max}$ are the lower and upper bounds on the battery state. Furthermore, the variables $\lambda^h_s$, $x^h_s$ and $\bar{x}^h_s$ represent the stored energy respectively taken from the DG, purchased from the leader, and purchased from the competitor.
Naturally, it is impossible to take more energy from the battery than the quantity that is stored at the beginning of a time slot. Therefore, the constraint \begin{align}
&\displaystyle\sum_{\begin{subarray}{c} n\inN \\ a\inA_n \end{subarray}} s^h_{\na}\leq S^h \label{fol_con6} \end{align} must be satisfied for all $h\in\mathcal{H}$.
The last constraint of the follower concerns the DG: only the energy that is indeed produced can be consumed or stored. Therefore, for all $h\in\mathcal{H}$, \begin{align}
& \displaystyle\lambda^h_s+\sum_{\begin{subarray}{c} n\inN \\ a\inA_n\end{subarray}} \lambda^h_{\na} \leq \lambda^h_{max} \label{fol_con7} \end{align} has to hold.
Finally, the objective of the SGO is to minimize the generalized cost consisting in the sum of the purchase costs and the inconvenience caused by delaying the use of a device. We indeed assume that the clients prefer to use their devices at the beginning of their respective time windows. To represent this inconvenience, we introduce the coefficient $C^h_{\na}$, that, multiplied by the energy consumption of device $(n,a)$, gives the inconvenience caused by the usage of $(n,a)$ at time $h\inT_{\na}$. The objective function of the follower is thus: \begin{align}
\begin{array}{l}
\displaystyle\sum_{\begin{subarray}{c} n\in N\\ a \in A_n \\ h\in T_{\na} \end{subarray}} C^h_{\na}\left(x^h_{\na} +\bar{x}^h_{\na} +\lambda^h_{\na}+s^h_{\na} \right) \\
\displaystyle +\sum_{\begin{subarray}{c} n\in N\\ a \in A_n \\ h\in T_{\na} \end{subarray}}\left(p^h x^h_{\na} +\bar{p}^h \bar{x}^h_{\na}\right) + \sum_{h\in\mathcal{H}} \left(\phx^h_s +\bar{p}^h\bar{x}^h_s\right).
\end{array} \label{fol_obj} \end{align}
In summary, the follower's problem can be formulated as $\left(P_{\SGO}\right)$: \[
\begin{array}{c c}
\displaystyle\min_{\mathbf{x},\mathbf{\bar{x}},\mathbf{s},\boldsymbol{\lambda},\mathbf{S}\geq 0} & (\ref{fol_obj})\\
\mathrm{s.t.} & (\ref{fol_con1}) - (\ref{fol_con7}),
\end{array} \] where bold variables are used to denote vectors (whenever indices and exponents are clear from the context).
\subsection{Leader's Problem}
The leader is an energy supplier who aims to maximize their revenue, which is the difference between their sales and their costs. These costs can be production costs, or, in our case, a linear function $K^h$ that represents the purchase costs on the spot market. The prices of the spot market are here supposed to be known in advance. The problem of the leader is thus $(P_{\Sup})$: \[
\begin{array}{r l}
\displaystyle\max_{\mathbf{p}}\max_{\mathbf{x},\mathbf{\bar{x}},\mathbf{s},\boldsymbol{\lambda},\mathbf{S}} & \displaystyle\sum_{\begin{subarray}{c} n\in N \\ a \in A_n \\ h\in T_{\na}\end{subarray}} p^h x^h_{\na} + \sum_{h\in \mathcal{H}} p^h x^h_s - \sum_{h\in\mathcal{H}} K^h\left(x^h_s+\sum_{\begin{subarray}{c} n\in N, a\in A_n \\ \mathrm{s.t.\ }h\inT_{\na} \end{subarray}} x^h_{\na}\right) \\
\mathrm{s.t.} & \left(\mathbf{x},\mathbf{\bar{x}},\mathbf{s},\boldsymbol{\lambda},\mathbf{S}\right)\mathrm{\ solves\ }\left(P_{\SGO}\right).
\end{array} \]
Observe in particular that $(P_{\Sup})$ is the optimistic formulation of the supplier's problem, as the maximum over the follower's variables is taken. Remark as well that there are no constraints on the leader's variables, as upper and lower bounds on the prices are implicit. Negative prices would indeed induce losses for the leader, whereas prices higher than the competitor's prices imply that the follower buys from the competitor.
\section{Scenario Tree Approach}\label{sect_scen-tree}
To tackle the issue of having a stochastic bound in the follower's problem, we adopt a scenario tree approach. Scenario trees are a widely used method in classical stochastic optimization (see, e.g., \cite{Heitsch_Scenario_2009}), although their application to bilevel stochastic problems remains scarce.
Since the unknown information is part of the lower level, the lower level is actually a multistage problem; thus the scenario trees are introduced into the lower level problem. At the upper level, the prices of the leader are decided at the beginning of the time horizon and do not change at a later stage, thus do not vary depending on the scenario.
Formally, in our problem, a scenario $\sigma\in\Sigma$ takes the form of a vector of bounds on the DG $\left( \lambda^{1,\sigma}_{\max},\dots,\lambda^{H,\sigma}_{\max} \right)$, where for all $h\in\mathcal{H}$, $\lambda^{h,\sigma}_{\max}=\lambda^{h,\sigma_i}_{\max}$ for some $i\in\{1,\dots,n_\Sigma\}$. The scenarios $\sigma_i$ for $i\in\{1,\dots,n_\Sigma\}$ are called the \emph{base} scenarios: each scenario $\sigma\in\Sigma$ switches from one base scenario to the other as time passes by. The set of possible scenarios can thus be represented as the leaves of a rooted tree, where each node has one ascendant and $n_\Sigma$ descendants. Furthermore, the scenario $\sigma\in\Sigma$ has a probability $P[\sigma]$ to occur.
Introducing scenario trees into the follower's problem has multiple repercussions. First, for each scenario $\sigma\in\Sigma$, we associate a set of follower's variables: \begin{align}
\begin{array}{l c l l c l}
x^h_{\na} & \mapsto & x^{h,\scena}_{\na} \hspace{2cm}& x^h_s & \mapsto & x^{h,\scena}_s \\
\bar{x}^h_{\na} & \mapsto & \bar{x}^{h,\scena}_{\na} & \bar{x}^h_s & \mapsto & \bar{x}^{h,\scena}_s \\
\lambda^h_{\na} & \mapsto & \lambda^{h,\scena}_{\na} & \lambda^h_s & \mapsto & \lambda^{h,\scena}_s\\
s^h_{\na} & \mapsto & s^{h,\scena}_{\na} & S^h & \mapsto & S^{h,\scena}. \\
\end{array} \label{eq_varreplace} \end{align}
Naturally, constraints (\ref{fol_con1})--(\ref{fol_con6}) must hold for all scenarios $\sigma\in\Sigma$, with the variable replacement described in (\ref{eq_varreplace}). We denote those constraints by (\ref{fol_con1})($\sigma$)--(\ref{fol_con6})($\sigma$). The upper bound on the renewable energy production in constraint (\ref{fol_con7}) depends on the scenario; therefore the new constraints (\ref{fol_con7})($\sigma$) are as follows: \[
\displaystyle\lambda^{h,\scena}_s+\sum_{\begin{subarray}{c} n\in N \\ a\in A_n\end{subarray}} \lambda^{h,\scena}_{\na} \leq \lambda^{h,\scena}_{\max}\qquad \forall h\in\mathcal{H}. \]
Since scenario trees are considered, it is crucial that as long as two scenarios are indistinguishable, the same decisions are made. Thus we define \[
h(\sigma,\sigma') = \max\left\{h\in\mathcal{H} \mid \lambda^{h',\sigma}_{\max} = \lambda^{h',\sigma'}_{\max} \quad \forall h'\leq h \right\}, \] which is the latest time slot for which the scenarios $\sigma$ and $\sigma'$ are indistinguishable. For all pairs of scenarios $\sigma,\sigma'\in \Sigma$, the so-called \emph{nonanticipativity} constraints have to be satisfied: \begin{align}
\begin{array}{l c l l c l}
x^{h,\scena}_{\na} & = & x^{h,\scena'}_{\na} \hspace{1cm}& x^{h,\scena}_s & = & x^{h,\scena'}_s \\
\bar{x}^{h,\scena}_{\na} & = & \bar{x}^{h,\scena'}_{\na} & \bar{x}^{h,\scena}_s & = & \bar{x}^{h,\scena'}_s \\
\lambda^{h,\scena}_{\na} & = & \lambda^{h,\scena'}_{\na} & \lambda^{h,\scena}_s & = & \lambda^{h,\scena'}_s\\
s^{h,\scena}_{\na} & = & s^{h,\scena'}_{\na} & S^{h,\scena} & = & S^{h,\scena'} \\
\end{array} && \forall h\leq h(\sigma,\sigma'). \label{cons_nonant} \end{align}
The last necessary change induced by the introduction of scenario trees concerns the objective functions of both the leader and follower. First, the follower's objective function is now defined as \begin{align}
\begin{array}{l}
\displaystyle\sum_{\sigma\in\Sigma}P[\sigma]\left(\sum_{\begin{subarray}{c} n\in N\\ a \in A_n \\ h\in T_{\na} \end{subarray}} C^h_{\na}\left(x^{h,\scena}_{\na} +\bar{x}^{h,\scena}_{\na} +\lambda^{h,\scena}_{\na}+s^{h,\scena}_{\na} \right)\right. \\
\displaystyle \hphantom{\sum_{\sigma\in\Sigma}P[\sigma]} \left.+\sum_{\begin{subarray}{c} n\in N\\ a \in A_n \\ h\in T_{\na} \end{subarray}}\left(p^h x^{h,\scena}_{\na} +\bar{p}^h \bar{x}^{h,\scena}_{\na}\right) + \sum_{h\in\mathcal{H}} \left(p^hx^{h,\scena}_s +\bar{p}^h\bar{x}^{h,\scena}_s\right)\right),
\end{array} \label{fol_obj_sto} \end{align} where $P[\sigma]$ is the probability that scenario $\sigma\in\Sigma$ occurs, so that $\sum_{\sigma\in\Sigma} P[\sigma] =1$.
The follower's stochastic problem is finally defined as $\left(P^{\sto}_{\SGO}\right)$: \[
\begin{array}{r c}
\displaystyle\min_{\mathbf{x},\mathbf{\bar{x}},\mathbf{s},\boldsymbol{\lambda},\mathbf{S}\geq 0} & (\ref{fol_obj_sto}) \\
\mathrm{s.t.} & \left\{\begin{array}{c l}
(\ref{fol_con1})(\sigma) - (\ref{fol_con7})(\sigma) & \forall \sigma\in\Sigma\\
(\ref{cons_nonant}) & \forall \sigma,\sigma'\in\Sigma.
\end{array}\right.
\end{array} \]
At the upper level, the leader also maximizes their expected profit. Therefore, their objective function becomes \begin{align}
\sum_{\sigma\in\Sigma}P[\sigma]\left(\sum_{\begin{subarray}{c} n\in N \\ a \in A_n \\ h\in T_{\na}\end{subarray}} p^h x^{h,\scena}_{\na} + \sum_{h\in \mathcal{H}} p^h x^{h,\scena}_s - \sum_{h\in\mathcal{H}} K^h\left(x^{h,\scena}_s+\sum_{\begin{subarray}{c} n\in N, a\in A_n \\ \mathrm{s.t.\ }h\inT_{\na} \end{subarray}} x^{h,\scena}_{\na}\right)\right), \label{lead_obj_sto} \end{align} and the leader's stochastic problem is defined as $\left(P^{\sto}_{\Sup}\right)$: \[
\begin{array}{r c}
\displaystyle\max_{\mathbf{p}}\max_{\mathbf{x},\mathbf{\bar{x}},\mathbf{s},\boldsymbol{\lambda},\mathbf{S}} & (\ref{lead_obj_sto}) \\
\mathrm{s.t.} & \left(\mathbf{x},\mathbf{\bar{x}},\mathbf{s},\boldsymbol{\lambda},\mathbf{S}\right)\mathrm{\ solves\ }\left(P^{\sto}_{\SGO}\right).
\end{array} \]
\subsection{One-level formulation}\label{subs_bp-to-milp}
Now that the stochastic problem is defined, we intend to solve it. This can be achieved by transforming it into an equivalent MIP that can be computationally tackled by (commercial) solvers. Observe that for fixed decisions of the leader, the follower's problem is a linear one. Therefore, it can be replaced by its optimality conditions in the leader's problem, giving rise to a mathematical program with complementarity constraints (MPCC), which can be linearized through the \emph{big M} method, yielding the expected MIP. This method is widely used in bilevel programming, since MIPs can be solved efficiently by commercial solvers. The optimality conditions consist of the primal constraints, the dual constraints and the complementary slackness constraints.
Let us denote a tuple of primal variables by \[
\varphi^p=\left( \mathbf{x},\mathbf{\bar{x}},\boldsymbol{\lambda}, \mathbf{s}, \mathbf{x_s},\mathbf{\bar{x}_s},\boldsymbol{\lambda_s}, \mathbf{S} \right). \] This tuple $\varphi^p$ belongs to $Primal$ if it satisfies the constraints (\ref{fol_con1})($\sigma$)-(\ref{fol_con7})($\sigma$) for all $\sigma\in\Sigma$ and (\ref{cons_nonant}) for all pairs of scenarios $\sigma,\sigma'\in\Sigma$. To each of the constraints, we associate its dual variable: respectively $d1^{\sigma}_{\na}$, $d2^{\sigma}_{\na}$, $d3^{\sigma}_0$, $d4^{h,\sigma}$, $d5^{h,\sigma}_{min}$, $d5^{h,\sigma}_{max}$, $d6^{h,\sigma}_{max}$, $d7^{h,\sigma}$, $dx^{\sigma,\sigma',h}_{\na}$, $d\bar{x}^{\sigma,\sigma',h}_{\na}$, $d\lambda^{\sigma,\sigma',h}_{\na}$, $ds^{\sigma,\sigma',h}_{\na}$, $d\lambda^{\sigma,\sigma',h}_s$, $dx^{\sigma,\sigma',h}_s$, $d\bar{x}^{\sigma,\sigma',h}_s$, and $dS^{\sigma,\sigma',h}$. Thus, we denote a tuple of dual variables by \[
\varphi^d=\left( \mathbf{d1}, \mathbf{d2}, \mathbf{d3}, \mathbf{d4}, \mathbf{d5}, \mathbf{d6}, \mathbf{d7}, \mathbf{dx}, \mathbf{d\bar{x}}, \mathbf{d}\boldsymbol{\lambda}, \mathbf{ds}, \mathbf{d}\boldsymbol{\lambda_s}, \mathbf{dx_s}, \mathbf{d\bar{x}_s}, \mathbf{dS}\right), \] and write $\varphi^d\inDual$ if $\varphi^d$ satisfies the dual constraints of $\left( P^{\sto}_{\SGO} \right)$, which are described in detail in \ref{app_dual}.
Finally, let us denote by $\varphi$ a pair $(\varphi^p, \varphi^d)$ of primal and dual variables. This tuple belongs to $Comp$ if it satisfies the complementarity constraints detailed in \ref{app_comp}. If $(\varphi^p,\varphi^d)$ belongs to $\left( Primal\timesDual \right)\capComp$, then $\varphi^p$ is primal optimal and $\varphi^d$ is dual optimal. Therefore, the leader's problem $\left( P^{\sto}_{\Sup} \right)$ can be replaced by the single-level problem: \begin{align*}
&\max_{\mathbf{p},\varphi} \displaystyle \sum_{\sigma\in \Sigma} P[\sigma]\cdot \left( \sum_{\begin{subarray}{c} n\in N \\ a \in A_n \\ h\in T_{\na}\end{subarray}} p^h x^{h,\scena}_{\na} + \sum_{h\in \mathcal{H}} p^h x^{h,\scena}_s\right. \\
&\left.\hphantom{\max_{\mathbf{p},\varphi} \displaystyle \sum_{\sigma\in \Sigma} P[\sigma]} - \sum_{h\in\mathcal{H}} K^h\left( x^{h,\scena}_s+\sum_{\begin{subarray}{c} n\in N, a\in A_n \\ \mathrm{s.t.\ }h\inT_{\na} \end{subarray}} x^{h,\scena}_{\na}\right)\right)\\
&\mathrm{s.t.}\ \varphi \in \left(Primal\timesDual\right)\capComp. \end{align*} Nevertheless, this problem is nonlinear, due to the products of variables in the complementarity constraints and in the objective function. To get a mixed integer linear problem (MILP), we linearize those products. First, the dual objective function of the follower's problem is used. Thanks to strong duality, an optimal pair $\left(\varphi^p, \varphi^d\right)$ satisfies that \begin{align*}
(\ref{fol_obj_sto})=&\sum_{\sigma\in\Sigma} \left( \sum_{\begin{subarray}{c}n\inN \\ a\inA_n\end{subarray}} \left(\Enad1^{\sigma}_{\na} - \sum_{h\inT_{\na}} \beta^{max}_{\na}d2^{\sigma}_{\na} \right)\right. \\
&\hphantom{\sum_{\sigma\in\Sigma}}\left.+ \sum_{h\in\mathcal{H}}\left( -\lambdamaxd7^{h,\sigma}+\Smind5^{h,\sigma}_{min}-\Smaxd5^{h,\sigma}_{max} \right) \vphantom{\sum_{\begin{subarray}{c}n\inN \\ a\inA_n\end{subarray}}}\right). \end{align*} It follows that the leader's objective function $(\ref{lead_obj_sto})$ can be rewritten as \[
F(\varphi) =\left\{ \begin{array}{l}
\displaystyle\sum_{\sigma\in\Sigma} \left(\vphantom{\sum_{\begin{subarray}{c}n\inN \\ a\inA_n \\ h\inT_{\na}\end{subarray}}} \sum_{\begin{subarray}{c}n\inN \\ a\inA_n \end{subarray}} \left(\Enad1^{\sigma}_{\na} - \sum_{h\inT_{\na}} \beta^{max}_{\na}d2^{\sigma}_{\na} \right)\right. \\
\displaystyle + \sum_{h\in\mathcal{H}}\left( -\lambdamaxd7^{h,\sigma}+\Smind5^{h,\sigma}_{min}-\Smaxd5^{h,\sigma}_{max} \right)\\
\displaystyle - \left(\sum_{\begin{subarray}{c}n\inN \\ a\inA_n \\ h\inT_{\na}\end{subarray}}\left( \bar{p}^h\bar{x}^{h,\scena}_{\na}+C^h_{\na}\left( x^{h,\scena}_{\na}+\bar{x}^{h,\scena}_{\na}+\lambda^{h,\scena}_{\na}+s^{h,\scena}_{\na}\right) \right)\right.\\
\displaystyle\left.\left. + \sum_{h\in\mathcal{H}} \left(\bar{p}^h\bar{x}^{h,\scena}_s + K^h\left( x^{h,\scena}_s+ \sum_{\begin{subarray}{c}n\inN \\ a\inA_n \end{subarray}} x^{h,\scena}_{\na}\right)\right)\vphantom{\sum_{\begin{subarray}{c}n\inN \\ a\inA_n \\ h\inT_{\na}\end{subarray}}}\right) \right).\\
\end{array}\right. \] Furthermore, the complementarity constraints are linearized with the big $M$ method, which consists in replacing the equation system \begin{align}\label{chap1_abzero1}
\begin{array}{l}
a\cdot b = 0\\
a,b\geq 0\\
\end{array} \end{align} by \begin{align}\label{chap1_abzero2}
\begin{array}{l}
a\leq M\cdot \delta\\
b\leq M\cdot \left(1-\delta\right)\\
a,b\geq 0,\\
\delta\in\{0,1\},\\
\end{array} \end{align} with $M$ sufficiently large. This process is not innocuous. Taking $M$ too small might eliminate solutions, and even careful algorithms can be fooled, as shown by \cite{Pineda_Solving_2019}. On the other hand, choosing $M$ too large will likely decrease the efficiency of the solver. Finally, choosing the right $M$ is NP-hard, as demonstrated by \cite{Kleinert_There_2019}. In our case, the big $M$ is chosen empirically. If $\varphi$ satisfies the set of big $M$ constraints replacing the original complementarity constraints, we write $\varphi\in Comp'$. Hence, the leader's problem $\left( P^{\sto}_{\Sup} \right)$ can finally be solved by finding an optimal solution of Problem $\left( P^{\sto,\MIP}_{\Sup} \right)$: \begin{align*}
& \max_{\mathbf{p},\varphi} F\left(\varphi\right) \\
&\mathrm{s.t.\ }\varphi\in \left(Primal\times Dual\right)\cap Comp'. \end{align*}
Obviously, Problem $\left( P^{\sto,\MIP}_{\Sup} \right)$ becomes huge when the number of time periods increases. Therefore, solving this MIP is not an option for instances comprising a large time horizon. This fact motivates the following section, where we design a rolling horizon algorithm.
\section{Rolling Horizon Approach}\label{sect_rh-approach}
Methods using rolling horizons are well-known in the topic of stochastic optimization, see for example \cite{Pironet_MultiPeriod_2014} for applications in transportation management. The idea behind rolling horizons is that it is possible to build a solution for a large problem out of the solutions of a sequence of smaller problems. More precisely, this sequence of smaller problems follows a chronological logic: the first problem to be solved considers a small time horizon from the beginning of the whole time horizon ($0$) to another close time slot ($l_{RH}$). Then, the second problem will consider a time horizon going from $s_{\RH}$ to $l_{RH}+s_{\RH}$ and so on, justifying the naming of \emph{rolling horizon}.
We first define the main parameters: \begin{itemize}
\item The (whole) \emph{time horizon} is the set $\mathcal{H} = \{0,1,\dots,H\}$, where $H$ can be infinite.
\item The \emph{length} of the rolling horizon $l_{RH}$ is the number of time slots that will be considered at every iteration of the rolling horizon method. Clearly, $l_{RH} \leq H$.
\item The rolling horizon \emph{step} $s_{\RH}$ is the number of time slots between two instances of the rolling horizon method. More precisely, if at the $n$th iteration, the time horizon of the considered instance starts at $t$, then the time horizon of the $(n+1)$st iteration will start at $t+s_{\RH}$. To have sensible parameters, it is necessary that $s_{\RH}\leq l_{RH}$.
\item The length of the frozen horizon $l_{FH}$ determines how much of the results computed at the $(n-1)$st iteration will be reused in the $n$th iteration. In classical stochastic optimization, it means that at the $n$th iteration, on the time horizon going from $t$ to $t+l_{RH}$, the decisions corresponding to the time slots $t,\dots,t+l_{FH}$ were made during the $(n-1)$st iteration and are not recomputed. In bilevel programming, the situation is slightly more complex. Indeed, the lower level variables can be seen as recourse variables, and should thus be modifiable at any iteration of the rolling horizon method. On the other hand, the energy supplier offers prices to the SGO, and thus should not be able to modify their prices at the last minute. Hence, during the $n$th iteration of the rolling horizon method, the leader's prices will be fixed for the time slots $t,\dots, t+l_{FH}$, and only the prices corresponding to later time slots might change, along with the SGO-decided consumption schedule. It follows from the definition of $l_{FH}$ that $l_{RH}-l_{FH}\leq s_{\RH}$ must hold. \end{itemize} Figure \ref{fig_rhscheme} illustrates the situation.
\begin{figure}
\caption{The rolling horizon scheme}
\label{fig_rhscheme}
\end{figure}
Let us denote by $P_\Hh$ the set of parameters defining an instance of $(SBPP)$ over the full time horizon $\mathcal{H}$. $P_\Hh$ thus comprises the information related to the devices, to the competitor's prices, to the storage capacities, and to the distributed generation scenarios. The rolling horizon algorithm is described in Algorithm \ref{algo_rh}.
\begin{algorithm}[!ht]
\caption{Rolling horizon algorithm}\label{algo_rh}
\begin{algorithmic}[1]
\Procedure{RH}{$P_\Hh$, $l_{RH}$, $l_{FH}$, $s_{\RH}$}
\State $t\gets 0$
\Repeat
\State Generate $P^t_{s_{\RH}}$ \label{step_gen}
\State Solve $P^t_{s_{\RH}}$ \label{step_solve}
\State Select a scenario $\sigma$ \label{step_scenachoice}
\State Actualize data \label{step_dataact}
\State $t\gets t+s_{\RH}$
\Until{$t+l_{RH} \geq H$}
\State \textbf{return} data \label{step_return}
\EndProcedure
\end{algorithmic} \end{algorithm}
In step \ref{step_gen}, the subinstance $P^t_{s_{\RH}}$ is generated. The time period considered in $P^t_{s_{\RH}}$ is the time period $\mathcal{H}_t=\{t,\dots,t+l_{RH}\}$. Therefore, the only devices in $P^t_{s_{\RH}}$ are the devices $(n,a)$ such that $T_{\na} \cap \{t,\dots,t+l_{RH}\} \neq \emptyset$. Two cases may arise for a given device $(n,a)$: \begin{itemize}
\item If $T_{\na}^{\mathrm{first}}<t$, then the device was already considered in the previous iterations of the algorithm. Therefore, it might have received some energy during the previous time slots. It results that the required energy has to be actualized: the energy demand of $(n,a)$ in $P^t_{s_{\RH}}$ is denoted by $E_{\na}^t$ and is worth $E_{\na} - \sum_{h< t} \left(x^{h,\scena}_{\na}+\bar{x}^{h,\scena}_{\na}+s^{h,\scena}_{\na}+\lambda^{h,\scena}_{\na}\right)$, with $\sigma$ the scenario that has been selected at step \ref{step_scenachoice} in the previous iterations.
\item If $T_{\na}^{\mathrm{last}}>t+l_{RH}$, then $T_{\na}$ partly belongs to the time period considered in $P^t_{s_{\RH}}$. Several options are conceivable. Here, we choose to set $E_{\na}^t=\min\left\{E_{\na}, \left(t+l_{RH}-T_{\na}^{\mathrm{first}}\right)\beta^{max}_{\na}\right\}$, meaning that the device $(n,a)$ should consume as much energy as possible during the time period $\mathcal{H}_t$. Indeed, delaying the demand leads to inconvenience for the follower, thus powering the device as early as possible minimizes the inconvenience. \end{itemize}
The instances $P_\Hh$ and $P^t_{s_{\RH}}$ for all $t\in\mathcal{H}$ all involve a scenario tree. Here, we assume that the scenario tree of $P_\Hh$ is a complete tree with time periods of length $s_{\RH}$. Therefore, one of the base scenarios is selected at every iteration of step \ref{step_scenachoice}: the scenario selected at time $t$ is the scenario that comes to reality for the time slots $t,\dots,t+s_{\RH}-1$. However, considering a complete tree on $\mathcal{H}_t$ can be difficult, even if $\mathcal{H}_t$ is small, as shown in Section \ref{sect_num-res}. Therefore, the scenario tree of $P^t_{s_{\RH}}$ is assumed to only comprise the base scenarios. Furthermore, we assume that the probability to choose one of the scenarios only depends on the scenario that was selected at the previous iteration, as in a Markov process.
In step \ref{step_solve}, the problem solved is a MIP, as defined in Section \ref{subs_bp-to-milp}, to which we add constraints ensuring that the prices belonging to the frozen horizon are indeed the prices that have been determined in the previous iterations.
In step \ref{step_dataact}, the added information consists of the choice made for the scenario in step \ref{step_scenachoice}, the follower's decisions associated with the chosen scenario for the time slots $t$ to $t+s_{\RH}-1$ and the leader's decisions for the frozen horizon (except for the last iteration of the algorithm, where the leader's and follower's decisions are saved until the end of the horizon). At the end of the rolling horizon method, the data thus contains one scenario and the associated decisions for the whole time horizon $\mathcal{H}$.
\section{Numerical Results}\label{sect_num-res}
This section is divided into two parts. First, a sensitivity analysis is conducted on a one-week instance with a single DG scenario to study the influence of individual parameters on the results. Second, we evaluate the efficiency of the rolling horizon method, focusing on how the length of the frozen horizon affects the obtained results.
All the results presented here were obtained with CPLEX 12.6 on a Linux virtual machine with 10~GB of RAM running on a computer equipped with an Intel i7-4600U processor at 2.10~GHz.
\subsection{Test instances}\label{sec_testinstance}
All the numerical tests are conducted on an instance having the following features: \begin{itemize}
\item Each time slot represents a 30-minute period,
\item The instance comprises 336 time slots, adding up to one full week,
\item The energy costs are based on the prices of electricity on the spot market during a fall week (see Figure \ref{fig_spotprices}),
\item There are 120 devices. Their demand in the ideal case (i.e., without delay) approximates the electricity consumption due to the heating of a dozen households during a fall week (see Figure \ref{fig_basedemands}),
\item The DG scenario is based on the load factor profile for a fall week, with a generation capacity large enough to cover the electricity consumption during off-peak periods (see Figure \ref{fig_dgscen}). \end{itemize} The data used to create the test instance were supplied by the industrial partner (EDF).
\begin{figure}
\caption{The spot prices retrieved from industrial data.}
\label{fig_spotprices}
\end{figure}
\begin{figure}
\caption{The energy consumption retrieved from industrial data, and implied by the devices if they are used during the first time slots of their associated time window.}
\label{fig_basedemands}
\end{figure}
\begin{figure}
\caption{The original DG scenario.}
\label{fig_dgscen}
\end{figure}
\subsection{Sensitivity analysis}
The parameters that are perturbed to conduct the sensitivity analysis are the DG scenario, the inconvenience coefficients, the energy costs, the battery size, and the time window sizes. In total, thirteen instances are solved. From these thirteen instances, only one is solved to optimality within the time limit of $1200$ seconds, namely the instance where the inconvenience coefficients are all set to $0$. For the other twelve, the optimality gaps range from $0.3\%$ to $1.9\%$. Usually, the solver can reach a relatively good solution early in the process, but fails to close the optimality gap subsequently.
The numbers of variables and constraints of the MIP not only depend on the number of time slots and the number of devices, but also on the length of the time windows and their position in the time horizon. A time window located in the first time period will generate more nonanticipativity constraints than a time window located near the end of the time horizon. As an indication, solving the MIP relative to the original test instance requires the use of 35530 constraints, 33745 continuous variables and 1157 binary variables.
To perform the sensitivity analysis and quantify the impact of the bilevel model, we compute the relevant values of the so-called \emph{reference case}, as opposed to the \emph{optimized case}, which corresponds to the optimal solution of (SBPP). The reference case is defined as follows: first, the leader sets their prices at the same level as the competitor. Then the follower determines the schedule of the devices in the most convenient way: each device $(n,a)$ is powered at its maximum power $\beta^{max}_{\na}$ during the first time slots of its related time window until $E_{\na}$ is reached, then the device is stopped. To power the device, the follower makes DG their first choice, then energy from the battery, and finally energy from the leader. If at a given time slot, the DG exceeds the demand, the remaining energy is stored, unless the battery is full. The energy demand of the follower in the reference case is depicted in Figure \ref{fig_basedemands}. In particular, observe that the reference case minimizes the inconvenience for the follower, but does not optimize the DG use, which implies that in the reference case, the follower's response is not optimal with respect to the leader's prices.
The results for the objective values of the leader are given in Table \ref{tab_leadobj}, whereas Table \ref{tab_folobj} gathers information about the follower's objective. In the first column of both tables, "bat." stands for battery, "inc." for inconvenience, "Spot" for the instance with higher market prices and "TW" for the instances where the sizes of the time windows vary. "BC ref" represents the billing cost of the follower in the reference case, "IC ref" the inconvenience cost in the reference case, "BC opt" the billing cost when the prices of the leader are optimized, and "IC opt" the inconvenience cost when the prices of the leader are optimized. Finally, \% BC, \% IC and \% GC show the difference (in percentage) between the reference case and the optimized case of the billing cost, the inconvenience cost and the generalized cost respectively (e.g., \% BC $=100\cdot$BC opt$/$BC ref).
\begin{table}[!ht]
\begin{center}
\begin{tabular}{|c|c|c|c|} \hline
Instance & Ref. obj. & Opt. obj. & \% diff. \\ \hline \hline
Base & $34172.68$ & $34676.53$ & $1.47$ \\
Zero bat. & $34261.4$ & $34372.9$ & $0.33$ \\
Small bat. & $34201.32$ & $34521.54$ & $0.94$ \\
Large bat. & $34182.14$ & $34714.16$ & $1.56$ \\
Zero inc. & $34172.68$ & $35753.54$ & $4.63$ \\
Low inc. & $34172.68$ & $35385.64$ & $3.55$ \\
High inc. & $34172.68$ & $34389.21$ & $0.63$ \\
Zero DG & $46597.9$ & $47101.38$ & $1.08$ \\
Low DG & $40390.54$ & $40721.06$ & $0.82$ \\
High DG & $29084.13$ & $28611.95$& $-1.62$ \\
Spot & $33240.14$& $34323.98$ & $3.26$ \\
Small TW & $34172.68$ & $34668.36$ & $1.45$ \\
Large TW & $34172.68$ & $34531.8$ & $1.05$ \\ \hline
\end{tabular}
\end{center}
\caption{The leader's objective values in the reference case, in the optimized case, and the difference in percentages.}
\label{tab_leadobj} \end{table}
\begin{table}[!ht]
\begin{center}
\begin{tabular}{|c|c|c|c|c|c|c|c|}\hline
Instance & BC ref & IC ref & BC opt & IC opt & \% BC & \% IC & \% GC \\ \hline\hline
Base & $46573.2$ & $525.13$ & $46025.7$ & $877.31$ & $98.82$ & $167.07$ & $99.59$ \\
Zero bat. & $46659.2$& $525.13$& $45964.7$& $856.52$ & $98.51$ & $163.11$ & $99.23$ \\
Small bat. & $46589.2$& $525.13$& $46246.8$& $737.52$ & $99.27$ & $140.45$ & $99.72$ \\
Large bat. & $46573.2$& $525.13$& $46379.4$& $646.92$ & $99.58$ & $123.19$ & $99.84$ \\
Zero inc. & $46573.2$ & $0$ & $46573.2$ & $0$ & $100$ & $-$ & $100$ \\
Low inc. & $46573.2$& $262.56$& $46343.7$& $540.29$ & $99.51$ & $205.77$ & $100.1$ \\
High inc. & $46573.2$& $787.69$ & $46052.1$ & $1047.97$ & $98.88$ & $133.04$ & $99.45$ \\
Zero DG & $63882$& $525.13$& $63737.8$& $655.5$ & $99.77$ & $124.83$ & $99.98$ \\
Low DG & $55227.6$& $525.13$& $54784.7$& $712.29$ & $99.2$ & $135.64$ & $99.54$ \\
High DG & $39317.2$& $525.13$& $37755.5$& $786.96$ & $96.03$ & $149.86$ & $96.74$ \\
Spot & $46573.2$& $525.13$& $46002.4$& $905.36$ & $98.77$ & $172.41$ & $99.6$ \\
Small TW & $46573.2$ & $525.13$ & $46383.4$ & $681.32 $ & $99.59$ & $129.75$ & $99.93$ \\
Large TW & $46573.2$ & $525.13$ & $45543.8$ & $1031.83$ & $97.79$ & $196.49$ & $98.89$ \\
\hline
\end{tabular}
\end{center}
\caption{The follower's detailed objective values.}\label{tab_folobj} \end{table}
\subsubsection{DG scenarios}\label{sec_scenas}
Besides the original test instance, three more instances are solved with DG scenarios equivalent to respectively $0$, $0.5$, and $1.5$ times the original DG scenario which is represented in Figure \ref{fig_dgscen}.
As much as possible of the energy coming from DG is used (that is, directly consumed or stored for later consumption), as this energy is considered free by the SGO. This behavior is illustrated in Figure \ref{fig_sensi_dg4}. It follows directly that if the DG increases, the amount of energy bought from the grid will decrease accordingly. This naturally widely affects the leader's profits, as Table \ref{tab_leadobj} shows: those profits range from $28611$ when the DG is high to $47101$ when the DG is nonexistent.
\begin{figure}
\caption{The DG consumption for the base instance, for time slots $54$ to $96$.}
\label{fig_sensi_dg4}
\end{figure}
As for the demand, the fact that the follower's problem is linear induces an ``all or nothing'' situation: if for time slots $h_1<h_2$, the generalized cost of powering a device $(n,a)$ during $h_2$ is smaller than during $h_1$ (i.e., $\min\{p^{h_1},\bar{p}^{h_1}\}+C^{h_1}_{(n,a)} > \min\{p^{h_2},\bar{p}^{h_2}\}+C^{h_2}_{(n,a)}$), then all the energy that can possibly be shifted from $h_1$ to $h_2$ will be. This explains the huge variations in the follower's demand (e.g., from $0$ during time slot $23$ to $30$ during the next time slot) that can be observed in Figure \ref{fig_sensi_dg23}. Furthermore, it can be observed in the same figure that power coming from the DG is entirely used: smooth curves following the DG curve can be observed between time $16$ and $29$, especially on the curve illustrating the demand with a high DG scenario.
\begin{figure}
\caption{The follower's demands for the instances with various DG scenarios for the time slots $15$ to $40$ (bottom).}
\label{fig_sensi_dg23}
\end{figure}
\subsubsection{Inconvenience coefficients}\label{sect_sensiinc}
To study the influence of the inconvenience coefficients on the results, we consider four instances. Recall beforehand that the inconvenience coefficients increase linearly with the time for each device, therefore the difference between the inconvenience coefficients of two consecutive hours is constant, and assumed to be the same for all devices: in the original case (normal inconvenience), this value is $0.0625$. In the three other cases, it is respectively $0$, $0.5$, and $1.5$ times the original value, that is to say $0$ in the zero inconvenience case, $0.03125$ in the low inconvenience case, and $0.09375$ in the high inconvenience case. Unsurprisingly, the leader's profit decreases as the inconvenience coefficients grow (see Table \ref{tab_leadobj}): if the consumers are more willing to shift their loads, less incentive is necessary to induce the same shift, and thus more profit is achievable for the leader.
The latter fact can be easily observed in Figure \ref{fig_sensi_inc23}. The optimal leader price profile when there is no inconvenience is constant at the level of the competitor's prices, because no incentive is required to induce a load shift. The optimistic assumption implies that the follower's answer is precisely what is best for the leader. On the contrary, inducing load shifts for high inconvenience values is difficult. Prices resulting in a load shift despite a high inconvenience are clearly lower than prices achieving a similar load shift with a low inconvenience. This is conspicuous for example in time slot $33$. For other time slots, the inconvenience is too high to induce a shift (during time slot $7$ for instance), which is why the producer lowers their prices during these periods only for the instances with low inconvenience values.
Finally, let us observe that the prices tend to decrease linearly (with the slope depending on the inconvenience) as the supplier induces delays. This can be observed in Figure \ref{fig_sensi_inc23}: for example, the optimal prices for the instance with high inconvenience linearly decrease between time slots $11$ and $19$, with the notable exceptions of time slots $13$ and $18$. The explanation lies in the optimistic assumption of the problem: when the follower gets their energy at the same generalized cost at two time slots, they consume during the time slot that is most advantageous for the leader. When the prices decrease linearly over a given period, they compensate for the inconvenience costs that grow linearly with the delay, so that the generalized cost remains the same over the period.
\begin{figure}
\caption{The leader price profiles for the instances with various inconvenience coefficients for the $40$ first time slots.}
\label{fig_sensi_inc23}
\end{figure}
\subsubsection{Energy costs}
In this paper, the energy supplied by the leader is assumed to be purchased on the spot market at prices that are known in advance. The spot market prices thus have a large influence, as they determine which time slots are the most profitable for the leader, or at the contrary which time slots are the least valuable. To show this influence, we consider an instance where the spot market prices are $20\%$ higher during peak periods, i.e., periods when the prices are particularly high (more than $4.5$). In that case, the leader has an increased motivation to induce a load shift from peak to off-peak periods, as such a shift induces a larger profit.
As with the two parameters previously studied, it is not surprising that the leader's optimal value is lower when the spot prices are higher: with the original prices, the leader's objective reaches $34677$, whereas it only attains $34324$ with the higher peak prices. Observe, however, that the difference is not as significant as when the DG scenario varies: whereas differences in the leader's optimal value can reach several thousand, it is limited to $353$ in the case of higher market prices. This value is of the same order of magnitude as the differences between the instances with various inconvenience coefficients ($939$ between the instance with low inconvenience and the instance with high inconvenience).
\begin{figure}
\caption{The follower's demands and the leader price profiles for the instances with various spot market prices.}
\label{fig_sensi_spot23}
\end{figure}
In Figure \ref{fig_sensi_spot23}, observe that the demand curves differ for the high market prices instance and the base instance: the differences occur almost always during the peak periods, i.e., when the market prices in both considered instances are different. The higher need to shift the load from peak to off-peak periods becomes clear while having a look at the optimal price profiles. The optimal prices for the high market prices instance are indeed generally lower than the optimal prices of the base instance, e.g., during time slots $25$ and $170$.
\subsubsection{Battery Size}
Besides the original instance, three instances with various battery sizes are considered: a zero storage capacity, a small battery, and a large battery, which are equal to respectively $0$, $0.5$, and $1.5$ times the size of the original battery. Clearly, storage capacities are filled when electricity prices are low, and the stored energy is consumed when prices are higher. One might think that a greater storage capacity represents an advantage for the follower. However, it turns out that a greater capacity is especially advantageous for the leader, as the leader's objective values indicate in Table \ref{tab_leadobj}. The reason is that a larger battery allows for more freedom. Specifically, the follower can purchase more energy during cheap time slots, when the leader makes the most profit, and store this energy for expensive time slots.
In Figure \ref{fig_sensi_stor45}, the battery states are shown for the four considered storage sizes. The origin of the stored energy is indicated for the instance with the largest storage capacity. Observe that most of the stored energy is bought from the grid, instead of being taken from the DG. As for the demand curves, the "all or nothing" nature of linear programming is clearly visible, since the battery often oscillates between being full and being empty. These oscillations are particularly visible for the small battery instance during time slots $97$ to $109$ (Figure \ref{fig_sensi_stor45}, bottom graph).
\begin{figure}
\caption{The battery states for the instances with various storage sizes and the origin of the stored energy for the large storage instance, for time slots $97$ to $149$.}
\label{fig_sensi_stor45}
\end{figure}
\subsubsection{Time Window Sizes}
Finally, the last parameters that we considered are the sizes of the time windows. Whereas the length of the time windows varies between $1.8$ and $2.5$ times the number of time slots needed to power each appliance in the base instance, these factors are in a range from $1.4$ to $2$ in the instance with narrower time windows, and between $2.14$ and $3$ in the instance with longer time windows. Observe, however, that the difference of profit for the leader is rather small ($34677$ with normal time windows, $34668$ with narrow time windows and $34532$ for longer time windows), and thus that this parameter does not much influence the leader's profit, nor the follower's results.
\subsubsection{A Word on the Objective Values}\label{sec_sensilead}
The solutions of (SBPP){} and the reference case are compared on the thirteen instances considered in this section. The objective values of the leader are given in Table \ref{tab_leadobj}. Except for the instance with high DG, the leader's profit is always higher in the optimized case than in the reference case. Although the difference in profit is rather small (between $0.33\%$ and $4.63\%$), it is not negligible. Unsurprisingly, the largest difference between the profits in the reference case and in the optimized case lies in the instance where the follower does not face inconvenience. As argued in Section \ref{sect_sensiinc}, no incentive is required to make the follower react in the most favorable way for the leader. Therefore, $4.63\%$ is the best possible increase for the leader's profit, in the case where the inconvenience factors vary. Concerning the instance with higher peak energy costs, the difference in profit is significant as well, with a difference equal to $3.26\%$. The energy cost differences indeed make load shifts more advantageous for the leader than in the instance with normal inconvenience. The example with high DG resulting in a lower leader profit can easily be explained by the fact that the DG use in the reference case is not optimal, which leads to significant differences as the DG quantities become important.
On the follower's side, even though (SBPP){} is not a zero-sum game, there is a clear relation between the follower's and the leader's objectives. The billing cost of the follower constitutes the revenues of the leader, as no energy is purchased from the competitor due to the optimistic assumption and the fact that the competitor prices are greater than the energy costs. Therefore, intuitively, the leader's optimized prices are going to induce a follower's reaction that will bring approximately the same objective value as in the reference case. Of course, the follower's optimal objective value in the reference case constitutes an upper bound for the follower's optimal value associated with any leader price profile, since the consumption schedule of the reference case is always feasible. The results for the follower's optimal value are illustrated in Table \ref{tab_folobj}.
All follower's generalized costs are smaller in the optimized case than in the reference case, except for the low inconvenience instance, which is probably due to the commercial solver finding a local optimum instead of a global one. Furthermore, the percentages are all very close to $100\%$, the furthest being the instance with large time windows ($98.89\%$), which confirms the above-mentioned intuition. Furthermore, the billing cost is always smaller in the optimized case, but the inconvenience cost increases. This is due to the fact that in the reference case, the follower's inconvenience is actually minimized.
Note that in the reference case, the parameters influencing the billing cost are the size of the battery and the DG, whereas only changes in the inconvenience coefficients induce a change in the inconvenience cost.
\subsection{Test of the RH Method}
To evaluate the efficiency of the rolling horizon methods, a set of tests has been determined. The parameter we choose to analyze is the length of the frozen horizon $l_{FH}$. The considered instance is the one described in Section \ref{sec_testinstance} with the three nonzero scenarios used in Section \ref{sec_scenas} as base scenarios. For the various parameters, it has been decided that the length of the rolling horizon would be $12$, whereas the iteration step would be $1$. The values tested for the length of the frozen horizon are all even numbers between (and including) $0$ and $10$. The number of possible scenarios over the whole time horizon being obviously gigantic, a set of five random scenarios have been generated as follows: during the run with $l_{FH}=0$, at each iteration of the rolling horizon algorithm, the scenario that gets real between times $t$ and $t+1$ is chosen following a Markov process, the probability to keep the same scenario as between $t-1$ and $t$ being equal to $0.4$, and the probability to switch to another scenario being equal to $0.3$. In the runs for longer frozen horizons, the choice of the scenario at each iteration is forced as the scenario that should be followed has been previously determined.
To evaluate the efficiency of the rolling horizon method, two comparison values are computed. Ideally, these values should represent bounds, but in fact, they are not. \begin{itemize}
\item The first one is the \emph{reference case}, as presented in Section \ref{sec_sensilead},
\item The second one is the \emph{perfect case}, that is the case where the scenario is known in advance, and both the leader and the follower completely optimize their decisions. \end{itemize} Observe that the second comparison value is strongly inspired by the well-known expected value of perfect information (see e.g. \cite{Birge_Introduction_2011}): the value computed here would be the \emph{wait-and-see} solution. However, the bilevel structure of the problem prevents this value from being a bound, even without considering the effects of the rolling horizon method: the information is shared both at the leader's and the follower's level, which might benefit the follower more than the leader (see \cite{vonNiederhausern_Design_2019} for more details).
In Figure \ref{fig_leadobj}, the leader's objective values are presented. In the above graph, observe that no pattern is followed as the length of the frozen horizon grows: for the first scenario, the leader's objective value is higher with $l_{FH}=8$ than with $l_{FH}=6$, whereas for the third run, the opposite occurs. We first expected the leader's profit to be higher with shorter frozen horizons, as the leader has more freedom, but it seems this is not the case in general. A potential reason for this is that many of the problems that are considered during the process are not solved to optimality and only feasible solutions are provided, as the solver only gets 150 seconds to find a solution at each iteration.
\begin{figure}
\caption{Leader's objective values \\
The above graph indicates only the leader values, whereas the chart below shows the comparison with the two computed comparison values.}
\label{fig_leadobj}
\end{figure}
Observe, furthermore, that first, the leader's objective value with the rolling horizon method is always higher than the objective value in the reference case, which proves that it is worthwhile trying to optimize the prices through DSM. Second, the objective value in the perfect case is also surpassed by the leader's objective value obtained with the rolling horizon method, except for the second scenario with $l_{FH}=6$. Obviously, the leader's price profile obtained with the rolling horizon method is feasible when computing the perfect case; thus one might think that the perfect case would bring a better profit to the leader. However, the stochastic nature of the follower's problem combined with the functioning of the rolling horizon method implies that the follower's response to the leader's prices might not be optimal. In our experiments, this happens regularly: often, the follower's response with the rolling horizon method even performs worse than the follower's response in the reference case, as can be seen in Figure \ref{fig_folobj}.
\begin{figure}
\caption{Follower's objective values \\
Comparison for the follower's objective values for all runs of the rolling horizon methods and for the reference case.}
\label{fig_folobj}
\end{figure}
Let us recall that the follower's objective consists in the purchase costs plus the inconvenience costs. The latter only amount to a small fraction of the generalized cost: they range from $525.13$ in the reference case (in that case, the inconvenience is minimal) to $878.13$ among generalized costs ranging from $45830$ to $50597$, which means less than two percent. Furthermore, it is more than logical to have higher inconvenience costs with the rolling horizon method, since the follower's response in the reference case actually minimizes the inconvenience.
Besides the nature of the rolling horizon method that avoids any optimality guarantee of the follower's response, the fact that each step of the rolling horizon method has to be solved in a time limit of $150$ seconds even hinders CPLEX to find optimal solutions for all the subproblems encountered during the algorithm's run. Several artifacts confirm this. First, sometimes, energy is bought from the competitor, which should not happen: at the price offered by the competitor, the leader makes profit. Therefore, buying energy from the competitor contradicts the optimistic assumption. This case, however, does not often happen: among the $30$ runs of the algorithm, $17$ have strictly positive values for $\sum_{n\inN,a\inA_n,h\inT_{\na}}\bar{x}^h_{\na}$, but only seven have values exceeding $1$, for a total demand of $4563$, with a maximum at $27.25$ (see Figure \ref{fig_enecomp} for their repartition), which represents less than $381.5$ in the generalized cost. Although not negligible, the importance of these flaws is relatively small. Observe, furthermore, that problems occur mostly in the runs with smaller lengths of frozen horizons. The justification is easy: the smaller the frozen horizon, the more leader variables, and thus the higher is the difficulty to obtain an optimal solution to each of the bilevel subproblems.
\begin{figure}
\caption{Energy quantity bought from the competitor \\
The bars represent the sum over all hours of the energy quantities bought from the competitor.}
\label{fig_enecomp}
\end{figure}
The other obviously nonoptimal behavior concerns the usage of renewable energy. In some of the algorithm's runs, the renewable energy, that is considered as free by the follower, is not used completely. Besides, some cases are more problematic, as the follower consumed more renewable energy than what is available (see Figure \ref{fig_fol_re}). This means that the equation that bounds the usage of renewable energy is not always satisfied. In fact, this happens on average between $15$ and $16$ times per run. Observe, however, that when $l_{FH}=0$, the above-mentioned equation is always satisfied. Moreover, the over- and underconsumption of renewable energy represent small percentages of the total disposable energy: the values are bounded by $50$ in most cases, which means less than $4$ percent. To justify those computing mistakes, we assume that again, the time left to the solver at each iteration is too small.
\begin{figure}
\caption{Unused renewable energy \\
The bars represent the difference between the renewable energy that is produced and the one that is either consumed or stored.}
\label{fig_fol_re}
\end{figure}
Despite the previously mentioned flaws, using the rolling horizon method is worth it. Besides the obvious fact that time goes on, implying a constant need for reoptimization, the method allows for leader profits that exceed the losses (with relation to the reference case) of the follower. This situation is illustrated in Figure \ref{fig_lead_fol}. In a realistic setting, the possibility of refunding the follower for the losses should be contemplated, in order to motivate the clients to play along. No rational client would take the risk to pay more (which is often the case in our setting) to help their supplier to make more profit. However, since the leader's profit easily covers the follower's losses, the deal would be harmless for the leader.
\begin{figure}
\caption{The advantages of the rolling horizon method \\
Above, the difference of profit for the leader with the rolling horizon method and in the reference case.\\
Below, the difference in the objective function of the follower between the rolling horizon method and the reference case, which can be seen as a loss.}
\label{fig_lead_fol}
\end{figure}
Let us finally mention the computing time to run the rolling horizon method. Unfortunately, it takes a very long time: between $5666$ and $11540$ seconds. Observe that with a time limit at each iteration of $150$ seconds, the theoretical maximum time for a run of the method is around $50000$. Furthermore, as a rule of thumb, the needed time decreases as the length of the frozen horizon increases: a greater $l_{FH}$ means a smaller number of leader variables, and thus easier problems to solve at each iteration. Although the running times may seem large, they actually allow considering the horizon of a whole week, with a tremendous number of possible scenarios that would be totally impossible to handle otherwise.
\begin{figure}
\caption{The running times for the various runs of the rolling horizon method.}
\label{fig_runtime}
\end{figure}
\section{Conclusion}\label{sect_conc}
In this paper, we have studied a bilevel pricing problem for demand-side management with a strong stochastic component under the form of scenario trees. Besides proving the validity of the pricing approach through numerical analyses, we designed a rolling horizon method that numerically proved to be applicable in real situations, for which the original problem cannot be solved due to its exponential size.
Being a first attempt at applying rolling horizons in a bilevel framework, prospects remain numerous. Specifically, using efficient heuristics at every iteration of the rolling horizon method could help reach better solutions, and possibly avoid feasibility problems when the time allowed at each iteration is not enough for the MILP solver to find a feasible point. From a problem design point of view, it could also be interesting to consider robust optimization to solve (SBPP), in order to maximize the worst-case revenue of the leader.
\section*{Acknowledgment}
This research benefited from the support of the FMJH Program Gaspard Monge in Optimization and Operations Research, and from the support to this program from EDF.
\appendix
\section{Dual constraints of $\left(P^{\sto}_{\SGO}\right)$} \label{app_dual}
The dual constraints of $\left(P^{\sto}_{\SGO}\right)$ are as follows: \footnotesize \begin{align*}
&\displaystyled1^{\sigma}_{\na}-d2^{\sigma}_{\na}+\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)dx^{\sigma,\sigma',h}_{\na} \leq P[\sigma]\left(p^h+C^h_{\na}\right) && \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&\displaystyled1^{\sigma}_{\na}-d2^{\sigma}_{\na}+\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)d\bar{x}^{\sigma,\sigma',h}_{\na} \leq P[\sigma]\left(\bar{p}^h+C^h_{\na}\right) && \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&\displaystyled1^{\sigma}_{\na}-d2^{\sigma}_{\na} -d7^{h,\sigma} +\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)d\lambda^{\sigma,\sigma',h}_{\na}\leq \PscenC^h_{\na} && \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&\displaystyled1^{\sigma}_{\na}-d2^{\sigma}_{\na} +dS-ds^{h,\sigma}_{\na} \\
&\displaystyle\qquad +\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)ds^{\sigma,\sigma',h}_{\na} \leq \PscenC^h_{\na} && \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&\displaystyle-\rhocd4^{h,\sigma} +\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)dx^{\sigma,\sigma',h}_s \leq \Pscenp^h && \forall h\in\mathcal{H}, \sigma\in\Sigma\\
&\displaystyle-\rhocd4^{h,\sigma}+\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)d\bar{x}^{\sigma,\sigma',h}_s\leq P[\sigma]\bar{p}^h && \forall h\in\mathcal{H}, \sigma\in\Sigma \\
&\displaystyle-d7^{h,\sigma}-\rhocd4^{h,\sigma}+\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)d\lambda^{\sigma,\sigma',h}_s \leq 0 && \forall h\in\mathcal{H}, \sigma\in\Sigma \\
&\displaystyled3^{\sigma}_0-\rhodd4^{h,\sigma}+ds^{h,\sigma}_{\na}+\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)dS^{\sigma,\sigma',h}\leq 0 && \forall \sigma\in\Sigma, h=0 \\
&\displaystyled4^{h-1,\sigma}-\rhodd4^{h,\sigma}+ds^{h,\sigma}_{\na}+d5^{h,\sigma}_{min}-d5^{h,\sigma}_{max} \\
&\displaystyle\qquad+\sum_{\sigma'\in\mathrm{Com_h\left(\sigma\right)}} sgn\left(\sigma,\sigma'\right)dS^{\sigma,\sigma',h}\leq 0 && \forall h\in\mathcal{H}\setminus\{0\},\sigma\in\Sigma \\
&\displaystyled4^{h-1,\sigma}+d5^{h,\sigma}_{min}-d5^{h,\sigma}_{max}\leq 0 && \forall \sigma\in\Sigma, h=|\mathcal{H}|+1 \\
&\displaystyled1^{\sigma}_{\na},d2^{\sigma}_{\na},d7^{h,\sigma},ds^{h,\sigma}_{\na},d5^{h,\sigma}_{min},d5^{h,\sigma}_{max} \geq 0 &&\forall \sigma\in\Sigma, h\in\mathcal{H}, n\inN, a\inA_n, \end{align*}\normalsize where \[
\mathrm{Com_h\left(\sigma\right)} = \left\{\sigma'\in\Sigma \mid \left(\sigma,\sigma'\right)\in\mathrm{Com}_h\mathrm{\ or\ } \left(\sigma',\sigma\right)\in\mathrm{Com}_h \right\}, \] and the sign function is defined as follows: \[
sgn\left(\sigma,\sigma'\right)=\left\{\begin{array}{l l}
1 & \mathrm{if\ }\sigma\prec\sigma' \\
0 & \mathrm{if\ }\sigma=\sigma' \\
-1 & \mathrm{if\ }\sigma\succ\sigma'. \\
\end{array}\right. \]
\section{Complementarity constraints of $\left(P^{\sto}_{\SGO}\right)$} \label{app_comp}
The complementarity constraints of $\left(P^{\sto}_{\SGO}\right)$ are as follows:\footnotesize \begin{align*}
&x^h_{\na}\left(d1^{\sigma}_{\na}-d2^{\sigma}_{\na} - P[\sigma]\left(p^h+C^h_{\na}\right)\right) =0 && \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&\bar{x}^h_{\na}\left(d1^{\sigma}_{\na}-d2^{\sigma}_{\na} - P[\sigma]\left( \bar{p}^h+C^h_{\na}\right)\right)=0 && \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&\lambda^h_{\na}\left(d1^{\sigma}_{\na}-d2^{\sigma}_{\na} -d7^{h,\sigma} - \PscenC^h_{\na}\right) =0 && \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&s^h_{\na}\left(d1^{\sigma}_{\na}-d2^{\sigma}_{\na} +dS-d6^{h,\sigma}_{max} - \PscenC^h_{\na}\right)=0 && \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&x^h_s\left(-\rhocd4^{h,\sigma}- \Pscenp^h\right)=0 && \forall h\in\mathcal{H}, \sigma\in\Sigma\\
&\bar{x}^h_s\left(-\rhocd4^{h,\sigma}- P[\sigma]\bar{p}^h\right)=0 && \forall h\in\mathcal{H}, \sigma\in\Sigma \\
&\lambda^h_s\left(-d7^{h,\sigma}-\rhocd4^{h,\sigma}\right)=0 && \forall h\in\mathcal{H}, \sigma\in\Sigma \\
&S^h\left(d3^{\sigma}_0-\rhodd4^{h,\sigma}+d6^{h,\sigma}_{max}\right)= 0 && \forall \sigma\in\Sigma, h=0 \\
&S^h\left(d4^{h-1,\sigma}-\rhodd4^{h,\sigma}+d6^{h,\sigma}_{max}+d5^{h,\sigma}_{min}-d5^{h,\sigma}_{max}\right)= 0 && \forall h\in\mathcal{H}\setminus\{0\},\sigma\in\Sigma \\
&S^h\left(d4^{h-1,\sigma}+d5^{h,\sigma}_{min}-d5^{h,\sigma}_{max}\right)= 0 && \forall \sigma\in\Sigma, h=|\mathcal{H}|+1 \\
&d1^{\sigma}_{\na}\left(\sum_{h\inT_{\na}} \left(x^{h,\scena}_{\na}+\bar{x}^{h,\scena}_{\na}+\lambda^{h,\scena}_{\na}+s^{h,\scena}_{\na}\right)- E_{\na}\right)=0 & &\forall n\inN, a\inA_n, \sigma\in\Sigma \\
&d2^{\sigma}_{\na}\left(x^{h,\scena}_{\na}+\bar{x}^{h,\scena}_{\na}+\lambda^{h,\scena}_{\na}+s^{h,\scena}_{\na}- \beta^{max}_{\na}\right)=0 & & \forall n\inN, a\inA_n, h\inT_{\na}, \sigma\in\Sigma \\
&d7^{h,\sigma}\left(\lambda^{h,\scena}_s+\sum_{n\inN}\sum_{a\inA_n} \lambda^{h,\scena}_{\na} - \lambda^{h,\scena}_{\max}\right)=0 & &\forall h\in\mathcal{H}, \sigma\in\Sigma \\
&d6^{h,\sigma}_{max}\left(\sum_{n\inN}\sum_{a\inA_n} s^{h,\scena}_{\na}- S^{h,\scena}\right)=0 & &\forall h\in \mathcal{H}, \sigma\in\Sigma \\
&d5^{h,\sigma}_{min}\left(S^{\min}-S^{h,\scena}\right)=0 && \forall h\in\mathcal{H}, \sigma\in\Sigma \\
&d5^{h,\sigma}_{max}\left(S^{h,\scena}-S^{\max}\right)=0 && \forall h\in\mathcal{H}, \sigma\in\Sigma. \end{align*} \normalsize
\end{document} | arXiv | {
"id": "2102.13634.tex",
"language_detection_score": 0.7965680360794067,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{ Rigidity results on $\rho$--Einstein solitons with zero scalar
curvature }
\author{Romildo Pina}
\curraddr{Instituto de Matem\'atica e Estat\'istica, Universidade Federal de Goi\'as, Goi\^ania, Brasil, 74001-970}
\email{romildo@ufg.br}
\author{Ilton Menezes}
\email{iltomenezesufg@gmail.com}
\author{Lucyjane Silva}
\email{lucyjanedealmeida@gmail.com}
\subjclass[2010]{Primary 53A30, 53C21}
\keywords{Conformal metric, $\rho$--Einstein solitons, rigidity $\rho$--Einstein solitons, Scalar curvature}
\begin{abstract}
In this paper we show that a $\rho$-Einstein soliton conformal to a
pseudo-Euclidean space, invariant under the action of the pseudo-orthogonal group, with zero scalar curvature is steady and consequently flat. As an
application of the results obtained, we present an explicit example for the question proposed by Kazdan in \cite{Ka}.
\end{abstract}
\maketitle
\section{Introduction and main statements}
In this paper, we study two related problems. The first problem is on the existence of $\rho$--Einstein solitons with scalar curvature $K_{\bar{g}}=0$. Besides that, we present some rigidity results.
The second problem consists in finding all metrics that are conformal to the pseudo-Euclidean metric, with zero scalar curvature, which are invariant under the action of the pseudo-orthogonal group. This provides explicit solutions to Yamabe's problem in the non-compact case. In the Riemannian case, under some additional assumptions, all metrics obtained are complete. As an application of these results, we obtain a family of complete metrics in $\mathbb{R}^{n}\setminus\{0\}$ with positive, negative, and zero scalar curvature, presenting an explicit example for a question proposed by Kazdan in \cite{Ka}.
In 1982, R. Hamilton introduced a nonlinear evolution equation for Riemannian metrics with the aim of finding canonical metrics on manifolds (see \cite{SB} or \cite{Ha}). This evolution equation is known as the Ricci flow, and it has since been used widely and with great success, most notably in Perelman's solution of the Poincaré conjecture. Furthermore, several convergence theorems have been established. One important aspect in the treatment of the Ricci flow is the study of Ricci solitons, which generate self-similar solutions to the flow and often arise as singularity models.
Given a semi-Riemannian manifold $(M^{n},g)$, $n\geq 3$, we say that $(M,g)$ is a gradient Ricci soliton if there exists a differentiable function $h:M\longrightarrow \mathbb{R}$ (called the potential function) such that \begin{equation}\label{000} \mbox{Ric}_g+\mbox{Hess}_g(h)=\lambda g, \qquad \lambda\in\mathbb{R}, \end{equation} where Ric$_g$ is the Ricci tensor, Hess$_g(h)$ is the Hessian of $h$ with respect to the metric $g$, and $\lambda$ is a real number. We say that a gradient Ricci soliton is {\em shrinking, steady, or expanding} if $\lambda>0$, $\lambda=0$, or $\lambda<0$, respectively. Bryant \cite{BRYANT} proved that there exists a complete, steady, gradient Ricci soliton that is spherically symmetric for any $n\geq 3$, which is known as Bryant's soliton. In the bi-dimensional case an analogous nontrivial rotationally symmetric solution was obtained explicitly, and is known as the Hamilton cigar. Recently Cao-Chen \cite{CAOCHEN} showed that any complete, steady, gradient Ricci soliton, locally conformally flat, up to homothety, is either flat or isometric to Bryant's soliton. The results obtained in \cite{CAOCHEN} were extended to Bach-flat gradient steady Ricci solitons (see \cite{CCM}). Complete, conformally flat shrinking gradient solitons have been characterized as being quotients of $\mathbb{R}^n$, $\mathbb{S}^n$ or $\mathbb{R}\times \mathbb{S}^{n-1}$ (see \cite{FG}). In the case of steady gradient Ricci solitons, \cite{RO} provides all such solutions when the metric is conformal to an n-dimensional pseudo-Euclidean space and invariant under the action of an $(n-1)-$dimensional translation group.
Motivated by the notion of Ricci solitons on a semi-Riemannian manifold $(M^{n},g)$, $n\geq3$, it is natural to consider geometric flows of the following type: \begin{equation}\label{flow} \frac{\partial}{\partial t}g(t)=-2(Ric-\rho Rg) \end{equation} for $\rho\in\mathbb{R}$, $\rho\neq0$, as in \cite{BO} and \cite{CA}. We call these the Ricci-Bourguignon flows. We notice that short time existence for the geometric flows described in \eqref{flow} is provided in (\cite{CCD}). Associated to the flows, we have the following notion of gradient $\rho$-Einstein solitons, which generate self-similar solutions:
\begin{definition} Let $\left(M^{n},g\right), n\geq3$, be a Riemannian manifold and let $\rho\in \mathbb{R},\rho\neq0$. We say that $(M^{n},g)$ is a gradient $\rho-$Einstein soliton if there exists a smooth function $h:M\longrightarrow\mathbb{R}$, such that the metric $g$ satisfies the equations
\begin{align}\label{def. 1}
Ric_{g}+Hess_{g}h=\rho K_{g}g +\lambda g
\end{align}
for some constant $\lambda\in\mathbb{R}$, where $K_{g}$ is the scalar curvature of the metric $g$. \end{definition}
A $\rho$-Einstein soliton is said to be shrinking, steady, or expanding if $\lambda>0$, $\lambda=0$, or $\lambda<0$, respectively. Furthermore, a $\rho$-Einstein solitons is said to be a gradient Einstein soliton, gradient traceless Ricci soliton, and gradient Schouten soliton if $\rho=\frac{1}{2}$, $\rho=\frac{1}{n}$, and $\rho=\frac{1}{2(n-1)}$, respectively.
The gradient $\rho-$Einstein solitons equation \eqref{def. 1} links geometric information about the curvature of the manifold through the Ricci tensor and the geometry of the level sets of the potential function by means of their second fundamental form. Hence, classifying gradient $\rho-$Einstein solitons under some curvature conditions is a natural problem. The $\rho-$Einstein solitons were investigated by Catino and Mazzieri in \cite{CA}, they obtained important rigidity results, proving that every compact gradient Einstein, Schouten, or traceless Ricci soliton is trivial. In addition, they proved that every complete gradient steady Schouten soliton is trivial, hence Ricci flat.
Gradient Ricci solitons with constant scalar curvature were investigated by Petersen and Wylie in \cite{P}, where they proved that if a non-steady gradient Ricci soliton has constant scalar curvature $K_{g}$, then it is bounded as $0 \leq K_{g} \leq n\lambda$ in the shrinking case, and $n\lambda \leq K_{g} \leq 0$ in the expanding case. Fern\'andez-L\'opez and Garcia-R\'io in \cite{GR} improved this result, proving that if an $n$--dimensional complete gradient Ricci soliton has constant scalar curvature $K_{g}$, then $K_{g}$ must be a multiple of $\lambda$.
In \cite{MP} the authors considered a $\rho$--Einstein solitons that are conformal to a pseudo Euclidean space and invariant under the action of the pseudo-orthogonal group. They provide all the solutions for the gradient Schouten soliton case. Moreover, they proved that if a gradient Schouten soliton is both complete, conformal to a Euclidean metric, and rotationally symmetric, then it is isometric to $\mathbb{R}\times\mathbb{S}^{n-1}$.
In \cite{Ma} the authors used the variational method to study the existence problem of metrics with constant scalar curvature on complete non-compact Riemannian manifolds. The assumptions of the results are motivated by a question in the work of Kazdan \cite{Ka}. The question is: if $M$ has complete metrics $g_{+}$ and $g_{-}$ with positive (respectively negative) scalar curvature, is there one with zero scalar curvature? With several additional hypotheses, the authors provide an answer to the question posed by Kazdan; for more details see \cite{MA}.
We study the equation \eqref{def. 1} in semi-Riemannian manifolds with constant scalar curvature. We consider gradient $\rho$-Einstein solitons conformal to a pseudo-Euclidean space, which are invariant under the action of the pseudo-orthogonal group. More precisely, let $(\mathbb{R}^{n},g)$ be the standard pseudo-Euclidean space with metric $g$ and coordinates $(x_{1},...,x_{n})$, with $g_{ij}=\delta_{ij}\varepsilon_{i}$, $1\leq i,j \leq n$, where $\delta_{ij}$ is the Kronecker delta, and $\varepsilon_{i}=\pm1$. Let $r=\sum_{i=1}^{n}\varepsilon_{i}x_{i}^{2}$ be a basic invariant for an $(n-1)-$dimensional pseudo-orthogonal group. The main goal of this paper is to present, in the Riemannian case, a family of complete metrics and some rigidity results
on a large class of noncompact semi-Riemannian manifolds in the case where the scalar curvature is zero. In the Riemannian case the same results hold.
We initially find a system of differential equations that the functions $h$ and $\psi$ must satisfy so that the metric $\bar{g}= g/\psi^{2}$ satisfies \eqref{def. 1} (see Theorem \ref{theorem 1}). Note that if the solutions are invariant under the action of the pseudo-orthogonal group, the system of partial differential equations given in Theorem \ref{theorem 1} can be transformed into a system of ordinary differential equations (see Corollary \ref{theorem 2}). In Theorem \ref{coro1} we find all metrics that are conformal to the pseudo-Euclidean metric, with zero scalar curvature, which are invariant under the action of the pseudo-orthogonal group. As a consequence of Theorem \ref{coro1}, in Corollary \ref{coro2} we construct a family of complete metrics with zero scalar curvature. We present rigidity results on gradient $\rho$--Einstein solitons with zero scalar curvature (Theorem~\ref{theorem 5}, Corollary~\ref{theorem 6} and Corollary~\ref{theorem 7}).
In Proposition \ref{prop} we construct a family of complete metrics with zero scalar curvature. In Corollary \ref{coro3} we construct an explicit example for Kazdan's question.
In what follows, we state our main results. We denote the second order derivative of $\psi$ and $h$ by $\psi_{,x_{i}x_{j}}$ and $h_{,x_{i}x_{j}}$, respectively, with respect to $x_{i}x_{j}$. \begin{theorem}\label{theorem 1}
Let $\left(\mathbb{R}^{n},g\right)$,$n\geq 3$, be a pseudo-Euclidean space with coordinates $x=\left(x_{1},...,x_{n}\right)$ and metric components $g_{ij}=\delta_{ij}\varepsilon_{i}$, $1\leq i,j \leq n$, where $\varepsilon_{i}=\pm 1$. Consider a smooth function $h:\mathbb{R}^{n}\longrightarrow \mathbb{R}$. There exists a metric $\bar{g}=\frac{1}{\psi^{2}}g$ such that $\left(\mathbb{R}^{n},\bar{g}\right)$ is a gradient $\rho$-Einstein soliton with $h$ as a potential function if, and only if, the functions $\psi$, and $h$ satisfy
\begin{align}\label{01}
(n-2)\psi_{,x_{i}x_{j}}+\psi h_{,x_{i}x_{j}}+\psi_{,x_{i}}h_{,x_{j}}+\psi_{,x_{j}}h_{,x_{i}}=0,\hspace{0,2cm} i\neq j,
\end{align} and
\begin{center}
\begin{align}\label{2}
\psi\left[\left(n-2\right)\psi_{,x_{i}x_{i}}+\psi h_{,x_{i}x_{i}}+2\psi_{,x_{i}}h_{,x_{i}} \right]
\end{align}
\begin{align*}
+\varepsilon_{i}\sum_{k=1}^{n}\varepsilon_{k}\left[\left(n-1\right)\left(\rho n \psi_{,x_{k}}^{2}-2\rho \psi \psi_{,x_{k}x_{k}}-\psi_{,x_{k}}^{2}\right)-\psi \psi_{,x_{k}}h_{,x_{k}}+\psi \psi_{,x_{k}x_{k}}\right]=\lambda \varepsilon_{i}, \hspace{0,2cm} i=j.
\end{align*}
\end{center}
\end{theorem}
Our objective is to determine solutions of the system \eqref{01}, \eqref{2} of the form $\psi(r)$ and $h(r)$, where $r=\sum_{i=1}^{n}\varepsilon_{i}x_{i}^{2}$. The following theorem reduces the system of partial differential equations \eqref{01} and \eqref{2} to a system of ordinary differential equations.
\begin{corollary}\label{theorem 2}
Let $\left(\mathbb{R}^{n},g\right)$,$n\geq 3$, be a pseudo-Euclidean space with coordinates $x=\left(x_{1},...,x_{n}\right)$ and metric components $g_{ij}=\delta_{ij}\varepsilon_{i}$, $1\leq i,j \leq n$, where $\varepsilon_{i}=\pm 1$. Consider smooth functions $\psi(r)$ and $h(r)$ with $r=\sum_{k=1}^{n}\varepsilon_{k}x_{k}^{2}$. Then there exists a metric $\bar{g}=\frac{1}{\psi^{2}}g$ such that $\left(\mathbb{R}^{n},\bar{g}\right)$ is a gradient $\rho$-Einstein soliton with $h$ as a potential function if, and only if, the functions $\psi$ and $h$ satisfy
\begin{equation}\label{3}
(n-2)\psi''+\psi h''+2\psi'h'=0,
\end{equation}
and
\begin{center}
\begin{equation}\label{4}
2\psi\left[\left(n-2\right)\psi'+\psi h'\right]+2n[1-2(n-1)\rho]\psi \psi'
\end{equation}
\begin{equation*}
+4r\left\{\left(n-1\right)\left[\left(\rho n-1\right)(\psi')^{2}-2\rho\psi \psi''\right]-\psi \psi'h'+\psi \psi''\right\}=\lambda.
\end{equation*}
\end{center} \end{corollary}
Next, we determine all metrics that are conformal to the pseudo-Euclidean metric, with zero scalar curvature, which are invariant under the action of the pseudo-orthogonal group.
\begin{theorem}\label{coro1}
Let $( \mathbb{R}^n, g)$ be a
pseudo-Euclidean space, $n\geq 3$, with coordinates
$x=(x_1,\cdots, x_n)$ and $g_{ij}=\delta_{ij}\varepsilon_i$, $1\leq i,j\leq n$, where $\varepsilon_i=\pm1$.
Consider $\bar{g}=\frac{1}{\psi(r)^{2}}g$ where $r=\sum\limits_{k=1}^n\varepsilon_kx_{k}^2$. Then $\bar{g}$ has scalar curvature $K_{\bar{g}}=0$ if, and only if,
\begin{equation}
\psi(r)= \frac{k_2r}{\left(1+Ar^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}},
\end{equation}
where $A, k_2 \in \mathbb{R}$ with $k_2>0$. If $A \geq 0$ the metric $\bar{g}$ is defined in $ \mathbb{R}^n\setminus\{0\}$. If $A<0$ the set of singularity points of $\bar{g}$ consists of the origin and an $(n-1)$-dimensional sphere, with center at the origin and radius $R=\sqrt{(\frac{-1}{A})^{\frac{2}{n-2}}}$. \end{theorem} \begin{remark} If $(\mathbb{R}^n, g)$ is the Euclidean space, then we find in Theorem \ref{coro1} all metrics conformal to $g$ and spherically symmetric with zero scalar curvature. This provides explicit solutions to Yamabe's problem in the non-compact case. \end{remark}
In \cite{CO}, the authors showed that $\{\mathbb{R}^n\setminus\{0\}, \bar{g}=\frac{1}{\varphi^2}g_{0}, \varphi(r)=\sqrt{r}\}$ is a complete Riemannian manifold which is isometric to $\mathbb{S}^{n-1}\times\mathbb{R}$. As a consequence of Theorem \ref{coro1} together with this fact, we obtain the following result: \begin{corollary}\label{coro2}
Let $( \mathbb{R}^n, g)$ be a
Euclidean space, $n\geq 3$, with coordinates
$x=(x_1,\cdots, x_n)$ and $\left(g_{0}\right)_{ij}=\delta_{ij}$, $1\leq i,j\leq n$.
Consider $\bar{g}=\frac{1}{\psi(r)^{2}}g_{0}$ where $r=\sum\limits_{k=1}^nx_{k}^2$. The metrics obtained in the Theorem \ref{coro1} are complete whenever $A>0$. \end{corollary}
As a consequence of Theorem \ref{coro1}, we get the following rigidity results. \begin{theorem}\label{theorem 5}
Let $( \mathbb{R}^n, g)$ be a
pseudo-Euclidean space, $n\geq 3$, with coordinates
$x=(x_1,\cdots, x_n)$ and $g_{ij}=\delta_{ij}\varepsilon_i$, $1\leq i,j\leq n$, where $\varepsilon_i=\pm1$.
Consider $\left(\mathbb{R}^{n},\bar{g}\right)$, $\bar{g}=\frac{1}{\psi^{2}}g$, a $\rho$--Einstein soliton with scalar curvature $K_{\bar{g}}=0$, where $\psi(r)$ and $h(r)$ are smooth functions,
$r=\sum\limits_{k=1}^n\varepsilon_kx_{k}^2$ and $h$ as a potential function. Then $\lambda=0$, that is $\left(\mathbb{R}^{n},\bar{g}\right)$ is steady. \end{theorem}
\begin{corollary}\label{theorem 6}
Let $( \mathbb{R}^n, g)$ be a
pseudo-Euclidean space, $n\geq 3$, with coordinates
$x=(x_1,\cdots, x_n)$ and $g_{ij}=\delta_{ij}\varepsilon_i$, $1\leq i,j\leq n$, where $\varepsilon_i=\pm1$. Then $\left(\mathbb{R}^{n},\bar{g}\right)$, $\bar{g}=\frac{1}{\psi^{2}}g$, is a steady $\rho$--Einstein soliton with scalar curvature $K_{\bar{g}}=0$, where $\psi(r)$ and $h(r)$ are smooth functions,
$r=\sum\limits_{k=1}^n\varepsilon_kx_{k}^2$ and $h$ as a potential function, if and only if, $\left(\mathbb{R}^{n},\bar{g}\right)$ is flat.
\end{corollary}
As a consequence of the previous results, we have the following result in the Riemannian case. \begin{corollary}\label{theorem 7} Let $( \mathbb{M}^n, \bar{g})$, $n\geq 3$, be a Riemannian $\rho$--Einstein soliton, locally conformally flat and rotationally symmetric, with zero scalar curvature. Then $(\mathbb{M}^n, \bar{g})$ is necessarily steady. Moreover, $(\mathbb{M}^n, \bar{g})$ is flat. \end{corollary}
\begin{remark}
These results hold for $\rho=0$ and therefore they extend to gradient Ricci solitons, proving that a gradient Ricci soliton, conformal to the Euclidean space and spherically symmetric, with zero scalar curvature is necessarily steady and consequently flat. \end{remark}
\begin{remark}
As a consequence of the results obtained, we give an application providing a positive answer to a question proposed by Kazdan in \cite{Ka}, as follows:
If $M$ has complete metrics $g_+$ and $g_-$ with positive (respectively, negative) scalar curvature, is there one with zero scalar curvature? Kazdan showed in \cite{KW} that in the compact case the answer is ``yes''.
We built in $(\mathbb{R}^n\setminus\{0\})$ complete metrics with positive, negative and zero scalar curvature, respectively.
\end{remark}
\begin{propo}\label{prop}
Let $( \mathbb{R}^n, g)$ be a
Euclidean space, $n\geq 3$, with coordinates
$x=(x_1,\cdots, x_n)$ and $\left(g_{0}\right)_{ij}=\delta_{ij}$, $1\leq i,j\leq n$. Consider $g=\frac{1}{\varphi(r)^{2}}g_{0}$ where $r=\sum\limits_{k=1}^nx_{k}^2$. If $\varphi(r)=re^{-\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}$, then the metric $g$ on $\mathbb{R}^n$ is complete with negative scalar curvature given by
\begin{equation*}
K_{g}=h(r)\left[(n-2)\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}+2(n-1)r^{\frac{2-n}{2}}+(n+2)\right],
\end{equation*}
where $h(r)=-\frac{4(n-1)r^{n-1}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2(3-n)}{n-2}}}{e^{2\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}$. \end{propo}
In the next result we construct an explicit example in Riemannian manifolds for the question left by Kazdan \cite{Ka}. \begin{corollary}\label{coro3}
Note that $\{\mathbb{R}^n\setminus\{0\}, \bar{g}=\frac{1}{\varphi^2}g_{0}, \varphi(r)=\sqrt{r}\}$
is a complete Riemannian manifold with positive scalar curvature and $\left\{\mathbb{R}^n\setminus\{0\}, \bar{g}=\frac{1}{\varphi_1^2}g_{0}, \varphi_1(r)=re^{-\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}\right\}$
is a complete Riemannian manifold with negative scalar curvature, and there exists a complete metric with zero scalar curvature. \end{corollary}
\section{Proofs of the main results}\label{sec2}
\begin{proof}
Proof of Theorem \ref{theorem 1}. It is well known (see, e.g., \cite{RO}) that if $\bar{g}=\frac{g}{\psi^{2}}$, then
\begin{align*}
Ric_{\bar{g}}=\frac{1}{\psi^{2}} \{(n-2)\psi Hess_{g}(\psi)+[\psi\Delta_{g}\psi-(n-1)|\nabla_{g}\psi|^{2}]g \}
\end{align*}
and
\begin{align*}
\bar{R}=(n-1)\left(2\psi\Delta_{g}\psi-n|\nabla_{g}\psi|^{2}\right).
\end{align*}
Hence, the equation
\begin{align*}
Ric_{\bar{g}}+Hess_{\bar{g}}(h)=\rho \bar{R}\bar{g}+\lambda\bar{g},
\end{align*}
is equivalent to
\begin{align}\label{A}
\frac{1}{\psi^{2}} \{(n-2)\psi Hess_{g}(\psi)_{ij}+[\psi\Delta_{g}\psi-(n-1)|\nabla_{g}\psi|^{2}]\delta_{ij}\varepsilon_{i}\}+Hess_{\bar{g}}(h)_{ij}
\end{align}
\begin{equation*}
=\left[\rho(n-1)(2\psi\Delta_{g}\psi-n|\nabla_{g}\psi|^{2})+\lambda\right]\frac{1}{\psi^{2}}\delta_{ij}\varepsilon_{i}.
\end{equation*}
Recall that,
\begin{equation*}
Hess_{\bar{g}}(h)_{ij}=h_{,x_{i}x_{j}}-\sum_{k=1}^{n}\bar{\Gamma}_{ij}^{k}h_{,x_{k}}
\end{equation*}
where $\bar{\Gamma}_{ij}^{k}$ are the Christoffel symbols of the metric $\bar{g}$. For distinct $i,j,k$, we have
\begin{equation*}
\bar{\Gamma}_{ij}^{k}=0,\hspace{0.5cm}\bar{\Gamma}_{ij}^{i}=-\frac{\psi_{,x_{j}}}{\psi},\hspace{0.5cm}\bar{\Gamma}_{ii}^{k}=\varepsilon_{i}\varepsilon_{k}\frac{\psi_{,x_{k}}}{\psi},\hspace{0.5cm}\bar{\Gamma}_{ii}^{i}=-\frac{\psi_{,x_{i}}}{\psi},
\end{equation*}
therefore,
\begin{equation}\label{10}
Hess_{\bar{g}}(h)_{ij}=
h_{,x_{i}x_{j}}+\frac{\psi_{,x_{j}}h_{,x_{i}}}{\psi}+\frac{\psi_{,x_{i}}h_{,x_{j}}}{\psi}, \hspace{0,2cm} i\neq j.
\end{equation}
Similarly, by considering $i=j$, we have
\begin{equation}\label{11}
Hess_{\bar{g}}(h)_{ii}=
h_{,x_{i}x_{i}}+\frac{2\psi_{,x_{i}}h_{,x_{i}}}{\psi}-\varepsilon_{i}\sum_{k=1}^{n}\varepsilon_{k}\frac{\psi_{,x_{k}}h_{,x_{k}}}{\psi}.
\end{equation}
However, we note that
\begin{align}\label{14}
|\nabla_{g}\psi|^{2}=\sum_{k=1}^{n}\varepsilon_{k}\left(\frac{\partial\psi}{\partial x_{k}}\right)^{2}, \hspace{0.5cm} \Delta_{g}\psi=\sum_{k=1}^{n}\varepsilon_{k}\psi_{,x_{k}x_{k}},\hspace{0.5cm} Hess_{g}(\psi)_{ij}=\psi_{,x_{i}x_{j}}.
\end{align}
If $i\neq j$ in \eqref{A}, we obtain
\begin{align}\label{15}
(n-2) \frac{Hess_{g}(\psi)_{ij}}{\psi}+Hess_{\bar{g}}(h)_{ij}=0.
\end{align}
Substituting the expressions found in \eqref{10}, and \eqref{14} into \eqref{15}, we obtain
\begin{align*}
(n-2)\psi_{,x_{i}x_{j}}+\psi h_{,x_{i}x_{j}}+\psi_{,x_{i}}h_{,x_{j}}+\psi_{,x_{j}}h_{,x_{i}}=0,\hspace{0,2cm} i\neq j.
\end{align*}
Similarly, if $i=j$ in \eqref{A}, we have
\begin{align}\label{13}
(n-2)\psi Hess_{g}(\psi)_{ii}+\psi\Delta_{g}\psi\varepsilon_{i}-(n-1)|\nabla_{g}\psi|^{2}\varepsilon_{i}+\psi^{2}Hess_{\bar{g}}(h)_{ii}
\end{align}
\begin{align*}
=2(n-1)\rho\Delta_{g}\psi \varepsilon_{i}-n(n-1)\rho|\nabla_{g}\psi|^{2}\varepsilon_{i}+\lambda\varepsilon_{i}.
\end{align*}
Substituting the expressions found in \eqref{11}, and \eqref{14} into \eqref{13}, we obtain
\begin{align*}
\psi\left[\left(n-2\right)\psi_{,x_{i}x_{i}}+\psi h_{,x_{i}x_{i}}+2\psi_{,x_{i}}h_{,x_{i}} \right]
\end{align*}
\begin{align*}
+\varepsilon_{i}\sum_{k=1}^{n}\varepsilon_{k}\left[\left(n-1\right)\left(\rho n \psi_{,x_{k}}^{2}-2\rho \psi \psi_{,x_{k}x_{k}}-\psi_{,x_{k}}^{2}\right)-\psi \psi_{,x_{k}}h_{,x_{k}}+\psi \psi_{,x_{k}x_{k}}\right]=\lambda \varepsilon_{i}.
\end{align*}
This concludes the proof of Theorem \ref{theorem 1}.\\ \end{proof}
\begin{proof}
Proof of Corollary \ref{theorem 2}. Let $\bar{g}=\psi^{-2}g$ be a conformal metric of $g$. We are assuming that $\psi(r)$
and $h(r)$ are functions of $r$, where $r=\sum_{k=1}^{n}\varepsilon_{k}x_{k}^{2}$. Hence, we have
\begin{align*}
\psi_{,x_{i}}=2\varepsilon_{i}x_{i}\psi',\hspace{0,5cm} \psi_{,x_{i}x_{i}}=4x_{i}^{2}\psi''+2\varepsilon_{i}\psi', \hspace{0,5cm} \psi_{,x_{i}x_{j}}=4\varepsilon_{i}\varepsilon_{j}x_{i}x_{j}\psi''
\end{align*}
and
\begin{align*}
h_{,x_{i}}=2\varepsilon_{i}x_{i}h',\hspace{0,5cm} h_{,x_{i}x_{i}}=4x_{i}^{2}h''+2\varepsilon_{i}h', \hspace{0,5cm} h_{,x_{i}x_{j}}=4\varepsilon_{i}\varepsilon_{j}x_{i}x_{j}h''.
\end{align*}
Substituting these expressions into \eqref{01}, we obtain
\begin{align*}
4\varepsilon_{i}\varepsilon_{j}(n-2)x_{i}x_{j}\psi''+4\varepsilon_{i}\varepsilon_{j}x_{i}x_{j}\psi h''+(2\varepsilon_{i}x_{i}\psi').(2\varepsilon_{j}x_{j}h')+(2\varepsilon_{j}x_{j}\psi').(2\varepsilon_{i}x_{i}h')=0,
\end{align*}
which is equivalent to
\begin{align*}
4\varepsilon_{i}\varepsilon_{j}\left[(n-2)\psi''+\psi h''+2\psi' h'\right]x_{i}x_{j}=0.
\end{align*}
Since there exist $i\neq j$, such that $x_{i}x_{j}\neq 0$, we have
\begin{align*}
(n-2)\psi''+h''\psi+2\psi'h'=0.
\end{align*}
Similarly, considering the equation \eqref{2}, we obtain
\begin{center}
\begin{align*}
4\psi\left[(n-2)\psi''+\psi h''+2\psi'h'\right]x_{i}^{2}+
2\psi\left[\left(n-2\right)\psi'+\psi h'\right]\varepsilon_{i}+2\varepsilon_{i}n[1-2(n-1)\rho]\psi \psi'
\end{align*}
\begin{align*}
+4\varepsilon_{i}\sum_{k=1}^{n}\varepsilon_{k}x_{k}^{2}\left\{\left(n-1\right)\left[\left(\rho n-1\right)(\psi')^{2}-2\rho\psi \psi''\right]-\psi \psi'h'+\psi \psi''\right\}=\lambda\varepsilon_{i}.
\end{align*}
\end{center}
Note that $(n-2)\psi''+\psi h''+2\psi' h'=0$ and $r=\sum_{k=1}^{n}\varepsilon_{k}x_{k}^{2}$. Therefore, we obtain
\begin{center}
\begin{align*}
2\psi\left[\left(n-2\right)\psi'+\psi h'\right]+2n[1-2(n-1)\rho]\psi \psi'
\end{align*}
\begin{align*}
+4r\left\{\left(n-1\right)\left[\left(\rho n-1\right)(\psi')^{2}-2\rho\psi \psi''\right]-\psi \psi'h'+\psi \psi''\right\}=\lambda.
\end{align*}
\end{center}
This concludes the proof of Corollary \ref{theorem 2}.\\ \end{proof}
\begin{proof} Proof of Theorem \ref{coro1}. It is well known (see, e.g., \cite{RO} or \cite{MP}) that if $\bar{g}=\frac{g}{\psi^{2}}$, then
\begin{equation*}
K_{\bar{g}}=(n-1)\left(2\psi\Delta_{g}\psi-n|\nabla_{g}\psi|^{2}\right).
\end{equation*}
Since we are assuming that $\psi(r)$
is a function of $r$, where $r=\sum_{k=1}^{n}\varepsilon_{k}x_{k}^{2}$, we have that $K_{\bar{g}}=0$ if, and only if,
\begin{equation*}
-n\psi\psi'-2r\psi \psi''+nr\left(\psi'\right)^2=0,
\end{equation*} which is equivalent to
\begin{equation*}
-\frac{n}{2r}\frac{\psi'}{\psi}+\frac{n}{2}\left(\frac{\psi'}{\psi}\right)^2-\frac{\psi''}{\psi}=0.
\end{equation*}
By equality $\frac{\psi''}{\psi}=\left(\frac{\psi'}{\psi}\right)'+\left(\frac{\psi'}{\psi}\right)^2$, follows that
\begin{equation*}
-\frac{n}{2r}\frac{\psi'}{\psi}+\frac{n}{2}\left(\frac{\psi'}{\psi}\right)^2-\left(\frac{\psi'}{\psi}\right)'-\left(\frac{\psi'}{\psi}\right)^2=0.
\end{equation*}
Taking $y=\frac{\psi'}{\psi}$, the previous equation becomes
\begin{equation}\label{18}
y'=-\frac{n}{2r}y+\frac{n-2}{2}y^2.
\end{equation}
Note that equation \eqref{18} is a Bernoulli ordinary differential equation. Therefore, all of its solutions can be determined; the general solution is given by
\begin{equation}
y^{-1}=Ce^{F}-\frac{(n-2)}{2}e^{F}\int e^{-F}dr,\hspace{0.5cm} \text{where}\hspace{0.5cm} F(r)=\frac{n}{2}\int
\frac{1}{r}dr=\ln r^{\frac{n}{2}},
\end{equation}
where $C$ is an arbitrary constant (for more details see \cite{PO}). Thus
\begin{equation*}
y^{-1}=C r^{\frac{n}{2}}- \frac{(n-2)}{2}r^{\frac{n}{2}}\int r^{-\frac{n}{2}}dr
\end{equation*}
equivalently,
\begin{equation*}
y^{-1}=\left(C -\frac{(n-2)}{2}k_1\right)r^{\frac{n}{2}}+r,
\end{equation*}
where $k_1$ is a real number. This implies that
\begin{equation*}
y^{-1}=Ar^{\frac{n}{2}}+r,
\end{equation*}
where $A=C -\frac{(n-2)}{2}k_1$. It follows that
\begin{equation*}
y=\frac{r^{-\frac{n}{2}}}{A+r^{\frac{2-n}{2}}},
\end{equation*}
since $y=\frac{\psi'}{\psi}$, we get
\begin{equation}\label{19}
\psi(r)=\exp\left\{{\int\frac{r^{-\frac{n}{2}}}{A+r^{\frac{2-n}{2}}}dr}+\ln k_2\right\}
\end{equation}
where $k_2\in \mathbb{R}_{+}^*$. Note that
\begin{equation}\label{20}
\int\frac{r^{-\frac{n}{2}}}{A+r^{\frac{2-n}{2}}}dr=\ln\left(A+r^{\frac{2-n}{2}}\right)^{\frac{2}{2-n}},
\end{equation}
combining equations \eqref{19} and \eqref{20}, it follows that
\begin{equation}\label{21}
\psi(r)=k_2B^{\frac{2}{2-n}},
\end{equation}
where $B=A+r^{\frac{2-n}{2}}$. Since $n \geq 3$, we obtain that
\begin{equation}\label{22}
\psi(r)= \frac{k_2r}{\left(1+Ar^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}},
\end{equation} \end{proof}
\begin{proof} Proof of Corollary \ref{coro2}. If $K_{\bar{g}}=0$, by Theorem \ref{coro1} we get $\psi(r)= \frac{k_2r}{\left(1+Ar^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}$. We will show that $\bar{g}=\frac{g_{0}}{\psi^2}$ is complete.
Consider the manifolds $M=\left(\mathbb{R}^n\setminus\{0\},\bar{g}=\frac{g_0}{\psi^2}\right)$, where $\psi(r)=\frac{k_2r}{\left(1+Ar^\frac{n-2}{2}\right)^\frac{2}{n-2}}$, $k_2\in\mathbb{R}_{+}^{*}$ and $N=\left(\mathbb{R}^n\setminus\{0\},g=\frac{g_0}{\varphi^2}\right)$, where $\varphi(r)=\sqrt{r}$, and $g_0$ is a Euclidean metric. Note that \begin{equation*}
|v|_{\bar{g}}=\frac{1}{\psi}|v|_{g_0}\hspace{0.5cm} \textit{and} \hspace{0.5cm} |v|_{g}=\frac{1}{\varphi}|v|_{g_0} \end{equation*}
On the other hand, we get \begin{equation*}
|v|_{\bar{g}}=\frac{\left(1+Ar^\frac{n-2}{2}\right)^\frac{2}{n-2}}{k_2r}|v|_{g_0}=\frac{\left(1+Ar^\frac{n-2}{2}\right)^\frac{2}{n-2}}{k_2\sqrt{r}}\frac{1}{\sqrt{r}}|v|_{g_0}, \end{equation*} thus, \begin{equation*}
|v|_{\bar{g}}=f(r)|v|_g, \end{equation*} where $f(r)=\frac{\left(1+Ar^\frac{n-2}{2}\right)^\frac{2}{n-2}}{k_2\sqrt{r}}$.
To find $c> 0$ such that $|v|_{\bar{g}}\geq c|v|_g$, it suffices to solve the following problem \begin{equation*} \min\limits_{r\in\mathbb{R_{+}^{*}}} f(r) \end{equation*} The first derivative of $f$ gives us \begin{equation*} f'(r)=\frac{r^{\frac{1}{2}}\frac{2}{n-2}\left(1+Ar^\frac{n-2}{2}\right)^\frac{4-n}{n-2}A\frac{n-2}{2}r^{\frac{n-4}{2}}-\frac{1}{2}r^{-\frac{1}{2}}\left(1+Ar^\frac{n-2}{2}\right)^\frac{2}{n-2}}{k_2r}, \end{equation*} equivalently, \begin{equation*} f'(r)=\frac{\left(1+Ar^\frac{n-2}{2}\right)^\frac{2}{n-2}}{k_2r}\left[\left(1+Ar^\frac{n-2}{2}\right)^{-1}r^{\frac{n-3}{2}}-\frac{1}{2r^{\frac{1}{2}}}\right]. \end{equation*} Therefore, \begin{equation*} f'(r)=\frac{\left(1+Ar^\frac{n-2}{2}\right)^\frac{4-n}{n-2}}{k_2r^{\frac{3}{2}}}\left(Ar^{\frac{n-2}{2}}-1\right). \end{equation*}
Since $f$ is a real function, we have that $r$ is a critical point if, and only if, $f'(r)=0$. Since $r>0$, the minimum point candidate is given by \begin{equation*} r=\frac{1}{A^{\frac{2}{n-2}}}. \end{equation*}
Let's calculate the second derivative of $f$ and evaluate at this point, this is, \begin{equation*} f''(r)=\frac{1}{2k_2}\left[\frac{4-n}{n-2}\left(1+Ar^\frac{n-2}{2}\right)^\frac{6-2n}{n-2}A\frac{n-2}{2}r^{\frac{n-4}{2}}\left(Ar^{\frac{n-5}{2}}-r^{-\frac{3}{2}}\right)\right] \end{equation*} \begin{equation*} +\frac{1}{2k_2}\left[\left(1+Ar^\frac{n-2}{2}\right)^\frac{4-n}{n-2}\left(\frac{n-5}{2}Ar^{\frac{n-7}{2}}+\frac{3}{2}r^{-\frac{5}{2}}\right)\right] \end{equation*} equivalently, \begin{equation*} f''(r)=\frac{1}{2k_2}\left(1+Ar^\frac{n-2}{2}\right)^\frac{6-2n}{n-2}\left(\frac{4-n}{2}A^2r^{\frac{2n-9}{2}}-\frac{(4-n)A}{2}r^{\frac{n-7}{2}}\right) \end{equation*} \begin{equation*} +\frac{1}{2k_2}\left(1+Ar^\frac{n-2}{2}\right)^\frac{4-n}{n-2}\left(\frac{n-5}{2}Ar^{\frac{n-7}{2}}+\frac{3A}{2}r^{-\frac{5}{2}}\right) \end{equation*} implies that \begin{equation*} f''(r)=-\frac{\left(1+Ar^\frac{n-2}{2}\right)^\frac{6-2n}{n-2}}{4k_2r^{\frac{5}{2}}}\left( A^2r^{n-2}+6Ar^{\frac{n-2}{2}}-2Anr^{\frac{n-2}{2}}-3\right). \end{equation*}
Now let's evaluate the second derivative at the point $r=\frac{1}{A^{\frac{2}{n-2}}}$, that is, \begin{equation*} f''\left(\frac{1}{A^{\frac{2}{n-2}}}\right)=-\frac{\left(1+A\left(\frac{1}{A^{\frac{2}{n-2}}}\right)^\frac{n-2}{2}\right)^\frac{6-2n}{n-2}}{4k_2\left(\frac{1}{A^{\frac{2}{n-2}}}\right)^{\frac{5}{2}}}\left( A^2\left(\frac{1}{A^{\frac{2}{n-2}}}\right)^{n-2}+6A\left(\frac{1}{A^{\frac{2}{n-2}}}\right)^{\frac{n-2}{2}}-2An\left(\frac{1}{A^{\frac{2}{n-2}}}\right)^{\frac{n-2}{2}}-3\right), \end{equation*} equivalently, \begin{equation*} f''\left(\frac{1}{A^{\frac{2}{n-2}}}\right)=-\frac{2^\frac{6-2n}{n-2}}{4k_2\frac{1}{A^{\frac{5}{n-2}}}}\left(1+6-2n-3\right), \end{equation*} implies that \begin{equation*} f''\left(\frac{1}{A^{\frac{2}{n-2}}}\right)=-\frac{2^\frac{10-4n}{n-2}}{k_2}A^{\frac{5}{n-2}}\left(4-2n\right). \end{equation*}
Therefore, \begin{equation*} f''\left(\frac{1}{A^{\frac{2}{n-2}}}\right)=\frac{2^\frac{8-3n}{n-2}}{k_2}A^{\frac{5}{n-2}}\left(n-2\right). \end{equation*}
Since $n\geq3$ and $A,k_2\in \mathbb{R}_{+}^{*}$, we get $f''\left(\frac{1}{A^{\frac{2}{n-2}}}\right)>0$; consequently $r=\frac{1}{A^{\frac{2}{n-2}}}$ is a minimum point. Therefore, \begin{equation} f\left(\frac{1}{A^{\frac{2}{n-2}}}\right)=\frac{\left(1+A\left(\frac{1}{A^{\frac{2}{n-2}}}\right)^\frac{n-2}{2}\right)^\frac{2}{n-2}}{k_2\sqrt{\left(\frac{1}{A^{\frac{2}{n-2}}}\right)}}=\frac{\left(4A\right)^{\frac{1}{n-2}}}{k_2} \end{equation}
Taking $c=\frac{\left(4A\right)^{\frac{1}{n-2}}}{k_2}$, we get $f(r)\geq c$ for all $r$. Thus $|v|_{\bar{g}}\geq c|v|_{g}$; since $N=\left(\mathbb{R}^n\setminus\{0\},g=\frac{g_0}{\varphi^2}\right)$ is complete, it follows that $M=\left(\mathbb{R}^n\setminus\{0\},\bar{g}=\frac{g_0}{\psi^2}\right)$ is complete. Therefore, the proof is done. \end{proof}
\begin{proof}Proof of the Theorem \ref{theorem 5} How $\left(\mathbb{R}^{n},\bar{g}\right)$ is a $\rho$--Einstein sotiton, with zero scalar curvature follows by Theorem \ref{coro1} that
\begin{equation}\label{5}
\psi(r)=k_2B^{\frac{2}{2-n}},
\end{equation}
where $B=A+r^{\frac{2-n}{2}}$.
Consequently,
\begin{equation}\label{6}
\psi'(r)=k_2B^{\frac{2}{2-n}}r^{-\frac{n}{2}},\hspace{0.5cm}\psi''(r)=\frac{k_2n}{2}\left(B^{\frac{2}{2-n}}r^{-n}-B^{\frac{2}{2-n}}r^{-\frac{(n+2)}{2}}\right).
\end{equation}
Replacing the expressions found in \eqref{5} and \eqref{6} in \eqref{4}, we have
\begin{equation*}
2\left(n-1\right)\left(1-n\rho\right)k_2^2B^{\frac{2}{2-n}}B^{\frac{n}{2-n}}r^{-\frac{n}{2}}+2\left(n-1\right)\left(n\rho-1\right)k_2^2rB^{\frac{2n}{2-n}}r^{-n}
\end{equation*}
\begin{equation*}
+2\left(1-2\left(n-1\right)\rho\right)rk_2B^{\frac{2}{2-n}}\frac{k_2n}{2}\left(B^{\frac{2(n-1)}{2-n}}r^{-n}-B^{\frac{n}{2-n}}r^{-\frac{(n+2)}{2}}\right)
\end{equation*}
\begin{equation*}
+k_2^2B^{\frac{2}{2-n}}\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)h'=\frac{\lambda}{2},
\end{equation*}
equivalently,
\begin{equation*}
2\left(n-1\right)\left(1-n\rho\right)k_2^2B^{\frac{2}{2-n}}B^{\frac{n}{2-n}}r^{-\frac{n}{2}}+2\left(1-2\left(n-1\right)\rho\right)\frac{k_2^2n}{2}B^{\frac{2}{2-n}}B^{\frac{2\left(n-1\right)}{2-n}}r^{1-n}
\end{equation*}
\begin{equation*}
-2\left(1-2\left(n-1\right)\rho\right)\frac{k_2^2n}{2}B^{\frac{2}{2-n}}B^{\frac{n}{2-n}}r^{-\frac{n}{2}}+2\left(n-1\right)\left(n\rho-1\right)k_2^2B^{\frac{2\left(n-1\right)}{2-n}}B^{\frac{2}{2-n}}r^{1-n}
\end{equation*}
\begin{equation*}
+k_2^2B^{\frac{2}{2-n}}\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)h'=\frac{\lambda}{2}.
\end{equation*}
Since $B\neq0$, we get
\begin{equation*}
2\left(n-1\right)\left(1-n\rho\right)B^{\frac{n}{2-n}}r^{-\frac{n}{2}}+\left(1-2\left(n-1\right)\rho\right)nB^{\frac{2\left(n-1\right)}{2-n}}r^{1-n}-\left(1-2\left(n-1\right)\rho\right)nB^{\frac{n}{2-n}}r^{-\frac{n}{2}}
\end{equation*}
\begin{equation*}
-2\left(n-1\right)\left(1-n\rho\right)B^{\frac{2\left(n-1\right)}{2-n}}r^{1-n}+k_2^2B^{\frac{2}{2-n}}\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)h'=\frac{\lambda}{2k_2^2B^{\frac{2}{2-n}}}.
\end{equation*}
Consequently,
\begin{equation*}
(n-2)\left(B^{\frac{n}{2-n}}r^{-\frac{n}{2}}-B^{\frac{2(n-1)}{2-n}}r^{1-n}\right)+\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)h'=\frac{\lambda}{2k_2^2B^{\frac{2}{2-n}}}
\end{equation*}
equivalently,
\begin{equation*}
2k_2^2B^{\frac{2}{2-n}}\left(n-2\right)\left(B^{\frac{n}{2-n}}r^{-\frac{n}{2}}-B^{\frac{2\left(n-1\right)}{2-n}}r^{1-n}\right)+2k_2^2B^{\frac{2}{2-n}}\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)h'=\lambda.
\end{equation*}
Note that $B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\neq0$, otherwise $B=2r^{\frac{2-n}{2}}$ and consequently $B = 2A$.
Thus,
\begin{equation*}
h'(r)=\frac{\lambda}{2k_2^2B^{\frac{2}{2-n}}\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)}+\left(2-n\right)\left(\frac{B^{\frac{n}{2-n}}r^{-\frac{n}{2}}-B^{\frac{2\left(n-1\right)}{2-n}}r^{1-n}}{B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}}\right).
\end{equation*}
Making
\begin{equation*}
\varphi(r)=\frac{\lambda}{2k_2^2B^{\frac{2}{2-n}}\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)},\hspace{0.3cm}\text{and}\hspace{0.3cm} w(r)=\left(2-n\right)\left(\frac{B^{\frac{n}{2-n}}r^{-\frac{n}{2}}-B^{\frac{2\left(n-1\right)}{2-n}}r^{1-n}}{B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}}\right),
\end{equation*}
the first derivative of this equations leads us to
\begin{equation}
\varphi'(r)=-\frac{\lambda}{2k_2^2}\frac{\left(nB^{-1}r^{-\frac{n}{2}}-(n+2)B^{-2}r^{1-n}\right)}{\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)^2},
\end{equation}
and
\begin{equation}
w'(r)=\frac{\left(2-n\right)}{2}\frac{\left(3nB^{\frac{2n}{2-n}}r^{-n}-4(n-1)B^{\frac{3n-2}{2-n}}r^{\frac{2-3n}{2}}+2(n-2)B^{\frac{4(n-1)}{2-n}}r^{2(1-n)}-nB^{\frac{n+2}{2-n}}r^{-\frac{(n+2)}{2}}\right)}{\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)^2}
\end{equation}
Replacing the functions in the equation \eqref{3}, that is,
\begin{equation*}
(n-2)\psi''+\psi h''+2\psi'h'=0,
\end{equation*}
we obtain
\begin{equation*}
(n-2)\frac{k_2n}{2}\left(B^{\frac{2(n-1)}{2-n}}r^{-n}-B^{\frac{n}{2-n}}r^{-\frac{2+n}{2}}\right)\left(B^{\frac{4}{2-n}}-4B^{\frac{2+n}{2-n}}r^{\frac{2-n}{2}}+4B^{\frac{2n}{2-n}}r^{2-n}\right)
\end{equation*}
\begin{equation*}
+\frac{(2-n)}{2}k_2B^{\frac{2}{2-n}}\left(3nB^{\frac{2n}{2-n}}r^{-n}-4\left(n-1\right)B^{\frac{2-3n}{2-n}}r^{\frac{2-3n}{2}}+2\left(n-2\right)B^{\frac{4\left(n-1\right)}{2-n}}r^{2\left(1-n\right)}-nB^{\frac{n+2}{2-n}}r^{-\frac{n+2}{2}}\right)
\end{equation*}
\begin{equation*}
2\left(2-n\right)k_2B^{\frac{n}{2-n}}r^{-\frac{n}{2}}\left(B^{\frac{n}{2-n}}r^{-\frac{n}{2}}-B^{\frac{2\left(n-1\right)}{2-n}}r^{1-n}\right)\left(B^{\frac{2}{2-n}}-2B^{\frac{n}{2-n}}r^{\frac{2-n}{2}}\right)
\end{equation*}
\begin{equation*}
-\frac{\lambda}{2k_2}\left(nB^{\frac{n}{2-n}}r^{-\frac{n}{2}}-(n+2)B^{\frac{2(n-1)}{2-n}}r^{1-n}\right)=0,
\end{equation*}
which is equivalent to
\begin{equation*}
B^{\frac{2(n+1)}{2-n}}r^{-n}-2B^{\frac{3n}{2-n}}r^ {\frac{2-3n}{2}}+B^{\frac{2(2n-1)}{2-n}}r^{2(1-n)}=\frac{\lambda}{(n-2)^2k_2^2}\left(nB^{\frac{n}{2-n}}r^{-\frac{n}{2}}-(n+2)B^{\frac{2(n-1)}{2-n}}r^{1-n}\right).
\end{equation*}
Note that $nB^{\frac{n}{2-n}}r^{-\frac{n}{2}}-(n+2)B^{\frac{2(n-1)}{2-n}}r^{1-n}\neq0$, because otherwise $B=\frac{n+2}{n}r^{\frac{2-n}{2}}$. On the other hand,
\begin{equation*}
B^{\frac{2(n+1)}{2-n}}r^{-n}-2B^{\frac{3n}{2-n}}r^{\frac{2-3n}{2}}+B^{\frac{2(2n-1)}{2-n}}r^{2(1-n)}=0
\end{equation*}
therefore,
\begin{equation*}
\left(B^{\frac{n+1}{2-n}} r^{-\frac{n}{2}}-B^{\frac{2n-1}{2-n}} r^{1-n}\right)^2=0,
\end{equation*}
and consequently $B=r^{\frac{2-n}{2}}$, but this is a contradiction, because $B=\frac{n+2}{n}r^{\frac{2-n}{2}}$. Therefore,
\begin{equation}\label{lambda}
\lambda=(n-2)^2k_2^2\frac{\left(B^{\frac{n+1}{2-n}} r^{-\frac{n}{2}}-B^{\frac{2n-1}{2-n}} r^{1-n}\right)^2}{\left(nB^{\frac{n}{2-n}} r^{-\frac{n}{2}}-(n+2)B^{\frac{2(n-1)}{2-n}} r^{1-n}\right)}.
\end{equation}
Since $\lambda$ is constant, we have that
\begin{equation}
\frac{d}{dr}\frac{\left(B^{\frac{n+1}{2-n}} r^{-\frac{n}{2}}-B^{\frac{2n-1}{2-n}} r^{1-n}\right)^2}{\left(nB^{\frac{n}{2-n}} r^{-\frac{n}{2}}-(n+2)B^{\frac{2(n-1)}{2-n}} r^{1-n}\right)}=0.
\end{equation}
if, and only if,
\begin{equation*}
(4n-1)B^{\frac{4n}{2-n}} r^{-2n}-(4n-1)(n+2)B^{\frac{5n-2}{2-n}} r^{\frac{2-5n}{2}}-n^2B^{\frac{3n+2}{2-n}} r^{-\frac{3n+2}{2}}+(n+2)nB^{\frac{4n}{2-n}} r^{-2n}
\end{equation*}
\begin{equation*}
+(2-5n)nB^{\frac{5n-2}{2-n}} r^{\frac{2-5n}{2}}-(2-5n)(n+2)B^{\frac{2(3n-2)}{2-n}} r^{2-3n}+(2n-1)nB^{\frac{2(3n-2)}{2-n}} r^{2-3n}
\end{equation*}
\begin{equation*}
-\frac{1}{2}\left(3n^2+2n-4\right)B^{\frac{4n}{2-n}} r^{-2n}-(2n-1)(n+2)B^{\frac{7n-6}{2-n}} r^{\frac{6-7n}{2}}+(n^2+n-2)B^{\frac{5n-2}{2-n}} r^{\frac{2-5n}{2}}
\end{equation*}
\begin{equation*}
+\frac{n^2}{2}B^{\frac{3n+2}{2-n}} r^{-\frac{3n+2}{2}}+(3n^2+2n-4)+\frac{n^2}{2}B^{\frac{5n-2}{2-n}} r^{-\frac{2-5n}{2}}-2(n^2+n-2)B^{\frac{2(3n-2)}{2-n}} r^{2-3n}
\end{equation*}
\begin{equation*}
-n^2B^{\frac{4n}{2-n}} r^{-2n}-\frac{1}{2}\left(3n^2+2n-4\right)B^{\frac{2(n-2)}{2-n}} r^{2-3n}+\frac{n^2}{2}B^{\frac{5n-2}{2-n}} r^{-\frac{2-5n}{2}}+(n^2+n-2)B^{\frac{7n-6}{2-n}} r^{-\frac{6-7n}{2}}=0,
\end{equation*}
if, and only if,
\begin{equation}\label{derivada}
A^2\left[-n^2r^{\frac{n-2}{2}}A^2+(n^2+4)A+4(1-n)r^{\frac{2-n}{2}}\right]=0.
\end{equation}
We will prove that equation \eqref{derivada} is satisfied if, and only, if $A=0$. For this, consider
\begin{equation*}
f(r)= -n^2r^{\frac{n-2}{2}}A^2+(n^2+4)A+4(1-n)r^{\frac{2-n}{2}}.
\end{equation*}
If $f(r)= 0 \;\; \forall r>0$ and $A \ne 0$, then its derivative also is zero. Since there is a single value of $r$ such that $f'(r) =0$ given by $ r= \left( \frac{4(n-1)}{n^2A^2}\right)^{\frac{1}{n-2}}$, we get a contradiction.
Therefore equation \eqref{derivada} is satisfied if, and only if, $A=0$. In this case $B=r^{\frac{2-n}{2}}$. Substituting $B=r^{\frac{2-n}{2}}$ into \eqref{lambda}, we obtain that $\lambda = 0$. Therefore the proof is done. \end{proof}
\begin{proof} Proof of Corollary \ref{theorem 6}.
It follows from Theorem \ref{theorem 5} that $\left(\mathbb{R}^{n},\bar{g}\right)$, $\bar{g}=\frac{1}{\psi^{2}}g$, a $\rho$--Einstein soliton with scalar curvature $K_{\bar{g}}=0$,
is steady if, and only if, $A=0$. Moreover, $h(r)$ is constant. Since $A=0$, we obtain from Theorem \ref{coro1} that $\psi(r)=k_2r$.
It follows from \cite{MA} that $\left(\mathbb{R}^{n},\bar{g}\right)$ has zero sectional curvature. Therefore, we conclude that $\left(\mathbb{R}^{n},\bar{g}\right)$ is flat. The converse is automatically satisfied. \end{proof}
\begin{proof} Proof of Corollary \ref{theorem 7}.
Since $\left(\mathbb{M}^{n},\bar{g}\right)$ is locally conformally flat and rotationally symmetric, locally the metric $\bar{g}$ is given by $\bar{g}=\frac{1}{\psi^{2}}g$, where $ \psi =\psi(r)$,
$r=\sum\limits_{k=1}^nx_{k}^2$ and $g$ is the Euclidean metric. Therefore the results obtained in Theorem \ref{theorem 5} and Corollary \ref{theorem 6} are satisfied. \end{proof}
\begin{proof} Proof of Proposition \ref{prop}. Consider the manifolds $M=\left(\mathbb{R}^n\setminus\{0\},g=\frac{g_0}{\varphi^2}\right)$, where $\varphi(r)=re^{-\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}$, and $N=\left(\mathbb{R}^n\setminus\{0\},\bar{g}=\frac{g_0}{\psi^2}\right)$, where $\psi(r)=\frac{k_2r}{\left(1+r^\frac{n-2}{2}\right)^\frac{2}{n-2}}$ with $k_2\in\mathbb{R}_{+}^{*}$, and $g_0$ is a Euclidean metric. Note that
\begin{equation*}
|v|_g=\frac{1}{\varphi}|v|_{g_0}\hspace{0.5cm} \textit{and} \hspace{0.5cm} |v|_{\bar{g}}=\frac{1}{\psi}|v|_{g_0}
\end{equation*}
On the other hand, we get
\begin{equation*}
|v|_{g}=\frac{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}{r}|v|_{g_0}=\frac{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}\frac{\left(1+r^\frac{n-2}{2}\right)^\frac{2}{n-2}}{k_2r}|v|_{g_0}.
\end{equation*}
Thus,
\begin{equation*}
|v|_{g}=h(r)|v|_{\bar{g}},
\end{equation*}
where, $h(r)=\frac{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}$.
To find $c_1> 0$ such that $|v|_{\bar{g}}\geq c_1|v|_g$, it suffices to solve the following minimization problem:
\begin{equation*}
\min\limits_{r\in\mathbb{R_{+}^{*}}} h(r)
\end{equation*}
Computing the first derivative of $h$, we obtain
\begin{equation*}
h'(r)=e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{-n}{n-2}}r^{\frac{n-4}{2}}\left[\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}-1\right].
\end{equation*}
Since $r>0$, we get $h'(r)>0$ for all $r$, so the function $h$ is strictly increasing. Therefore,
\begin{equation}
\min\limits_{r\in\mathbb{R}^{*}_+}h(r)=\lim\limits_{r\longrightarrow 0}\frac{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}=e.
\end{equation}
Taking $c_1=e$, we get $h(r)\geq c_1$ for all $r$. Thus $|v|_{g}\geq c_1|v|_{\bar{g}}$; since $M=\left(\mathbb{R}^n\setminus\{0\}, \bar{g}=\frac{g_0}{\psi^2}\right)$ is complete, it follows that $N=\left(\mathbb{R}^n\setminus\{0\}, g=\frac{g_0}{\varphi^2}\right)$ is complete.
We will show that $\left(\mathbb{R}^n,\bar{g}\right)$ has negative scalar curvature. Indeed, note that
\begin{equation}\label{pride}
\varphi'(r)=\frac{1-r^{\frac{n-2}{2}}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{4-n}{n-2}}}{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}},
\end{equation} and \begin{equation*} \varphi''(r)=-\frac{1}{2e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}\left[(4-n)r^{\frac{2n-6}{2}}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{6-2n}{n-2}}+(n-2)r^{\frac{n-4}{2}}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{4-n}{n-2}}\right. \end{equation*} \begin{equation*} \left.-2r^{\frac{n-4}{2}}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{4-n}{n-2}}-2r^{\frac{2n-6}{2}}\left(1+r^{\frac{n-2}{2}}\right)^{2\frac{(4-n)}{n-2}}\right], \end{equation*} implies that, \begin{equation}\label{segder} \varphi''(r)=-\frac{\left[nr^{\frac{n-4}{2}}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{4-n}{n-2}}+(4-n)r^{n-3}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{6-2n}{n-2}}-2r^{n-3}\left(1+r^{\frac{n-2}{2}}\right)^{2\frac{(4-n)}{n-2}}\right]}{2e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}. \end{equation}
It is well known (see, e.g., \cite{RO} or \cite{MP}) that if $\bar{g}=\frac{g}{\psi^{2}}$, then \begin{equation}\label{curv} K_{\bar{g}}=4r\left[2(n-1)\varphi\varphi''-n(n-1)\left(\varphi'\right)^2\right]+4n(n-1)\varphi\varphi'. \end{equation}
Substituting the expressions found in \eqref{pride} and \eqref{segder} into \eqref{curv}, we obtain
\begin{equation*} K_{\bar{g}}=\frac{4(n-1)}{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}\left[(2-n)r^{n-1}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2(4-n)}{n-2}}+2(1-n)r^{\frac{n}{2}}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{4-n}{n-2}}\right. \end{equation*}
\begin{equation*} +\left.(n-4)r^{n-1}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2(3-n)}{n-2}}\right] \end{equation*}
or, equivalently,
\begin{equation*} K_{\bar{g}}=\frac{4(n-1)\left(1+r^{\frac{n-2}{2}}\right)^{\frac{4-n}{n-2}}}{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}\left[(2-n)r^{n-1}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{4-n}{n-2}}+2(1-n)r^{\frac{n}{2}}\right. \end{equation*}
\begin{equation*} +\left.(n-4)r^{n-1}\left(1+r^{\frac{n-2}{2}}\right)^{-1}\right] \end{equation*}
which implies that
\begin{equation*} K_{\bar{g}}=\frac{4(n-1)\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2(3-n)}{n-2}}}{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}\left[(2-n)r^{n-1}\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}+2(1-n)r^{\frac{n}{2}}\left(1+r^{\frac{n-2}{2}}\right)\right. \end{equation*}
\begin{equation*} -\left.(4-n)r^{n-1}\right]. \end{equation*}
Therefore,
\begin{equation*} K_{\bar{g}}=-\frac{4(n-1)\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2(3-n)}{n-2}}}{e^{\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}}}\left[(n-2)\left(1+r^{\frac{n-2}{2}}\right)^{\frac{2}{n-2}}+2(n-1)r^{\frac{2-n}{2}}+(n+2)\right]r^{n-1}. \end{equation*}
\end{proof} \begin{proof}[Proof of Corollary \ref{coro3}] This follows immediately from Corollary \ref{coro2}. \end{proof}
The authors would like to thank the referee for the careful reading, relevant remarks, and valuable suggestions.
\end{document} | arXiv | {
"id": "2102.05202.tex",
"language_detection_score": 0.5222916007041931,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Graphical small cancellation groups with the Haagerup property]{Graphical small cancellation groups with the Haagerup property}
\author{Goulnara Arzhantseva} \address{Universit\"at Wien, Fakult\"at f\"ur Mathematik\\ Oskar-Morgenstern-Platz 1, 1090 Wien, Austria. } \address{Erwin Schr\"odinger International Institute for Mathematical Physics\\ Boltzmanngasse~9, 1090 Wien, Austria. } \email{goulnara.arzhantseva@univie.ac.at}
\author{Damian Osajda} \address{Instytut Matematyczny, Uniwersytet Wroc\l awski (\textup{on leave})\\ pl.\ Grunwaldzki 2/4, 50--384 Wroc{\l}aw, Poland} \address{Universit\"at Wien, Fakult\"at f\"ur Mathematik\\ Oskar-Morgenstern-Platz 1, 1090 Wien, Austria. } \email{damian.osajda@univie.ac.at} \subjclass[2010]{{20F06, 20F67, 46B85, 46L80}} \keywords{Small cancellation theory, Haagerup property, Gromov's a-T-menability}
\thanks{G.A.\ was partially supported by the ERC grant ANALYTIC no.\ 259527. D.O.\ was partially supported by Narodowe Centrum Nauki, decision no DEC-2012/06/A/ST1/00259.}
\begin{abstract} We prove the Haagerup property (= Gromov's a-T-menability) for finitely generated groups defined by infinite presentations satisfying the graphical $C'(\ov {\lambda})$--small cancellation condition with respect to graphs endowed with a compatible wall structure. We deduce that these groups are coarsely embeddable into a Hilbert space and that the strong Baum-Connes conjecture and, hence, the Baum-Connes conjecture with arbitrary coefficients hold for them. As the main step we show that $C'(\ov {\lambda})$--complexes satisfy the linear separation property. Our result provides many new examples and a general technique to show the Haagerup property for graphical small cancellation groups. \end{abstract}
\maketitle
\section{Introduction} \label{s:intro} The aim of this paper is two-fold: to show a general analytic result, the Haagerup property, for a wide class of graphically presented groups and to provide an approach to a long-standing problem on the existence of a coarsely embeddable, into a Hilbert space, but not coarsely amenable group.
Graphical presentations are group presentations where relators are words labeling cycles of a given graph. Every group has a graphical presentation in a trivial way: the corresponding graph is the disjoint union of simple cycles labeled by the relator words. In general, given a labeled graph, one expects that its combinatorial structure and properties of the labeling encode algebraic and geometric features of the group it defines.
The first concrete use of non-trivial graphical presentations is due to Rips and Segev in the context of Kaplansky's zero-divisor conjecture: they give the first example of a torsion-free group that is not a unique product group~\cite{RS}. A recent application is the celebrated construction of Gromov's monster group which contains an infinite expander family of graphs in the Cayley graph~\cites{Gro, AD}. Both constructions follow a general idea: find an appropriate combinatorial interpretation (e.g.\ the expansion) of a required group property and combine it with a suitable small cancellation condition on the labeling (e.g.\ the geometric small cancellation in the case of Gromov's monster) in order to produce a group containing a given graph in its Cayley graph. This approach leads to spectacular counterexamples: to the unique product property~\cite{RS} and to the Baum-Connes conjecture with arbitrary coefficients~\cite{HLS}.
In contrast to such specific counterexamples, our first goal is to prove an affirmative result, the Haagerup property, for many groups given by graphical small cancellation presentations.
A second countable, locally compact group $G$ has the \emph{Haagerup property} (or $G$ is \emph{a-T-menable} in the sense of Gromov) if it possesses a proper continuous affine isometric action on a Hilbert space. The concept first appeared in the seminal paper of Haagerup \cite{Haa}, where this property was shown for finitely generated free groups. Regarded as a weakening of von Neumann's amenability and a strong negation of Kazhdan's property (T), the Haagerup property has been revealed independently in harmonic analysis, non-commutative geometry, and ergodic theory \cites{AW,Cho,BoJaS,BR}, \cite{Gro88}*{4.5.C}, \cite{Gro93}*{7.A and 7.E}. A major breakthrough was a spectacular proof of Higson and Kasparov \cite{HK} of the strong Baum-Connes conjecture (which is strictly stronger than the Baum-Connes conjecture with coefficients~\cite{MeyerNest}) for all groups with the Haagerup property. It follows that the Novikov higher signature conjecture and, for discrete torsion-free groups, the Kadison-Kaplansky idempotents conjecture hold for these groups. Nowadays, many groups have been shown to have the Haagerup property and significant applications in K-theory and topology have been discovered \cites{ChCJJV,MislinValette}, making groups with the Haagerup property increasingly fundamental to study.
Groups given by classical small cancellation presentations\footnote{The graph is the disjoint union of simple cycles and the labeling satisfies the classical $C'(1/6)$--small cancellation condition~\cite{LS}.} are known to possess the Haagerup property by a result of Wise~\cite{W-sc} for finitely presented groups and by a recent result of the authors~\cite{AO} for infinitely presented groups. In contrast, there exist non-trivial \emph{graphical small cancellation} presentations defining infinite groups with Kazhdan's property (T), hence, without the Haagerup property~\cites{Gro,Silberman,OllivierWise}.
In this paper, we determine a natural compatibility assumption between the graph combinatorics and the graphical small cancellation condition on the labeling, which guarantees the Haagerup property of the resulting graphically presented group.
\begin{mtw} \label{t:main} Let $G$ be a finitely generated group given by a graphical presentation satisfying the lacunary walling condition (see Definition~\ref{d:lwpres}). Then $G$ acts properly on a space with walls. In particular, $G$ has the Haagerup property. \end{mtw}
A systematic discussion of groups having proper actions on a space with walls can be found in~\cite{Cor}. In the terminology of that paper, our main result means that $G$ has Property PW.
We actually prove a stronger result which, in addition, holds for more general spaces.
\begin{tw}\label{t:linear} A complex $X$ with the lacunary walling condition satisfies the linear separation property, that is, the path metric on $X^{(0)}$ and the wall pseudo-metric are bi-Lipschitz equivalent. \end{tw}
Our method arises in ambition to approach the following -- still open -- well-known problem (see e.g.~\cite{claire}, \cite{NowakYu}*{5.3.3}):
\noindent \emph{Does there exist a finitely generated group which coarsely embeds into a Hilbert space but which is not coarsely amenable?}
The concept of coarse embedding was introduced by Gromov~\cite{Gro93}*{p.218}. Yu~\cite{Yu}*{Theorem 1.1} proved the coarse Baum-Connes conjecture for every discrete space with bounded geometry coarsely embeddable into a Hilbert space. This implies the Novikov higher signature conjecture for all closed manifolds whose fundamental group, viewed with the word length metric, admits such a coarse embedding. This result generated an intense study of groups and metric spaces coarsely embeddable into a Hilbert space.
Coarse amenability is a weak form of amenability. It was introduced in~\cite{Yu}, under the term Property A, as a sufficient condition for coarse embeddings into a Hilbert space. For a countable discrete group $G$, coarse amenability is equivalent to the existence of a topological amenable action of $G$ on a compact Hausdorff space and to the $C^*$--exactness of the reduced $C^*$--algebra $C^*_r(G)$, see e.g.~\cite{NowakYu}.
All finitely generated groups currently known to be coarsely embeddable into a Hilbert space are, moreover, coarsely amenable. That is, the above question remains open. Towards a positive answer, we have the following assertion.
\begin{tw}\label{t:nota} Let $(r_i)_{i\in \mathbb N}$ be graphs with all vertex degrees at least 3. Let $G$ be given by a graphical presentation with relators $(r_i)_{i\in \mathbb N}$ satisfying the lacunary walling condition. Then $G$ has the Haagerup property but is not coarsely amenable. In particular, $G$ admits a coarse embedding into a Hilbert space but $G$ is not coarsely amenable. \end{tw}
The lacunary walling condition (see Definition~\ref{d:lwpres}) assures some upper bound on all vertex degrees and girth tending to infinity as $i\to\infty$. Coarse non-amenability of such a group $G$ follows then by a result of Willett~\cite{Willett} combined with Lemma~\ref{l:remb}, and the fact that coarse amenability is inherited by subspaces, see e.g.~\cite{NowakYu}*{Proposition 4.2.5}. The Haagerup property of $G$ follows by our Main Theorem and the coarse embedding into a Hilbert space is provided by an orbit map of the given proper affine isometric action.
The question above and the interplay of coarse amenability versus coarse embeddings and the Haagerup property are still challenging as an explicit construction of such a sequence $(r_i)_{i\in \mathbb N}$ is yet an open problem. Specifically, the existence of a graphical small cancellation labeling (see the next Section for the terminology) is of great interest.
\begin{open} Is there a sequence $(r_i)_{i\in \mathbb N}$ of graphs with all vertex degrees between 3 and some upper bound such that some graphical presentation with relators $(r_i)_{i\in \mathbb N}$ satisfies the $C'(1/6)$--small cancellation condition? \end{open}
A natural candidate for a sequence $(r_i)_{i\in \mathbb N}$ as in Theorem~\ref{t:nota} is Arzhantseva-Guentner-\v{S}pakula's box space~\cite{AGS}. This space is the first example of a metric space with bounded geometry (indeed, of a regular graph) which coarsely embeds into a Hilbert space but which is not coarsely amenable. In~\cite{AGS}, a wall structure on this graph is provided. We show in Example 3 that this wall structure indeed satisfies our $\beta$--condition and, after a slight variation of the construction, our $\delta$--condition (Definition~\ref{d:sep}), both required by the lacunary walling condition.
In quest of a sequence of graphs $(r_i)_{i\in \mathbb N}$ as above, we prove the following general result, of independent interest, as it provides the first -- explicit -- small cancellation labeling of an appropriate subdivision of any given sequence of bounded degree graphs.
\begin{tw}\label{t:sc} Let $\ov \Gamma = (\Gamma_i)_{i\in \mathbb N}$ be a family of finite graphs with degree bounded by $d$. For every $\ov \lambda = (\lambda_i)_{i\in \mathbb N}$ with $\lambda_i\in (0,1)$, there exists a sequence $(j_i)_{i\in \mathbb N}$ of natural numbers with the following property. There exists an explicit labeling of the family $(\Gamma_i^{j_i})_{i\in \mathbb N}$ of subdivisions by $d+2$ letters satisfying the $C'(\ov \lambda)$--small cancellation condition. \end{tw}
Moreover, in Theorem~\ref{p:scsubl}, for a given infinite family of graphs we describe a method of labeling it (modulo a subdivision) in such a way that a stronger \emph{lacunarity} condition (see Definition~\ref{d:cond}) is satisfied. This is then used to provide examples of non-classical infinite graphical small cancellation presentations of groups with the Haagerup property.
\noindent {\bf Organisation.} In \S~\ref{s:prelim}, we define small cancellation complexes and graphical presentations. In \S~\ref{s:wall}, we describe the structure of a space with walls on small cancellation complexes and introduce the ($\beta,\delta$)--separation condition, required for the definition of the lacunary walling condition. In \S~\ref{s:properness}, we define the lacunary walling condition and prove that a complex with the lacunary walling condition satisfies the linear separation property, Theorem~\ref{t:linear}. In \S~\ref{s:Haa}, we deduce Main Theorem. In \S~\ref{s:app}, we prove Theorem~\ref{t:sc}. Then we give concrete examples of graphical small cancellation presentations that satisfy the hypothesis of our Main Theorem and that do not reduce to classical small cancellation presentations. This is done in \S~\ref{s:ex}, where we also discuss the box space from~\cite{AGS}. Finally, we show that our lacunary walling condition, crucial for the proof of Theorem~\ref{t:linear}, cannot be removed, see~\S~\ref{optim}.
\noindent {\bf Acknowledgments.} We thank Rufus Willett for comments on the Baum-Connes conjectures and Jan \v{S}pakula for the discussion on the box spaces in Example 3.
\section{Small cancellation complexes} \label{s:prelim}
\subsection{\texorpdfstring{$C'(\lambda)$}{C'(lambda)}--complexes.}\label{s:c'cpl} Here we describe the spaces that we work on throughout this paper. Let $X^{(1)}$ be a graph. Our graphs have neither loops nor multiple edges, and moreover, all our graphs have uniformly bounded degree. Let $(\varphi_i \colon r_i \to \xj)_{i\in \mathbb N}$ be a family of local isometries of finite graphs $r_i$. We call these finite graphs \emph{relators}. We assume that $\varphi_i \neq \varphi_j$, for $i\neq j$. The \emph{cone} over the relator $r_i$ is the quotient space $\mr{cone}\, r_i:=(r_i \times [0,1])/\{ (x,1) \sim (y,1)\}$. The main object of our study is the \emph{coned-off space}: \begin{align*} X:=X^{(1)}\cup_{(\varphi_i)} \bigcup_{i\in \mathbb N} \mr{cone}\,r_i, \end{align*} where $\varphi_i$ is the map $r_i\times \{ 0 \} \to \xj$. We assume that $X$ is simply connected. The space $X$ has a natural structure of a CW complex, or even of a simplicial complex, however we will not specify it. Nevertheless, we usually call $X$ a ``complex". Throughout the article, if not specified otherwise, we consider the \emph{path metric}, denoted by $d(\cdot,\cdot)$, defined on the $0$--skeleton $X^{(0)}$ of $X$ by (combinatorial) paths in the $1$--skeleton $X^{(1)}$. \emph{Geodesics} are the shortest paths in $X^{(1)}$ for this metric.
A path $p \hookrightarrow X^{(1)}$ is a \emph{piece} if there are relators $r_i,r_j$ such that $p\hookrightarrow X$ factors as $p \hookrightarrow r_i \stackrel{\varphi_i}{\longrightarrow} X$ and as $p\hookrightarrow r_j \stackrel{\varphi_j}{\longrightarrow} X$, but there is no isomorphism $r_i \to r_j$ that makes the following diagram commutative. $$ \begindc{\commdiag}[2] \obj(12,1)[a]{$r_i$} \obj(35,2)[b]{$X$} \obj(35,1)[b']{} \obj(35,16)[c]{$r_j$} \obj(35,17)[c']{} \obj(12,16)[d]{$p$} \obj(12,17)[d']{} \mor{a}{b'}{} \mor{c}{b}{} \mor{a}{c}{} \mor{d'}{c'}{} \mor{d'}{a}{} \enddc $$
\noindent
This means that $p$ occurs in $r_i$ and $r_j$ in two essentially distinct ways.
Let $\lambda \in (0,1)$. We say that the complex $X$ satisfies the \emph{$C'(\lambda)$--small cancellation condition} (or, shortly, the \emph{$C'(\lambda)$--condition}; or we say that $X$ is a \emph{$C'(\lambda)$--complex}) if every piece $p\hookrightarrow X$ factorizing through $p\hookrightarrow r_i \stackrel{\varphi_i}{\longrightarrow} X$ has length strictly less than $\lambda \cdot \gi\,r_i$.
In this paper, we use the following stronger variant of the $C'(\lambda)$--small cancellation condition. Let $\overline {\lambda}:=(\lambda (r_i))_{i\in \mathbb N}$ be a vector with $\lambda (r_i) \in (0,1)$. We say that the complex $X$ satisfies the \emph{$C'(\overline{\lambda})$--small cancellation condition} if every piece $p\hookrightarrow X$ factorizing through $p\hookrightarrow r_i \stackrel{\varphi_i}{\longrightarrow} X$ has length strictly less than $\lambda (r_i) \cdot \gi\,r_i$.
\begin{lem}[Relators embed] \label{l:remb} If $X$ is a $C'(1/24)$--complex, then the maps $\varphi_i\colon r_i \to X$ are isometric embeddings. \end{lem} \begin{proof} This follows from results in~\cite{W-qch} (see also \cite{W-ln}*{Chapter 10} or \cite{Oll}*{Theorem 1}). Indeed, by the proof of Lemma 3.46 there, $X$ has \emph{short innerpaths}, and by Theorem 3.31 the nonpositive curvature Condition 5.1.(2) is satisfied (see also \cite{W-ln}*{Lemma 9.12}). Thus, the claim follows from Lemma 5.5. \end{proof}
In our case, of graphical small cancellation, the preceding result can be strengthened to $C'(1/6)$--complexes. We do not elaborate on this since anyway we will use $C'(\lambda)$--complexes as $\lambda \to 0$.
\subsection{\texorpdfstring{$C'(\lambda)$}{C'(lambda)}--presentations}\label{s:c_prime_gp}
Let $Y^{(1)}$ be a finite graph and let $(\varphi_i \colon r_i \to Y^{(1)})_{i\in \mathbb N}$ be a family of local isometries of graphs. They form a \emph{graphical presentation} \begin{align}\label{eq:gpres}
\langle Y^{(1)}\; | \; (r_i)_{i\in \mathbb N} \rangle, \end{align} defining a group $G:=\pi_1(Y^{(1)})/\langle\langle \pi_1(r_i)_{i\in \mathbb N} \rangle\rangle$, being the fundamental group of a coned-off space: \begin{align*} Y:=Y^{(1)}\cup_{(\phi_i)} \bigcup_{i\in \mathbb N} \mr{cone}\,r_i. \end{align*}
We say that $\langle Y^{(1)}\; | \; (r_i)_{i\in \mathbb N} \rangle$ is a \emph{$C'(\lambda)$--small cancellation presentation} (respectively, a \emph{$C'(\overline{\lambda})$--small cancellation presentation}) if the universal cover $X$ of $Y$, with the induced maps $(\varphi_i \colon r_i \to X^{(1)})_{i\in \mathbb N}$, is so.
\subsection{Local-to-global density principle}\label{s:prel}
Here we provide a simple tool that allows us to conclude global properties of complexes from the local ones. Its proof can be found in~\cite{AO}.
Let $\gamma$ be a simple path in $X^{(1)}$. For a subcomplex $B$ of $\gamma$, by $E(B)$ we denote the set of edges of $B$. Let $\mathcal U$ be a family of nontrivial subpaths of $\gamma$, and let $A$ be a subcomplex of $\bigcup \mathcal U$ (that is, of the union $\bigcup_{U\in \mathcal U}U$).
\begin{lem}[Local-to-global density principle] \label{l:lgd} Assume that there exists $C\geqslant 0$, such that \begin{align*}
\frac{|E(A)\cap E(U)|}{|E(U)|}\geqslant C, \end{align*}
for every $U\in \mathcal U$. Then $|E(A)|\geqslant (C/2)|E(\bigcup \mathcal U)|$. \end{lem}
\section{Walls}\label{s:wall}
Let $X$ be a $C'(\lambda)$--complex. In this section, we equip the $0$--skeleton $X^{(0)}$ of $X$ with the structure of a space with walls $(X^{(0)}, \mathcal{W})$. We use a method of constructing walls from~\cite{W-qch}.
Recall, cf.\ e.g.\ \cite{ChMV}, that for a set $Y$ and a family $\mathcal W$ of partitions (called \emph{walls}) of $Y$ into two parts, the pair $(Y,\mathcal W)$ is called a \emph{space with walls} if the following holds. For every two distinct points $x,y\in Y$ the number of walls separating $x$ from $y$ (called the \emph{wall pseudo-metric}), denoted by $\dw(x,y)$, is finite.
Now we define walls for $\xz$. For a tentative abuse of notation we denote by ``walls" some subsets of edges of $\xj$, then showing that they indeed define walls. Roughly speaking, two edges are in the same wall if they are ``opposite" in a relator, and then this relation is transitively closed. This general idea goes back to the definition of walls for the classical small cancellation theory; see \cites{W-sc,AO} for finite and infinite classical small cancellation complexes, respectively. However, since there is no notion of ``oppositeness" in a general graph relator $r_i$, we require certain assumptions on each graph $r_i$.
\subsection{Defining walls} \label{s:relwall}
A \emph{wall} in a graph $\Gamma$ is a collection $w$ of edges such that removing all open edges of $w$ decomposes $\Gamma$ in exactly two connected components. In particular, it requires $\Gamma$ to be connected. We call $\Gamma$ a \emph{graph with walls}, if every edge belongs to a wall.
If not stated otherwise, we assume that for a $C'(1/24)$--complex $X$, with given relators $r_i$, each graph $r_i$ is a graph with walls. Observe that every $r_i$ is in fact an isometrically embedded subgraph of $X$, by Lemma~\ref{l:remb}. Following \cite[Section 5]{W-qch} (see also \cite[Chapter 10]{W-ln}), we define walls in $\xj$ as follows: Two edges are in the same wall if they are in the same wall in some relator $r_i$. This relation is then extended transitively for all relators.
In general, the above definition may not result in walls for $\xj$ --- they might not decompose $\xj$, etc. We require some further assumptions on walls in relators, which are formulated in the next section. Then, in Section~\ref{s:walls}, we prove that our definition of walls in $\xj$ makes sense, and we explore further properties of such a system of walls.
\subsection{Separation property} \label{s:sep}
\begin{de}[($\beta,\delta$)--separation] \label{d:sep} For $\beta \in (0,1/2]$ and $\delta \in (0,1)$ a graph $r$ with walls satisfies the \emph{($\beta,\delta$)--separation property} if the following two conditions hold:
\noindent \emph{\underline{$\beta$--condition}}: for every two edges $e,e'$ in $r$ belonging to the same wall we have \begin{align*} d(e,e')+1\geqslant \beta \cdot \gi\,r. \end{align*}
\noindent \emph{\underline{$\delta$--condition}}: for every geodesic $\gamma$ in $r$, the number of edges in $\gamma$ whose walls meet $\gamma$ at
least twice is at most $\delta\cdot |\gamma|$.
A complex $X$ satisfies the \emph{($\beta,\delta$)--separation property} if each of its relators does so. \end{de}
There are other ways of defining an analogue of the $\beta$--condition above, which would make the $\delta$--condition unnecessary. However, one requires then large $\beta$, which is not convenient for providing examples.
\subsection{Walls in $X$.} \label{s:walls}
Let us show that a $C'(\lambda)$--complex $X$ satisfying the \bds property, does possess the wall structure given by the walls as defined in Subsection~\ref{s:relwall}, for sufficiently small $\lambda\leqslant 1/24$. We use results of Wise \cite[Section 5]{W-qch} (see also \cite[Chapter 10]{W-ln}). In particular, we have to check that $X$ satisfies the assumptions from~\cite{W-qch}.
\begin{lem}[Generalized $B(6)$] \label{l:b6} Let $X$ be a complex satisfying the $\beta$--condition from Definition~\ref{d:sep}. Then there exists $\lambda\leqslant 1/24$ with the following property. If $X$ satisfies the $C'(\lambda)$--condition then $X$ satisfies the \emph{generalized $B(6)$ condition} of \cite[Definition 5.1]{W-qch}. \end{lem} \begin{proof} Condition (1) of \cite[Definition 5.1]{W-qch} follows immediately from our definition of the coned-off space $X$. Condition (2) follows from the $C'(1/24)$--condition (see the proof of Lemma~\ref{l:remb} above). Conditions (3) and (6) follow from our definition of walls in graph relators. For a given $\beta$ the conditions (4) and (5) are implied by the $\beta$--condition together with the $C'(\lambda)$--condition, provided $\lambda$ is sufficiently small. \end{proof}
For the rest of this subsection we assume that $X$ satisfies the $\beta$--condition, for some $\beta\in (0,1)$, and the $C'(\lambda)$--small cancellation condition for $\lambda$ as in Lemma~\ref{l:b6}. With this fact in hand we use~\cite[Section 5]{W-qch} in our setting.
\begin{lem}[{\cite[Remark 5.24]{W-qch}} and {(\cite[Theorem 10.1]{W-ln})}]
\label{l:wallsep}
Removing all open edges from a given wall decomposes $\xj$ into exactly two connected components. \end{lem}
Thus, we define the family $\mathcal W$ for $\xz$ as the partitions of $\xz$ into sets of vertices in the connected components described by the lemma above.
\begin{prop}
\label{p:sww}
With the system of walls defined as above, $(\xz,\mathcal W)$ is a space with walls. \end{prop} \begin{proof}
Since, for any two vertices, there exists a path in $\xj$ connecting them, we get that the number of walls separating those two vertices is finite. \end{proof}
We recall further results on walls that will be extensively used in Section~\ref{s:properness}.
For a wall $w$, its \emph{hypergraph} $\Gamma_w$ is a graph defined as follows (see \cite[Definition 5.18]{W-qch} and \cite{W-sc}). There are two types of vertices in $\Gamma_w$ (see e.g.\ Figure~\ref{f:C}): \begin{itemize} \item \emph{edge-vertices} correspond to edges in $w$, \item \emph{relator-vertices} correspond to relators containing edges in $w$. \end{itemize} An \emph{edge} in $\Gamma_w$ connects an edge-vertex with a relator-vertex whenever the corresponding relator contains the given edge.
\begin{lem}[{\cite[Theorem 5.19]{W-qch}}] \label{l:hypergraph} Each hypergraph is a tree. \end{lem}
The \emph{hypercarrier} of a wall $w$ is the $1$--skeleton of the subcomplex of $X$ consisting of all relators containing edges in $w$ or of a single edge $e$ if $w=\{ e \}$.
\begin{tw}[{\cite[Corollary 5.34]{W-qch}}]
\label{l:carrconv}
Each hypercarrier is a convex subcomplex of $\xj$, that is, any geodesic connecting vertices of a hypercarrier is contained in this hypercarrier. \end{tw}
The following result is implicit in \cite[Section 5]{W-qch}, and formally it follows from Lemmas~\ref{l:hypergraph} \& \ref{l:carrconv} above, and from \cite[Lemma 5.9]{W-qch}.
\begin{cor} \label{l:relcon} Relators are convex subcomplexes of $\xj$. \end{cor}
\section{Linear separation property} \label{s:properness} \noindent In this section, we perform the main step toward Main Theorem, namely, we prove Theorem~\ref{p:lsp} below (Theorem~\ref{t:linear} from Introduction). This implies the properness of the wall pseudo-metric and it is the most involved part of the paper (cf.\ also Remark after Definition~\ref{d:cond} and Section~\ref{optim} below).
\emph{From now on, unless stated otherwise, each complex $X$ considered in this paper has relators $(r_i)_{i\in \mathbb N}$ being graphs with walls, and satisfies the following lacunary walling condition}.
\begin{de}[{Lacunary walling}] \label{d:cond} Let $\beta \in (0,1/2], \delta \in (0,1), M\in (0,1), K >0$, and let $k$ be a natural number larger than $1$. Let $\overline{\lambda}=(\lambda (r_i))_{i\in \mathbb N}$ be a vector with $\lambda (r_i)\leqslant \lambda$, where $\lambda< \beta/2$ is the constant from Lemma~\ref{l:b6} (that is, such that $X$ satisfies the generalized $B(6)$ condition). We denote by $b_{r_i}(t)$ the maximal number of edges in a ball of radius $t$ in the graph $r_i$. We say that $X$ satisfies the \emph{lacunary walling condition} if:
\begin{itemize} \item $X^{(1)}$ has degree bounded by $k$; \item (Small cancellation condition) $X$ satisfies the $C'(\overline{\lambda})$--condition; \item (Lacunarity) $b_{r_i}(\lambda (r_i) \cdot \gi\,r_i) \leqslant K \cdot \gi\,r_i$; \item (Separation condition) $X$ satisfies the \bds property; \item (Compatibility) $(1-\delta)(\beta - \lambda(r_i)) - 2K - 4\lambda(r_i) \geqslant M \cdot (\beta - \lambda(r_i))$. \end{itemize} \end{de}
Observe that this definition makes sense, that is, there are choices of all the constants and functions above satisfying the given constraints. To see this, note that (in the compatibility condition): \begin{align*} (1-\delta)(\beta - \lambda(r_i)) - 2K-4\lambda(r_i) = \left(1 - \delta-\frac{2K+4\lambda(r_i)}{\beta - \lambda(r_i)}\right)(\beta - \lambda(r_i)). \end{align*} Thus, after setting $\beta$ and $\delta$, one can choose small $K$ and $\ov \lambda$ so that the compatibility condition holds. Then one may further decrease the function $\ov \lambda$ to satisfy the lacunarity condition.
\rems 1) Our assumptions are not quantitatively optimal; they suit our general goal toward explicit examples (cf.\ Section~\ref{s:ex}). However, in Section~\ref{optim}, we argue that the lacunary walling condition is in a sense necessary in our approach.
\noindent 2) In this paper, we follow (up to some notations) the construction of walls provided by Wise \cite{W-qch} in a much more general case of small cancellation over CAT(0) cubical complexes. In fact, for graphical small cancellation --- as considered in our paper --- one could adapt the proofs provided in \cite{W-sc} in the classical small cancellation case (cf.\ e.g.\ \cite{OllivierWise}). We decided to follow the more general approach having in mind possible future extensions of our results.
\noindent 3) Whereas the construction of walls for (cubical) small cancellation complexes is entirely the idea of Wise, the properness of the wall pseudo-metric is proved only in some cases in \cites{W-sc,W-qch}. In particular, as we point out in \cite[Section 6]{AO}, there exist classical $B(6)$ small cancellation complexes whose wall pseudo-metric is not proper. In \cite[Section 5.k]{W-qch} the linear separation property is proved in the case of graphical small cancellation under the additional assumption on the presentation finiteness. The proof does not extend to our -- infinitely presented -- case. In Section~\ref{optim}, we comment on relations between our approach and the one from \cite[Section 5.k]{W-qch}.
By Subsection~\ref{s:walls}, for a complex $X$ with the lacunary walling condition, there is a structure of space with walls $(X^{(0)},\mathcal W)$. The rest of this section is devoted to proving that $(X^{(0)},\mathcal W)$ satisfies the \emph{linear separation property} (Theorem~\ref{t:linear} in the Introduction, and Theorem~\ref{p:lsp} below) stating that the wall pseudo-metric on $\xz$ is bi-Lipschitz equivalent to the path metric (cf.\ e.g.\ \cite[Section 5.11]{W-qch}).
Let $p,q$ be two distinct vertices in $X$. It is clear that \begin{align*}
\dw(p,q) \leqslant d(p,q). \end{align*} For the rest of this section our aim is to prove an opposite (up to a scaling constant) inequality.
Let $\gamma$ be a geodesic in $X$ (that is, in its $1$--skeleton $\xj$) with endpoints $p,q$. Let $A(\gamma)$ denote the set of edges in $\gamma$ whose walls meet $\gamma$ in only one edge (in particular such walls separate $p$ from $q$). Clearly $\dw(p,q)\geqslant |\ag|$. We thus estimate $\dw(p,q)$ by closely studying the set $\ag$. The estimate is first provided locally (in Subsection~\ref{s:local} below) and then we use the local-to-global density principle (Lemma~\ref{l:lgd}) to obtain a global bound.
We begin with an auxiliary lemma. Let $r$ be a relator. Since $r$ is convex in $X$, its intersection with $\gamma$ is an interval $p'q'$, with $p'$ lying closer to $p$ --- see Figure~\ref{f:C}. Consider the set $C$ of edges $e$ in $p'q'$, whose walls $w$ meet $\gamma$ at least twice and, moreover, have the following properties. Let $e'\in w$ (considered as an edge-vertex in the hypergraph $\Gamma_w$) be a closest vertex to $e$ in $\Gamma_w$, among edges of $w$ lying on $\gamma$. In the hypergraph $\Gamma_w$ of the wall $w$, which is a tree by Lemma~\ref{l:hypergraph}, consider the unique geodesic $\gamma_w$ between vertices $e$ and $e'$. We assume that there are at least two distinct relator-vertices on $\gamma_w$, one of them being~$r$.
\begin{figure}
\caption{Lemma~\ref{l:C}.}
\label{f:C}
\end{figure}
\begin{lem} \label{l:C}
In the situation as above we have $|C|\leqslant 2\cdot b_r(\lambda(r)\cdot \mr{girth}\,r)$. \end{lem} \begin{proof} Suppose that $q'$ lies between $e$ and $e'$ (on $\gamma$). Let $e''\neq e$ be the edge-vertex on $\gamma_w$ adjacent to $r$ and, consequently, let $r''$ be the relator-vertex on $\gamma_w$ adjacent to $e''$ --- see Figure~\ref{f:C}. By convexity (Lemma~\ref{l:carrconv}) and the tree-like structure (Lemma~\ref{l:hypergraph}) of the hypercarrier of $w$, containing $e$ and $e'$, we have that $q'\in r''$. Since $r\cap r''$ is convex and contains both $e''$ and $q'$, by the small cancellation condition we have \begin{align*} d(e'',q')+1\leqslant \lambda(r)\cdot \mr{girth}\, r. \end{align*} Therefore, the number of edges $e''$ as above is at most $b_r(\lambda(r)\cdot \mr{girth}\,r)$. The same number bounds the quantity of the corresponding walls. By our assumptions, every such wall contains only one edge in $p'q'$. Thus, the number of edges $e$ as above is at most $b_r(\lambda(r)\cdot \mr{girth}\,r)$. Taking into account the situation when $p'$ lies between $e$ and $e'$ we have \begin{align*}
|C|\leqslant 2\cdot b_r(\lambda(r)\cdot \mr{girth}\,r). \end{align*} \end{proof}
\subsection{Local estimate on \texorpdfstring{$|\ag|$}{|A(gamma)|}.} \label{s:local}
For a local estimate we need to define neighborhoods $N_e$ -- \emph{relator neighborhoods in $\gamma$} -- one for every edge $e$ in $\gamma$, for which the number $|E(N_e)\cap \ag|$ can be bounded from below.
For a given edge $e$ of $\gamma$ we define a corresponding relator neighborhood $N_e$ as follows. If $e\in \ag$ then $N_e=\{ e \}$. Otherwise, we proceed in the way described below.
Since $e$ is not in $\ag$, its wall $w$ crosses $\gamma$ in at least one more edge. In the wall $w$, choose an edge $e'\subseteq \gamma$, with $e'\neq e$, being a closest edge-vertex to $e$ in the hypergraph $\Gamma_w$ of the wall $w$. We consider separately the following two cases, see Subsection~\ref{s:c1} and Subsection~\ref{s:c2} below. \subsubsection{\emph{(Case I.)} The edges $e$ and $e'$ do not lie in a common relator.} \label{s:c1} In the hypergraph $\Gamma_w$ of the wall $w$, which is a tree by Lemma~\ref{l:hypergraph}, consider the geodesic $\gamma_w$ between vertices $e$ and $e'$. Let $r$ be the relator-vertex in $\gamma_w$ adjacent to $e$. Let $e''$ be an edge-vertex in $\gamma_w$ adjacent to $r$. Consequently, let $r''$ be the other relator-vertex in $\gamma_w$ adjacent to $e''$. The intersection of $r$ with $\gamma$ is an interval $p'q'$. Assume, without loss of generality, that $q'$ lies between $e$ and $e'$ --- see Figure~\ref{f:C}.
We define the relator neighborhood $N_e$ as the interval $p'q'=r\cap \gamma$.
\begin{lem} \label{l:Ne} \begin{align*}
|E(N_e)|> (\beta - \lambda(r))\cdot \gi\,r. \end{align*} \end{lem} \begin{proof} Let $xq'$ be the geodesic between $e''$ and $q'$. Let $z$ be the vertex in $e$ closest to $q'$ --- see Figure~\ref{f:C}. By the \bds property we have \begin{align} \label{e:110}
|xq'|+|q'z|+1\geqslant \beta \cdot \gi\,r. \end{align} On the other hand, by the small cancellation condition, we have \begin{align} \label{e:115}
|xq'|+1\leqslant \lambda(r)\cdot \gi\,r. \end{align} Combining (\ref{e:110}) with (\ref{e:115}), we obtain \begin{align*}
|p'q'|&\geqslant |q'z|+1\geqslant \beta \cdot \gi\, r -|xq'| > \beta \cdot \gi\, r - \lambda(r)\cdot \gi\,r \\ & \geqslant (\beta - \lambda(r))\cdot \gi\,r. \end{align*} \end{proof}
We are now ready to state the main result in Case I.
\begin{lem}[Local density of $\ag$ --- Case I]
\label{l:prop2} The number of edges in $N_e$, whose walls separate $p$ from $q$ is estimated as follows: \begin{align*}
|E(N_e)\cap \ag| \geqslant \frac{(1-\delta)\cdot (\beta - \lambda (r))-2K -4\lambda (r)}{\beta - \lambda (r)}|E(N_e)|. \end{align*} \end{lem} \begin{proof}
To estimate $|E(N_e)\cap \ag|$, that is,
the number of edges in $N_e$ that belong to $\ag$, we explore the set of edges $f$ in $N_e$ outside $\ag$. We consider separately the three ways in which an edge $f$ of $N_e$ may fail to belong to $\ag$ --- these are studied in Cases: B, C and D below.
Since $f\notin \ag$ there exists another edge of the same wall $w_f$ in $\gamma$. Let $f'$ be a closest to $f$ such edge-vertex in the hypergraph $\Gamma_{w_f}$. Denote by $\gamma_{w_f}$ the geodesic in $\Gamma_{w_f}$ between $f$ and $f'$. Let $r_f$ be the relator-vertex on $\gamma_{w_f}$ adjacent to $f$.
\noindent \emph{Case B: There is only one relator-vertex between $f$ and $f'$ on $\gamma_{w_f}$, and $r_f=r$.} By convexity of relators, the segment $p'q'$ is geodesic in $r$. Thus, by the \bds property, the cardinality of the set $B$ of such edges $f$ is bounded by \begin{align} \label{e:200}
|B|\leqslant \delta \cdot |E(N_e)|. \end{align}
\begin{figure}
\caption{Lemma~\ref{l:prop2}, Case I(C).}
\label{f:CC}
\end{figure}
\noindent \emph{Case C: There are at least two distinct relator-vertices between $f$ and $f'$ on $\gamma_{w_f}$, and $r_f=r$} --- see Figure~\ref{f:CC}. The cardinality of the set $C$ of such edges $f$ is bounded, by Lemma~\ref{l:C}, as follows: \begin{align} \label{e:205}
|C|\leqslant 2\cdot b_r(\lambda(r)\cdot \mr{girth}\,r). \end{align}
\noindent \emph{Case D: $r_f\neq r$.} Let the set of such edges $f$ be denoted by $D$. Let $p''q'':=r_f \cap \gamma$, with $p''$ closer to $p$.
\begin{figure}
\caption{Lemma~\ref{l:prop2}, the possible Case I(D).}
\label{f:Dpos}
\end{figure}
First consider the subcase when the relator-vertex $r_f$ is adjacent to both $f$ and $f'$. Observe that, since $r_f\neq r$, the edge $f'$ does not belong to $r$. Without loss of generality, we may assume that $q'$ lies (on $\gamma$) between $f$ and $f'$. Since $r_f$ is convex, it follows that the interval on $\gamma$ between $f$ and $q'$ is contained in $r_f$ --- see Figure~\ref{f:Dpos}. Thus, by the small cancellation condition, the number of such edges $f$ is bounded by (taking into account the symmetric situation after exchanging $p'$ and $q'$): \begin{align} \label{e:270} 2\cdot \lambda (r)\cdot \gi\,r. \end{align}
\begin{figure}
\caption{Lemma~\ref{l:prop2}, the impossible Case I(D).}
\label{f:Dimpos}
\end{figure}
The other subcase to consider is when there is another relator-vertex $r_f'\neq r_f$ on $\gamma_{w_f}$, adjacent to an edge-vertex $f''$ that is itself adjacent to $r_f$. The number of edges $f$ for which $r_f$ contains $p'$ or $q'$ is, again by the small cancellation condition --- see Figure~\ref{f:Dpos} --- bounded by: \begin{align} \label{e:275} 2\cdot \lambda (r)\cdot \gi\,r. \end{align} Thus, from now on we assume that $p''q'' \subseteq p'q'$ --- see Figure~\ref{f:Dimpos}. We will show that this is impossible. By the small cancellation condition, we have \begin{align} \label{e:280}
|p''q''|\leqslant \lambda(r) \cdot \gi\,r. \end{align} On the other hand, by Lemma~\ref{l:Ne} we obtain \begin{align} \label{e:285}
|p''q''|\geqslant (\beta -\lambda (r))\cdot \gi\,r. \end{align} Combining (\ref{e:280}) and (\ref{e:285}), we get \begin{align*} \lambda (r) \geqslant \beta - \lambda (r). \end{align*} This however contradicts our choice of $\beta$ and $\ov {\lambda}$ (see Definition~\ref{d:cond}).
Combining quantities (\ref{e:270}) and (\ref{e:275}) above we obtain the following bound on the number of edges in $D$: \begin{align} \label{e:290}
|D|\leqslant 4 \cdot \lambda (r)\cdot \gi\,r. \end{align}
Now we combine all the cases: B, C, and D, to obtain the following bound in Case I, see estimates (\ref{e:200}), (\ref{e:205}), and (\ref{e:290}) above. \begin{align*}
|E(N_e) \setminus A(\gamma)|&\leqslant |B|+|C|+|D|\\ & \leqslant \delta \cdot |E(N_e)|+ 2\cdot b_r(\lambda(r)\cdot \mr{girth}\,r) + 4 \cdot \lambda (r)\cdot \gi\,r. \end{align*}
By lacunarity (see Definition~\ref{d:cond}), we have $b_r(\lambda(r)\cdot \mr{girth}\,r) \leqslant K\cdot \gi\,r$, and by Lemma~\ref{l:Ne}, we get $\gi\,r < \frac{|E(N_e)|}{\beta - \lambda (r)}$. Therefore, \begin{align}
|E(N_e) \setminus A(\gamma)| \leqslant \frac{\delta\cdot (\beta - \lambda (r))+2K +4\lambda (r)}{\beta - \lambda (r)}|E(N_e)|, \end{align} and hence \begin{align}
|E(N_e)\cap \ag| \geqslant \frac{(1-\delta)\cdot (\beta - \lambda (r))-2K -4\lambda (r)}{\beta - \lambda (r)}|E(N_e)|. \end{align} \end{proof}
\subsubsection{\emph{(Case II.)} The edges $e$ and $e'$ lie in a common relator $r$.} \label{s:c2} We may assume (exchanging $e'$ if necessary) that $e'$ is closest to $e$ (in $X$) among edges in $w$ lying in $r \cap \gamma$.
The relator neighborhood $N_e$ is now defined as the interval $p'q'=r\cap \gamma$ --- see Figure~\ref{f:c2}.
\begin{figure}
\caption{Lemma~\ref{l:prop2}, Case II.}
\label{f:c2}
\end{figure}
\begin{lem}[Local density of $\ag$ --- Case II]
\label{l:prop3} The number of edges in $N_e$, whose walls separate $p$ from $q$ is estimated as follows: \begin{align*}
|E(N_e)\cap \ag| \geqslant \frac{(1-\delta)\cdot \beta-2K -4\lambda (r)}{\beta}|E(N_e)|. \end{align*} \end{lem} \begin{proof} We consider again the set of edges $f$ in $E(N_e)\setminus \ag$. As in Case I (Lemma~\ref{l:prop2}), we consider separately three possibilities: B, C, D for such an edge $f$ to fail belonging to $\ag$. The same considerations as in Case I lead to the estimates: \begin{align*}
|B|&\leqslant \delta \cdot|E(N_e)|,\\
|C|&\leqslant 2\cdot b_r(\lambda(r)\cdot \mr{girth}\,r),\\
|D|&\leqslant 4 \cdot \lambda (r)\cdot \gi\,r. \end{align*} By the \bds property, we have \begin{align*}
|E(N_e)|\geqslant \beta \cdot \gi\,r. \end{align*} Combining all the inequalities above we get \begin{align*}
|E(N_e) \setminus A(\gamma)| \leqslant |B| +|C| +|D|\leqslant \frac{\delta\cdot \beta+2K +4\lambda (r)}{\beta}|E(N_e)|, \end{align*} and hence \begin{align*}
|E(N_e)\cap \ag| \geqslant \frac{(1-\delta)\cdot \beta-2K-4\lambda (r)}{\beta}|E(N_e)|. \end{align*} \end{proof}
\subsubsection{Final local estimate.} We are ready to combine all the previous estimates.
\begin{prop}[Local density of $\ag$]
\label{l:local} The number of edges in $N_e$, whose walls separate $p$ from $q$ is estimated as follows: \begin{align*}
|E(N_e)\cap \ag| \geqslant {M}\cdot |E(N_e)|. \end{align*} \end{prop} \begin{proof} If $e\in \ag$ then the assertion is clear. If $e\notin \ag$ then we use Lemma~\ref{l:prop2} or Lemma~\ref{l:prop3}. \end{proof}
\subsection{Linear separation property} \label{s:global} Using the local estimate on the density of $\ag$ (see Proposition~\ref{l:local}) and the local-to-global density principle (Lemma~\ref{l:lgd}) we now estimate the overall density of edges with walls separating $p$ and $q$, thus obtaining the linear separation property.
\begin{tw}[Linear separation property]
\label{p:lsp}
For any two vertices $p,q$ in $X$ we have
\begin{align*}
d(p,q)\geqslant \dw(p,q)\geqslant \frac{M}{2}\cdot
d(p,q),
\end{align*}
that is, the path metric and the wall pseudo-metric are bi-Lipschitz equivalent. \end{tw}
\begin{proof}
The left inequality is clear. Now we prove the right one.
Let $\gamma$ be a geodesic joining $p$ and $q$.
The number $|E(\gamma)|$ of edges in $\gamma$ is equal to $d(p,q)$. On the other hand, the number $|\ag|$ of edges in $\gamma$ whose walls meet $\gamma$ in only one edge is at most $\dw(p,q)$. We will thus bound $|\ag|$ from below.
For any edge $e$ of $\gamma$, let $N_e$ be its relator neighborhood. The collection $\mathcal U=\{N_e\;|\; e\in E(\gamma)\}$ forms a covering family of subpaths of $\gamma$.
By the local estimate (Proposition~\ref{l:local}) we have that
\begin{align*}
\frac{|\ag\cap E(N_e)|}{|E(N_e)|}\geqslant {M}.
\end{align*}
Thus, by the local-to-global density principle (Lemma~\ref{l:lgd}), we have
\begin{align*}
|\ag|\geqslant \frac{M}{2}\cdot|E(\gamma)|,
\end{align*}
which finishes the proof. \end{proof}
\section{Main result: the Haagerup property} \label{s:Haa}
A consequence of the linear separation property (Theorem~\ref{p:lsp}) is the following main result of the paper.
\begin{tw}
\label{t:haag}
Let $G$ be a group acting properly on a simply connected $C'(\ov \lambda)$--complex $X$ satisfying the lacunary walling condition. Then $G$ acts properly on a space with walls. In particular, $G$ has the Haagerup property. \end{tw} \begin{proof} The group $G$ acts properly on the set of vertices $\xz$ of $X$ equipped with the path metric $d(\cdot,\cdot)$. By Proposition~\ref{p:sww}, this action gives rise to an action by automorphisms on the space with walls $(\xz,\mathcal W)$. By the linear separation property (Theorem~\ref{p:lsp}), we conclude that $G$ acts properly on $(\xz,\mathcal W)$. By an observation of Bo\.{z}ejko-Januszkiewicz-Spatzier \cite{BoJaS} and Haglund-Paulin-Valette (cf.\ \cite{ChMV}), the group $G$ has the Haagerup property. \end{proof}
\begin{de}[Lacunary walling of a presentation]\label{d:lwpres}
A graphical presentation $$\langle Y^{(1)}\; | \; (r_i)_{i\geqslant 1} \rangle$$ is said to satisfy the \emph{lacunary walling condition} if the universal cover $X$ of the coned-off space
$Y^{(1)}\cup_{(\phi_i)} \bigcup_i \mr{cone}\,r_i$
satisfies the lacunary walling condition introduced in Definition~\ref{d:cond}. \end{de}
Observe that Main Theorem follows immediately from the above, since the given group $G$ acts properly on the corresponding universal cover, as described in Section~\ref{s:prelim}.
\section{An explicit small cancellation labeling of a subdivided family of graphs} \label{s:app} The aim of this section is to show that for any (infinite) family of graphs there exists a small cancellation labeling of them, after subdividing edges in a non-uniform way (Theorem~\ref{p:scsub}). Furthermore, one may enhance the labeling (up to taking a subsequence) to the one satisfying the lacunarity condition (Theorem~\ref{p:scsubl}). This result allows us (in Section~\ref{s:ex}) to give many -- non-classical -- examples of presentations satisfying the lacunary walling condition.
All graphs considered in this section are oriented. For a (oriented) graph $\Gamma$ and $j\in \mathbb N$, by $\Gamma^j$ we denote the \emph{$j$--subdivision} of $\Gamma$, that is the (oriented) graph obtained by subdividing every edge of $\Gamma$ into $j$ edges, all directed toward the endpoint of the original orientation.
\begin{tw}[Small cancellation labeling of subdivisions] \label{p:scsub} Let $\ov \Gamma = (\Gamma_i)_{i\in \mathbb N}$ be a (possibly infinite) family of finite graphs with degree bounded by $d$. For every $\ov \lambda = (\lambda_i)_{i\in \mathbb N}$ with $\lambda_i\in (0,1)$, there exists a sequence $(j_i)_{i\in \mathbb N}$ of natural numbers with the following property. There exists a labeling of the family of subdivisions $(\Gamma_i^{j_i})_{i\in \mathbb N}$ by $d+2$ letters satisfying the $C'(\ov \lambda)$--small cancellation condition. \end{tw} \begin{proof}
For each $n,k \in \mathbb N$, let $I_{n,k}$ denote the labeling of the segment with all edges directed toward common end-vertex of length $|I_{n,k}|=kn+k^2/2+k/2$ defined as (here $a^i$ denotes $a$ labeling $i$ consecutive edges, similarly for $b^j$; orientation from left to right): \begin{align*} a^nba^nb^2a^nb^3\ldots a^nb^{k-1}a^nb^{k}. \end{align*} Observe that if a labeling of a subsegment $I$ appears in $I_{n,k}$ twice (in different places) then the length of $I$ is at most \begin{align} \label{f:1} (k-2)+n+(k-1), \end{align}
which is the length of the sequence $b^{k-2}a^nb^{k-1}$.
If a labeling of a segment $I$ appears in two labelings $I_{n,k}$ and $I_{n',k'}$, with $n\neq n'$, then its length is at most \begin{align} \label{f:2} \min \{2n+k-1, 2n'+k'-1\}, \end{align}
which corresponds to the sequence $a^nb^{k-1}a^n$ or $a^{n'}b^{k'-1}a^{n'}$. Combining (\ref{f:1}) and (\ref{f:2}) we have that if a subsegment $I$ of $I_{n,k}$ appears in two different places in the family $\{ I_{n,k}\; | \; n\neq n' \; \text{if}\; (n,k)\neq (n',k') \}$ then its length is less than \begin{align} \label{f:3} 2(k+n). \end{align}
The following technical claim follows from an elementary calculation.
\noindent {\bf Claim.} $\forall E,N\in \mathbb N$ there exist $E$ pairwise distinct numbers $n_1,n_2,\ldots, n_E\geqslant N$ and
$E$ numbers $k_1,k_2,\ldots,k_E\geqslant N$ such that $|I_{n_i,k_i}|=|I_{n_j,k_j}|$, for all $i,j$.
\begin{figure}
\caption{Labeling of $\Gamma_i^{j_i}$.}
\label{f:app}
\end{figure}
Now we come to the actual subdivision and labeling of $(\Gamma_i)_{i\in \mathbb N}$. We proceed inductively. Let $c_1,\ldots, c_d$ be letters, pairwise different and different from $a$ and $b$. We label $\Gamma_i^{j_i}$ such that edges in the subdivision adjacent to an original vertex of $\Gamma_i$ are all labeled by different letters among $c_1,\ldots, c_d$ (this is to prevent foldings). Then we use labelings $I_{n,k}$ to label further subdivided edges of each $\Gamma_i$ --- see Figure~\ref{f:app}. Assume that we defined the required numbers $j_1,\ldots,j_M$, and we found the required labeling of the subfamily $(\Gamma_i^{j_i})_{i\leqslant M}$.
We now find $j_{M+1}$ and the required labeling of $(\Gamma_i^{j_i})_{i\leqslant M+1}$, that is, we show an appropriate labeling of $\Gamma_{M+1}^{j_{M+1}}$. Let $N$ be greater than any index $n$ appearing in $I_{n,k}$ used for labeling the family $(\Gamma_i^{j_i})_{i\leqslant M}$, and greater than $8/\lambda_{M+1}$.
Let $E$ be the number of edges in $\Gamma_{M+1}$. Let $n_1,\ldots,n_E$, and $k_1,\ldots,k_E$ be as in Claim (for $E,N$ as specified above). Then we set $j_{M+1}:=|I_{n_i,k_i}|+2$ and we label each of $E$ subdivided edges of $\Gamma_{M+1}$, using one of $I_{n_i,k_i}$ as: \begin{align*} c_ma^{n_i}ba^{n_i}b^2a^{n_i}b^3\ldots a^{n_i}b^{k_i-1}a^{n_i}b^{k_i}c_p. \end{align*}
We check now that $(\Gamma_i^{j_i})_{i\leqslant M+1}$ satisfies the small cancellation condition. Let $p$ be a path in some $\Gamma_i^{j_i}$ such that $p$ appears also elsewhere --- in the same graph or in another one.
Then there is an edge of $\Gamma_i$ such that the intersection of $p$ with the $j_i$--subdivision $e$ of this edge has length at least
$|p|/2$. If for the labeling of $e$ we used the labeling $I_{n,k}$ (with $j_i=|I_{n,k}|+2$) then we have a subsegment
of $I_{n,k}$ of length at least $|p|/2-1$ appearing also in another
place in the family $\{ I_{n,k}\; | \; n\neq n' \; \text{if}\; (n,k)\neq (n',k') \}$. By the formula (\ref{f:3}), this means that \begin{align*}
|p|/2-1 < 2(k+n). \end{align*}
Since $j_i=|I_{n,k}|+2=kn+k^2/2+k/2+2$ we obtain \begin{align} \label{e:6.5} \begin{split}
|p|&< 4(k+n)+2=\frac{4(k+n)+2}{j_i}\cdot j_i=\frac{4(k+n)+2}{kn+k^2/2+k/2+2}\cdot j_i\\&=\frac{8}{k}\cdot\frac{(k^2+nk)/2+k/4}{kn+k^2/2+k/2+2}\cdot j_i < \frac{8}{k}\cdot j_i<\lambda_i \cdot j_i<\lambda_i \cdot \gi\,\Gamma_i^{j_i}, \end{split} \end{align} since $k>8/\lambda_i$. This proves the small cancellation condition for $(\Gamma_i^{j_i})_{i\leqslant M+1}$ and, by induction, finishes the proof of the theorem. \end{proof}
Using the same method one can prove the following stronger result that will be used for providing examples in Section~\ref{s:ex}.
\begin{tw}[Lacunary labeling of subdivisions] \label{p:scsubl} Let $\ov \Gamma = (\Gamma_i)_{i\in \mathbb N}$ be an infinite family of finite graphs with degree bounded by $d$ and girth tending to infinity as $i\to\infty$. For every $\ov \lambda = (\lambda_i)_{i\in \mathbb N}$ with $\lambda_i\in (0,1)$, there is $n_0>0$ and there exists a sequence $(j_i)_{i \geqslant n_0}$ of natural numbers with the following property. There exists a labeling of $(\Gamma_i^{j_i})_{i \geqslant n_0}$ by $d+2$ letters satisfying the $C'(\ov \lambda)$--small cancellation condition and, moreover, satisfying the lacunarity condition of Definition~\ref{d:cond}, for a given $K>0$. \end{tw} \begin{proof} We use the same labeling of subdivisions as in the proof of Theorem~\ref{p:scsub}. We follow the notations of that proof. For simplicity, by $\Gamma$ we denote $\Gamma_i$, for a given $i$, and by $\Gamma'$ we denote its $j_i$--subdivision $\Gamma_i^{j_i}$. By the formula (\ref{e:6.5}) the labeling of $\Gamma'$ satisfies the $C'(\lambda)$--small cancellation condition for $\lambda=\frac{4(k+n)+2}{kn+k^2/2+k/2+2}$. Observe that \begin{align*}
\gi\,\Gamma'=j_i\cdot \gi\, \Gamma \;\;\;\;\;\;\mr{and} \;\;\;\;\;\; b_{\Gamma'}(t)\leqslant j_i \cdot b_{\Gamma}(t/j_i), \end{align*} where $b_{\Gamma},b_{\Gamma'}$ are the functions introduced in Definition~\ref{d:cond}. Therefore, we obtain the following \begin{align*} b_{\Gamma'}(\lambda\cdot \gi\,\Gamma')&\leqslant j_i \cdot b_{\Gamma}(\lambda \cdot j_i \cdot \gi\, \Gamma /j_i)\\&= j_i\cdot \gi\, \Gamma \cdot \frac{b_{\Gamma}(\lambda \cdot \gi\, \Gamma)}{\gi\, \Gamma}=\frac{b_{\Gamma}(\lambda \cdot \gi\, \Gamma)}{\gi\, \Gamma}\cdot \gi\, \Gamma'. \end{align*} For sufficiently large $j_i$, that is, for large $k,n$, we have that $\lambda$ is small. For large $i$ the girth is arbitrarily big. Thus, setting $n_0$ and $j_i$ (for $i\geqslant n_0$) large enough we can obtain $\frac{b_{\Gamma}(\lambda \cdot \gi\, \Gamma)}{\gi\, \Gamma}\leqslant K$. Hence the lacunarity condition is satisfied. \end{proof}
In \cite{OllivierWise} it is shown that for a finite family of graphs, random labeling leads to a small cancellation
labeling of some subdivided graph (up to folding). Their method does not extend to infinite families.
A random labeling of an infinite expander family, used by Gromov~\cite{Gro,AD} in his construction of Gromov's monster, satisfies the so-called \emph{geometric} small cancellation condition (which is of rather different nature). Thus, it is very interesting to know whether a random labeling of some subdivisions of an infinite family of bounded degree graphs does satisfy the $C'(\ov \lambda)$--small cancellation condition.
\section{Examples} \label{s:ex}
In this section, we first give examples of infinite graphical small cancellation presentations that do not reduce to classical small cancellation presentations treated in \cites{W-sc, AO}. Then we show that the construction of $\mathbb{Z}/2$--homology covers from \cite{AGS} produces a sequence of regular graphs satisfying the $(\beta,\delta)$--separation property required by the lacunary walling condition. Such a sequence is a natural candidate for a sequence $(r_i)_{i\in \mathbb N}$ satisfying the hypothesis of Theorem~\ref{t:nota}, and, hence, for the construction of a group which is coarsely embeddable into a Hilbert space but not coarsely amenable.
\noindent {\bf Example 1.} For any natural number $k$, let $\Theta_k$ be the graph being the union of three segments: $I_a,I_b,I_c$ of length $3k$ each, with three start-points identified and three end-points identified --- see Figure~\ref{f:ex1}. We label edges of $I_a,I_b,I_c$ in order as, respectively: \begin{align*} a_1a_2\ldots a_ke_1e_2\ldots e_ka_1'a_2'\ldots a_k',\\ b_1b_2\ldots b_ke_1'e_2'\ldots e_k'b_1'b_2'\ldots b_k',\\ c_1c_2\ldots c_ke_1''e_2''\ldots e_k''c_1'c_2'\ldots c_k'. \end{align*} Using this labeling we define walls. There are four kinds of them, for every $i=1,\ldots,k$ --- see Figure~\ref{f:ex1}: \begin{itemize} \item the wall $w_{e_i}$ consists of edges: $e_i,e_i',e_i''$; \item the wall $w_{a_i}$ consists of edges: $a_i,a_i'$; \item the wall $w_{b_i}$ consists of edges: $b_i,b_i'$; \item the wall $w_{c_i}$ consists of edges: $c_i,c_i'$. \end{itemize} It is easy to observe that the following inequalities hold: \begin{align*} d(e_i,e_i')+1 = d(e_i',e_i'')+1 = d(e_i'',e_i)+1\geqslant 2k+1>\frac{1}{3}\cdot 6k=\frac{1}{3}\cdot \gi\,\Theta_k ,\\ d(a_i,a_i')+1=d(b_i,b_i')+1=d(c_i,c_i')+1=2k =\frac{1}{3}\cdot \gi\,\Theta_k. \end{align*} Moreover, for every geodesic the fraction of edges meeting this geodesic twice is at most $2/3$. Thus, the system of walls satisfies the \bds property for $\beta = 1/3$ and $\delta =2/3$.
Using Theorem~\ref{p:scsubl}, one can now find a sequence $(k_j)_{j\in \mathbb N}$ of natural numbers and an appropriate explicit labeling (different from the one used just for defining walls) of graphs $\Theta_{k_j}$ that defines an infinite graphical small cancellation presentation of a group satisfying the lacunary walling condition.
\begin{figure}
\caption{Example 1.}
\label{f:ex1}
\end{figure}
\noindent {\bf Example 2.} Let $I^n$ be the $1$--skeleton of an $n$--cube. Subdividing each of its edges into $k$ edges, we obtain the graph $I^n_k$ --- see Figure~\ref{f:ex2}. This graph possesses a natural wall structure --- opposite edges in every $4k$--cycle belong to the same wall (edges $e,e',e'',e'''$ form the wall $w$ in Figure~\ref{f:ex2}). For any median graph $r$ (i.e.\ the $1$--skeleton of a CAT(0) cube complex) one can equip each $k$--subdivision of $r$ (every edge subdivided into $k$ edges) with a wall system, applying the above rule to each cube (or simpler -- just to each square) of $r$. Observe that for any two edges $e,e'$ of the same wall, we have \begin{align*} d(e,e')+1\geqslant 2k, \end{align*} which means that the $\beta$--condition from Definition~\ref{d:sep} is satisfied for $\beta = 1/2$. Furthermore, one easily sees that the $\delta$--condition from Definition~\ref{d:sep} is satisfied for $\delta = 1/3$.
\begin{figure}
\caption{Example 2.}
\label{f:ex2}
\end{figure}
Using Theorem~\ref{p:scsubl}, any sequence of median graphs $(r_i)_{i\in \mathbb N}$ may be transformed (up to taking a subsequence) into a sequence of their subdivisions $(r_i')_{i\in \mathbb N}$ with a small cancellation labeling, satisfying the lacunary walling condition. The resulting group acts then properly on the corresponding space with walls and hence, possesses the Haagerup property.
\noindent {\bf Example 3.} We recall the construction of the box space which is coarsely embeddable into a Hilbert space but not coarsely amenable~\cite{AGS}. Let $\mathbb F_m$ be the free group of rank $m\geqslant 2$. Arzhantseva-Guentner-\v{S}pakula's box space is a regular graph $$\Theta= \bigsqcup^{\infty}_{n=1}\Theta_n:=\bigsqcup^{\infty}_{n=1} \mathbb{F}_m/((\mathbb{F}_m^{(2)})^{(2)})^{\ldots (2)},$$ which is the disjoint union of the Cayley graphs $\Theta_n$ of quotients of $\mathbb F_m$ by the subgroups generated iteratively, over $n\geqslant 1$, by the squares of the group elements. That is, $\mathbb{F}_m^{(2)}$ is the normal subgroup of $\mathbb{F}_m$ generated by all the squares of elements of $\mathbb{F}_m$, then $\displaystyle{(\mathbb{F}_m^{(2)})^{(2)}}$ is such a subgroup of $\mathbb{F}_m^{(2)}$, and so on. The corresponding Cayley graphs are viewed with respect to the canonical image of the free generators of $\mathbb F_m$.
The graph $\Theta_n$ is the $\mathbb{Z}/2$--homology cover of $\Theta_{n-1}$ (with $\Theta_0$ being the bouquet of $m$ circuits), that is, a regular cover of $\Theta_{n-1}$ whose group of deck transformations is the ${\rm rank}(\pi_1(\Theta_{n-1}))$--fold direct sum of copies of $\mathbb{Z}/2\mathbb{Z}$.
The graph $\Theta$ is a graph with walls~\cite{AGS}*{Section 3}. For each edge $e\in E(\Theta_{n-1})$, with the covering map $\pi_n\colon \Theta_n\to \Theta_{n-1},$
the wall $w_e$ is defined by $w_e:=\pi^{-1}_n(e)\subseteq E(\Theta_n)$ and $\{ w_e \mid e \in E(\Theta_{n-1})\}$ provides the wall structure on $\Theta_n$, for all $n\geqslant 1$.
The following general observation shows that this wall structure on $\Theta$ does satisfy the $\beta$--condition from Definition~\ref{d:sep}.
\begin{lem}\label{lem:bbox} Let $\pi\colon \wt \Gamma \to \Gamma$ be a $\mathbb Z/2$--homology cover endowed with the wall structure as above. Then the $\beta$--condition holds, with $\beta=1/2.$ \end{lem} \begin{proof} Observe that $\gi \, \wt \Gamma = 2 \cdot \gi \, \Gamma$. Indeed, let $\gamma$ be a cycle in $\Gamma$ of length $\gi \, \Gamma$. We remove an arbitrary edge from $\gamma$, include the remaining path into a spanning tree of $\Gamma$, and take the $\mathbb Z/2$--homology cover of $\Gamma$ with respect to this spanning tree. The resulting cover coincides with $\wt \Gamma$, as it does not depend on the choice of a spanning tree, and we have $\gi \, \wt \Gamma = 2 \cdot \gi \, \Gamma$.
Let $e,e'$ belong to a common wall in $\wt \Gamma$. Then $\pi(e)=\pi(e')$, by our definition of walls~\cite{AGS}. Let $\gamma \subseteq \wt \Gamma$ be a geodesic between $e$ and $e'$, with endpoints $v\in e$ and $v'\in e'$. Let $w\neq v$ be another vertex of $e$. If $\pi(v)=\pi(v')$ then $\pi(\gamma)$ contains a closed path in $\Gamma$. If $\pi(w)=\pi(v')$ then there is a closed path in $\Gamma$ of length at most $d(v,v')+1$. In both cases we obtain \begin{align*} d(e,e')+1=d(v,v')+1\geqslant \gi \, \Gamma = (1/2)\gi \, \wt \Gamma. \end{align*} \end{proof}
In order to guarantee the $\delta$--condition from Definition~\ref{d:sep}, we take the $\mathbb{Z}/2$--homology cover of an appropriately chosen sequence of graphs (instead of the above tower of successive covers starting with the bouquet of $m$ circuits).
Let $\Lambda = (\Lambda_i)_{i\in \mathbb N}$ be an infinite family of finite 2-connected graphs, with $\gi\, \Lambda_i\to\infty$ as $i\to\infty$, and such that $\di\, \Lambda_i/\gi\, \Lambda_i\leqslant M$ for some $M>0$, uniformly over $i\in \mathbb N$. Let $\wt\Lambda = (\wt\Lambda_i)_{i\in \mathbb N}$ be the corresponding infinite family of the $\mathbb Z/2$--homology covers: $\wt\Lambda_i$ is the $\mathbb Z/2$--homology cover of $\Lambda_i$, for each $i\in \mathbb N$. Observe that $\gi\, \wt\Lambda_i\to\infty$ as $i\to\infty$. We endow $\wt\Lambda$ with our wall structure as above.
\begin{lem}\label{lem:dbox} Let $\gamma$ be a geodesic in $\wt\Lambda$. Then there exists $\delta\in (0,1)$ such that
the number of edges in $\gamma$ whose walls meet $\gamma$ at least twice is at most $\delta |\gamma|$. \end{lem}
\begin{proof}
The image of $\gamma$ under the covering projection $\pi\colon \wt\Lambda \to\Lambda$ is a so-called admissible path $\pi(\gamma)$ in $\Lambda$,
see~\cite{AGS}*{Definition 3.5}. We have $|\gamma|=|\pi(\gamma)|$ for the edge-length, see~\cite{AGS}*{Lemma 3.6 and Proposition 3.8}, and the path $\pi(\gamma)$
either does not contain any loop, or else every edge on any loop it contains is traversed exactly once, see~\cite{AGS}*{Lemma 3.12}.
Note that $\pi(\gamma)$ has no backtracks since $\gamma$ is a geodesic, see~\cite{AGS}*{Remark 3.9}.
Therefore, if it does not contain any loop, then no walls meet $\gamma$ at least twice, and the $\delta$--condition
is satisfied for all $\delta\in (0,1)$.
Suppose now that $\pi(\gamma)$ contains $L\geqslant 1$ loops. Then $\pi(\gamma)$ decomposes
into $L$ loops $c_1,\ldots, c_L$ traversed exactly once, $l$ geodesics $t_1,\ldots, t_{l}$ which are traversed twice (towards and back from each loop, as well as between such ``flowers''), and
finitely many geodesics $s_1, s_2, \ldots$, traversed exactly once, joining several trees formed by $t_j$'s, see Figure~\ref{f:adm}.
\begin{figure}
\caption{Decomposition of a shortest admissible path $\pi(\gamma)$ in $\Lambda$.}
\label{f:adm}
\end{figure}
For all $k$ and $j$, we have $|c_k|\geqslant \gi\, \Lambda_i$ and $|t_j|\leqslant \di\, \Lambda_i$, for some $i$.
Using induction on $L$, taking into account the tree structures formed by $t_j$'s, one checks that $l\leqslant 2L$. Thus, we have
$$
|\gamma|/|\gamma|_{\mathcal W}=\frac{\sum_k|c_k|+2\sum_j|t_j| +\sum_q|s_q|}{\sum_k|c_k|+\sum_q|s_q|}\leqslant 2+ \frac{2(2L)\di\,\Lambda_i}{L\,\gi\,\Lambda_i}\leqslant 2+4M,
$$
where $|\gamma|_{\mathcal W}$ is the length of $\gamma$ with respect to the wall pseudo-metric,
that is, the number of edges in $\gamma$ whose walls meet $\gamma$ exactly once~\cite{AGS}*{Proposition 3.10}. Therefore, $\wt\Lambda$ satisfies the $\delta$--condition with $\delta=1-\frac{1}{2+4M}=\frac{1+4M}{2+4M}$. \end{proof}
We have just checked, using the results of~\cite{AGS}, that the graph metric and the wall pseudo-metric on $\wt\Lambda$ are bi-Lipschitz equivalent. This is the main result of~\cite{Ostrov} (also giving $\delta$, see inequality (3) in that paper), where such a choice of $\Lambda$ was explored, in the context of metric embeddings into the Banach space $\ell_1$.
The existence of infinite families $\Lambda = (\Lambda_i)_{i\in \mathbb N}$ as above, consisting of finite regular graphs of vertex degree at least 3, is well-known. For instance, the famous Ramanujan sequence of Lubotzky-Phillips-Sarnak provides such a family of finite $(p+1)$--regular graphs, where $p$ is an odd prime.
Thus, $\wt\Lambda = (\wt\Lambda_i)_{i\in \mathbb N}$ is coarsely embeddable into a Hilbert space, has a bi-Lipschitz embedding into the Banach space $\ell_1$ (as every wall space has an isometric embedding into $\ell_1$),
but is not coarsely amenable since $\gi\, \wt\Lambda_i\to\infty$ as $i\to\infty$ and the graphs are regular of vertex degree at least 3~\cite{Willett}.
By Lemmas~\ref{lem:bbox} and~\ref{lem:dbox}, $\wt\Lambda = (\wt\Lambda_i)_{i\in \mathbb N}$ satisfies the $(\beta,\delta)$--condition required
by the lacunary walling condition.
\section{Discussion on the optimality} \label{optim}
In this section, we argue that our lacunary walling condition is essential for obtaining the main result --- linear separation property for small cancellation complexes; see Theorem~\ref{t:linear} in Introduction and Theorem~\ref{p:lsp}. We focus on the \bds property and on the lacunarity (together with the compatibility) from Definition~\ref{d:cond}. We provide examples showing that if any of them fails then the wall pseudo-metric may be non-comparable with the $1$--skeleton metric.
\subsection{\texorpdfstring{\bds}{(beta, delta)--separation} property.} \label{s:bds}
The $\beta$--condition from Definition~\ref{d:sep} is essential for the definition of walls in $X$; see Section~\ref{s:walls}. Without it one cannot usually extend the walls in relators to the whole $X$. It corresponds to the \emph{$\frac{\pi}{2}$--strong separation property} from \cite[Section 5.k]{W-qch}, and is essential also in the finitely presented case. For the rest of this subsection we therefore focus on the $\delta$--condition from Definition~\ref{d:sep}. This condition is required in the infinitely presented case and may be easily omitted in the finitely presented one.
Consider the $(4k+2)$--gon $C_k=(v_1,v_2,\ldots,v_{4k+2},v_{4k+3}=v_1)$. Assume that for every $i\in \{1,2,\ldots,k\} \cup \{2k+2,2k+3,\ldots,3k+1 \}$ the edges $v_iv_{i+1}$ and $v_{i+k+1}v_{i+k+2}$ are dual to a common wall $w_i$. Moreover, let the edges $v_{k+1}v_{k+2}$ and $v_{3k+2}v_{3k+3}$ be dual to a common wall $w_{k+1}$ --- see Figure \ref{f:14}. Observe that the system of walls satisfies the $\beta$--condition from Definition~\ref{d:sep}, with $\beta$ approaching $1/4$ from above as $k$ grows. Note, however, that $d(v_1,v_{2k+2})=2k+1$ while $d_W(v_1,v_{2k+2})=1$, where $d_W$ is the wall pseudo-metric given by walls $w_i$, that is, the number of walls separating given vertices. This means that an infinite family $\{ C_{k_i} \}_{i\in \mathbb N}$ does not satisfy the $\delta$--condition for any $\delta >0$.
\begin{figure}
\caption{Failure of the $\delta$--condition.}
\label{f:14}
\end{figure}
One can easily construct a (classical) infinite small cancellation presentation with relators being $(4k+2)$--gons as above. When equipped with walls induced by the above walls in relators, the Cayley complex of the group $G$ defined by such a presentation becomes a space with walls. Yet $X$ contains arbitrarily long geodesics (contained, moreover, in relators) separated by one wall. Consequently, the group $G$ does not act properly on the obtained space with walls. This shows that both the $\beta$--condition and the $\delta$--condition are necessary in our approach.
Note that the system of walls as above can satisfy (choosing an appropriate small cancellation labeling) the \emph{$\frac{\pi}{2}$--strong separation property} from \cite[Section 5.k]{W-qch}. This means that the pathologies as just described are characteristic for infinite presentations, and not for finite ones.
\subsection{Lacunarity/Compatibility} \label{s:lacun}
Consider a relator $r$ as in Figure~\ref{f:lacun1}. Here, walls correspond to the sets of edges: $$\{ a_i,a_i',a_i'',a_i'''\}, \{ b_i,b_i',b_i'',b_i'''\},\hbox{ for } i=1,2,3,4, \hbox{ and } \{c,c' \},\{d,d'\},\{e,e'\},\{f,f'\}.$$ Observe that $\gi \, r = 16 $, and that the wall system satisfies the \bds property for $\beta=1/4$ and some $\delta <1$.
\begin{figure}
\caption{Relator $r$.}
\label{f:lacun1}
\end{figure}
For $k=3,4,\ldots,$ we now construct a complex $X_k$ satisfying the $C'(1/8)$--small cancellation condition using the relator $r$ --- see Figure~\ref{f:lacun2}. The complex $X_k$ is the union of $k$ copies of $r$, such that consecutive copies share a common tree of valence $4$ and diameter $3$, consisting of edges labeled by $a_i'$ or $b_i'$ in $r$ (like the tree $\bf T$ in Figure~\ref{f:lacun1}).
\begin{figure}
\caption{The complex $X_k$, for $k=3$.}
\label{f:lacun2}
\end{figure}
We equip the complex $X_k$ with the wall system $\mathcal W$ as in Subsection~\ref{s:walls}, by extending the walls in $r$. Note that $X_k$ does not satisfy the lacunarity/compatibility conditions from Definition~\ref{d:cond}. By lacunarity we have to have (using the notation from Definition~\ref{d:cond}): \begin{align*} b_{r}\left( \frac{1}{8}\cdot 16\right) = 4 \leqslant K \cdot 16, \end{align*} which implies $K\geqslant 1/4$. For such $K$ however there is no way to satisfy the compatibility condition for any $M> 0$.
Consider $X_3$, and a geodesic with endpoints $p,q$, as in Figure~\ref{f:lacun2}. Observe that no wall in $\mathcal W$, corresponding to $a_i,b_i$ separates $p$ from $q$. Therefore we have $\dw (p,q) = 8$ and $d(p,q)=24$, in $X_3$. Similarly, for any $k$, in $X_k$ one may find a geodesic (corresponding to $pq$) of length $8k$ whose endpoints are separated by $8$ walls from $\mathcal W$. This shows that in the corresponding infinite union $X_{\infty}$ the wall pseudo-metric is not proper.
Similarly, one can construct other examples for arbitrary small cancellation constant $\lambda>0$. The point here is that the number of edges in the tree $\bf T$ may be exponentially large compared to its diameter. Then the corresponding walls may ``exhaust" most of the edges in a given geodesic.
Observe that in the above example we use only finitely many (precisely, one) types of relators. This corresponds to the case of finitely presented groups. Wise \cite[Section 5.k]{W-qch} uses a notion of \emph{$\frac{\pi}{2}$--strong separation property} for relators, to obtain an analogous linear separation for finitely presented graphical small cancellation groups. In our approach, the $\frac{\pi}{2}$--strong separation property is replaced by the lacunarity condition. Neither of these properties implies the other. We decided to use the lacunarity as a condition that better suits our -- quantitative -- approach to spaces with walls.
\begin{bibdiv} \begin{biblist}
\bib{AW}{article}{
AUTHOR = {Akemann, Ch. A.},
AUTHOR = {Walter, M. E.},
TITLE = {Unbounded negative definite functions},
JOURNAL = {Canad. J. Math.},
FJOURNAL = {Canadian Journal of Mathematics. Journal Canadien de Math\'ematiques},
VOLUME = {33},
YEAR = {1981},
NUMBER = {4},
PAGES = {862--871},
ISSN = {0008-414X},
CODEN = {CJMAAB},
MRCLASS = {43A35 (22D10)},
MRNUMBER = {634144 (83b:43009)}, MRREVIEWER = {Pierre Eymard},
URL = {http://dx.doi.org/10.4153/CJM-1981-067-9}, }
\bib{claire}{article}{
author={Anantharaman-Delaroche, C.},
title={Amenability and exactness for dynamical systems and their $C^\ast$-algebras},
journal={Trans. Amer. Math. Soc.},
volume={354},
date={2002},
number={10},
pages={4153--4178 (electronic)},
}
\bib{AD}{article}{
author={Arzhantseva, G.},
author={Delzant, T.}, TITLE = {Examples of random groups}, status = {preprint}, eprint = {http://www.mat.univie.ac.at/~arjantseva/publicationsGA.html},
YEAR = {2008}}
\bib{AGS}{article}{
author={Arzhantseva, G.},
author={Guentner, E.},
author={{\v{S}}pakula, J.},
title={Coarse non-amenability and coarse embeddings},
journal={Geom. Funct. Anal.},
volume={22},
date={2012},
number={1},
pages={22--36},
}
\bib{AO}{article}{
author={Arzhantseva, G.},
author={Osajda, D.}, TITLE = {Infinitely presented small cancellation groups have the Haagerup property},
status = {preprint},
eprint = { arXiv:1212.4866},
YEAR = {2012}}
\bib{BR}{article}{ AUTHOR = {Bergelson, V.}, AUTHOR = {Rosenblatt, J.},
TITLE = {Mixing actions of groups},
JOURNAL = {Illinois J. Math.},
FJOURNAL = {Illinois Journal of Mathematics},
VOLUME = {32},
YEAR = {1988},
NUMBER = {1},
PAGES = {65--80},
ISSN = {0019-2082},
CODEN = {IJMTAW},
MRCLASS = {28D15 (22D10)},
MRREVIEWER = {Arlan Ramsay},
URL = {http://projecteuclid.org/getRecord?id=euclid.ijm/1255989229}, }
\bib{BoJaS}{article}{
author={Bo{\.z}ejko, M.},
author={Januszkiewicz, T.},
author={Spatzier, R.},
title={Infinite Coxeter groups do not have Kazhdan's property},
journal={J. Operator Theory},
volume={19},
date={1988},
number={1},
pages={63--67},
issn={0379-4024},
}
\bib{ChCJJV}{book}{
AUTHOR = {Cherix, P.-A.},
AUTHOR = {Cowling, M.},
AUTHOR = {Jolissaint, P.},
AUTHOR = {Julg, P.},
AUTHOR = {Valette, A.},
TITLE = {Groups with the {H}aagerup property (Gromov's a-T-menability)},
SERIES = {Progress in Mathematics},
VOLUME = {197},
PUBLISHER = {Birkh\"auser Verlag},
ADDRESS = {Basel},
YEAR = {2001},
PAGES = {viii+126},
ISBN = {3-7643-6598-6},
MRCLASS = {22D10 (22-02 22D25 22E30 43A07 46Lxx)},
MRREVIEWER = {Tullio G. Ceccherini-Silberstein},
URL = {http://dx.doi.org/10.1007/978-3-0348-8237-8}, }
\bib{ChMV}{article}{
author={Cherix, P.-A.},
author={Martin, F.},
author={Valette, A.},
title={Spaces with measured walls, the Haagerup property and property
(T)},
journal={Ergodic Theory Dynam. Systems},
volume={24},
date={2004},
number={6},
pages={1895--1908},
issn={0143-3857},
}
\bib{Cho}{article}{
AUTHOR = {Choda, M.},
TITLE = {Group factors of the {H}aagerup type},
JOURNAL = {Proc. Japan Acad. Ser. A Math. Sci.},
FJOURNAL = {Japan Academy. Proceedings. Series A. Mathematical Sciences},
VOLUME = {59},
YEAR = {1983},
NUMBER = {5},
PAGES = {174--177},
ISSN = {0386-2194},
CODEN = {PJAADT},
MRCLASS = {46L35},
MRREVIEWER = {Vaughan Jones},
URL = {http://projecteuclid.org/getRecord?id=euclid.pja/1195515589}, }
\bib{Cor}{article}{
author={Cornulier, Y.}, TITLE = {Group actions with commensurated subsets, wallings and cubings},
status = {preprint},
eprint = { arXiv:1302.5982 },
YEAR = {2013}}
\bib{Gro88}{incollection} {
AUTHOR = {Gromov, M.},
TITLE = {Rigid transformations groups},
BOOKTITLE = {G\'eom\'etrie diff\'erentielle ({P}aris, 1986)},
SERIES = {Travaux en Cours},
VOLUME = {33},
PAGES = {65--139},
PUBLISHER = {Hermann},
ADDRESS = {Paris},
YEAR = {1988},
MRCLASS = {58H15 (22E40 53C10 57R15 58G30)},
MRREVIEWER = {Christopher W. Stark}, }
\bib{Gro93}{incollection} {
AUTHOR = {Gromov, M.},
TITLE = {Asymptotic invariants of infinite groups},
BOOKTITLE = {Geometric group theory, {V}ol.\ 2 ({S}ussex, 1991)},
SERIES = {London Math. Soc. Lecture Note Ser.},
VOLUME = {182},
PAGES = {1--295},
PUBLISHER = {Cambridge Univ. Press},
ADDRESS = {Cambridge},
YEAR = {1993},
MRCLASS = {20F32 (57M07)},
}
\bib{Gro}{article}{
author={Gromov, M.},
title={Random walk in random groups},
journal={Geom. Funct. Anal.},
volume={13},
date={2003},
number={1},
pages={73--146},
issn={1016-443X},
}
\bib{Haa}{article} {
AUTHOR = {Haagerup, U.},
TITLE = {An example of a nonnuclear {$C^{\ast} $}-algebra, which has
the metric approximation property},
JOURNAL = {Invent. Math.},
FJOURNAL = {Inventiones Mathematicae},
VOLUME = {50},
YEAR = {1978/79},
NUMBER = {3},
PAGES = {279--293},
ISSN = {0020-9910},
CODEN = {INVMBH},
MRCLASS = {46L05 (22D35 43A35)},
MRNUMBER = {520930 (80j:46094)}, MRREVIEWER = {Ole A. Nielsen},
URL = {http://dx.doi.org/10.1007/BF01410082}, }
\bib{HP}{article}{
author={Haglund, F.},
author={Paulin, F.},
title={Simplicit\'e de groupes d'automorphismes d'espaces \`a courbure
n\'egative},
conference={
title={The Epstein birthday schrift},
},
book={
series={Geom. Topol. Monogr.},
volume={1},
publisher={Geom. Topol. Publ., Coventry},
},
date={1998},
pages={181--248},
}
\bib{HK}{article}{
AUTHOR = {Higson, N.},
AUTHOR = {Kasparov, G.},
TITLE = {Operator {$K$}-theory for groups which act properly and
isometrically on {H}ilbert space},
JOURNAL = {Electron. Res. Announc. Amer. Math. Soc.},
FJOURNAL = {Electronic Research Announcements of the American Mathematical
Society},
VOLUME = {3},
YEAR = {1997},
PAGES = {131--142},
ISSN = {1079-6762},
MRCLASS = {46L80 (19K56)},
MRNUMBER = {1487204 (99e:46090)},
URL = {http://dx.doi.org/10.1090/S1079-6762-97-00038-3}, }
\bib{HLS}{article}{
AUTHOR = {Higson, N.},
AUTHOR = {Lafforgue, V.},
AUTHOR = {Skandalis, G.},
TITLE = {Counterexamples to the {B}aum-{C}onnes conjecture},
JOURNAL = {Geom. Funct. Anal.},
FJOURNAL = {Geometric and Functional Analysis},
VOLUME = {12},
YEAR = {2002},
NUMBER = {2},
PAGES = {330--354}, }
\bib{LS}{book}{
author={Lyndon, R. C.},
author={Schupp, P. E.},
title={Combinatorial group theory},
series={Classics in Mathematics},
note={Reprint of the 1977 edition},
publisher={Springer-Verlag},
place={Berlin},
date={2001},
pages={xiv+339},
isbn={3-540-41158-5},
}
\bib{MeyerNest}{article}{
author={Meyer, R.},
author={Nest, R.},
title={The Baum-Connes conjecture via localisation of categories},
journal={Topology},
volume={45},
date={2006},
number={2},
pages={209--259},
}
\bib{MislinValette}{book}{
AUTHOR = {Mislin, G.},
AUTHOR = {Valette, A.},
TITLE = {Proper group actions and the {B}aum-{C}onnes conjecture},
SERIES = {Advanced Courses in Mathematics. CRM Barcelona},
PUBLISHER = {Birkh\"auser Verlag},
ADDRESS = {Basel},
YEAR = {2003},
PAGES = {viii+131},
ISBN = {3-7643-0408-1},
MRCLASS = {19K35 (46L80 55N20 58J22)},
URL = {http://dx.doi.org/10.1007/978-3-0348-8089-3}, }
\bib{NowakYu}{book}{
author={Nowak, P. W.},
author={Yu, G.},
title={Large scale geometry},
series={EMS Textbooks in Mathematics},
publisher={European Mathematical Society (EMS), Z\"urich},
date={2012},
pages={xiv+189},
}
\bib{Oll}{article}{
author={Ollivier, Y.},
title={On a small cancellation theorem of Gromov},
journal={Bull. Belg. Math. Soc. Simon Stevin},
volume={13},
date={2006},
number={1},
pages={75--89},
}
\bib{OllivierWise}{article}{
AUTHOR = {Ollivier, Y.},
author={Wise, D. T.},
TITLE = {Kazhdan groups with infinite outer automorphism group},
JOURNAL = {Trans. Amer. Math. Soc.},
FJOURNAL = {Transactions of the American Mathematical Society},
VOLUME = {359},
YEAR = {2007},
NUMBER = {5},
PAGES = {1959--1976 (electronic)},
ISSN = {0002-9947},
CODEN = {TAMTAM},
}
\bib{Ostrov}{article}{
author={Ostrovskii, M. I.},
title={Low-distortion embeddings of graphs with large girth},
journal={J. Funct. Anal.},
volume={262},
date={2012},
number={8},
pages={3548--3555},
}
\bib{RS}{article}{
author={Rips, E.},
author={Segev, Y.},
title={Torsion-free group without unique product property},
journal={J. Algebra},
volume={108},
date={1987},
number={1},
pages={116--126},
issn={0021-8693},
}
\bib{Silberman}{article}{
author={Silberman, L.},
title={Addendum to: ``Random walk in random groups'' [Geom.\ Funct.\
Anal.\ {\bf 13} (2003), no.\ 1, 73--146; MR1978492] by M. Gromov},
journal={Geom. Funct. Anal.},
volume={13},
date={2003},
number={1},
pages={147--177},
}
\bib{Willett}{article}{
author={Willett, R.},
title={Property A and graphs with large girth},
journal={J. Topol. Anal.},
volume={3},
date={2011},
number={3},
pages={377--384},
issn={1793-5253},
}
\bib{W-sc}{article}{
author={Wise, D. T.},
title={Cubulating small cancellation groups},
journal={Geom. Funct. Anal.},
volume={14},
date={2004},
number={1},
pages={150--214},
issn={1016-443X},
}
\bib{W-qch}{article}{
title ={The structure of groups with quasiconvex hierarchy},
author ={Wise, D. T.},
status={preprint},
eprint ={https://docs.google.com/open?id=0B45cNx80t5-2T0twUDFxVXRnQnc},
date={2011} }
\bib{W-ln}{book}{
author={Wise, D. T.},
title={From riches to raags: 3-manifolds, right-angled Artin groups, and
cubical geometry},
series={CBMS Regional Conference Series in Mathematics},
volume={117},
publisher={Published for the Conference Board of the Mathematical
Sciences, Washington, DC},
date={2012},
pages={xiv+141},
isbn={978-0-8218-8800-1},
}
\bib{Yu}{article}{
author={Yu, G.},
title={The coarse Baum-Connes conjecture for spaces which admit a uniform
embedding into Hilbert space},
journal={Invent. Math.},
volume={139},
date={2000},
number={1},
pages={201--240},
issn={0020-9910},
} \end{biblist} \end{bibdiv}
\end{document} | arXiv | {
"id": "1404.6807.tex",
"language_detection_score": 0.7397796511650085,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Incentive-compatible public transportation fares with random inspection}
\begin{abstract}
We consider the problem of designing prices for public transport where payment enforcement is done through random inspection of passengers' tickets as opposed to physically blocking their access. Passengers are fully strategic such that they may choose different routes or buy partial tickets in their optimizing decision. We derive expressions for the prices that make every passenger choose to buy the full ticket. Using travel and pricing data from the Washington DC metro, we show that a switch to a random inspection method for ticketing while keeping current prices could lead to more than 59\% of revenue loss due to fare evasion, while adjusting prices to take incentives into consideration would reduce that loss to less than 20\%, without any increase in prices. \end{abstract}
\section{Introduction}
In many, if not most, cities in the world, passengers using public transportation have their access to bus and rail systems controlled by barriers that can only be passed by making cash payments, inserting a ticket or tapping a card or a cellphone. In most cities in Germany, the Netherlands, Italy, among others, a proof-of-payment system is used instead. In these systems, the public have unrestricted access to buses and trains, but are expected to buy the tickets that correspond to their travel. The enforcement of these purchases is done by inspectors, who perform random checks of passengers during their trips. If they are not carrying the correct ticket for their trip, they receive a fine.
Transport networks that use proof-of-payment systems require a much simpler infrastructure at stations and buses: subway stations can be directly connected to the streets, trams can stop at arbitrary locations, and bus drivers do not have to handle passenger payments or ticketing. The downside, however, is that it is much easier for passengers to ride without paying. This, of course, can have a potentially catastrophic impact on revenue.
In this paper, we consider the problem of setting prices for proof-of-payment public transportation systems when passengers act as self-interested strategic agents, choosing the tickets they buy and the routes they take to minimize the expected cost of their trip. We derive the expressions for the incentive-compatible prices, i.e., the prices such that passengers will choose to buy the correct ticket for their trip, both for a stylized network on a line and for an arbitrary network.
Given these expressions, we use real-life network and passenger travel data for the Washington D.C. metro to derive the incentive-compatible prices that result from our model. These show that, for the most part, the dollar values that result from the direct application of our expressions seem reasonably applicable. We then simulate the scenario in which the Washington D.C. metro switches to a proof-of-payment system without changing the pricing structure. These indicate that fare evasion by strategic passengers could lead to a revenue loss of \textbf{more than 59\%}. An adjustment to the D.C. prices to make them incentive compatible, despite making them weakly lower across the board, would reduce the revenue loss to \textbf{less than 20\%} instead, indicating that incentive considerations can have a substantial impact.
\section{Related literature}
Our paper contributes to a voluminous literature on payment evasion: shoplifting \citep{yaniv_shoplifting_2009,perlman_reducing_2014}, digital piracy \citep{chellappa_managing_2005}, tax evasion \citep{slemrod_cheating_2007}, parking violations \citep{fisman_corruption_2007}, and public transportation fare evasion \citep{boyd_fare_1989,kooreman_fare_1993}, and monitoring and punishment. \cite{becker_crime_1968} shows that the maximal fine is optimal because it does not distort incentives. Subsequent literature suggests, however, that the maximal fine may not be optimal under different extensions \citep{polinsky_optimal_1979,polinsky_optimal_1984,polinsky_economic_2000,malik_avoidance_1990}. Our analysis also relates to the operations research literature on fare inspection in the network \citep{yin_trusts_2012}, network security games \citep{jain_security_2013}, and fare collection systems \citep{tirachini_multimodal_2012}.
\cite{correa_fare_2017} is the closest to our paper. In it, passengers act strategically, being able to buy or not a ticket based on the expected value of each strategy, and the transport agency optimizes the location of the inspectors. Their focus is on producing and evaluating the complexity of algorithms for determining the optimal positioning of inspectors in the network. They also consider an extension in which the prices take into account the possibility that a passenger can choose not to buy the ticket. Among other differences, in their model the probability of a passenger being inspected is independent of the number of passengers who choose to travel through the same segment.
\section{Baseline model\label{section:baseline}}
There is a positive mass of passengers residing in a line $[0,1]$. Denote by $\mathcal{I}$ the set of passengers. Passengers have travel demand density $d\left(x,y\right)$, where $x$ is the origin and $y$ the destination. The function $d$ is continuously differentiable, and we have $d\left(x,y\right)>0$ for any $x,y\in[0,1]$ and $x\neq y$. As an illustration, $\int_0^1 d\left(0,x\right)dx$ is the mass of passengers who want to travel from point 0.
An important measure, for our purposes, is the density function that tells the mass of passengers that want to pass through a point $a$ (that is, passengers who start at or before $a$ and leave at or after $a$). This function, $d^{pass}$, can therefore be defined by: \[ d^{pass}\left(a\right) = \int_0^a \int_a^1 d\left(x,y\right)dy\ dx + \int_a^1 \int_0^a d\left(x,y\right)dy\ dx\] The first part consists of all passengers who depart from some point in $[0,a]$ and go to some point in $[a,1]$. The second part consists of all passengers who depart from some point in $[a,1]$ and go to some point in $[0,a]$.
There is a \textbf{pricing scheme} $p$ for every pair of origin and destination. Any tickets can be purchased at every point of departure, at a price $p\left(x,y\right)$, where $x$ is the origin and $y$ the destination.\footnote{This includes the possibility that tickets can be purchased via mobile phones or in the train.}
A passenger who demands to make a trip has to make the full trip. She is free, however, to choose any path between her origin and destination, and to buy any set of tickets, if any, she wants for that trip. Hence, her strategy consists of the combination of (i) a path from her origin to her destination, and (ii) a set of tickets (if any). Formally, a \textbf{strategy} for passenger $i \in \mathcal{I}$, who resides at point $x_i$ and has as destination point $y_i$, is a pair $s_i=(\sigma_i,\tau_i)$, where $\sigma_i=\left(x_i,\sigma_i^0,\sigma_i^1,\ldots,y_i\right)$ is the \textbf{path} that the passenger follows: starting from $x_i$, goes to $\sigma_i^0$, then $\sigma_i^1$, etc, ending at her destination $y_i$. The set $\tau_i=\left\{(\tau_i^0,\tau_i^1),(\tau_i^2,\tau_i^3),\ldots, (\tau_i^{k-1},\tau_i^k) \right\}$ are the tickets that the passenger $i$ purchases---for example, one ticket from $\tau_i^0$ to $\tau_i^1$.
To reduce fare evasion, the transportation authority has a mass $\lambda^{total}$ of inspectors, and distributes them along the line. Without loss of generality, we normalize the mass to unity, i.e., $\lambda^{total}=1$ so that we can describe the distribution of inspectors with a probability density function $\lambda\left(x\right)$.\footnote{We will drop this normalization in our application in Section \ref{section::application}.} The probability that a passenger is inspected when traveling between $x$ and $y$ is given by the following expression:
\[q\left(x,y,\lambda\right)=\left| \int_x^y \frac{\phi\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)} da \right|\] where $\phi$ is a monotonically increasing and quasi-concave function of the mass of inspectors. The case $\phi\left(x\right)=x$, for example, represents the case in which the probability of being inspected equals the ratio of inspectors to passengers. Note that $q$ is symmetric in the first two arguments, so that $q(x,y,\lambda)=q(y,x,\lambda)$.
If a passenger is inspected at any point $a\in[0,1]$ and does not have a ticket, she receives a fine of $\alpha$. Moreover, if she has a ticket with origin-destination $(x,y)$ but either $a<x$ or $a>y$, then she is also punished with the same fine.\footnote{Not differentiating between these cases for fine purposes is in line with the practices that we are aware of.} While we will make considerations about how the transportation authority would distribute the inspectors, we assume that it does not incorporate revenues from fines.
We assume that passengers have quasi-linear utilities, are risk neutral and maximize expected utility. Passenger $i \in \mathcal{I}$ derives utility $u_i\left(x,y\right)$ from making a trip from $x$ to $y$. The (expected) utility that the passenger $i$ who travels from $x$ to $y$ derives from her strategy $s_i=(\sigma_i,\tau_i)$ is:
\[u_i\left(x,y\right) - \alpha\sum_{(a,b)\in \sigma_i\backslash \tau_i} q(a,b,\lambda) -\sum_{(a,b)\in \tau_i} p\left(a,b\right)\] where the second term is the expected fine payment if the passenger is inspected on a line segment without a ticket, and the third term is the total cost of the tickets purchased.
Under pricing scheme $p$, the strategy profile $s=(s_i)_{i \in \mathcal{I}}=(\sigma_i,\tau_i)_{i \in \mathcal{I}}$ is an \textbf{equilibrium} if no passenger can be strictly better off by unilaterally deviating to another strategy. A pricing scheme is \textbf{incentive-compatible} if there is an equilibrium such that all passengers buy full tickets corresponding to their trip.
Under strategy profile $s$, the \textbf{total revenue} of the transportation authority is given by: \[
\pi(p,s)=\sum_{(x,y)\subseteq [0,1]^2} p\left(x,y\right)\left|\{i \in \mathcal{I}:(x,y)\in \tau_i\}\right| \]
where $\left|\{i \in \mathcal{I}:(x,y)\in \tau_i\}\right|$ is the mass of passengers who buy tickets between $x$ and $y$. A pricing scheme $p^*$ is \textbf{revenue-maximizing} if it maximizes total revenue $\pi(p^*,s^*)$ where $s^*$ is an equilibrium under $p^*$.
Passengers have the reservation values of riding without buying any ticket. Therefore, the maximum surplus that can be extracted from a passenger $i$ with demand for travel from $x$ to $y$ is $u_i\left(x,y\right)-\alpha q(x,y,\lambda)$. Hence, a pricing scheme $p^*$ is \textbf{full-surplus-extracting} if it is incentive-compatible and every passenger $i\in\mathcal{I}$ has payoff $u_i\left(x,y\right)-\alpha q(x,y,\lambda)$ in an equilibrium $s^*$ under $p^*$. Note that if a pricing scheme is full-surplus-extracting, then it is also revenue-maximizing.
Our first result is to show that there is an incentive-compatible pricing scheme, and that this pricing scheme extracts all the surplus from passengers and thus also maximizes the revenue for the transportation authority.
\begin{prop}\label{prop1} The pricing scheme $p^*\left(x,y\right)=\alpha q(x,y,\lambda)$ is incentive-compatible, full-surplus-extracting and revenue-maximizing. \end{prop} \begin{proof} Consider a passenger $i$ who needs to make a trip from $x$ to $y$ under the pricing scheme $p^*$. Without loss of generality assume $x<y$. First, suppose a passenger $i$ is buying the full ticket from $x$ to $y$, i.e., $\sigma_i=\tau_i$. Since $p^*$ is increasing for a larger line segment (in the sense of inclusion), it suffices to consider $\sigma_i=(x,y)$. The utility of this passenger is \[U^{Full}_i(x,y)\equiv u_i\left(x,y\right)-p^*(x,y)= u_i\left(x,y\right)- \alpha q(x,y,\lambda) \] Second, suppose that passenger $i$ does not buy any single ticket. The utility is \[U^{None}_i(x,y)\equiv u_i\left(x,y\right) - \alpha q(x,y,\lambda) \] which is the minimum utility that the passenger can get, since there is always an option to travel without buying a ticket. Hence, as $U^{Full}_i(x,y)=U^{None}_i(x,y)$, the passenger $i$ is indifferent between buying the full ticket and buying no ticket.
Now consider a passenger $i$ who buys a subset of tickets, i.e., using a strategy such that $\sigma_i \neq \tau_i$. By the monotonicity of price $p^*$, it is strictly dominated to buy tickets outside line segments between $x$ and $y$ and also dominated to buy overlapping line segments. Hence, we only need to consider tickets covering disjoint segments between $x$ and $y$. Formally, passenger $i$ buys $n$ tickets where the $j$-th ticket covers the segment $(a_j,b_j)$, with $a_1 \le b_1 \leq a_2 \leq b_2 \leq \ldots \leq a_n \le b_n$, where $x \le a_1$ and $b_n \le y$. The utility is \begin{align*} U^{Partial}_i(s_i,x,y) & \equiv u_i\left(x,y\right) - \alpha \sum_{(a,b)\in \sigma_i \backslash \tau_i} q(a,b,\lambda) -\sum_{(a,b)\in \tau_i} p^*(a,b) \\ & = u_i\left(x,y\right) - \alpha \sum_{(a,b)\in \sigma_i\backslash \tau_i} q(a,b,\lambda) -\alpha \sum_{(a,b)\in \tau_i} q(a,b,\lambda) \\ & = u_i\left(x,y\right) - \alpha q(x,y,\lambda). \end{align*} Hence, we have $U^{Full}_i(x,y)=U^{None}_i(x,y)=U^{Partial}_i(s_i,x,y)$ for all strategies $s_i$, and this equality holds for all possible origins $x$ and destinations $y$. This implies that all passengers are indifferent between any combination of ticket purchases (including no ticket at all).
Any price above $p^*\left(x,y\right)$ will make the passenger ride without paying. Therefore: (i) all surplus is extracted under $p^*\left(x,y\right)$, (ii) revenue is maximal, and (iii) every passenger buys tickets between $x$ and $y$ for every $p'(x,y)\leq p^*\left(x,y\right)$. \end{proof}
Next, we consider the distribution of inspectors $\lambda$. The focus of this paper are the prices and the incentives that they induce on strategic passengers, and therefore we have taken the distribution of inspectors as given. However, understanding what would be the ``optimal'' choice for that distribution is important if we want to understand whether the scenarios that we will consider can be deemed as \emph{reasonable}.
A distribution of inspectors $\lambda$ is \textbf{revenue-maximizing} if it maximizes the revenues of a transportation authority under some revenue-maximizing pricing scheme.\footnote{Remember that the revenue from the fines themselves is not part of the authority's revenues.} The following shows that the revenue-maximizing inspector distribution is rather simple.
\begin{prop} \label{prop:optimalDistributionInspectors} The distribution of inspectors $\lambda$ is revenue-maximizing if, for any two points $x,y\in[0,1]$, \[\phi'\left(\lambda\left(x\right)\right) =\phi'\left(\lambda\left(y\right)\right).\] \end{prop}
\begin{proof} From Proposition \ref{prop1}, a revenue-maximizing price is also incentive-compatible. Hence, under this price $p^*$, the profit of the transportation authority is \[ \pi = \int_0^1 \int_x^1 d(x,y) p^*(x,y) dy\ dx + \int_0^1 \int_0^x d(x,y) p^*(x,y) dy\ dx \]
Since $p^*(x,y)=\alpha q(x,y,\lambda)=\alpha\left| \int_x^y \frac{\phi\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)} da \right| $, we have \[ \pi = \alpha \int_0^1 \int_x^1 d(x,y) \left[\int_x^y \frac{\phi\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)} da \right] dy\ dx + \alpha \int_0^1 \int_0^x d(x,y) \left[\int_y^x \frac{\phi\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)} da \right] dy\ dx \] (in the second term $y<x$, so the absolute value reverses the limits of integration). By changing the order of integration, we have
\[\pi =\alpha \int_0^1 \int_0^a \int_{a}^1 \left[d(x,y) \frac{\phi\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)} \right] dy\ dx\ da + \alpha\int_0^1 \int_{a}^1 \int_0^a \left[d(x,y) \frac{\phi\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)} \right] dy\ dx\ da \]
The distribution of inspectors is such that the following holds: \[\int_0^1 \lambda(x) dx=1.\]
Hence, we have the Lagrangian: \[\mathcal{L}=\pi+ \Lambda\left[1-\int_0^1 \lambda(x) dx\right] \] where $\Lambda$ is the Lagrange multiplier. The FOCs on the Lagrangian are: \[\frac{\partial \mathcal{L}}{\partial \lambda(a)}=0 \text{ for all } a\in[0,1] \text{ and }\int_0^1 \lambda(x) dx=1.\]
The first set of equations implies that we have \[ \alpha\int_0^a \int_{a}^1 \left[d(x,y) \frac{\phi'\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)} \right] dy\ dx + \alpha\int_{a}^1 \int_0^a \left[d(x,y) \frac{\phi'\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)} \right] dy\ dx = \Lambda \]
The expression $\frac{\phi'\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)}$ is independent of $x$ and $y$, and therefore we can rearrange as \[ \frac{\phi'\left(\lambda\left(a\right)\right)}{d^{pass}\left(a\right)}\alpha\left[ \int_0^a \int_{a}^1 d(x,y) dy\ dx + \int_{a}^1 \int_0^a d(x,y) dy\ dx\right]= \Lambda\]
Since \(d^{pass}(a) =\int_0^a \int_{a}^1 d(x,y) dy\ dx + \int_{a}^1 \int_0^a d(x,y) dy\ dx\), we have
\[\phi'\left(\lambda\left(a\right)\right) \alpha=\Lambda,\] for all $a\in [0,1]$. Thus, we have for any two points $x,y\in[0,1]$, \[\phi'\left(\lambda\left(x\right)\right) =\phi'\left(\lambda\left(y\right)\right)=\Lambda/\alpha.\] \end{proof}
Proposition \ref{prop:optimalDistributionInspectors} results in two important corollaries:
\begin{cor} \label{cor:OptimalInspectorsStrictlyConcave} If $\phi$ is strictly concave, the revenue-maximizing distribution of inspectors $\lambda^*$ is such that for every $x,y\in[0,1]$, $\lambda^*(x)=\lambda^*(y)$. \end{cor}
\begin{cor} \label{cor:OptimalInspectorsLinear} If $\phi(\lambda)=k\lambda$ for some constant $k>0$, then any distribution of inspectors $\lambda^*$ for which $\int_0^1 \lambda(x) dx=1$ is revenue-equivalent. \end{cor}
Corollary \ref{cor:OptimalInspectorsLinear} implies that if the probability of a passenger being inspected is proportional to the probability that an individual in a train is an inspector, revenue maximization does not impose any restriction on the distribution of inspectors.
\section{Networks\label{section:Networks}}
In this section, we consider a model in which space is not a line segment, but an arbitrary discrete network. Passengers travel between two different nodes in the network, and they can only travel through the edges that connect them. A network is a set of nodes $\mathcal{N}=\{N_1,N_2,N_3,\ldots\}$ and of edges $\mathcal{E}$, which is a collection of pairs of nodes. For each origin $x\in \mathcal{N}$ and destination $y\in \mathcal{N}$, there is an exogenous demand $D_{(x,y)}>0$, that gives the mass of passengers who want to travel from $x$ to $y$. Denote by $\mathcal{D}=\left(D_{(x,y)}\right)_{(x,y) \in \mathcal{N}^2}$ the set of demands for all origins and destinations. Each passenger $i$ derives utility $u_i(x,y)$ from his trip.
Passengers now have more options than simply traveling or not: there might be multiple ways to travel between two nodes. For example, in a connected graph with three nodes $x,y,z$, a passenger who wants to go from node $x$ to node $y$ can either take the direct route, through the edge $(x,y)$, or take the indirect route through edges $(x,z)$ and then $(z,y)$.
Following Section \ref{section:baseline}, a passenger $i$'s strategy consists of the combination of (i) a path from her origin to her destination---a set of connected edges in $\mathcal{E}$---($\sigma_i$), and (ii) a set of tickets ($\tau_i$). Given the strategies of all passengers, let $\sigma^{Pass}_{(x,y)}$ be the mass of passengers that pass through the edge $(x,y)$:\footnote{In the network model we use the $\sigma^{Pass}$ notation instead of $d^{pass}$ to emphasize the fact that the presence of multiple paths between the origin and destination makes the flow dependent on the passengers' path choices, even in equilibrium.}
\[ \sigma^{Pass}_{(x, y)} = |\{i \in \mathcal{I}:(x,y)\in \sigma_i \}|+ |\{i \in \mathcal{I}:(y,x)\in \sigma_i \}|.\]
Each edge $(x,y)$ is monitored by a mass $\lambda_{(x,y)}>0$ of inspectors. Normalizing the total mass of inspectors to unity, let $\lambda$ be a distribution of inspectors with full support over all the edges. The probability that a passenger is inspected when traveling through the edge connecting $x$ and $y$, is given by the following expression:
\[Q_{\left(x,y\right)}(\lambda)=\frac{\phi\left(\lambda_{\left(x,y\right)}\right)}{\sigma^{pass}_{\left(x,y\right)}} \] where the numerator depends on the mass of inspectors on the edge and the denominator, $\sigma^{pass}_{(x,y)}$, aggregates the mass of passengers travelling from node $x$ to node $y$ and the mass of passengers travelling in the other direction.
The transportation authority sets a pricing scheme $P$ for every pair of origin and destination. A ticket for any pair of nodes can be purchased at every node, at a price $P_{(x, y)}$, where $x$ is the origin and $y$ the destination.\footnote{To avoid confusion, we use $Q$ and $P$, respectively, instead of $q$ and $p$ for the probability of being inspected and pricing scheme in the network model.}
Given the pricing scheme $P$, the inspector distribution $\lambda$ and the strategies of all other passengers $\sigma^{pass}_{-i}$, the expected utility of passenger $i$, who travels from $x$ to $y$ with strategy $s_i=(\sigma_i,\tau_i)$, is:
\[u_i\left(x,y\right) - \alpha\sum_{(a,b)\in \sigma_i\backslash\tau_i} Q_{(a,b)}(\lambda) -\sum_{(a,b)\in \tau_i} P_{\left(a,b\right)} \]
Under the pricing scheme $P$, the strategy profile $s=(\sigma_i,\tau_i)_{i \in \mathcal{I}}$ is an \textbf{equilibrium} if no passenger can be strictly better off by deviating to another strategy. A pricing scheme is \textbf{incentive-compatible} if there is an equilibrium such that every passenger buys the full ticket.
Under strategy profile $s$, the total revenue of the transportation authority is given by: \[
\pi(P,s)=\sum_{(x,y)\subseteq \mathcal{N}^2} P_{\left(x,y\right)}\left|\{i \in \mathcal{I}:(x,y)\in \tau_i\}\right| \]
where $\left|\{i \in \mathcal{I}:(x,y)\in \tau_i\}\right|$ is the mass of passengers who buy tickets between $x$ and $y$. A pricing scheme $P^*$ is \textbf{revenue-maximizing} if it maximizes total revenue $\pi(P^*,s^*)$ where $s^*$ is an equilibrium under $P^*$.
We will show that there exists a system of positive prices on every edge such that all passengers buy full tickets in equilibrium. We do so by constructing a game in which all passengers are free to choose any route between their origin and destination, but \emph{must} pay full price on their trips. Moreover, in this game, as opposed to our model, prices are not given, but a function of all passengers' route choices. We then show that under these prices all passengers will choose to buy the tickets in the original model, in which they choose the path and the tickets to purchase.
\begin{prop} \label{prop:existenceofpricenetwork} For any network $G=(\mathcal{N},\mathcal{E})$ such that demand $\mathcal{D}$ and distribution of monitors $\lambda$ have full support over all the edges, there is an incentive-compatible, revenue-maximizing pricing scheme. \end{prop} \begin{proof} To start, we consider a modified simultaneous move game that has two changes from the original game. First, each passenger only needs to choose which path they want to follow between their origin and destination, while paying full price on their trips (that is, they are not strategic about ticket purchasing). Hence, a passenger $i$'s strategy is just a path between $x$ and $y$. Second, prices are not taken as given but endogenously determined by the combination of all passengers' choices of paths. More specifically, we consider the price of each edge $(x,y)$ to be given by \begin{equation} \label{eq:ICPriceInNerwork} P_{(x,y)}=\frac{\phi\left(\lambda_{(x,y)}\right)}{\sigma^{pass}_{(x,y)}}\alpha. \end{equation}
Given the price $P_{(x,y)}$, we can see that the utility is continuous in other passengers' mixed strategies: the utility of a passenger when traveling through a path is his valuation from travel minus the sum of all $P_{(x,y)}$ along the path travelled. Since we have full support on monitoring and on the number of travelers passing through each edge (since for every edge $(x,y)$ it is the case that $D_{(x,y)}>0$, each edge is traveled, at the very least, by those who have $x$ and $y$ as their origin and destination), continuous changes in other players' strategies continuously change the value of $\sigma^{pass}_{(x,y)}$.
We have, therefore, a continuum of players playing a game with a finite number of strategies (that are, for each player, all the paths between their origin and destination). Following \cite{schmeidler1973equilibrium}, when the payoffs of the passengers are continuous on the mixture of other passengers' strategies, there exists a mixed strategy Nash equilibrium. Hence, we have the existence of a mixed strategy equilibrium of this modified game.
By the continuum of passengers and the law of large numbers, there is a pure strategy equilibrium where the randomization probability of a path under the mixed strategy equilibrium is the equilibrium fraction of passengers choosing that path in a pure strategy equilibrium.
Now we return to the original game where price is given from the equilibrium in the modified game. We argue that passengers would continue to travel the same path and purchase the full ticket.
First, passengers in the original game would continue to travel the same path as in the modified game. If there is a profitable deviation in the original game, then there will be also a profitable deviation in the modified game, which is a contradiction.
Second, passengers will purchase the full ticket. Since the logic is exactly the same as in the proof of Proposition \ref{prop1}, we only verbally describe the steps here. Note that the price between every two nodes is set to equal the expected loss of not buying a ticket between these two nodes. So regardless of the node of departure that is being considered (and therefore regardless of whether a ticket was purchased for the previous path), following the equilibrium path from then on both (1) minimizes the cost of being monitored without a ticket and (2) minimizes the price paid.
Finally, this pricing scheme is revenue-maximizing. Notice that a lower price at any segment will not increase participation in travels, so it will not increase revenues. Moreover, any increase for the price in a segment makes everybody travel through that segment without paying for it. So, increasing the price at any edge also reduces revenue. \end{proof}
\section{Application: the Washington DC Metro\label{section::application}}
In this last section, we use traffic and pricing data from the Washington DC metro system as an example of the impact that our incentive-compatible pricing scheme could have in a real-life public transport system. Our objective is, first, to see whether, given actual values for traffic between stations and realistic numbers for the number of inspectors, the values for the punishment fee $\alpha$ and for the incentive-compatible prices that come from the model are reasonable.
Second, we perform counterfactual exercises which evaluate the impact on fare evasion and revenue that would result from switching from the current barrier-based system to a proof-of-payment system with random inspection.
Third, we show how adjusting the current pricing scheme via the use of incentive-compatible prices could substantially reduce fare evasion and increase revenue, by reducing some of the prices at some of the segments of the network.
\subsection{Data}
We use two datasets in our analysis. One is a station-to-station passengers count for the month of May 2012.\footnote{Source: Washington Metropolitan Area Transit Authority.} It contains, for every pair of metro stations, the average daily number of passengers traveling from one to the other. These are separated into four parts of the day: AM peak (opening to 9:30am), Midday (9:30am to 3:00pm), PM peak (3:00pm to 7:00pm) and Evening (7:00pm to midnight). We manually replicated the network structure of the subway system.\footnote{You can find the map of the DC Metro network, as in 2012, in the appendix.}
The second is the origin-destination pricing, as of April 2019. The DC Metro uses a pricing scheme in which the cost of the ticket depends on the station of departure and of destination, and also on whether it takes place at Peak or Off-Peak times. Prices varied from U\$2.00 to U\$6.00, and there were 76 different prices depending on the origin, destination, and time.
In our analysis, we use the 2012 traffic data together with the 2019 pricing information. Since the objective of our simulation exercise is to evaluate the model with realistic values, this combination fits our purposes, despite not delivering exact prices for 2012 or 2019. In addition to these, we need values for two more parameters in our model: $\alpha$ (the value of the fine charged to passengers caught with an incorrect ticket) and $\lambda$ (the total mass of inspectors).
For the total mass of inspectors that we will use, we consider the case of the Berlin public transport. Berlin also has a large scale public transport system, but differently from the DC metro, they use random inspections for tickets, as in our model. Berlin has 120 inspectors and 2.9 million passengers per day.\footnote{Sources: \url{http://www.exberliner.com/features/zeitgeist/controllers-out-of-control} and \url{http://www.bvg.de/en/Company/Profile/Structure--facts}} The DC metro has 724,156 passengers per day. Therefore, setting the number of inspectors in DC to 30 would make the proportion of inspectors per passenger approximately the same.
A quick inspection (using Google Maps) tells us that it takes about 70 minutes to ride 26 stations in a line in the DC metro.\footnote{Figure \ref{fig:dcMetroMap} in the Appendix shows the DC metro map in 2012.} That is, 2.69 minutes per station on average. If we consider the four periods in the data as equally distributed, they each have 6 hours, that is, 360 minutes. That is, each inspector can potentially inspect 134 stations in each period. The DC metro network has 88 edges between stations. That is, each monitor could in principle pass 1.52 times through each edge during the period considered, if monitoring is uniformly distributed. If we have 30 inspectors, then each edge is inspected by 45.60 inspectors over the six hours period. So the total mass of inspectors to be distributed is $\lambda^{Total}=\sum_{(x,y) \in \mathcal{E}}\lambda_{(x,y)}=4,013$.
By using the number of passengers traveling from each origin and destination and the price for those trips, we can calculate what would be the revenue that the DC metro obtained, on average, in May, during each period of the day. Table \ref{table:TrafficAndRevenue2019Prices} shows the results of these calculations. We see that 729,110 subway trips are made on average per day, resulting in a daily revenue of $\pi^{Total}=\text{U}\$2,353,949.00$.
\begin{table}[t]
\centering
\begin{tabular}{l|c|c|c|c|c}
& \textbf{AM Peak} & \textbf{Midday} & \textbf{PM Peak} & \textbf{Evening} & \textbf{Total}\\
\hline
\hline
Total Traffic & 236,177 & 142,851 & 259,165 & 90,916 & 729,110\\
Revenue & \$845,138 & \$373,262 & \$890,785 & \$244,764 & \$2,353,948 \\
\hline
\hline \end{tabular}
\caption{Passenger traffic and revenue under the 2019 pricing scheme}
\label{table:TrafficAndRevenue2019Prices} \end{table}
The origin-destination data gives us the number of passengers who travel from each origin to each destination. In order to produce the incentive-compatible prices, however, we need to know the total number of travelers between every two neighboring connected stations. To do so, first we set the weight of every edge in the network to zero, and for each pair of stations $x$ and $y$, we derive the shortest path between $x$ and $y$, and add the number of passengers with origin $x$ and destination $y$ to the weight of each edge in that path. The value of the weight at any edge $(x,y)$ at the end of that process is the value of $\sigma^{Pass}_{(x,y)}$ that we will consider. Notice that this procedure makes the flow of passengers in the edge $(x,y)$ aggregate those passing through both directions---$x$ to $y$ and $y$ to $x$---in line with the network model.\footnote{While it could, in principle, be the case that the path chosen by some passengers in equilibrium is not the shortest, this is not the case in our simulation exercise.}
\subsection{Fine for fare evasion}
Given these values for $\lambda^{Total}$ and $\sigma^{pass}$ for each period considered, we can use equation \ref{eq:ICPriceInNerwork} to find the value of $\alpha$ (the fine to be paid by a passenger who is caught without a ticket or with an incorrect ticket) that makes the revenue obtained when using these prices be the same as the one in Table \ref{table:TrafficAndRevenue2019Prices}. For that, we consider that the function $\phi$ is $\phi(\lambda)=\lambda$. In other words, we assume that the probability that a passenger has her ticket inspected equals the probability that a passenger in a train is an inspector.
When using the incentive-compatible price, the price for the edge $(x,y)$ is: \begin{equation*} P_{(x,y)}=\frac{\lambda_{(x,y)}}{\sigma_{(x,y)}^{pass}}\alpha. \end{equation*} Note that, by construction $\sigma_{(x,y)}^{pass}=\sigma_{(y,x)}^{pass}$, we have $P_{(x,y)}=P_{(y,x)}$. The revenue for the edge $(x,y)$ under IC pricing is therefore $\pi_{(x,y)}=P_{(x,y)}\sigma_{(x,y)}^{pass}=\alpha\lambda_{(x,y)}$. So total revenue is the sum of revenues of every edges: \begin{equation*} \pi^{Total}=\sum_{(x,y)\in \mathcal{E}}\pi_{(x,y)}=\sum_{(x,y)\in \mathcal{E}}P_{(x,y)}\sigma_{(x,y)}^{pass}=\alpha\sum_{(x,y)\in \mathcal{E}}\lambda_{(x,y)}=\alpha\lambda^{Total}, \end{equation*} and therefore $\alpha=\pi^{Total}/\lambda^{Total}$.
With that, we obtain the values of U\$210.60, U\$93.01, U\$221.97 and U\$60.99 for $\alpha$ for the periods of AM Peak, Midday, PM Peak and Evening, respectively. Interestingly, the size of the fine required is not too different from the fines actually charged under the DC system. Before the decriminalization of fare evasion in May 2019, police could issue criminal citations of up to U\$300 and even jail people for 10 days. Under the new law, fare evasion becomes a civil penalty with a maximum fine of U\$50.\footnote{D.C. Law 22-310. Fare Evasion Decriminalization Amendment Act of 2018: \url{https://code.dccouncil.us/us/dc/council/laws/22-310}}
\subsection{Incentive-compatible prices} \label{subsec:ICPrices}
Given these values for $\alpha$, we can then go back to equation \ref{eq:ICPriceInNerwork} and obtain the incentive-compatible prices for every edge in the network. We consider two configurations regarding the distribution of inspectors in the network.
The first is the \textbf{uniform distribution} case, in which inspectors are uniformly distributed across the system. That is, for every edge $(x,y) \in \mathcal{E}$, $\lambda_{(x,y)}=\bar{\lambda}$. Since we take $\phi(\lambda)=\lambda$, by Corollary \ref{cor:OptimalInspectorsLinear} this distribution of inspectors maximizes revenue (as does any other distribution with the same total mass). The ticket price for an edge $(x,y)$ is:
\begin{equation*} P_{(x,y)}=\frac{\bar{\lambda}}{\lambda^{Total}} \frac{\pi^{Total}}{\sigma_{(x,y)}^{pass}} \end{equation*}
The second configuration that we considered was the \textbf{proportional distribution}, where the mass of inspectors in an edge $(x,y)$ of the network is proportional to the total flow of passengers passing by it. That is:
\begin{equation*} \lambda_{(x,y)}=\frac{\lambda^{Total}\sigma^{pass}_{(x,y)}}{\sigma^{Total}}\text{, where }\sigma^{Total}=\sum_{(x,y)\in\mathcal{E}}\sigma^{pass}_{(x,y)} \end{equation*}
But then, the ticket prices for an edge $(x,y)$ under $\alpha=\pi^{Total}/\lambda^{Total}$ become:
\begin{equation*} P_{(x,y)}=\frac{\lambda^{Total}\frac{\sigma_{(x,y)}^{pass}}{\sigma^{Total}}}{\lambda^{Total}} \frac{\pi^{Total}}{\sigma_{(x,y)}^{pass}}=\frac{\sigma_{(x,y)}^{pass}}{\sigma^{Total}} \frac{\pi^{Total}}{\sigma_{(x,y)}^{pass}}=\frac{\pi^{Total}}{\sigma^{Total}} \end{equation*} That is, with proportional monitoring, the price is the same for each edge.
As explained in Section \ref{section:Networks}, it is necessary to check whether these are equilibrium values. We manually checked whether any deviation from the shortest path between two stations would have a lower cost, and the answer was no.\footnote{Under proportional monitoring, this is trivially true, since longer trips are always more expensive.} Therefore, when using incentive compatible prices, passengers will pay full fares and follow the shortest distance, in number of stops, between any two stations.
\begin{table}[t]
\centering
\begin{tabular}{l|c|c|c|c}
& \textbf{AM Peak} & \textbf{Midday} & \textbf{PM Peak} & \textbf{Evening} \\
\hline
\hline
\multicolumn{5}{c}{\textbf{DC Metro 2019 pricing}} \\
\hline
Minimum ticket price & \$2.25 & \$2.00 & \$2.25 & \$2.00 \\
Median ticket price & \$4.20 & \$3.40 & \$4.20 & \$3.40 \\
Maximum ticket price & \$6.00 & \$3.85 & \$6.00 & \$3.85 \\
\hline
\multicolumn{5}{c}{\textbf{Incentive-compatible pricing - Uniform monitoring}}\\
\hline
Minimum ticket price & \$0.18 & \$0.15 & \$0.18 & \$0.15 \\
Median ticket price & \$4.43 & \$3.92 & \$4.58 & \$3.76 \\
Maximum ticket price & \$17.68 & \$17.04 & \$18.51 & \$15.06 \\
\hline
Trips where $IC<DC$ & 65.44\% & 66.21\% & 66.88\% & 66.14\% \\
\hline
\hline
\multicolumn{5}{c}{\textbf{Incentive-compatible pricing - Proportional monitoring}}\\
\hline
Minimum ticket price & \$0.44 & \$0.38 & \$0.45 & \$0.36 \\
Median ticket price & \$4.82 & \$4.15 & \$4.92 & \$3.99 \\
Maximum ticket price & \$11.83 & \$10.18 & \$12.09 & \$9.79 \\
\hline
Trips where $IC<DC$ & 54.13\% & 55.82\% & 55.18\% & 55.47\% \\
\hline
\hline \end{tabular}
\caption{DC metro 2019 and incentive compatible (IC) prices.}
\label{table:IncentiveCompatiblePrices} \end{table}
Some summary statistics from both the DC metro 2019 pricing scheme and the incentive-compatible prices are shown in Table \ref{table:IncentiveCompatiblePrices}. These show that, while the average cost paid per station is essentially the same for both pricing schemes,\footnote{More specifically, the average cost per station is calculated for each origin/destination by dividing the price of the ticket by the number of stops in the shortest path between the two stations. The value shown in the table is the average value for this for all origin/destination pairs.} the range of prices differ substantially. While the highest difference between the cost of two tickets under the 2019 DC pricing scheme is of U\$4.00, for the incentive-compatible prices that difference jumps to U\$18.36.
Here it is important to remember the meaning of incentive-compatible prices: they are the highest prices that guarantee that passengers will pay the full fare. Therefore, any price below those still guarantees that all passengers will pay. Perhaps the main reason why these values can be as high as more than U\$18.00 is that some prices have to be very low. The reason for this is intuitive: if a passenger wants to make a short trip, say from one station to the next, the likelihood that she will face an inspector is relatively low. Therefore, if the price is not low as well, a ``rational'' passenger will prefer to take the risk. Indeed, if the traffic pattern is such that many passengers make short trips, a random checks system will require a high value for $\alpha$ in order to have incentive-compatible prices that yield good revenues.
While some incentive-compatible prices are very high, under them most passengers would pay less than under the current DC metro pricing. As shown in Table \ref{table:IncentiveCompatiblePrices}, the majority of the trips made would be cheaper under the incentive-compatible prices in every period. This is especially true under proportional monitoring. These prices, however, should be considered upper-bounds. Therefore, a policymaker could choose to reduce the prices that are deemed too high.
\subsection{DC prices under proof-of-payment system} \label{subsec:DCPricesStrategicPassengers}
We would like to simulate a counterfactual in which the DC metro switches to a proof-of-payment system with random inspection but keeps the 2019 prices. In other words, the authority changes the enforcement method but does not adapt the pricing to consider the incentives that they induce. The difference between these simulations and the 2019 revenue values can be seen as the impact that an evaluation of the agents' incentives has on the design of prices for public transport under random inspections.
\begin{table}[t]
\begin{tabular}{l|c|c|c|c|c}
& \textbf{AM Peak} & \textbf{Midday} & \textbf{PM Peak} & \textbf{Evening} & \textbf{Total}\\
\hline
\hline
\multicolumn{6}{c}{\textbf{Random inspections under DC 2019 Metro pricing - Uniform monitoring}}\\
\hline
Partially Paid Trips & 5.56\% & 3.78\% & 4.57\% & 4.67\% & 4.77\% \\
Trips without tickets & 64.48\% & 65.65\% & 66.45\% & 65.34\% & 65.52\% \\
Fully Paid Trips & 29.88\% & 30.57\% & 28.97\% & 29.99\% & 29.71\% \\
Revenue & \$342,664 & \$157,324 & \$356,833 & \$100,886 & \$957,707\\
Losses due to partial tickets & \$10,975 & \$3,071 & \$7,918 & \$2,566 & \$24,530\\
Losses due to no ticket purchased & \$491,498 & \$212,866 & \$526,033 & \$141,311 & \$1,371,708\\
\hline
\multicolumn{6}{c}{\textbf{Random inspections under DC 2019 Metro pricing - Proportional monitoring}}\\
\hline
Partially Paid Trips & 27.12\% & 14.45\% & 24.56\% & 13.86\% & 22.09\% \\
Trips without tickets & 48.54\% & 53.14\% & 50.72\% & 54.18\% & 50.91\% \\
Fully Paid Trips & 24.34\% & 32.40\% & 24.72\% & 31.96\% & 27.00\% \\
Revenue & \$413,806 & \$188,368 & \$429,495 & \$118,681 & \$1,150,352\\
Losses due to partial tickets & \$85,537 & \$15,759 & \$83,495 & \$9,613 & \$194,404\\
Losses due to no ticket purchased & \$345,794 & \$169,133 & \$377,793 & \$116,469 & \$1,009,189\\
\hline
\hline \end{tabular}
\caption{Counterfactuals under DC pricing and random inspection}
\label{table:Counterfactuals} \end{table}
The simulation exercise we ran considered both the fact that there might be different paths between an origin and a destination and the possibility of buying tickets that cover only part of the trip. For example, consider a passenger traveling from stations $a$ to $z$. Moreover, suppose that there are two paths between these two stations: $a\to b\to z$ and $a\to c\to d\to z$. While incentive-compatible prices guarantee that the passenger will choose one of the two paths and pay a full ticket, there are many possibilities to consider when prices are not incentive-compatible. For instance, the passenger could choose to take the longer path $a\to c\to d\to z$ but only buy the ticket for the segment $(c,d)$, and ``take the risk'' from $a$ to $c$ and from $d$ to $z$ without a valid ticket. In our simulation, the passengers consider all the possible alternatives of paths and ticket purchases.
The values of $\alpha$ that we use are the ones used before, which guarantee that incentive-compatible prices yield the same revenue as the one obtained with 2019 prices. More specifically, let $P^{DC}_{(a,b)}$ be the cost of the ticket, in 2019 prices, from $a$ to $b$. Remember that $P_{(x,y)}$, which is the incentive-compatible price, is also the expected punishment (fine) from riding the edge $(x,y)$ without paying.
Let $\overrightarrow{(x,y)}$ be the set of paths starting at $x$ and ending on $y$. Passenger $i$ going from $a$ to $b$ will choose the origin-destination $(c,d)$ and the paths $\sigma_i^{1}\in \overrightarrow{(a,c)}$ and $\sigma_i^{2}\in \overrightarrow{(d,b)}$ that minimize the expression:
\[ \sum_{(x,y)\in \sigma_i^{1} }P_{(x,y)} + P^{DC}_{(c,d)}+ \sum_{(x,y)\in \sigma_i^{2}}P_{(x,y)}\]
Notice that this includes the possibility of buying a full ticket and no tickets at all. Table \ref{table:Counterfactuals} shows the impact of using the 2019 DC metro pricing with random inspections, for both uniform and proportional monitoring. In both cases, the amount of fare evasion and its impact on revenue are remarkably high. Under uniform monitoring, more than 65\% of the trips are made without buying any ticket. Combined with some loss due to partial tickets, this results in a loss of U\$1,396,238 per day, or more than 59\% compared with revenues when all passengers pay their full fares. Under proportional monitoring, the loss is about 51\%---lower but still remarkably high.
\subsection{Incentives-adjusted DC prices} \label{subsec:ICAdjustedDCPrices}
The results in Section \ref{subsec:DCPricesStrategicPassengers} indicate that the impact on revenue that would result from switching to a proof-of-payment scheme with random inspections, without taking incentives into consideration, is potentially very high. One alternative that is available would be to use instead the incentive-compatible prices derived in Section \ref{subsec:ICPrices}. Under these, every passenger would have the incentive to buy the correct ticket, and revenues would be the same as the ones before the change in the payment scheme.
One potential issue with this, however, is that some of the incentive-compatible prices that were derived are too high: some trips would cost more than U\$18.00, whereas under 2019 DC prices the highest price was U\$6.00. An alternative approach would be to use incentive-compatible prices, but limit them so that they are never higher than DC prices. That is, setting incentive-compatible prices bounded from above by 2019 DC prices.
At first sight, one might be tempted to set, for every origin-destination $(x,y)$, the price $P^*_{(x,y)}=\min\left\{P^{DC}_{(x,y)},P_{(x,y)}\right\}$, where $P^{DC}_{(x,y)}$ and $P_{(x,y)}$ are, respectively, 2019 DC and incentive-compatible prices. This, however, might result in $P^*$ not being an incentive-compatible pricing scheme. To see this, consider a network with three nodes: $x$, $y$, and $z$, with edges $(x,y)$ and $(y,z)$. Suppose that under DC prices, $P^{DC}_{(x,y)}=\$0.50$, $P^{DC}_{(y,z)}=\$1.00$, and $P^{DC}_{(x,z)}=\$1.50$, and that under incentive-compatible prices, $P_{(x,y)}=P_{(y,z)}=\$0.70$, and therefore $P_{(x,z)}=P_{(x,y)}+P_{(y,z)}=\$1.40$. If we simply set the prices to be the lowest between the two, we would set $P^*_{(x,y)}=\$0.50$, $P^*_{(y,z)}=\$0.70$ and $P^*_{(x,z)}=\$1.40$. Under $P^*$, however, a passenger traveling from $x$ to $z$ is better off by only buying the ticket from $x$ to $y$ and taking the risk from $y$ to $z$ than by buying the full ticket.
To correctly adjust DC prices to be incentive-compatible, one needs to adjust for every possible deviation that a passenger might have, and adjust down the price so that this cannot be the case. In the example above, the value of $P^*_{(x,z)}$ has to be further reduced to $P^*_{(x,z)}=\$1.20$.
In our simulations, we proceeded as follows. First, we set, for every origin-destination $(x,y)$, $P^*_{(x,y)}=\min\left\{P^{DC}_{(x,y)},P_{(x,y)}\right\}$. Then, for each pair $(x,y)$, we identified the optimal strategy for a passenger making this trip. If that strategy had a lower expected cost than $P^*_{(x,y)}$, then we adjusted the price for $(x,y)$ to be equal to that lower expected cost. This adjustment was done simultaneously for all segments. After that, we iterated following the same procedure once again, and verified that there were no further adjustments: no passenger had a deviating strategy that was better than buying the full ticket.
Table \ref{table:ICLimitedFromAbove} shows the impact on revenue that results from adjusting DC prices to be incentive compatible using the procedure described above: if incentive-compatible prices are lower than DC prices, the prices should be adjusted so that the entire pricing scheme is incentive-compatible and no price is above DC prices.
\begin{table}[t]
\centering
\begin{tabular}{l|c|c|c|c|c}
& \textbf{AM Peak} & \textbf{Midday} & \textbf{PM Peak} & \textbf{Evening} & \textbf{Total}\\
\hline
\hline
\multicolumn{6}{c}{\textbf{Uniform monitoring}} \\
\hline
Revenue & \$691,895 & \$283,148 & \$719,001 & \$189,060 & \$1,883,106\\
Revenue loss percentage & 18.13\% & 24.14\% & 19.28\% & 22.75\% & 20.00\% \\
\hline
\hline
\multicolumn{6}{c}{\textbf{Proportional monitoring}} \\
\hline
Revenue & \$729,801 & \$307,262 & \$763,937 & \$204,702 & \$2,005,703\\
Revenue loss percentage & 13.65\% & 17.68\% & 14.24\% & 16.37\% & 14.79\% \\
\hline
\hline \end{tabular}
\caption{Incentive-compatible prices limited from above by DC prices}
\label{table:ICLimitedFromAbove} \end{table}
Notice that, while the adjustment results in a significant loss of revenue, that pales in comparison to the scenarios simulated in Section \ref{subsec:DCPricesStrategicPassengers}, where DC prices were used without any incentive adjustments. That is, the comparison of the values in Tables \ref{table:Counterfactuals} and \ref{table:ICLimitedFromAbove} shows the increase in revenue that would result from \emph{lowering} the prices for certain paths, and therefore eliminating the incentive to travel without paying when traveling through them.
Of course, the difference in the revenues in Tables \ref{table:Counterfactuals} and \ref{table:ICLimitedFromAbove} should be interpreted as an upper-bound of the impact of incentive considerations while designing prices, since passengers don't make purely strategic decisions, might have moral considerations and might also be risk averse. The magnitude of the impact that we observe in these simulations, however, indicates that incorporating incentive considerations into pricing decisions could be an inexpensive method for improving revenues, simplifying the decisions that passengers have to make, and reducing the incentives for potentially contagious ``bad behavior'' in the use of public transport.
\appendix \section*{Appendix: Map of the DC Metro in 2012} \begin{figure}
\caption{Map of the DC Metro network in 2012. Credit: VeggieGarden, originally posted to Wikipedia.}
\label{fig:dcMetroMap}
\end{figure}
\end{document} | arXiv | {
"id": "2205.11858.tex",
"language_detection_score": 0.8638648986816406,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\doublespacing
\title{Kelly betting on horse races with uncertainty\\in probability estimates}
\author{Michael R. Metel}
\affil{Laboratoire de Recherche en Informatique, Universit\'e Paris-Sud, Orsay, France\\ \url{metel@lri.fr}}
\maketitle
\begin{abstract} \doublespacing We investigate the problem of gambling with uncertainty in outcome probabilities. Stochastic optimization models are proposed for optimal investing on events with mutually exclusive outcomes when probabilities are estimated using multinomial logistic regression. Special attention is given to the case of there being two outcomes, and the general case of many outcomes. An empirical study using simulated data was conducted where the loss of return from probability estimation error is observed, and superior returns are achieved taking it into consideration. \end{abstract}
\section{Introduction}
The \cite{kelly1956} criterion is a powerful tool for decision making in the world of gambling and investing, answering the question of how much to wager by maximizing the asymptotic exponential rate of return. One limitation is that exact knowledge of outcome probabilities and payouts is assumed, which in general is not available for opportunities which have the potential for profitability, such as in sports betting and the stock market. Replacing the actual values with estimates leads to overbetting \citep{maclean1992}, resulting in higher risk with lower returns. We might assume unbiased errors will cancel out over time, but when using the Kelly criterion, outcomes with overestimated values will consistently look more favourable than in reality, with larger wagers placed on them than should be. The most popular way to mitigate this problem is through the use of a fractional Kelly strategy, which wagers a fixed fraction of the amount prescribed by the Kelly criterion, which has been shown to possess favourable risk-return properties by \cite{maclean1992}. This can be seen as a conservative strategy, where given an estimated upper bound of possible over betting, we correct by dividing all wagers by this amount. This technique has been endorsed and successfully used in practice by people such as \cite{ben08} and \cite{thorp2006}, with betting half the Kelly amount being popular amongst gamblers \citep{Pound05}. More recently, research has been done which directly considers the inherent uncertainty in probability estimates. \cite{Baker13} derived a shrinkage factor for two outcome gambling settings, which gives the optimal fraction of the Kelly amount to wager.\\
In this paper we develop methodologies for Kelly style betting on events with many possible outcomes taking into account the uncertainty in probability estimates. For clarity we present our work using the example of horse race betting, though the ideas can be applied more broadly in domains beyond sports betting, and should be of interest to people concerned with decision making under uncertainty in general. A significant amount of academic research has been done on horse racing, see \citep{hau81}, largely due to the fact that it can be seen as a microcosm of financial exchanges, with findings in this arena having wider implications in finance, economics, and decision theory.
\section{Optimal investment problem}
We are interested in the following problem, which we will describe in the setting of optimal wagering on a horse race.
\begin{alignat}{6} &\max&&\text{ }\sum_{h=1}^n\pi_{h}\log(x_{h}O_{h}+w-\sum_{i=1}^nx_i)\label{eq:SF}\tag{P}\\ &\mbox{s.t. }&&\sum_{h=1}^nx_h\leq w \nonumber\\ &&&x_{h}\geq 0\hspace{50 pt}h=1,...,n \nonumber \end{alignat}
The objective is to maximize the Kelly criterion, which is equivalent to maximizing the expected logarithm of wealth. There are $n$ horses in the race, and the probability of horse $h$ winning is $\pi_h$. $x_h$ is the amount we will wager on horse $h$, $O_h$ is the decimal payout odds for horse $h$, which we assume are fixed, and $w$ is our current wealth. The constraints ensure our wagers are sound. Exact optimal solutions can be found for this problem using the algorithm devised by \cite{smoc2010}.\\
\section{Outcome probability estimation}
\label{sec:OP}
The most popular method of estimating horse racing win probabilities is by multinomial logistic regression, which was first proposed by \cite{Bolt86}. We create a linear predictor function to value each horse $h$, $\beta'v_{h}$, where $v_{h}$ is the vector of factor values and $\beta'$ is the transposed vector of regression coefficients. Each horse is then assigned winning probabilities $\pi_{h}=\frac{e^{\beta'v_{h}}}{\sum_{i=1}^{n}e^{\beta'v_{i}}}$. Given $R$ historical races, the log-likelihood function is $$\ln L(\beta)=\sum_{r=1}^R \beta'v_{w^r}-\ln \sum_{i=1}^{n_r}e^{\beta'v_{i^r}},$$ where $w^r$ denotes the index of the winning horse in race $r$ and $n_r$ is the number of horses. This function is concave~\cite[p. 72]{boyd2004}, so we can find a maximum likelihood estimate $\hat{\beta}$ using standard unconstrained optimization algorithms. The $j^{th}$ component of the score function, or the gradient of $\ln L(\beta)$ is $$\frac{\partial\ln L(\beta)}{\partial \beta_j}=\sum_{r=1}^R v_{w^r_j}-\frac{\sum_{i=1}^{n_r}v_{i^r_j}e^{\beta'v_{i^r}}}{\sum_{i=1}^{n_r}e^{\beta'v_{i^r}}}.$$ The $jk$ cell of the curvature, or the negative Hessian is $$\frac{-\partial^2\ln L(\beta)}{\partial\beta_j\partial\beta_k}=\sum_{r=1}^R \frac{(\sum_{i=1}^{n_r}v_{i^r_j}v_{i^r_k}e^{\beta'v_{i^r}})(\sum_{i=1}^{n_r}e^{\beta'v_{i^r}})- (\sum_{i=1}^{n_r}v_{i^r_k}e^{\beta'v_{i^r}})(\sum_{i=1}^{n_r}v_{i^r_j}e^{\beta'v_{i^r}})}{(\sum_{i=1}^{n_r}e^{\beta'v_{i^r}})^2}.$$ The observed Fisher information, $I(\hat{\beta})$, is the curvature at $\hat{\beta}$. The maximum likelihood estimate $\hat{\beta}$ is consistent and asymptotically normal with covariance $I^{-1}(\hat{\beta})$ \citep{mcfad74}.\\
There are too many potential factors contributing to the outcome of a horse race to believe we have the actual model of outcome probabilities, so we must consider model misspecification. The variance of the score terms is $V(\hat{\beta})=\sum_{r=1}^R \frac{\partial\ln L_r(\hat{\beta})}{\partial \beta}\frac{\partial\ln L_r(\hat{\beta})}{\partial \beta}'$, where $\frac{\partial\ln L_r(\beta)}{\partial \beta}$ is the gradient of the log-likelihood function of the $r^{th}$ race. We can then calculate the sandwich estimate of the covariance matrix of $\hat{\beta}$ as $\Sigma=I(\hat{\beta})^{-1}V(\hat{\beta})I(\hat{\beta})^{-1}$, which is robust to our misspecification, and model our parameters as $\beta\sim N(\hat{\beta},\Sigma)$. Let $\pi^s_{h}=\frac{e^{\mathbb{E}(v_{h})}}{\sum_{i=1}^{n}e^{\mathbb{E}(v_i)}}=\frac{e^{\hat{\beta}'v_{h}}}{\sum_{i=1}^{n}e^{\hat{\beta}'v_{i}}}$, which is the maximum likelihood probability estimate typically used after completing a regression analysis. We denote (\ref{eq:SF}) with $\pi_h=\pi^s_h$ as (S) and call this the standard model, which will be used to compare other models to.
\section{Optimization models considering uncertainty in probability estimates}
We now attempt to move beyond the standard model (S) by considering the uncertainty of our probability estimates using different techniques from stochastic optimization. A natural approach would be to maximize the expected value of our objective, $$\mathbb{E}\sum_{h=1}^n \pi_h(\beta)\log(x_{h}O_h+w-\sum_{i=1}^nx_i),$$ resulting in (\ref{eq:SF}) with probabilities $\mathbb{E}(\pi_h)=\mathbb{E}\frac{e^{\beta'v_{h}}}{\sum_{i=1}^{n}e^{\beta'v_{i}}}$. As random variables our win probabilities, $\pi_{h}=\frac{e^{\beta'v_{h}}}{\sum_{i=1}^{n}e^{\beta'v_{i}}}$, follow a logistic-normal distribution \citep{atch1980} for which $\mathbb{E}(\pi_h)$, to the best of our knowledge, is not representable in a simple form. $\mathbb{E}(\pi_h)$ can be estimated using Monte Carlo integration by generating $N$ random samples of $\beta$ and taking our expected win probabilities as $\pi^{mc}_h=\frac{1}{N}\sum_{i=1}^N\frac{e^{(\beta^i)'v_{h}}}{\sum_{j=1}^{n}e^{(\beta^i)'v_{j}}}$. We denote this formulation as (Emc).\\
We can also find a lower bound using the normal moment generating function, $$\mathbb{E}\left(\frac{1}{\pi_h}\right)=\mathbb{E}\left(\sum_{i=1}^{n}e^{\beta'(v_i-v_h)}\right)=\frac{\sum_{i=1}^{n}e^{\hat{\beta}'v_i+\frac{1}{2}(v_i-v_h)'\Sigma(v_i-v_h)}}{e^{\hat{\beta}'v_h}},$$ and from Jensen's inequality, $\mathbb{E}(\pi_h)\geq \frac{1}{\mathbb{E}(\frac{1}{\pi_h})}$, giving us the lower bound $$\mathbb{E}^{lb}(\pi_h)=\frac{e^{\hat{\beta}'v_h}}{\sum_{i=1}^{n}e^{\hat{\beta}'v_i+\frac{1}{2}(v_i-v_h)'\Sigma(v_i-v_h)}}$$ for each outcome probability. Using these lower bounds gives us a conservative estimate of outcomes. The sum of probabilities $\mathbb{E}^{lb}(\pi_h)$ will in general not equal 1, so when betting on an outcome $h$, this formulation underweights the event of $h$ not occurring. To overcome this we have the following formulation which contains an extra outcome with probability $1-\sum_{i=1}^n \mathbb{E}^{lb}(\pi_i)$ where all money wagered is lost.
\begin{alignat}{6} &\max&&\text{ }\sum_{h=1}^n\mathbb{E}^{lb}(\pi_h)\log(x_{h}O_{h}+w-\sum_{i=1}^nx_i)+\left(1-\sum_{h=1}^n\mathbb{E}^{lb}(\pi_h)\right)\log(w-\sum_{i=1}^nx_i) \label{eq:Elb}\tag{Elb}\\ &\mbox{s.t. }&&\sum_{h=1}^nx_h\leq w \nonumber\\ &&&x_{h}\geq 0\hspace{50 pt}h=1,...,n \nonumber \end{alignat}
As we want to limit overbetting from probability estimation error, we can add solution robustness to probability estimation. The optimization problem is rewritten so that the uncertainty is in the constraints and then we ensure a minimal objective value holds for a given probability through the use of a chance constraint. \begin{alignat}{6} &\max&&\text{ }t\label{eq:RO}\tag{CC}\\ &\mbox{s.t. }&&\mathbb{P}(t\leq\sum_{h=1}^n\pi_h\log(x_{h}O_h+w-\sum_{i=1}^nx_i))\geq 1-\alpha\nonumber\\ &&&\sum_{h=1}^nx_h\leq w \nonumber\\ &&&x_{h}\geq 0\hspace{50 pt}h=1,...,n \nonumber \end{alignat}
where $\alpha<0.5$. When using (S), we are only optimizing over a point estimate of $\pi$. Assuming this estimate differs from the actual value of $\pi$, we could very well be placing a wager with actual expected log wealth lower than our current $\log(w)$. With this chance constraint we can ensure that our solution generates a positive expected return over a high proportion of possible values of $\pi$. Further, we can choose a solution $x$ which will generate the highest return over $(1-\alpha)\%$ of potential values of $\pi$, avoiding large misplaced wagers.\\
In the case where there are only two outcomes, we can achieve the exact solution by solving the following optimization program. \begin{alignat}{6} &\max&&\text{ }t\label{eq:CC2E}\tag{CC2}\\ &\mbox{s.t. }&&t\leq \pi^H_1\log(x_{1}(O_1-1)+w-x_2)+\pi^L_2\log(x_{2}(O_2-1)+w-x_1)\nonumber\\ &&&t\leq \pi^L_1\log(x_{1}(O_1-1)+w-x_2)+\pi^H_2\log(x_{2}(O_2-1)+w-x_1)\nonumber\\ &&&x_1+x_2\leq w \nonumber\\ &&&x_1,x_2\geq 0\nonumber \end{alignat} where \begin{alignat}{6} &\pi^H_1=\frac{e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_1}}{e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_1}+e^{\hat{\beta}'v_2}},&\pi^L_2=\frac{e^{\hat{\beta}'v_2}}{e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_1}+e^{\hat{\beta}'v_2}}\nonumber\\ &\pi^L_1=\frac{e^{\hat{\beta}'v_1}}{e^{\hat{\beta}'v_1}+e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_2}},&\pi^H_2=\frac{e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_2}}{e^{\hat{\beta}'v_1}+e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_2}}\nonumber \end{alignat}
and $\sigma^2=(v_1-v_2)'\Sigma(v_1-v_2)$. Intuitively, our probabilities $\pi_1$ and $\pi_2$ are reweighted depending on which outcome is more favourable, where the first constraint puts more weight on outcome one, which will be tight when outcome two is more favourable, and the second constraint puts more weight on outcome two, for when one is more favourable. A derivation of this program can be found in the appendix in the subsection {\it Derivation of (CC2)}.\\
We now focus on the case of more than two outcomes. The following optimization problem is an approximation of (\ref{eq:RO}), where we have taken $S$ iid samples $\pi^s$ of the outcome probabilities and want to satisfy the chance constraint over this empirical distribution by ensuring the constraint $t\leq\sum_{h=1}^n\pi^s_h\log(x_{h}O_h+w-\sum_{i=1}^nx_i)$ is violated for no more than $S\alpha$ samples. $M$ is chosen sufficiently large so as not to restrict the value of $t$. The convergence of the optimal objective value and solution set of this approximation to (\ref{eq:RO}) in the limit is established in \cite[p. 211]{shap2009}.
\begin{alignat}{6} &\max&&\text{ }t\label{eq:CCN}\tag{CCN}\\ &\mbox{s.t. }&&t\leq \sum_{h=1}^n\pi^s_h\log(x_{h}O_h+w-\sum_{i=1}^nx_i)+z_sM\hspace{50 pt}s=1,...,S\nonumber\\ &&&\sum_{s=1}^S z_s\leq S\alpha\nonumber\\ &&&\sum_{h=1}^nx_h\leq w \nonumber\\ &&&x_{h}\geq 0\hspace{70 pt}h=1,...,n \nonumber\\ &&&z_s\in\{0,1\}\hspace{50 pt}s=1,...,S \nonumber \end{alignat}
This problem is quite challenging, and is only practical for very small sample choices of $S$. We use a simple heuristic to find the $S\alpha$ worst constraints, setting their binary values to $1$, and then proceed to solve the now convex problem with decision variables $x_h$ and $t$. We first solve (\ref{eq:CCN}) with $\alpha=0$, or for $z_s=0$ $\forall s\in S$, sort $t-\sum_{h=1}^n\pi^s_h\log(x_{h}O_h+w-\sum_{i=1}^nx_i)$ in descending order, set $z_s=1$ for the first $S\alpha$ corresponding constraints, and then resolve (\ref{eq:CCN}) with fixed $z_s$.\\
We consider a final model, combining the previous two, where we are maximizing the expectation of log wealth subject to a chance constraint which ensures our solution does not have a negative true expected return with high probability.
\begin{alignat}{6} &\max&&\text{ }\mathbb{E} \sum_{h=1}^n\pi_h\log(x_{h}O_h+w-\sum_{i=1}^nx_i)\label{eq:ECC}\tag{ECC}\\ &\mbox{s.t. }&&\mathbb{P}(\log(w)\leq\sum_{h=1}^n\pi_h\log(x_{h}O_h+w-\sum_{i=1}^nx_i))\geq 1-\alpha\nonumber\\ &&&\sum_{h=1}^nx_h\leq w \nonumber\\ &&&x_{h}\geq 0\hspace{50 pt}h=1,...,n \nonumber \end{alignat}
The implementation uses the objective of (\ref{eq:SF}) with probabilities $\pi^{mc}_h$ and the constraint set of either (\ref{eq:CC2E}) or (\ref{eq:CCN}), with $t$ replaced with $\log(w)$. We label these formulations (ECC2) and (ECCN) respectively.
\section{Empirical model comparison}
\label{sec:ES}
We seek to compare the performance of standard Kelly betting, fractional Kelly betting and Kelly betting considering the uncertainty in probability estimates, using simulated data to allow for accurate testing of the different methodologies. We simulated the distribution of $\beta$ by taking $\hat{\beta}$ as a standard normal random vector of size $m=10$. For each component $\hat{\beta}_i$, a corresponding standard deviation $\sigma_i$ was simulated between $[0,\sigma'_i]$, where $\sigma'_i=\frac{-|\beta_i|}{\Phi^{-1}(0.025)}$. This implies that the p-value of $\beta_i$ is not greater than $0.05$ if the data were the result of a regression analysis. $\Sigma$ was then taken as a diagonal matrix consisting of $\sigma^2$. The true values of $\beta$, $\beta^t$, corresponding to the true outcome probabilities were simulated by taking a random sample from the distribution of $\beta$. We generated an $m \times n$ matrix $F$ of standard uniform random variables representing the $m$ factor values of the $n$ horses. We then calculated the true outcome probabilities as
$$\pi^t_h=\frac{e^{{\beta^t}' F_{h}}}{\sum_{i=1}^ne^{{\beta^t}'F_{i}}}$$
In order to limit the variation of the empirical testing, we compared models not based on their final simulated wealth, but by their expected exponential return over the true probability distribution $\pi^t$. Given our optimal solution $x^*$ using any technique in race $i$, the expected exponential return is calculated as
$$\mathbb{E}\log\left(\frac{w_{i}}{w_{i-1}}\right)=\sum_{h=1}^n\pi^t_h\log\left(\frac{x^*_{h}O_h+w_{i-1}-\sum_{i=1}^nx^*_i}{w_{i-1}}\right)$$
We can then calculate the expected return over the entire sample of size $T$ as
$$\mathbb{E}\log\left(\frac{w_{T}}{w_{0}}\right)=\sum_{i=1}^T\mathbb{E}\log\left(\frac{w_{i}}{w_{i-1}}\right)$$
This should reduce the variance in the result, as our performance statistic does not depend on the outcomes of the races. To further limit variance, we use fixed, identical payout odds for each outcome.\\
Four experiments were conducted, each consisting of 2500 trials, with details in Table \ref{T1}.
\begin{table}[H] \centerline{ \resizebox{0.3\textwidth}{!}{ \renewcommand{\arraystretch}{1} \begin{tabular}{lrr} \textbf{Experiment}&\textbf{n}&$\mathbf{O_h}$\\
\hline (E1) &2&1.1\\ (E2) &2&1.2\\ (E3) &10&2\\ (E4) &30&4\\ \hline \end{tabular}}} \caption{Experiment details} \label{T1} \end{table}
All chance constraints were tested using $\alpha=0.4$, $0.25$, and $0.1$. $\pi^{mc}_h$ was estimated using $1,000,000$ samples when $n=2$, and $2,000,000$ when $n=10$ and $n=30$. The chance constraints were estimated using $1,000$ samples for $n=10$ and $2,000$ samples for $n=30$. All experiments were done on a Windows 10 Home 64-bit, Intel Core i5-7200U 2.5GHz processor with 8 GB of RAM, in Matlab R2017a using {\it fmincon}.\\
Result data is presented in Table \ref{T5} for all experiments. (T) is the best that could be achieved in the experiment, using the true probabilities $\pi^t_h$ in (\ref{eq:SF}). (F) is using a 50\% fractional Kelly strategy of (S). (CCx) stands for (CC2) or (CCN) depending on the experiment, and likewise for (ECCx). The numbers under each experiment name are the total expected return for each model, with the final column being the sum over all experiments. We see in total, (\ref{eq:Elb}), (Emc), and (CCx) and (ECCx) with $\alpha=0.4$ outperformed (\ref{eq:SF}), with (Emc) and (ECCx) with $\alpha=0.4$ outperforming (S) in all experiments, and (ECCx) with $\alpha=0.4$ performing best overall. Though the best result was achieved using a chance constraint, we can see that in general its use as a risk measure against uncertainty in probability estimation is overly aggressive for smaller values of $\alpha$, dampening long term growth.\\
\begin{table}[H] \centerline{ \resizebox{0.65\textwidth}{!}{
\begin{tabular}{llrrrrr}
\bf{Model}&&\bf{(E1)}&\bf{(E2)}&\bf{(E3)}&\bf{(E4)}&\bf{Sum}\\
\hline (T) & &3.051&16.861&4.385&3.166&27.463\\ (S) & &1.583&12.214&2.383&1.954&18.134\\ (F) & &1.210&8.754&1.788&1.515&13.268\\ (Elb) & &1.852&12.179&2.353&1.823&18.206\\ (Emc) & &1.820&12.297&2.390&1.964&18.471\\ (CCx) &$ \alpha=0.40$ &1.786&12.288&2.345&1.834&18.253\\ (CCx) &$ \alpha=0.25$ &1.694&11.371&1.964&1.426&16.454\\ (CCx) &$ \alpha=0.10$ &1.080&9.027&1.206&0.846&12.158\\ (ECCx)&$ \alpha=0.40$ &1.825&12.288&2.390&1.980&18.484\\ (ECCx)&$ \alpha=0.25$ &1.834&11.983&2.319&1.728&17.863\\ (ECCx)&$ \alpha=0.10$ &1.433&10.355&1.647&1.164&14.599\\ \hline \end{tabular}}} \caption{Expected total returns} \label{T5} \end{table}
\section{Conclusion and future research} \label{s:ME}
We have investigated different stochastic optimization models for Kelly style betting on mutually exclusive outcomes considering probability estimation uncertainty stemming from multinomial logistic regression. An empirical study using simulated data was conducted to compare performance. The large difference in long term growth when using the true probability outcomes versus relying on estimates in our experiments displays the significance of probability estimation error in decision making, and the challenges for those attempting to maximize return in speculative markets. Improvements in long term growth have been found, first by considering the uncertainty in outcome probabilities when calculating the expected log wealth, and with a mild use of a chance constraint, which will likely need to be calibrated in each application to find the proper balance of preventing losses from uncertainty without overly dampening the potential to capture positive returns.\\
The presentation of the material in this paper has focused on the application of betting on horse racing, but the ideas are applicable to a general investment setting. New research adapting the methods presented to more general return settings beyond multinomial logistic regression uncertainty and mutually exclusive events would be interesting, with applications such as investing in a portfolio of stocks following geometric Brownian motions considering parameter uncertainty.
\section*{Appendix}
\subsection*{Derivation of (CC2)}
We need to find an equivalent deterministic constraint for the chance constraint $\mathbb{P}(t\leq \pi_1\log(x_{1}(O_1-1)+w-x_2) +\pi_2\log(x_{2}(O_2-1)+w-x_1))\geq 1-\alpha$. For simplicity, let $W_1=x_{1}(O_1-1)+w-x_2$ and $W_2=x_{2}(O_2-1)+w-x_1$, then the chance constraint can be written as \begin{alignat}{6} \mathbb{P}(t\leq \pi_1\log(W_1)+(1-\pi_1)\log(W_2))\geq 1-\alpha.\label{eq:CC2} \end{alignat}
Given that $\pi_1\in(0,1)$, the only time $t\geq\max(\log(W_1),\log(W_2))$ is feasible in (\ref{eq:CC2}) and (\ref{eq:CC2E}) is when $t=\log(W_1)=\log(W_2)$, with all other instances being infeasible in both. The case where $t\leq\min(\log(W_1),\log(W_2))$ is always feasible in (\ref{eq:CC2}) and (\ref{eq:CC2E}). We now consider the case where $\log(W_2)<t<\log(W_1)$. Rearranging and taking $\pi_1=\frac{e^{\beta'v_1}}{e^{\beta'v_1}+e^{\beta'v_2}}$, the chance constraint equals \begin{alignat}{6} &\mathbb{P}(t-\log(W_2)\leq \pi_1(\log(W_1)-\log(W_2)))\geq 1-\alpha\nonumber\\ &\mathbb{P}(e^{\beta'(v_2-v_1)}\leq \frac{\log(W_1)-\log(W_2)}{t-\log(W_2)}-1)\geq 1-\alpha\nonumber\\ &\mathbb{P}(\beta'(v_2-v_1)\leq \log\left(\frac{\log(W_1)-t}{t-\log(W_2)}\right))\geq 1-\alpha\nonumber\\ &\log\left(\frac{\log(W_1)-t}{t-\log(W_2)}\right)\geq \Phi^{-1}(1-\alpha)\sigma+\mu\nonumber \end{alignat}
where $\mu=\hat{\beta}'(v_2-v_1)$ and $\sigma^2=(v_1-v_2)'\Sigma(v_1-v_2)$. Rearranging, \begin{alignat}{6} &\frac{1}{1+e^{\Phi^{-1}(1-\alpha)\sigma+\mu}}\log(W_1)+\frac{e^{\Phi^{-1}(1-\alpha)\sigma+\mu}}{1+e^{\Phi^{-1}(1-\alpha)\sigma+\mu}}\log(W_2)\geq t\nonumber\\ &\frac{e^{\hat{\beta}'v_1}}{e^{\hat{\beta}'v_1}+e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_2}}\log(W_1)+\frac{e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_2}}{e^{\hat{\beta}'v_1}+e^{\Phi^{-1}(1-\alpha)\sigma+\hat{\beta}'v_2}}\log(W_2)\geq t\nonumber \end{alignat}
which is the second constraint in (\ref{eq:CC2E}). In the case where $\log(W_1)<t<\log(W_2)$, we will get the same result, but with the probability shifted towards outcome 1, or the first constraint in (\ref{eq:CC2E}). Finally, we must show that the correct constraint will be active. When $\log(W_2)<t<\log(W_1)$,
\begin{alignat}{6} &(\pi^H_1-\pi^L_1)\log(W_2)\leq (\pi^H_1-\pi^L_1)\log(W_1)\nonumber\\ &\pi^L_1(\log(W_1)-\log(W_2))\leq \pi^H_1(\log(W_1)-\log(W_2))\nonumber\\ &\pi^L_1\log(W_1)-(1-\pi^H_2)\log(W_2)\leq \pi^H_1\log(W_1)-(1-\pi^L_2)\log(W_2)\nonumber\\ &\pi^L_1\log(W_1)+\pi^H_2\log(W_2)\leq \pi^H_1\log(W_1)+\pi^L_2\log(W_2),\nonumber \end{alignat}
and so the right hand side of the second constraint is not greater than the right hand side of the first constraint in (\ref{eq:CC2E}), implying the second constraint will be active. We can similarly show in the case when $\log(W_1)<t<\log(W_2)$ the opposite holds, that $\pi^H_1\log(W_1)+\pi^L_2\log(W_2)\leq \pi^L_1\log(W_1)+\pi^H_2\log(W_2)$, implying the first constraint will be active as desired.
\end{document} | arXiv | {
"id": "1701.02814.tex",
"language_detection_score": 0.7530345320701599,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{Modeling Single Picker Routing Problems in Classical and Modern Warehouses}
\category{Working Paper DPO-2018-11 (version 1, 04.11.2018)}
\authors{\textbf{Dominik Goeke and Michael Schneider}\\
goeke$|$schneider@dpo.rwth-aachen.de\\ Deutsche Post Chair\,--\,Optimization of Distribution Networks\\ RWTH Aachen University\\[2ex]}
\abstract{The standard single picker routing problem (SPRP) seeks the cost-minimal tour to collect a set of given articles in a rectangular single-block warehouse with parallel picking aisles and a dedicated storage policy, i.e, each SKU is only available from one storage location in the warehouse. We present a compact formulation that forgoes classical subtour elimination constraints by directly exploiting two of the properties of an optimal picking tour used in the dynamic programming algorithm of Ratliff and Rosenthal~(1983). We extend the formulation to three important settings prevalent in modern e-commerce warehouses: scattered storage, decoupling of picker and cart, and multiple end depots. In numerical studies, our formulation outperforms existing standard SPRP formulations from the literature and proves able to solve large instances within short runtimes. Realistically sized instances of the three problem extensions can also be solved with low computational effort. We find that decoupling of picker and cart can lead to substantial cost savings depending on the speed and capacity of the picker when traveling alone, whereas additional end depots have rather limited benefits in a single-block warehouse.\\[1ex] \textbf{Keywords:} \textit{warehouse management, picker routing, scattered storage, decoupling, multiple end depots}}
\logo{\includegraphics[scale=0.75]{logo}} \titlepage
\section{Introduction} \label{sec:intro} Order picking is a central and labor-intensive task in warehouses. The aim of {single picker routing problems} (SPRPs) is to determine a picker tour of minimum cost---starting from and ending at a depot---to collect all stock keeping units (SKUs) contained in a pick list from their storage locations in the warehouse. The cost of a tour is typically measured as distance or time. Single-block SPRPs are defined on a rectangular warehouse, in which the SKUs are stored in racks along both sides of multiple parallel picking aisles that are enclosed by a storage-free cross aisle at the top and at the bottom (see Figure~\ref{fig:warehouse}). Each of the picking aisles contains a number of picking positions, and multiple different SKUs can be located at the same picking position. In single-block SPRPs, we do not distinguish between a picking request from the rack on the left side, on the right side, or from both sides of a picking position. All these cases are treated equally, and only the travel cost to reach the picking positions in the aisles is of relevance. Therefore, a pick list translates into a set of required picking positions that the picker needs to visit.
The single-block SPRP with dedicated storage, in which each SKU is only available from one picking position in the warehouse, is the most well-studied SPRP variant, and is denoted as standard SPRP in the following. In a seminal work, \citet{Ratliff:1983} introduce a dynamic programming (DP) algorithm to solve the standard SPRP to optimality with a runtime linear in the number of picking aisles. \cite{Roodbergen:2001} extend the DP to two-block warehouses, and \citet{Pansart:2018} present a DP that is applicable to warehouses with an arbitrary number of blocks, however, the runtime complexity is exponential in the number of cross aisles.
SPRPs can also be tackled using mathematical formulations that are solved with the help of optimization software. To address the standard SPRP, \citet{Scholz:2016} reduce the number of vertices that have to be considered in each picking aisle based on the fact that the largest gap in an aisle is never traversed if the aisle is entered from top and bottom, originally discussed in \citet{Ratliff:1983}. On the resulting graph, they solve a single-commodity flow formulation of a traveling salesman problem (TSP) variant that contains optional vertices indicating the direction of travel at the entry and exit of each picking aisle. In this way, they obtain a formulation whose size is linear in the number of picking aisles. This formulation is compared to three TSP formulations defined on a complete graph spanning the SKUs to be picked and one Steiner TSP formulation that is adapted to single-block warehouses. The authors demonstrate on a large set of test instances that their formulation is superior with regard to the size of the instances that can be solved and the runtimes for solving the instances. Their formulation can also be extended to multi-block warehouses, but they only present results for the standard SPRP. \citet{Pansart:2018} present a model of the SPRP in multi-block warehouses that is based on a single-commodity flow formulation of the Steiner TSP. The authors use a procedure similar to the one described in \citet{Scholz:2016} to reduce the number of vertices, and the number of arcs is decreased by solving the minimum 1-spanner problem using a commercial solver. In addition, valid inequalities exploiting the special structure of the warehouse are added, and the solver is provided with upper bounds that are computed using a freely available version of the heuristic of \citet{Lin:1973}. On single-block warehouse instances, their formulation is clearly superior to the formulation of \citet{Scholz:2016}.
We propose a compact formulation of the standard SPRP that directly exploits two properties of an optimal tour used in the algorithm of \citet{Ratliff:1983}: (i)~two consecutive picking aisles can only be connected using four possible configurations, and (ii)~to prevent the generation of isolated subtours, it is sufficient to ensure that the tour is always connected and the connections at the top and bottom of each picking aisle are of even degree. Thus, no classical subtour elimination constraints are needed. Although we do not rely on preprocessing or the addition of cuts to speed up the solution of our model, our formulation vastly outperforms the one of \citet{Scholz:2016} and is approximately six times faster than the one of \citet{Pansart:2018} on a set of benchmark instances with up to 30 picking aisles and 45 required picking positions using a comparable computer. Our approach shows a convincing scaling behavior and is able to solve instances with 1000 aisles, 1000 available picking positions per aisle, and 1000 required picking positions in approximately two minutes.
In addition, our model can be extended to cope with three important settings relevant to modern e-commerce warehouses: \begin{itemize} \item \textbf{Scattered storage:} In warehouses with scattered storage, any SKU can be available from more than one picking position. This setting plays a major role in modern e-commerce warehouses of companies like Amazon or Zalando and is receiving growing attention from the scientific community \citep[see, e.g.,][]{Boysen:2018, Weidinger:2018b}. \citet{Daniels:1998} propose a TSP formulation for the SPRP with scattered storage for arbitrary warehouse layouts and compare several heuristics. \citet{Weidinger:2018a} shows that the single-block SPRP with scattered storage is NP-hard. He proposes a heuristic based on the decomposition of the problem into a selection and a routing problem. As comparison method, the formulation of \citet{Daniels:1998} using \citet*{Miller:1960} subtour elimination constraints is realized with Gurobi. Given a time limit of three hours, the formulation is able to solve most of the single-block warehouse instances generated by the authors with three picking aisles, 30 picking positions per aisle, and pick lists with up to seven requested SKUs. In contrast, the extension of our formulation to the single-block SPRP with scattered storage solves large instances with up to 100 picking aisles, 180 picking positions per aisle, and pick lists containing up to 30 SKUs within short runtimes of at most three minutes. \item \textbf{Decoupling of picker and cart:} In manual order picking, items are typically retrieved from the warehouse by a picker pushing a cart, so that multiple items can be picked during one tour. To speed up the order picking, Zalando, a large fashion online retailer, allows pickers to park the cart during the tour, retrieve a few items traveling on their own, then return to the cart and continue their tour (comparable to the picking behavior of people in supermarkets). 
The company also incorporates this option when planning picker tours \citep{Zalando:2014,Nvidia:2015}; however, no mathematical model or algorithm has yet been published. We extend our formulation to the single-block SPRP with decoupling of picker and cart and investigate the potential time savings of this approach depending on the carrying capacity and the speed of the picker without cart. \item \textbf{Multiple end depots:} To reduce unnecessary trips back to a central depot, warehouse managers can use multiple end depots at which collected items can be dropped off, e.g., at dedicated positions of a conveyor belt. \Citet{DeKoster:1998} consider the single-block SPRP with decentralized depositing, in which they assume that it is possible to drop items anywhere along the upper or lower cross aisle. \citet{Scholz:2016} show how to extend their formulation to this problem variant, but they only present results for the single depot case. We extend our formulation to single-block SPRP with multiple end depots, and we investigate the potential cost savings depending on the number of available end depots. \end{itemize}
\noindent Although using our formulation in a commercial solver still cannot match the performance of a dedicated implementation of the DP approach of \citet{Ratliff:1983} in a higher programming language, the former approach has the following advantages: \begin{itemize} \item The formulation can easily be implemented and used by anyone familiar with a mathematical programming solver. No knowledge of a higher programming language is required, and no experience in algorithmic programming to realize a DP is necessary. This point is certainly relevant in practice, where algorithmic programming skills are generally far rarer than at universities and other scientific organizations. \item The formulation is extendable to handle three important settings in modern e-commerce warehouses---scattered storage, decoupling of picker and cart, and multiple end depots---and seems likely to be able to incorporate other real-world-inspired constraints. \item The formulation can be used in approaches for integrated problems, in which the higher-level decision depends on the outcome of the SPRP, e.g., order batching \citep{Gademann:2005, Valle:2017} or storage assignment \citep{Petersen:1999}. For example, the integrated order batching and picker routing problem could be solved by i)~column generation, where our model can be modified to solve the pricing subproblem, i.e., the orders are associated with current prices, and the picker can only pick orders such that the number of collected items does not exceed the maximum batch size, or ii)~by a compact formulation that extends our model by an index for each batch (up to the maximum number of batches), a limited capacity for each batch, and a set covering constraint. The integrated storage assignment and picker routing problem could be studied in a scattered-storage setting using our model. \end{itemize} This paper is organized as follows. We introduce our compact formulation for the standard SPRP in Section~\ref{sec:model}. 
The following sections present the extensions of the model to the setting with scattered storage (Section~\ref{sec:mixshelves}), decoupling of picker and cart (Section~\ref{sec:cart}), and multiple end depots (Section~\ref{sec:openDepot}). Section~\ref{sec:results} presents the numerical studies to investigate the performance of our formulation and the benefits of the considered extensions. Section~\ref{sec:conclusion} concludes the paper.
\section{Mathematical Formulation of the Standard SPRP} \label{sec:model} To solve an instance of the standard SPRP, the warehouse can be restricted to its relevant part, i.e., all aisles that lie to the left of both the depot and the leftmost aisle in which a SKU needs to be picked, and, analogously, all aisles that lie to the right of both the depot and the rightmost aisle in which a SKU needs to be picked, can be removed. The resulting part of the warehouse is represented as a set $\mathcal{J}=\{ 0, \ldots, \numberOfAisles-1 \}$ indexing $m$ aisles numbered from left to right. Each aisle $j \in \mathcal{J}$ has $n$ available picking positions, numbered from top to bottom, and is associated with a set of required picking positions $\setOfArticlesInAisle{j} \subseteq \{0,\ldots, n-1\}$ that the picker needs to visit to complete the pick list. The depot can be located at the entries to the picking aisles in the top or bottom cross aisle. The picking aisle above\,/\,below which the depot is located is denoted as aisle $l$. The parameter $\depotTop{}$ is set to 1 if the depot is located in the top cross aisle and to zero otherwise.
The example in Figure~\ref{fig:warehouse} illustrates the introduced concepts: There are eight picking aisles with $n=10$ available picking positions per aisle. SKUs need to be picked from nine required picking positions that are marked black. We only have to consider the $m = 6$ aisles containing required picking positions, i.e., $\mathcal{J}=\{0, \ldots, 5\}$. The required picking positions in the aisles are given by $\setOfArticlesInAisle{0}=\{1\}$, $\setOfArticlesInAisle{1}=\{9\}$, $\setOfArticlesInAisle{2}=\{2,3,7\}$, $\setOfArticlesInAisle{3}=\{3\}$, $\setOfArticlesInAisle{4}=\{5,6\}$, and $\setOfArticlesInAisle{5}=\{7\}$. The depot is located at the bottom of aisle 3, i.e., $l=3$ and $\depotTop{} = 0$.
\begin{figure}
\caption{Optimal solution of an example instance of the standard SPRP.}
\label{fig:warehouse}
\end{figure}
As described by \cite{Ratliff:1983}, there exist four feasible configurations to connect aisle $j$ and aisle $j+1$ using the cross aisles located at the top and bottom of the warehouse. We represent these configurations by means of the following binary decision variables: \begin{itemize} \item $\xTwoTop{j}$ equals 1 if the top cross aisle is traversed twice (back and forth), 0 otherwise, \item $\xTwoBottom{j}$ equals 1 if the bottom cross aisle is traversed twice (back and forth), 0 otherwise, \item $\xTopBottom{j}$ equals 1 if the bottom and the top cross aisle are both traversed once, 0 otherwise, and \item $\xTwoTopBottom{j}$ equals 1 if both the top and bottom cross aisle are traversed twice, 0 otherwise. \end{itemize} With regard to the traversal of picking aisles, the binary decision variables $\xUp{j}$ are used to indicate that aisle $j$ is completely traversed once (in arbitrary direction). If the costs for traversing the picking aisles are non-uniform, it may be beneficial to traverse an aisle $j$ twice: in this case, $\xTwoUp{j}$ equals one. For uniform traversal costs, there always exists an optimal solution in which no aisle is traversed twice \citep[see][]{Ratliff:1983}. Furthermore, the binary decision variables $\xPickTop{j}{i}$ ($\xPickBottom{j}{i}$) define that picking position $i$---and all picking positions that are located between the top (bottom) and picking position $i$---are accessed via a vertical\xspace branch-and-pick tour from the top (bottom) of aisle $j$, i.e., the picking aisle is entered and left from the same cross aisle. For each of the described decision variables, we precompute cost coefficients $\cost{}$ that correspond to the additional travel cost of the picker if the respective decision variable equals 1, e.g., $\xTwoTopBottom{j}$ has a coefficient $\costTwoTopBottom{j}$ that corresponds to four times the travel cost between aisle $j$ and $j+1$. 
Figure~\ref{fig:warehouse} illustrates the meaning of the decision variables: here, $\xPickTop{0}{1}, \xTwoTop{0}, \xPickBottom{1}{9}, \xTwoTopBottom{1}, \xUp{2}, \xTopBottom{2},\xPickTop{3}{2}, \xTopBottom{3}, \xUp{4},\xTwoBottom{4}, \xPickBottom{5}{7}$ are equal to $1$.
According to \citet{Ratliff:1983}, the generation of isolated subtours is prevented if the degree of the connections at the top and bottom of each picking aisle is even, and the picking tour is connected. Using the observation that an even degree divided by two is an integer, we introduce for each picking aisle $j$ an integer variable $\degreeEvenTop{j}$ ($\degreeEvenBottom{j}$), whose value corresponds to the degree of the connections at the top (at the bottom) of aisle $j$ divided by two. For example, $\degreeEvenTop{2} = 2$ and $\degreeEvenBottom{3} = 1$ in Figure~\ref{fig:warehouse}. To guarantee that the picking tour is connected, we introduce an additional binary variable $\xComponent{j}$ for each picking aisle $j$, which is equal to 0 if the picking tour from the leftmost relevant aisle in the warehouse up to aisle $j$ is connected, and equal to 1 if this picking tour consists of two components. Note that two components emerge whenever (i)~we have configuration $\xTwoTopBottom{j}$ for the leftmost aisle, or (ii)~we switch from configuration $\xTwoTop{j-1}$ or $\xTwoBottom{j-1}$ to configuration $\xTwoTopBottom{j}$ without connecting the top and bottom cross aisle.
Using the above definitions, we can formulate the standard SPRP. To present a concise and comprehensible model (by avoiding the repetition of basically identical constraints for different index sets), we take the following two liberties with regard to notation: (i)~we sometimes use conditional statements for defining the relevant index set or to define the validity of a certain set of constraints, and (ii)~ we use the notation $\underset{\mathclap{\mathit{range}}}{[}\ldots]$ to define that a certain part of an expression is only relevant for a given range of the index (and otherwise disappears).
\begin{align} \text{min~}&\sum_{j \in \mathcal{J}} \costTwoBottom{j} \xTwoBottom{j} + \costTwoTop{j} \xTwoTop{j} + \costTopBottom{j} \xTopBottom{j} + \costTwoTopBottom{j} \xTwoTopBottom{j} + \costUp{j} \xUp{j} + \costTwoUp{j} \xTwoUp{j}+ \sum_{j \in \mathcal{J}}\sum_{i \in \setOfArticlesInAisle{j}} \left(\costPickBottom{j}{i} \xPickBottom{j}{i} +\costPickTop{j}{i} \xPickTop{j}{i}\right) &~~~~~~~~~~~~~~~~~~~~~~~~\label{F0} \end{align} {\begin{align}
\text{s.t.~}&\xTwoBottom{j} + \xTwoTop{j} + \xTopBottom{j} + \xTwoTopBottom{j} = 1 & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{F1}\\
&\xUp{j} + \xTwoUp{j} + \sum_{i' \in \setOfArticlesInAisle{j} :i'\geqi} \xPickTop{j}{i'} + \sum_{i' \in \setOfArticlesInAisle{j} :i'\leqi}\xPickBottom{j}{i'} \geq 1 & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{F2}\\
&\underset{\mathclap{j > 0}}{[}\xTwoBottom{j-1} + \xTopBottom{j-1} + \xTwoTopBottom{j-1}]+ \xTwoBottom{j} + \xTopBottom{j} + \xTwoTopBottom{j} \geq \xPickBottom{j}{i} & \begin{aligned}[c] \mathit{if}(\depotTop{} = 1)\,\{j \in \mathcal{J}\}\,\\\mathit{else}\,\{j \in \mathcal{J} \setminus \{l\} \}, i \in \setOfArticlesInAisle{j} \end{aligned} \label{F3}\\[15pt] & \underset{\mathclap{j > 0}}{[} \xTwoTop{j-1} + \xTopBottom{j-1} + \xTwoTopBottom{j-1}] + \xTwoTop{j} + \xTopBottom{j} + \xTwoTopBottom{j} \geq \xPickTop{j}{i} & \begin{aligned}[c] \mathit{if}(\depotTop{} = 0)\,\{j \in \mathcal{J}\}\,\\\mathit{else}\,\{j \in \mathcal{J} \setminus \{l\} \}, i \in \setOfArticlesInAisle{j} \end{aligned} \label{F4}\\[15pt]
&\xTwoTop{j-1} + \xTwoBottom{j} \leq \xTwoUp{j}+1 & j \in \mathcal{J} \setminus \{0\}\label{F5}\\
&\xTwoBottom{j-1} + \xTwoTop{j} \leq \xTwoUp{j}+1 & j \in \mathcal{J} \setminus \{0\}\label{F6}\\
&2 \xTwoUp{l} + \xUp{l} +\underset{\mathclap{l > 0}}{[}\xTwoTop{l-1} +\xTwoTopBottom{l-1} ] + \xTwoTop{l} +\xTwoTopBottom{l} \geq \underset{\mathclap{l > 0}}{[}\xTwoBottom{l-1}] + \xTwoBottom{l}& \mathit{if}(\depotTop{} = 1) \label{F7}\\
&2 \xTwoUp{l} + \xUp{l} +\underset{\mathclap{l > 0}}{[}\xTwoBottom{l-1} + \xTwoTopBottom{l-1}] + \xTwoBottom{l} +\xTwoTopBottom{l} \geq \underset{\mathclap{l > 0}}{[}\xTwoTop{l-1}] + \xTwoTop{l}& \mathit{if}(\depotTop{}= 0)\label{F8}\\
&\underset{\mathclap{j > 0}}{[}\xTopBottom{j-1} + 2 \xTwoTopBottom{j-1} + 2 \xTwoTop{j-1} ] + \xTopBottom{j} + 2 \xTwoTopBottom{j} + 2 \xTwoTop{j} + \xUp{j}+ 2 \xTwoUp{j} = 2 \degreeEvenTop{j} & j \in \mathcal{J} \label{E1}\\
&\underset{\mathclap{j > 0}}{[}\xTopBottom{j-1} + 2 \xTwoTopBottom{j-1} + 2 \xTwoBottom{j-1} ] + \xTopBottom{j} + 2 \xTwoTopBottom{j} + 2 \xTwoBottom{j} + \xUp{j}+ 2 \xTwoUp{j} = 2 \degreeEvenBottom{j} & j \in \mathcal{J} \label{E2}\\
&\xTwoTopBottom{j} + \xTwoBottom{j-1} + \xTwoTop{j-1} - \xTwoUp{j} \leq \xComponent{j} +1 & j \in \mathcal{J} \setminus \{0\}\label{T1}\\
&\xTwoTopBottom{j}+\underset{\mathclap{j > 0}}{[}-\xTwoTopBottom{j-1}-\xTwoTop{j-1} -\xTwoBottom{j-1}] - \xTwoUp{j} - \xUp{j} \leq \xComponent{j} & j \in \mathcal{J} \label{T3}\\
&\xComponent{j-1} - \xUp{j}- \xTwoUp{j} \leq \xComponent{j} & j \in \mathcal{J} \setminus \{0\}\label{T4}\\
&\xComponent{j} \leq \xTwoTopBottom{j} & j \in \mathcal{J} \label{T5}\\
&\xTopBottom{j}, \xTwoTop{j}, \xTwoBottom{j}, \xTwoTopBottom{j}, \xComponent{j} \in \{0,1\} & j \in \mathcal{J} \setminus \{\numberOfAisles-1\} \label{B1}\\
&\xUp{j}, \xTwoUp{j} \in \{0,1\} & j \in \mathcal{J} \label{B2}\\
& \xPickTop{j}{i}, \xPickBottom{j}{i}\in \{0,1\} & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{B3}\\
& \degreeEvenTop{j},\degreeEvenBottom{j} \in \mathbb{N}_{0} & j \in \mathcal{J} \label{B4}\\ & \xTopBottom{\numberOfAisles-1}, \xTwoTop{\numberOfAisles-1}, \xTwoBottom{\numberOfAisles-1}, \xTwoTopBottom{\numberOfAisles-1}, \xComponent{\numberOfAisles-1} = 0 & \label{B5} \end{align}} The objective~\eqref{F0} is to minimize the total costs of the picker tour. Constraints~\eqref{F1} guarantee that the relevant part of the warehouse is visited by the picker using one of the four cross aisle configurations. Constraints~\eqref{F2} ensure that the picker visits all required picking positions. Constraints~\eqref{F3} and \eqref{F4} guarantee that a vertical\xspace branch-and-pick tour from the bottom (top) cross aisle into aisle $j$ can only take place if $j$ is connected to the previous or successive aisle with a configuration that uses the bottom (top) cross aisle. The squared brackets exclude the terms involving the preceding aisle when the constraints for the first picking aisle are determined. Constraints~\eqref{F5} and \eqref{F6} guarantee that switches between top and bottom cross aisle are connected in a feasible manner. Constraints~\eqref{F7} and \eqref{F8} ensure that the depot is included in the tour. For example, if the depot is located at the top $(\depotTop{} = 1)$ and the aisle containing the depot is connected via $\xTwoBottom{l} =1$ and $\xTwoBottom{l-1} =1$, the depot must be included in the tour by setting $\xTwoUp{l} =1$. Instead, if $\xTwoBottom{l} =1$ and $\xTwoBottom{l-1} =0$, the depot must be included by setting $\xTwoUp{l} =1$, $\xTwoTopBottom{l-1} =1$, or $\xTwoTop{l-1}=1$. Constraints~\eqref{E1} and \eqref{E2} establish that the degrees of all connections at the top and also at the bottom of each picking aisle must be even, i.e., every position must be left as often as it is entered. 
Constraints~\eqref{T1} set the number of components to two (i.e., $\xComponent{j}=1$) if there is a transition from configurations $\xTwoBottom{j-1} =1$ or $\xTwoTop{j-1} =1$ to $\xTwoTopBottom{j} =1$ without directly connecting top and bottom by $\xTwoUp{j} =1$. Constraints~\eqref{T3} set the number of components to two, if top and bottom are not connected by a traversal of the picking aisle, and the part of the warehouse to the left is not visited. Constraints~\eqref{T4} propagate the number of components. Constraints~(\ref{T5}) ensure that configuration $\xTwoTopBottom{j}$ is used as long as there are two components. Finally, constraints~\eqref{B1}--\eqref{B5} define the decision variables.
Note that the model could be further improved by substituting for every aisle $j$ the variables $\xPickBottom{j}{i}, \xPickTop{j}{i}, i \in \setOfArticlesInAisle{j}$ with three new variables
$\xPickBottom{}{j}, \xPickTop{}{j},\xPickTopBottom{j}$ that represent the three vertical\xspace branch-and-pick tours that can alternatively be part of an optimal tour, i.e., from the bottom cross aisle to the topmost requested SKU, from the top cross aisle to the bottommost requested SKU, and from top cross aisle and bottom cross aisle to the two neighboring requested SKUs with the largest distance between them. We refrained from implementing this improvement to keep a more general formulation as a basis for the extensions presented in the following sections.
\section{The Single-Block SPRP with Scattered Storage} \label{sec:mixshelves} We extend formulation~\eqref{F0}--\eqref{B5} to the single-block SPRP with scattered storage, i.e., now, any SKU can be available from multiple picking positions. Figure~\ref{fig:warehouse2} shows the optimal solution of an example instance of this problem variant. We assume that multiple items of each individual SKU may be contained in the pick list and that the supply of items of a SKU available at a given picking position is limited. Set $\mathcal{H}$ contains all SKUs that need to be picked, and $\demand{h}$ denotes the number of items of SKU $h \in \mathcal{H}$ that are requested. Set $\setOfCellsInAisleForArticle{j}{h}$ contains all picking positions from which SKU $h$ is available in aisle $j$, and $\capacityTwo{j}{i}{h}$ denotes the number of items of SKU $h$ that are available in aisle $j$ at position $i$. Set $\setOfArticlesInAisle{j}$ is redefined to contain all picking positions in aisle $j$ from which SKUs present in the pick list are available: several positions storing the same SKU may be contained, and not all positions have to be visited. To indicate whether picking position $i$ in aisle ${j}$ is visited, we introduce additional binary variables $\xPick{j}{i}$. In the example in Figure~\ref{fig:warehouse2}, we assume $\mathcal{H} = \{a,b,c,d,e,f,g,h,i\}$, $\capacityTwo{j}{i}{h} = 1,h \in \mathcal{H}, j \in \mathcal{J}, i \in \setOfArticlesInAisle{j}$, and $b_h = 1,h \in \mathcal{H}$. The picking tour is given by $\xPickBottom{1}{7}, \xTwoBottom{1}, \xTwoBottom{2}, \xUp{3}, \xTopBottom{3}, \xPickBottom{4}{9}, \xTopBottom{4},\xUp{5}$ equal to $1$, and, e.g., $\xPick{3}{8} = 1$ indicates that the requested item of SKU 'a' is picked in aisle 3 at position 8. \begin{figure}
\caption{Optimal solution of an example instance of the single-block SPRP with scattered storage. Picking positions from which a requested SKU is picked are marked in black.}
\label{fig:warehouse2}
\end{figure}
To model the decision where to pick the requested SKUs, we replace constraints~\eqref{F2} by constraints~\eqref{F2c}--\eqref{F2d}: \begin{align} \tiny &\sum_{j \in \mathcal{J}} \sum_{i \in \setOfCellsInAisleForArticle{j}{h}} \capacityTwo{j}{i}{h} \xPick{j}{i} \geq \demand{h} & h \in \mathcal{H} \label{F2c} \\ &\xUp{j} + \xTwoUp{j} + \sum_{i' \in \setOfArticlesInAisle{j} :i'\geqi} \xPickTop{j}{i'} + \sum_{i' \in \setOfArticlesInAisle{j} :i'\leqi}\xPickBottom{j}{i'} \geq \xPick{j}{i}\ & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{F2b} \\ &\xPick{j}{i} \in \{0,1\} & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{F2d} \end{align} Constraints~\eqref{F2c} ensure that the requested number of items of each SKU is picked from the picking positions at which the SKU is available. Constraints~\eqref{F2b} guarantee that the selected positions are visited by the picking tour.
Because not all picking positions storing requested SKUs have to be visited in the case of scattered storage, it is not possible to define the relevant part of the warehouse by means of {these} picking positions and the location of the depot like in the standard SPRP. Instead, we require additional binary variables $\xLast{j}$ that indicate whether aisle $j$ is reached by the picker or not, i.e., in Figure~\ref{fig:warehouse2}, $\xLast{1}, ..., \xLast{5}$ are equal to $1$. We replace constraints~\eqref{F1} by the following constraints: \begin{align} \tiny & \xLast{j} \geq \xPick{j}{i} & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{F1e} \\ &\xLast{l} = 1 & \label{F1fb} \\ &\xTwoBottom{j} + \xTwoTop{j} + \xTopBottom{j} + \xTwoTopBottom{j} = \xLast{j+1} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} : j \geq l \label{F1b} \\ &\xTwoBottom{j} + \xTwoTop{j} + \xTopBottom{j} + \xTwoTopBottom{j} = \xLast{j} & j \in \mathcal{J} : j < l \label{F1bb} \\ & \xLast{j} \geq \xLast{j+1} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} : j \geq l \label{F1d} \\ & \xLast{j} \leq \xLast{j+1} & j \in \mathcal{J}: j < l \label{F1db} \\ &\xLast{j} \in \{0,1\} & j \in \mathcal{J} \label{F1f} \end{align} Constraints~\eqref{F1e} and \eqref{F1fb} guarantee that all aisles containing the selected picking positions of the requested SKUs and the aisle containing the depot are reached. Constraints~\eqref{F1b} define the configurations to connect aisles that allow reaching a certain aisle located to the right of the depot, and \eqref{F1bb} does the same for the aisles to the left of the depot. Constraints~\eqref{F1d} and \eqref{F1db} describe how to propagate the $\xLast{}$ variables.
\section{The Single-Block SPRP with Decoupling} \label{sec:cart} In this section, we extend our model to cover the possibility of the picker to park the cart and continue the tour for a certain period without the cart, then returning to the cart. Figure~\ref{fig:warehouse3} depicts an example of an optimal solution for the resulting single-block SPRP with decoupling. We make the following modeling assumptions:
\begin{figure}
\caption{Optimal solution of an example instance of the single-block SPRP with decoupling. Only one SKU is stored at each picking position, and only one item is requested of each SKU. The picker capacity is two items, and the speed of the picker without cart is twice the speed with cart. The tours that the picker does without cart are indicated by dotted edges.}
\label{fig:warehouse3}
\end{figure}
\begin{itemize} \item The speed of the picker without cart differs from the speed when pushing the cart. The cost coefficients $\cost{}$ now represent the travel times of the picker when pushing the cart, the coefficients $\cost{}^p$ the times of the picker when traveling alone. \item Like in the previous models, we assume that the pick list is generated such that the capacity of the picking cart is sufficient to carry all items. However, the carrying capacity of the picker alone is limited to $C$ items. We return to a warehouse with dedicated storage, i.e., each SKU is only stored at one picking position, and $\demandTwo{j}{i}{h}$ denotes the number of requested items of SKU $h$ that have to be picked from aisle $j$ at position $i$. \item If a picking aisle is completely traversed, the picker pushes the cart, and no decoupling takes place. \item In horizontal\xspace branch-and-pick tours, the picker alone travels along a cross aisle and may visit one or more picking aisles to retrieve SKUs. In Figure~\ref{fig:warehouse3}, one horizontal\xspace branch-and-pick tour starts at the top of aisle 3, another one at the bottom. We assume that after parking the cart, at most one vertical\xspace branch-and-pick tour (see Figure~\ref{fig:warehouse3}, aisles 0 and 2 for examples of vertical\xspace branch-and-pick tours) and at most two horizontal\xspace branch-and-pick tours---one to the left and one to the right---are possible. Note that this assumption is not restrictive if the picker without cart is not more than twice as fast as the picker pushing the cart. Lifting the assumption is not possible with the presented modeling approach because it would entail that a section of an aisle is traversed more than twice. For the same reason, we assume that branch-and-pick tours (vertical\xspace as well as horizontal\xspace) starting from different parking positions cannot overlap.
Parking the cart directly at the entry or within a picking aisle and doing a vertical\xspace branch-and-pick tour in that aisle does not explicitly have to be modeled: Under our assumption that only one vertical\xspace branch-and-pick tour for each parking position is allowed, it is beneficial to only push the cart as far into the picking aisle as is needed so that the capacity of the picker is sufficient to visit the remaining required picking positions in the aisle (see Figure~\ref{fig:warehouse3}, aisle 0). Because the decision $\xPickTop{j}{i}=1$ ($\xPickBottom{j}{i}=1$) implicates that all positions located above (below) position $i$ in aisle $j$ are picked in the branch-and-pick tour, the parking position of the cart and consequently the respective cost coefficients $\costPickTop{j}{i}$ and $\costPickBottom{j}{i}$ can be precomputed. However, decoupling and the parking position of the cart must explicitly be modeled for horizontal\xspace branch-and-pick tours. In this case, possible parking positions are located in the cross aisles at the entries to the picking aisles. \end{itemize} To model the path of the picker when traveling without cart, we introduce additional binary variables: Variables $\xPickerTopRight{j}$ indicate that the picker traverses the top cross aisle from the entry of picking aisle $j$ to the entry of $j+1$, and the cart is parked somewhere to his left, i.e., the picker is traveling to the right. Variables $\xPickerBottomRight{j}$ are defined analogously for the bottom cross aisle. Variables $\xPickerBottomLeft{j}$ and $\xPickerTopLeft{j}$ indicate that the picker is traveling from the entry of aisle $j+1$ to the entry of $j$, and the cart is parked to his right. Variables $\xPickTop{j}{i}^p$ ($\xPickBottom{j}{i}^p$) indicate that picking aisle $j$ is entered from the top (bottom) without cart, and all required picking positions down (up) to position $i$ are visited. 
The variables are only defined for those picking positions $i$ for which the picker capacity is sufficient to carry the requested number of items of all SKUs that are stored in the picking positions passed by the picker. In the example in Figure~\ref{fig:warehouse3}, variables $\xPickTop{0}{3}, \xTwoTop{0}, \xUp{1}, \xTopBottom{1}, \xPickTop{2}{2}, \xTopBottom{2}, \xUp{3}$ are equal to $1$ and define the picking tour with cart, and variables $\xPickerTopRight{3}, \xPickTop{4}{4}^p, \xPickerBottomRight{3}, \xPickBottom{4}{9}^p$ equal $1$ and describe the two horizontal\xspace branch-and-pick tours starting from aisle 3.
We modify formulation~\eqref{F0}--\eqref{B5} as follows: First, the objective function~\eqref{F0} is changed to minimize the total travel time, i.e., the sum of the time the picker travels alone and the time that the picker travels with cart: \begin{align} \tiny & \begin{aligned} \text{min~}&\sum_{j \in \mathcal{J}} \costTwoBottom{j} \xTwoBottom{j} + \costTwoBottom{j}^p (\xPickerBottomLeft{j} + \xPickerBottomRight{j}) + \costTwoTop{j} \xTwoTop{j} + \costTwoTop{j}^p (\xPickerTopLeft{j} + \xPickerTopRight{j}) + \costTopBottom{j} \xTopBottom{j} + \costTwoTopBottom{j} \xTwoTopBottom{j} + \costUp{j} \xUp{j} + \costTwoUp{j} \xTwoUp{j}+ \\ &\sum_{j \in \mathcal{J}}\sum_{i \in \setOfArticlesInAisle{j}} \left(\costPickBottom{j}{i} \xPickBottom{j}{i} +\costPickTop{j}{i} \xPickTop{j}{i}\right) + \left(\costPickBottom{j}{i}^p \xPickBottom{j}{i}^p +\costPickTop{j}{i}^p \xPickTop{j}{i}^p\right)
\end{aligned} & \label{H0}
\end{align}
\noindent We replace constraints~\eqref{F2} with constraints~\eqref{F2Pick} to take picking without cart into account: \begin{align} \tiny
&\xUp{j} + \xTwoUp{j} + \sum_{i' \in \setOfArticlesInAisle{j} :i'\geq i} (\xPickTop{j}{i'} + \xPickTop{j}{i'}^p) + \sum_{i' \in \setOfArticlesInAisle{j} :i'\leq i} (\xPickBottom{j}{i'} + \xPickBottom{j}{i'}^p) \geq 1 & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{F2Pick} \end{align} To determine the part of the warehouse in which the cart is used, we replace constraints~\eqref{F1} with constraints~\eqref{F1fb}--\eqref{F1f}, and we add the following constraints: \begin{align} \tiny &\xLast{j} \geq \xTwoUp{j} & j \in \mathcal{J} \label{F1z} \end{align}
\noindent To model feasible horizontal\xspace branch-and-pick tours of the picker without cart, we add: \begin{align} \tiny &\xPickerBottomRight{j} + \xPickerBottomLeft{j} + \xTwoBottom{j} + \xTwoTopBottom{j} + \xTopBottom{j} \leq 1 & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{G1} \\ &\xPickerTopRight{j} + \xPickerTopLeft{j} + \xTwoTop{j} + \xTwoTopBottom{j} + \xTopBottom{j} \leq 1 & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{G2} \\ &\underset{\mathclap{j > 0}}{[}\xPickerTopRight{j-1} + \xTwoTop{j-1} + \xTwoTopBottom{j-1}] + \xUp{j} +\xTwoUp{j} \geq \xPickerTopRight{j} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\}\label{G3} \\ &\underset{\mathclap{j > 0}}{[}\xPickerBottomRight{j-1} + \xTwoBottom{j-1} + \xTwoTopBottom{j-1}] + \xUp{j} +\xTwoUp{j} \geq \xPickerBottomRight{j} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\}\label{G4} \\ &\xPickerTopLeft{j+1} + \xTwoTop{j+1} + \xTwoTopBottom{j+1} + \xUp{j+1} +\xTwoUp{j+1} \geq \xPickerTopLeft{j} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{G5} \\ &\xPickerBottomLeft{j+1} + \xTwoBottom{j+1} + \xTwoTopBottom{j+1} + \xUp{j+1} +\xTwoUp{j+1} \geq \xPickerBottomLeft{j} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{G6} \\ &\underset{\mathclap{j > 0}}{[}\xPickerBottomRight{j-1}] + \xPickerBottomLeft{j} \geq \xPickBottom{j}{i}^p & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{G7}\\ &\underset{\mathclap{j > 0}}{[}\xPickerTopRight{j-1}] + \xPickerTopLeft{j} \geq \xPickTop{j}{i}^p & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{G8}\\ & \xPickerBottomRight{j},\xPickerBottomLeft{j},\xPickerTopRight{j}, \xPickerTopLeft{j} \in \{0,1\} & j \in \mathcal{J} \setminus \{\numberOfAisles-1\} \label{G9}\\ & \xPickerBottomRight{\numberOfAisles-1},\xPickerBottomLeft{\numberOfAisles-1},\xPickerTopRight{\numberOfAisles-1}, \xPickerTopLeft{\numberOfAisles-1} = 0& \label{G10}\\ & \xPickTop{j}{i}^p \in \{0,1\} & j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} 
\label{G11}\\ & \xPickBottom{j}{i}^p \in \{0,1\}& j \in \mathcal{J}, i \in \setOfArticlesInAisle{j} \label{G12} \end{align} Constraints~\eqref{G1} and \eqref{G2} forbid overlaps of horizontal\xspace branch-and-pick tours with other horizontal\xspace branch-and-pick tours and also with the tour with cart. Constraints~\eqref{G3}--\eqref{G8} define feasible conditions for starting or continuing horizontal\xspace branch-and-pick tours. Constraints~\eqref{G9}--\eqref{G12} define the domains of the variables.
To ensure that the carrying capacity of the picker is not exceeded, we introduce variables $\capBottom{j}$ ($\capTop{j}$) that keep track of the total load collected by the picker on a horizontal\xspace branch-and-pick tour at the moment when passing at the bottom (at the top) of aisle $j$. In Figure~\ref{fig:warehouse3}, $\capTop{4} =2$ and $\capBottom{4}=1$. Let $\itemsTop{j}{i}$ ($\itemsBottom{j}{i}$) indicate the number of required items of all SKUs stored between the top (bottom) aisle and picking position $i$, i.e., $\itemsTop{j}{i} = \sum_{i' \in \setOfArticlesInAisle{j}: i' \leq i} \sum_{h \in \mathcal{H} }\demandTwo{j}{i'}{h}$ and $\itemsBottom{j}{i} = \sum_{i' \in \setOfArticlesInAisle{j}: i' \geq i} \sum_{h \in \mathcal{H} }\demandTwo{j}{i'}{h}$. Then, we add the following constraints:
\begin{align} \tiny &\capTop{j+1} + \sum_{i \in \setOfArticlesInAisle{j+1}} \itemsTop{j+1}{i}\,\xPickTop{j+1}{i}^p - C (1-\xPickerTopLeft{j}) \leq \capTop{j} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{H1} \\ &\capBottom{j+1} + \sum_{i \in \setOfArticlesInAisle{j+1}} \itemsBottom{j+1}{i} \xPickBottom{j+1}{i}^p - C (1-\xPickerBottomLeft{j}) \leq \capBottom{j} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{H2} \\ &\capTop{j} + \sum_{i \in \setOfArticlesInAisle{j}} \itemsTop{j}{i} \xPickTop{j}{i}^p - C (1-\xPickerTopRight{j}) \leq \capTop{j+1} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{H3} \\ &\capBottom{j} + \sum_{i \in \setOfArticlesInAisle{j}} \itemsBottom{j}{i} \xPickBottom{j}{i}^p - C (1-\xPickerBottomRight{j}) \leq \capBottom{j+1} & j \in \mathcal{J}\setminus \{\numberOfAisles-1\} \label{H4} \\ & \capTop{j} + \sum_{i \in \setOfArticlesInAisle{j}} \itemsTop{j}{i} \xPickTop{j}{i}^p \leq C & j \in \mathcal{J} \label{H5} \\
& \capBottom{j} + \sum_{i \in \setOfArticlesInAisle{j}} \itemsBottom{j}{i} \xPickBottom{j}{i}^p \leq C & j \in \mathcal{J} \label{H6} \\
& \capTop{j}, \capBottom{j} \geq 0 & j \in \mathcal{J} \label{H7} \end{align} Constraints~\eqref{H1}--\eqref{H4} propagate the load to the next aisle, and constraints~\eqref{H5} and \eqref{H6} restrict the load to the maximum picker capacity.
Finally, note that the described modeling approach can also be used to implement inaccessibility constraints for certain aisles when traveling with cart.
\section{The Single-Block SPRP with Multiple End Depots} \label{sec:openDepot} Finally, we consider the single-block SPRP with multiple end depots, in which the picker does not have to return to the start depot but may select an arbitrary end depot from a set of possible candidates. The start depot is always included in the set of candidates, and additional depots can be located in both cross aisles at the entries to all picking aisles. Initially, the relevant part of the warehouse, i.e., the set of aisles $\mathcal{J}$, can only be restricted to the area between the leftmost depot or required picking position and the rightmost depot or required picking position.
Figure~\ref{fig:warehouse4} shows an example with the given start depot `D' and four potential end depots given by the filled circles. Depot `E' is finally selected as end depot. As illustrated in the figure, the idea of our modeling approach is to simultaneously determine a picking tour according to formulation~\eqref{F0}--\eqref{B5} and a simple path (given by the dot-dashed edges) on which we do not return from the selected end depot to the given start depot. The edges of this path are removed from the edges of the closed loop which starts and ends at the start depot and includes the selected end depot. To preserve the connectivity of the picking tour, the path can only contain edges that are traversed twice in the closed loop, i.e., those for which $\xTwoTopBottom{j},\xTwoTop{j}, \xTwoBottom{j}$, or $\xTwoUp{j}$ are equal to $1$.
\begin{figure}
\caption{Optimal solution of an example instance of the single-block SPRP with multiple end depots.}
\label{fig:warehouse4}
\end{figure}
To define the path, we introduce the binary variables $\yTop{j},\yBottom{j},\yUp{j}$, and $\yTopBottom{j}$, which are defined analogously to the $x_{j}$ variables in Section~\ref{sec:model}. The binary variables $\openDepotTop{j}$ and $\openDepotBottom{j}$ indicate whether the depot at the top (bottom) of aisle $j$ is selected as end depot. To keep notation simple, we set $\openDepotTop{j}$ and $\openDepotBottom{j}$ to zero if no potential end depot is available at the respective position. The binary variables $\degreeEvenTopReturn{j}$ ($\degreeEvenBottomReturn{j}$) equal 0 if the degree of the path at the top (bottom) of aisle $j$ is zero or uneven and equal 1 if the degree is even. In Figure~\ref{fig:warehouse4}, the path between the start depot `D' and the selected end depot `E' ($\openDepotBottom{1} = 1$) is given by $\yUp{3}, \yTop{2}, \yUp{2}, \yBottom{1} = 1$. The degree of the path at the start and end depot is uneven ($\degreeEvenBottomReturn{1},\degreeEvenBottomReturn{3} = 0$), and the degrees at the top and bottom of aisle 2 and the top of aisle 3 are even ($\degreeEvenTopReturn{2}, \degreeEvenBottomReturn{2}, \degreeEvenTopReturn{3} = 1$).
We modify the objective function \eqref{F0} and now subtract the cost associated with the path:
\begin{align} \tiny & \begin{aligned} \text{min~} &\sum_{j \in \mathcal{J}} \costTwoBottom{j} (\xTwoBottom{j}-\yBottom{j}) + \costTwoTop{j} (\xTwoTop{j}-\yTop{j}) + \costTopBottom{j} \xTopBottom{j} + \costTwoTopBottom{j} ( \xTwoTopBottom{j}-\yTopBottom{j})+ \costUp{j} \xUp{j} + \costTwoUp{j}( \xTwoUp{j}-\yUp{j})+\\ &\sum_{j \in \mathcal{J}}\sum_{i \in \setOfArticlesInAisle{j}} \left(\costPickBottom{j}{i} \xPickBottom{j}{i} +\costPickTop{j}{i} \xPickTop{j}{i}\right)
\end{aligned} & \label{K0}
\end{align} We keep constraints~\eqref{F2}--\eqref{B5} and replace constraints~\eqref{F1} with constraints~\eqref{F1fb}--\eqref{F1f} and constraints~\eqref{F1y} to restrict the relevant part of the warehouse: \begin{align} \tiny &\xLast{j} \geq 1 & j \in \mathcal{J}: \setOfArticlesInAisle{j} \neq \emptyset\label{F1y} \end{align} To characterize the path, we add the following constraints: \begin{align} \tiny & \xTwoTop{j} + \xTwoTopBottom{j} \geq \yTop{j} & j \in \mathcal{J} \label{K1}\\ & \xTwoBottom{j} + \xTwoTopBottom{j} \geq \yBottom{j}& j \in \mathcal{J} \label{K2}\\ & \xTwoTopBottom{j} \geq \yTopBottom{j}& j \in \mathcal{J} \label{K3}\\ & \xTwoUp{j} \geq \yUp{j}& j \in \mathcal{J} \label{K4}\\ &\yTop{j} + \yBottom{j+1} \leq \yUp{j+1}+1 & j \in \mathcal{J} \setminus \{\numberOfAisles-1\} \label{K6}\\ &\yBottom{j} + \yTop{j+1} \leq \yUp{j+1}+1 & j \in \mathcal{J} \setminus \{\numberOfAisles-1\} \label{K7}\\ & \yTop{j} + \yBottom{j} + \yTopBottom{j} \geq \yTop{j+1} + \yBottom{j+1} + \yTopBottom{j+1} & j \in \mathcal{J} \setminus \{\numberOfAisles-1\} : j \geq l \label{K9}\\ & \yTop{j} + \yBottom{j} +\yTopBottom{j} \geq \yTop{j-1} + \yBottom{j-1} +\yTopBottom{j-1} & j \in \mathcal{J} \setminus \{0\} : j < l \label{K10}\\ & \sum_{j \in \mathcal{J}} \openDepotBottom{j} + \openDepotTop{j} \leq 1 & \label{K11}\\[0.1cm]
&\underset{\mathclap{j > 0}}{[}\yTopBottom{j-1} +
\yTop{j-1}] + \yTopBottom{j} +
\yTop{j} + \yUp{j} = 2 \degreeEvenTopReturn{j} +\openDepotTop{j} & \begin{aligned}[c] \mathit{if}(\depotTop{} = 0)\,\{j \in \mathcal{J}\}\,\\\mathit{else}\,\{j \in \mathcal{J} \setminus \{l\} \} \end{aligned} \label{K12}\\[0.3cm]
&\underset{\mathclap{j > 0}}{[}\yTopBottom{j-1} +
\yBottom{j-1}] + \yTopBottom{j} +
\yBottom{j} + \yUp{j} = 2 \degreeEvenBottomReturn{j} +\openDepotBottom{j} & \begin{aligned}[c] \mathit{if}(\depotTop{} = 1)\,\{j \in \mathcal{J}\}\,\\\mathit{else}\,\{j \in \mathcal{J} \setminus \{l\} \} \end{aligned} \label{K13}\\[0.3cm] & \yTopBottom{j} + \yTop{j} + \yBottom{j} \leq 1& j \in \mathcal{J} \label{K5}\\ &\yBottom{j}, \yTop{j}, \yTopBottom{j}, \yUp{j}, \openDepotBottom{j}, \openDepotTop{j}, \degreeEvenTopReturn{j},\degreeEvenBottomReturn{j} \in \{0,1\} & j \in \mathcal{J} \label{K14}
\end{align} Constraints~\eqref{K1}--\eqref{K4} restrict the path to the edges of the closed loop that are traversed twice. Constraints~\eqref{K6}--\eqref{K10} guarantee that the path is connected. Constraint~\eqref{K11} ensures that at most one end depot is selected. Constraints \eqref{K12}--\eqref{K14} guarantee that the path is simple and connects start and end depot.
\section{Computational experiments} \label{sec:results} This section presents the numerical studies to assess the performance of our models and the benefits of (i)~allowing the decoupling of picker and cart, and (ii)~having multiple end depots. We discuss results for the standard SPRP (Section~\ref{sec:single}), the settings with scattered storage (Section~\ref{sec:mixed}), decoupling (Section~\ref{sec:decouple}), and multiple end depots (Section~\ref{sec:multdepot}).
We used Gurobi\,6.5.0 set to use only a single thread and otherwise using the standard setting to solve our formulation. All studies were conducted on a Desktop PC with an AMD FX-6300 processor at 3.5 GHz with 8 GB of RAM and running Windows 10 Pro.
\subsection{Results for the Standard SPRP} \label{sec:single} We compare our formulation (denoted as GS) to the two best performing formulations from the literature: SHSW (\citet*{Scholz:2016}) and PPC (\citet*{Pansart:2018}). The comparison between GS and SHSW is performed on the 900-instance benchmark of \citet{Scholz:2016}, which are generated in groups of 30 instances for different numbers of aisles $m \in \{5,10,15,20,25,30\}$ and required picking positions $a \in \{30,45,60,75,90\}$. The number of available picking positions per aisle $n$ is set to 45, and the required picking positions are uniformly distributed over the warehouse. The depot is located at the bottom left of the warehouse. The comparison between GS and PPC is carried out on the benchmark of \citet{Pansart:2018}, which is generated in an analogous fashion to that of \citet{Scholz:2016} but only contains 10 instances per group. Because \citet{Pansart:2018} do not use a subset of the \citet{Scholz:2016} benchmark but generate new instances using a different random number generator, results between SHSW and PPC are not directly comparable. The two benchmarks are available at \url{http://www.mansci.ovgu.de/Forschung/Materialien/2016+_+I_-p-534.html} and \url{https://pagesperso.g-scop.grenoble-inp.fr/~pansartl/en/en_picking.html}, respectively.
Table~\ref{tab:resultsForm} shows the results of the comparison: Column $(m,a)$ denotes the instance group defined as combination of the number of aisles $m$ and the number of required picking positions $a$. Column $\# opt$ reports the number of instances solved to optimality with each formulation within the runtime limit of 30 minutes. In column \runtime{avg}, we report the average runtime in seconds calculated over all instances in the respective group that were solved by the respective method within the time limit, and in column \runtime{max} the maximum runtime observed for any of the instances in the considered group. We judge the comparison of runtimes to be relatively fair: the speed of our processor and the one used by \citet{Pansart:2018} (an Intel Xeon E5-2440 v2 at 1.9\,GHz) are roughly comparable based on their Passmark single-thread scores; \citet{Scholz:2016} only provide the information that a 3.4\,GHz Pentium processor was used, but in general, models that fit this description have Passmark single-thread scores similar to or even higher than those of the other two machines.
\begin{table}[htbp] \centering \begin{footnotesize} \sffamily \setlength{\tabcolsep}{5.0pt}
\ra{1.1} \begin{tabular}{@{}l@{\hspace{0.5cm}}rr@{\hspace{0.5cm}}rrr@{\hspace{.5cm}}rrr@{\hspace{0.5cm}}rrr@{}} \toprule
& \multicolumn{2}{c}{SHSW} & \multicolumn{3}{c}{GS} & \multicolumn{3}{c}{PPC} & \multicolumn{3}{c}{GS}\\ \cmidrule(rr{.55cm}l{.05cm}){2-3} \cmidrule(r{.05cm}l{.05cm}){4-6} \cmidrule(r{.05cm}l{.05cm}){7-9} \cmidrule(r{.05cm}l{.05cm}){10-12}
$(m,a)$ &\multicolumn{1}{c}{\#opt.} & \multicolumn{1}{c}{\runtime{avg}} & \multicolumn{1}{c}{\#opt.} & \multicolumn{1}{c}{\runtime{avg}} & \runtime{max} & \multicolumn{1}{c}{\#opt.} & \multicolumn{1}{c}{\runtime{avg}} & \multicolumn{1}{c}{\runtime{max}} & \multicolumn{1}{c}{\#opt.} & \multicolumn{1}{c}{\runtime{avg}} & \multicolumn{1}{c}{\runtime{max}} \\ \midrule (5,30) & 30/30 & 0.09 & 30/30 & 0.01 & 0.02 & 10/10 & 0.03 & 0.08 & 10/10 & 0.00 & 0.02 \\ (5,45) & 30/30 & 0.09 & 30/30 & 0.01 & 0.02 & 10/10 & 0.06 & 0.09 & 10/10 & 0.01 & 0.02 \\ (5,60) & 30/30 & 0.09 & 30/30 & 0.01 & 0.02 & 10/10 & 0.08 & 0.10 & 10/10 & 0.02 & 0.03 \\ (5,75) & 30/30 & 0.09 & 30/30 & 0.01 & 0.02 & 10/10 & 0.08 & 0.11 & 10/10 & 0.02 & 0.03 \\ (5,90) & 30/30 & 0.10 & 30/30 & 0.01 & 0.02 & 10/10 & 0.08 & 0.13 & 10/10 & 0.03 & 0.03 \\ \addlinespace
(10,30) & 30/30 & 1.60 & 30/30 & 0.02 & 0.05 & 10/10 & 0.05 & 0.13 & 10/10 & 0.03 & 0.06 \\ (10,45) & 30/30 & 1.03 & 30/30 & 0.02 & 0.03 & 10/10 & 0.08 & 0.19 & 10/10 & 0.03 & 0.05 \\ (10,60) & 30/30 & 1.42 & 30/30 & 0.02 & 0.05 & 10/10 & 0.13 & 0.20 & 10/10 & 0.02 & 0.05 \\ (10,75) & 30/30 & 1.36 & 30/30 & 0.02 & 0.05 & 10/10 & 0.09 & 0.17 & 10/10 & 0.02 & 0.05 \\ (10,90) & 30/30 & 0.62 & 30/30 & 0.02 & 0.03 & 10/10 & 0.10 & 0.20 & 10/10 & 0.02 & 0.05 \\ \addlinespace (15,30) & 30/30 & 2.29 & 30/30 & 0.02 & 0.10 & 10/10 & 0.06 & 0.13 & 10/10 & 0.04 & 0.08 \\ (15,45) & 30/30 & 5.28 & 30/30 & 0.03 & 0.06 & 10/10 & 0.11 & 0.20 & 10/10 & 0.04 & 0.05 \\ (15,60) & 30/30 & 10.64 & 30/30 & 0.03 & 0.08 & 10/10 & 0.14 & 0.32 & 10/10 & 0.05 & 0.08 \\ (15,75) & 30/30 & 15.10 & 30/30 & 0.03 & 0.08 & 10/10 & 0.15 & 0.41 & 10/10 & 0.04 & 0.08 \\ (15,90) & 30/30 & 19.41 & 30/30 & 0.04 & 0.08 & 10/10 & 0.23 & 0.45 & 10/10 & 0.05 & 0.08 \\\addlinespace (20,30) & 30/30 & 10.57 & 30/30 & 0.04 & 0.16 & 10/10 & 0.09 & 0.31 & 10/10 & 0.06 & 0.08 \\ (20,45) & 30/30 & 27.32 & 30/30 & 0.03 & 0.13 & 10/10 & 0.10 & 0.21 & 10/10 & 0.07 & 0.11 \\ (20,60) & 30/30 & 114.33 & 30/30 & 0.04 & 0.08 & 10/10 & 0.26 & 0.49 & 10/10 & 0.06 & 0.11 \\ (20,75) & 30/30 & 216.63 & 30/30 & 0.04 & 0.08 & 10/10 & 0.84 & 5.21 & 10/10 & 0.07 & 0.13 \\ (20,90) & 30/30 & 485.71 & 30/30 & 0.05 & 0.11 & 10/10 & 0.39 & 1.64 & 10/10 & 0.09 & 0.13 \\\addlinespace (25,30) & 30/30 & 54.46 & 30/30 & 0.04 & 0.14 & 10/10 & 0.11 & 0.24 & 10/10 & 0.05 & 0.06 \\ (25,45) & 30/30 & 85.46 & 30/30 & 0.05 & 0.13 & 10/10 & 0.24 & 0.50 & 10/10 & 0.06 & 0.10 \\ (25,60) & 30/30 & 258.92 & 30/30 & 0.06 & 0.13 & 10/10 & 0.71 & 2.34 & 10/10 & 0.08 & 0.17 \\ (25,75) & 29/30 & 527.39 & 30/30 & 0.07 & 0.19 & 10/10 & 0.76 & 2.34 & 10/10 & 0.09 & 0.15 \\ (25,90) & 24/30 & 646.59 & 30/30 & 0.07 & 0.18 & 10/10 & 1.01 & 4.41 & 10/10 & 0.09 & 0.18 \\\addlinespace (30,30) & 30/30 & 204.18 & 30/30 & 0.05 & 0.11 & 10/10 & 0.08 & 0.21 & 10/10 & 0.06 & 
0.13 \\ (30,45) & 30/30 & 406.19 & 30/30 & 0.06 & 0.17 & 10/10 & 0.19 & 0.49 & 10/10 & 0.08 & 0.11 \\ (30,60) & 30/30 & 508.80 & 30/30 & 0.07 & 0.17 & 10/10 & 0.39 & 0.59 & 10/10 & 0.10 & 0.13 \\ (30,75) & 24/30 & 638.89 & 30/30 & 0.07 & 0.16 & 10/10 & 0.99 & 6.69 & 10/10 & 0.11 & 0.19 \\ (30,90) & 21/30 & 786.29 & 30/30 & 0.08 & 0.22 & 10/10 & 1.63 & 6.69 & 10/10 & 0.15 & 0.30 \\ \midrule \textbf{Avg.} & & \textbf{167.70} & & \textbf{0.04} & & & \textbf{0.31} & & & \textbf{0.05} & \\
\bottomrule
\end{tabular} \rmfamily
\caption{\textrm{Comparison of the formulations SHSW, PPC, and GS on standard SPRP instances from the literature}.\label{tab:resultsForm}}
\end{footnotesize}
\end{table}
Comparing GS to SHSW, we note that GS is able to solve all instances, while SHSW fails to solve 22 of the instances with a number of aisles $m \geq 25$ and a number of required picking positions $a \geq 75$. The runtimes of SHSW grow strongly with a larger number of aisles, and, for a number of aisles above 10 also with an increasing number of required picking positions. Contrary to this, the runtime of GS only grows moderately with a larger number of aisles, and the number of required picking positions seems to have no clear influence on the runtime. The average runtime of GS is approximately 4500 times lower than that of SHSW. The difference between GS and PPC is smaller: Both formulations are able to solve all instance groups within less than 10 seconds. Still, GS is approximately six times faster on average, and the worst runtime on the instance groups is up to 40 times lower than that of PPC.
To assess the scaling behavior of GS, we generate a set of large instances, more precisely, 30 instances for all combinations of $m, a, n \in \{100,250,500,750,1000\}$. Again, the required picking positions are uniformly distributed over the warehouse, and the depot is located at the bottom left corner of the warehouse. Table~\ref{tab:resultsLarge} presents aggregate results over the instances in a group. In addition to the values reported in Table~\ref{tab:resultsForm}, we report in rows \runtimeSup{avg}{a} the average runtimes over different values of $a$ within one group (defined by a fixed value of $m$ and $n$). Column \runtimeSup{avg}{n} reports the average runtime over all different values of $n$ for a fixed combination of $m$ and $a$.
\begin{table}[htbp]
\centering \begin{footnotesize} \sffamily \setlength{\tabcolsep}{5.0pt}
\ra{1.1} \begin{tabular}{@{}l@{\hspace{0.5cm}}rr@{\hspace{0.5cm}}rr@{\hspace{0.5cm}}rr@{\hspace{0.5cm}}rr@{\hspace{0.5cm}}rr@{\hspace{0.5cm}}r@{}}\toprule
& \multicolumn{2}{c}{$n = 100$} & \multicolumn{2}{c}{$n = 250$} & \multicolumn{2}{c}{$n = 500$} & \multicolumn{2}{c}{$n = 750$} & \multicolumn{2}{c}{$n = 1000$} \\ \cmidrule(l{.2cm}r{.2cm}){2-3} \cmidrule(l{.2cm}r{.2cm}){4-5} \cmidrule(l{.2cm}r{.2cm}){6-7} \cmidrule(l{.2cm}r{.2cm}){8-9} \cmidrule(l{.2cm}r{.2cm}){10-11}
$(m,a)$ & \multicolumn{1}{r}{\runtime{avg}} & \multicolumn{1}{r@{\hspace{0.5cm}}}{\runtime{max}} & \multicolumn{1}{r}{\runtime{avg}} & \multicolumn{1}{r@{\hspace{0.5cm}}}{\runtime{max}} & \multicolumn{1}{r}{\runtime{avg}} & \multicolumn{1}{r@{\hspace{0.5cm}}}{\runtime{max}} & \multicolumn{1}{r}{\runtime{avg}} & \multicolumn{1}{r@{\hspace{0.5cm}}}{\runtime{max}} & \multicolumn{1}{r}{\runtime{avg}} & \multicolumn{1}{r}{\runtime{max}} & \multicolumn{1}{r}{\boldmath\runtimeSup{avg}{n}} \\\midrule
(100,100) & 1.03 & 2.38 & 1.33 & 5.50 & 1.87 & 13.47 & 1.23 & 6.49 & 1.30 & 4.63 & \textbf{1.35} \\ (100,250) & 1.67 & 8.87 & 2.08 & 7.11 & 1.79 & 5.06 & 1.54 & 4.82 & 2.22 & 6.44 & \textbf{1.86} \\ (100,500) & 2.50 & 10.20 & 1.35 & 4.49 & 1.30 & 4.60 & 1.13 & 2.13 & 1.14 & 1.87 & \textbf{1.49} \\ (100,750) & 1.38 & 5.09 & 1.32 & 3.81 & 1.65 & 6.05 & 1.42 & 6.29 & 1.16 & 1.57 & \textbf{1.39} \\ (100,1000) & 1.46 & 6.12 & 1.65 & 4.80 & 1.27 & 2.79 & 1.27 & 2.87 & 1.45 & 2.86 & \textbf{1.42} \\%\addlinespace
{\boldmath\runtimeSup{avg}{a}} & \textbf{1.61} & & \textbf{1.54} & & \textbf{1.58} & & \textbf{1.32} & & \textbf{1.45} & & \\ \midrule (250,100) & 2.42 & 5.21 & 3.58 & 17.52 & 3.43 & 12.50 & 5.60 & 23.89 & 4.79 & 32.91 & \textbf{3.96} \\ (250,250) & 5.55 & 22.54 & 7.52 & 24.61 & 10.98 & 29.25 & 9.93 & 28.51 & 9.14 & 26.16 & \textbf{8.62} \\ (250,500) & 8.26 & 29.42 & 6.07 & 32.26 & 10.10 & 26.03 & 7.59 & 26.32 & 8.97 & 29.03 & \textbf{8.20} \\ (250,750) & 11.06 & 44.55 & 12.77 & 33.40 & 14.10 & 36.57 & 12.35 & 30.44 & 14.21 & 35.33 & \textbf{12.90} \\ (250,1000) & 14.99 & 34.76 & 15.03 & 35.66 & 9.96 & 32.23 & 11.32 & 31.41 & 9.22 & 25.84 & \textbf{12.10} \\%\addlinespace
{\boldmath\runtimeSup{avg}{a}} & \textbf{8.45} & & \textbf{8.99} & & \textbf{9.71} & & \textbf{9.36} & & \textbf{9.27} & & \\ \midrule (500,100) & 4.32 & 15.45 & 6.78 & 18.43 & 8.82 & 31.58 & 11.46 & 47.96 & 15.22 & 68.86 & \textbf{9.32} \\ (500,250) & 14.44 & 63.67 & 12.81 & 37.21 & 22.07 & 68.54 & 28.85 & 54.87 & 26.87 & 87.59 & \textbf{21.01} \\ (500,500) & 11.43 & 56.09 & 18.99 & 66.95 & 23.75 & 59.27 & 29.56 & 64.88 & 34.35 & 75.92 & \textbf{23.62} \\ (500,750) & 17.86 & 55.94 & 21.56 & 66.74 & 26.19 & 71.22 & 25.35 & 68.96 & 30.05 & 79.15 & \textbf{24.20} \\ (500,1000) & 16.03 & 64.74 & 18.64 & 62.14 & 23.48 & 71.43 & 30.40 & 83.83 & 24.12 & 63.57 & \textbf{22.53} \\%\addlinespace
{\boldmath\runtimeSup{avg}{a}} & \textbf{12.81} & & \textbf{15.75} & & \textbf{20.86} & & \textbf{25.12} & & \textbf{26.13} & & \\ \midrule (750,100) & 8.68 & 41.81 & 8.70 & 55.56 & 12.67 & 77.53 & 16.09 & 114.41 & 16.18 & 117.38 & \textbf{12.46} \\ (750,250) & 19.75 & 56.34 & 29.74 & 95.73 & 44.66 & 107.42 & 55.18 & 125.61 & 50.61 & 120.86 & \textbf{39.99} \\ (750,500) & 24.11 & 73.18 & 56.25 & 134.75 & 64.34 & 161.30 & 72.61 & 211.40 & 68.02 & 166.36 & \textbf{57.06} \\ (750,750) & 31.09 & 112.22 & 44.27 & 156.87 & 60.81 & 140.97 & 68.15 & 130.10 & 68.59 & 154.34 & \textbf{54.58} \\ (750,1000) & 23.09 & 106.81 & 34.24 & 104.50 & 50.49 & 130.80 & 52.78 & 127.16 & 72.75 & 173.93 & \textbf{46.67} \\%\addlinespace
{\boldmath\runtimeSup{avg}{a}} & \textbf{21.34} & & \textbf{34.64} & & \textbf{46.59} & & \textbf{52.96} & & \textbf{55.23} & & \\ \midrule (1000,100) & 10.40 & 74.06 & 26.68 & 300.07 & 22.36 & 98.25 & 23.42 & 141.61 & 22.77 & 154.93 & \textbf{21.12} \\ (1000,250) & 24.25 & 84.93 & 47.38 & 174.19 & 74.49 & 257.29 & 82.95 & 371.27 & 89.06 & 617.04 & \textbf{63.63} \\ (1000,500) & 25.07 & 72.91 & 70.79 & 227.91 & 83.98 & 181.89 & 110.82 & 287.68 & 90.04 & 291.63 & \textbf{76.14} \\ (1000,750) & 33.60 & 127.68 & 117.88 & 310.40 & 161.87 & 338.99 & 122.89 & 239.56 & 102.21 & 242.78 & \textbf{107.69} \\ (1000,1000) & 35.62 & 134.22 & 96.56 & 261.85 & 127.16 & 307.27 & 106.87 & 226.21 & 122.96 & 251.78 & \textbf{97.83} \\%\addlinespace
{\boldmath\runtimeSup{avg}{a}} & \textbf{25.79} & & \textbf{71.86} & & \textbf{93.97} & & \textbf{89.39} & & \textbf{85.41} & & \\ \bottomrule
\end{tabular}
\end{footnotesize} \rmfamily
\caption{Results of our formulation on newly generated large standard SPRP instances\label{tab:resultsLarge}.} \end{table}
The results show that the runtimes consistently increase for a larger number of aisles $m$, while the relationship between the number of required picking positions $a$ or the number of available picking positions $n$ and the runtime is not entirely consistent: we can only note the rough tendency that runtimes are higher for larger values of the two parameters. Overall, the scaling behavior of our formulation is quite convincing: even the largest instances with $m, a, n = 1000$ can be solved with an average runtime of about two minutes, and the most challenging instance in the benchmark was solved in approximately 10 minutes.
\subsection{Results for the Single-Block SPRP with Scattered Storage} \label{sec:mixed} Because the benchmark instances used by \citet{Weidinger:2018a} were not archived by the author, we generated new instances in a fashion similar to the procedure described in the original paper. To allow for a fair comparison, we reimplemented the mathematical model of \citet{Weidinger:2018a} using Gurobi (in the following denoted as formulation W).
The instances consider warehouses of different sizes by varying the number of aisles $m \in \{5,25,100\}$ and the number of available picking positions $n \in \{30,60,180\}$. We assume that one SKU is stored in each picking position. To investigate the influence of the degree of duplication of the SKUs in the warehouse, we vary the number of different SKUs $\xi$ stored in the warehouse depending on (i)~the storage capacity of the warehouse (given by $m \cdot n$), (ii)~a factor $\alpha \in \{1,5,10,50\}$ that determines the frequency with which SKUs are assigned to multiple storage positions, and (iii)~the number of different SKUs in the pick list $a$ as follows: $$ \xi =\max(a,\lceil m \cdot n / \alpha \rceil).$$ For example, if we set $\alpha = 1$, we have a standard warehouse in which each picking position is occupied by a different SKU. The maximum expression guarantees that for higher degrees of duplication at least as many different SKUs as required in the pick list are available in the warehouse.
The SKUs in the warehouse are divided into three classes A, B, and C based on their turnover rate. We assign $20\%$ of the SKUs to class A, $30\%$ to class B, and $50\%$ to class C. To ensure that each SKU is available at least at one picking position, each SKU is first assigned to one randomly selected picking position. Afterward, all remaining picking positions are assigned a randomly drawn SKU from class A with $80\%$ probability, from class B with $15\%$ probability, and from class C with 5\% probability. The number of items of the selected SKU that is available at each picking position is randomly selected from $\mathbb{N} \cap [1,3]$. Next, we generate pick lists with $a \in \{3,7,15,30\}$ SKUs. Each SKU is selected from classes A, B, and C according to the probabilities $80\%$, $15\%$, and $5\%$. The demand $\demand{h}$ for each SKU $h$ is randomly drawn from $\mathbb{N} \cap [1,\min(6,\bar{\capacity{h}{}})]$ with $\bar{\capacity{h}{}}$ the total supply of $h$. In the described way, we generate $3 \cdot 3 \cdot 4 \cdot 4 = 144$ instances.
Both formulations were given a time limit of one hour. Table~\ref{tab:mixed} reports the results for different warehouse sizes $(m,n)$, number of SKUs in the pick list $a$, and degrees of duplication $\alpha$. For W, we report the runtime in column $t_W$ (TL indicates that the time limit was reached, OOM that an out of memory error occurs) and the difference between the upper bound found and the optimal solution (if no valid upper bound is found, this is indicated with a ``-''). GS finds the optimal solution for all instances, and we only report the runtime in column $t_{GS}$. If all instances of an instance group were solved to optimality by the respective formulation, we report averages of the runtimes for the group.
\begin{table}[htbp]
\centering
\begin{footnotesize} \sffamily \setlength{\tabcolsep}{3.0pt}
\ra{1.1} \begin{tabular}{@{}l@{\hspace{0.6cm}}rr@{\hspace{0.5cm}}r@{\hspace{0.9cm}}rr@{\hspace{0.5cm}}r@{\hspace{0.9cm}}rr@{\hspace{0.5cm}}r@{\hspace{0.9cm}}rr@{\hspace{0.5cm}}r@{}}
\toprule
& \multicolumn{3}{c}{$a=3$} & \multicolumn{3}{c}{$a=7$} & \multicolumn{3}{c}{$a=15$} & \multicolumn{3}{c}{$a=30$} \\ \cmidrule(l{.0cm}r{.5cm}){2-4} \cmidrule(l{.0cm}r{.5cm}){5-7} \cmidrule(l{.0cm}r{.5cm}){8-10} \cmidrule(l{.0cm}r{.0cm}){11-13}
$(m,n)$ & $\Delta_{ub}$ & $t_{W}$ & $t_{GS}$ & $\Delta_{ub}$ & $t_{W}$ & $t_{GS}$ & $\Delta_{ub}$ & $t_{W}$ & $t_{GS}$ & $\Delta_{ub}$ & $t_{W}$ & $t_{GS}$ \\
\midrule {\boldmath$\alpha=1$} & & & & & & & & & & & & \\ (5,30) & 0.0 & 0.00 & 0.01 & 0.0 & 0.02 & 0.00 & 0.0 & 0.88 & 0.01 & 0.0 & 98.92 & 0.02 \\ (5,60) & 0.0 & 0.00 & 0.01 & 0.0 & 0.01 & 0.00 & 0.0 & 0.93 & 0.01 & 0.0 & 3.73 & 0.01 \\ (5,180) & 0.0 & 0.03 & 0.00 & 0.0 & 0.04 & 0.01 & 0.0 & 0.14 & 0.01 & 0.0 & 14.41 & 0.01 \\ (25,30) & 0.0 & 0.01 & 0.08 & 0.0 & 0.03 & 0.14 & 0.0 & 3.04 & 0.07 & 0.0 & 570.42 & 0.14 \\ (25,60) & 0.0 & 0.01 & 0.03 & 0.0 & 0.03 & 0.02 & 0.0 & 9.98 & 0.15 & 0.0 & TL & 0.05 \\ (25,180) & 0.0 & 0.01 & 0.15 & 0.0 & 0.04 & 0.06 & 0.0 & 0.23 & 0.03 & 0.0 & 114.35 & 0.10 \\ (100,30) & 0.0 & 0.01 & 0.21 & 0.0 & 0.03 & 0.93 & 0.0 & 56.99 & 0.15 & 0.4 & TL & 0.15 \\ (100,60) & 0.0 & 0.00 & 0.53 & 0.0 & 0.03 & 0.28 & 0.0 & 15.22 & 0.12 & 0.2 & TL & 0.44 \\ (100,180) & 0.0 & 0.01 & 0.67 & 0.0 & 0.04 & 0.30 & 0.0 & 0.23 & 0.13 & 0.0 & 1761.86 & 0.17 \\ \textbf{Avg.} & & \textbf{0.01} & \textbf{0.19} & & \textbf{0.03} & \textbf{0.19} & & \textbf{9.74} & \textbf{0.07} & & & \textbf{0.12} \\ \midrule {\boldmath$\alpha=5$} & & & & & & & & & & & & \\ (5,30) & 0.0 & 1.09 & 0.03 & 0.0 & TL & 0.05 & 1.2 & TL & 0.10 & 16.0 & TL & 0.06 \\ (5,60) & 0.0 & 2.63 & 0.05 & 0.0 & TL & 0.02 & 6.8 & TL & 0.07 & 86.9 & TL & 0.10 \\ (5,180) & 0.0 & 1.54 & 0.01 & 12.3 & TL & 0.52 & 106.1 & TL & 0.18 & - & TL & 0.75 \\ (25,30) & 0.0 & TL & 0.39 & 3.9 & TL & 0.20 & 86.1 & TL & 0.16 & 320.3 & TL & 0.16 \\ (25,60) & 0.0 & 1.59 & 0.10 & 3.4 & TL & 0.31 & 34.7 & TL & 0.98 & - & TL & 2.24 \\ (25,180) & 0.0 & 2.68 & 0.25 & 0.0 & 128.34 & 0.07 & 26.5 & TL & 0.39 & - & TL & 1.59 \\ (100,30) & 0.0 & TL & 0.27 & 15.3 & TL & 0.19 & 72.8 & TL & 1.99 & - & TL & 3.18 \\ (100,60) & 0.0 & 28.33 & 0.30 & 1.6 & TL & 1.24 & 13.7 & TL & 0.55 & - & TL & 0.48 \\ (100,180) & 0.0 & 15.84 & 3.37 & 12.1 & TL & 1.72 & 148.5 & TL & 1.83 & 105.8 & TL & 1.13 \\ \textbf{Avg.} & & & \textbf{0.53} & & & \textbf{0.48} & & & \textbf{0.69} & & & \textbf{1.08} \\\midrule {\boldmath$\alpha=10$} & & & & & & 
& & & & & & \\ (5,30) & 0.0 & TL & 0.02 & 0.0 & TL & 0.04 & 3.0 & TL & 0.05 & 14.9 & TL & 0.02 \\ (5,60) & 0.0 & 17.35 & 0.02 & 0.0 & TL & 0.20 & 83.0 & TL & 0.07 & 37.8 & TL & 0.19 \\ (5,180) & 0.0 & TL & 0.07 & 68.9 & TL & 0.32 & 77.0 & TL & 0.36 & - & TL & 0.64 \\ (25,30) & 0.0 & 92.49 & 0.19 & 10.7 & TL & 0.33 & 28.6 & TL & 0.72 & - & TL & 0.27 \\ (25,60) & 0.0 & 447.62 & 4.24 & 32.7 & TL & 0.44 & 375.0 & TL & 1.00 & - & TL & 0.87 \\ (25,180) & 0.0 & 86.79 & 0.47 & 74.7 & TL & 0.36 & 525.8 & TL & 30.94 & - & TL & 3.11 \\ (100,30) & 0.0 & TL & 0.51 & 12.6 & TL & 0.82 & 412.1 & TL & 1.43 & - & TL & 2.79 \\ (100,60) & 0.0 & 418.16 & 0.33 & 15.9 & TL & 11.69 & 310.9 & TL & 3.21 & - & TL & 6.04 \\ (100,180) & 0.0 & 849.48 & 1.69 & 38.3 & TL & 22.58 & - & TL & 93.84 & - & OOM & 29.07 \\ \textbf{Avg.} & & & \textbf{0.84} & & & \textbf{4.09} & & & \textbf{14.62} & & & \textbf{4.78} \\\midrule {\boldmath$\alpha=40$} & & & & & & & & & & & & \\ (5,30) & 0.0 & TL & 0.38 & 2.1 & TL & 0.19 & 0.0 & TL & 0.03 & 7.1 & TL & 0.04 \\ (5,60) & 0.0 & TL & 0.39 & 50.9 & TL & 1.00 & 20.6 & TL & 0.12 & 84.5 & TL & 0.10 \\ (5,180) & 0.0 & TL & 0.76 & 333.9 & TL & 2.07 & - & TL & 1.26 & - & TL & 0.74 \\ (25,30) & 0.0 & TL & 0.12 & 607.9 & TL & 0.35 & - & TL & 1.28 & - & TL & 0.49 \\ (25,60) & 29.2 & TL & 0.47 & - & TL & 0.68 & - & TL & 4.74 & - & TL & 5.31 \\ (25,180) & 178.0 & TL & 0.88 & - & OOM & 0.79 & - & TL & 1.20 & - & TL & 8.98 \\ (100,30) & 439.5 & TL & 0.78 & - & OOM & 2.41 & - & TL & 3.25 & - & OOM & 4.90 \\ (100,60) & 0.0 & 193.97 & 0.11 & - & OOM & 81.39 & - & TL & 21.61 & - & TL & 12.63 \\ (100,180) & 0.0 & 245.70 & 1.76 & - & OOM & 1.30 & - & OOM & 4.75 & - & OOM & 155.13 \\ \textbf{Avg.} & & & \textbf{0.63} & & & \textbf{10.02} & & & \textbf{4.25} & & & \textbf{20.92} \\ \bottomrule
\end{tabular}
\end{footnotesize} \rmfamily
\caption{Comparison of formulations W and GS for the single-block SPRP with scattered storage.}
\label{tab:mixed} \end{table}
For W, both a higher number of SKUs in the pick list and a higher degree of duplication make the instances more difficult to solve. Of the 144 instances, 95 cannot be solved to proven optimality within the time limit (no valid upper bound is found in 24 cases, and an OOM error occurs in 8 cases). Contrary to this, GS is able to solve all instances, and the average runtime on the hardest instance group is approximately 21 seconds. The highest runtime observed for any instance is around 155 seconds. The relationship between the number of SKUs in the pick list or the degree of duplication and the runtime of GS is not entirely consistent: we can again note only a rough tendency that runtimes are higher for larger values of the two parameters.
\subsection{Results for the Single-Block SPRP with Decoupling} \label{sec:decouple} To investigate the performance of our formulation in the setting with decoupling of picker and cart, we use the standard SPRP benchmark of \citet{Scholz:2016}. Because we want to study the effect of different capacities and speeds of the picker when traveling without cart, we consider three values of the picker capacity $C \in \{2,4,6\}$, and we vary the travel time required by the picker without cart by multiplying the original travel time with cart by factors $\beta\in\{0.5,0.75\}$, e.g., $\costTwoTop{j}^p = \beta\cdot \costTwoTop{j}$. Thus, we study $6 \cdot 900 = 5400$ instances. Table~\ref{tab:decouple} provides aggregate values for the 30 instances in each group $(m,a)$: the average gap $\Delta$ between the objective value obtained with the respective setting and the base setting without decoupling of picker and cart, the average runtime \runtime{avg}, and the maximum runtime \runtime{max}. In the last row, we provide the average of the gaps and of the average runtimes for all combinations of travel speed and capacity of the picker.
\afterpage{ \begin{landscape}
\begin{table}[p] \centering \ra{1.1}
\begin{footnotesize} \sffamily \setlength{\tabcolsep}{2.0pt} \begin{tabular}{@{}l@{\hspace{0.5cm}}rrr@{\hspace{0.5cm}}rrr@{\hspace{0.5cm}}rrr@{\hspace{0.5cm}}rrr@{\hspace{0.5cm}}rrr@{\hspace{0.5cm}}rrr@{}} \toprule
& \multicolumn{6}{c}{$C = 2$} & \multicolumn{6}{c}{$C = 4$} & \multicolumn{6}{c}{$C = 6$} \\ \cmidrule(l{.2cm}r{.2cm}){2-7} \cmidrule(l{.2cm}r{.2cm}){8-13} \cmidrule(l{.2cm}r{.0cm}){14-19} & \multicolumn{3}{c}{$\beta = 0.75$} & \multicolumn{3}{c}{$\beta = 0.5$}& \multicolumn{3}{c}{$\beta = 0.75$} & \multicolumn{3}{c}{$\beta = 0.5$} & \multicolumn{3}{c}{$\beta = 0.75$} & \multicolumn{3}{c}{$\beta = 0.5$} \\ \cmidrule(l{.2cm}r{.2cm}){2-4} \cmidrule(l{.0cm}r{.2cm}){5-7} \cmidrule(l{.2cm}r{.2cm}){8-10} \cmidrule(l{.2cm}r{.2cm}){11-13} \cmidrule(l{.2cm}r{.2cm}){14-16} \cmidrule(l{.2cm}r{.0cm}){17-19} $(m,a)$ & $\Delta$ & \runtime{avg} & \runtime{max} & $\Delta$ & \runtime{avg} & \runtime{max} & $\Delta$ & \runtime{avg} & \runtime{max} & $\Delta$ & \runtime{avg} & \runtime{max} & $\Delta$ & \runtime{avg} & \runtime{max} & $\Delta$ & \runtime{avg} & \runtime{max} \\ \midrule (5,30) & -2.14 & 0.01 & 0.03 & -6.01 & 0.01 & 0.02 & -4.39 & 0.02 & 0.03 & -17.02 & 0.03 & 0.12 & -5.77 & 0.02 & 0.03 & -20.51 & 0.13 & 0.22 \\ (5,45) & -1.88 & 0.02 & 0.03 & -4.25 & 0.01 & 0.03 & -4.00 & 0.02 & 0.03 & -11.36 & 0.02 & 0.06 & -5.03 & 0.02 & 0.03 & -17.52 & 0.07 & 0.22 \\ (5,60) & -1.40 & 0.02 & 0.05 & -3.14 & 0.02 & 0.05 & -3.17 & 0.03 & 0.06 & -7.11 & 0.02 & 0.05 & -4.66 & 0.03 & 0.06 & -13.06 & 0.03 & 0.09 \\ (5,75) & -0.96 & 0.03 & 0.05 & -2.19 & 0.03 & 0.05 & -2.87 & 0.03 & 0.05 & -6.66 & 0.03 & 0.05 & -4.30 & 0.03 & 0.05 & -9.80 & 0.03 & 0.06 \\ (5,90) & -0.79 & 0.04 & 0.05 & -1.66 & 0.03 & 0.05 & -2.03 & 0.03 & 0.06 & -4.51 & 0.03 & 0.06 & -3.36 & 0.03 & 0.06 & -7.53 & 0.03 & 0.05 \\\addlinespace (10,30) & -7.43 & 0.06 & 0.14 & -18.24 & 0.15 & 0.62 & -12.25 & 0.11 & 0.37 & -29.06 & 0.55 & 2.37 & -12.95 & 0.11 & 0.37 & -30.23 & 1.19 & 2.68 \\ (10,45) & -3.83 & 0.05 & 0.09 & -10.47 & 0.08 & 0.28 & -7.31 & 0.07 & 0.26 & -23.85 & 0.46 & 1.95 & -8.40 & 0.07 & 0.26 & -27.80 & 1.66 & 5.08 \\ (10,60) & -2.26 & 0.05 & 0.13 & -6.81 & 0.07 & 0.22 & -5.59 & 0.07 & 0.18 & -20.25 & 0.28 & 3.77 
& -6.89 & 0.07 & 0.18 & -25.97 & 0.71 & 4.23 \\ (10,75) & -1.30 & 0.04 & 0.06 & -3.73 & 0.06 & 0.16 & -3.27 & 0.06 & 0.11 & -13.92 & 0.13 & 0.36 & -4.48 & 0.06 & 0.11 & -21.74 & 0.36 & 2.06 \\ (10,90) & -0.64 & 0.05 & 0.12 & -1.92 & 0.06 & 0.22 & -1.67 & 0.07 & 0.14 & -8.57 & 0.12 & 0.38 & -2.51 & 0.07 & 0.14 & -16.39 & 0.25 & 0.66 \\\addlinespace (15,30) & -9.84 & 0.17 & 0.99 & -22.38 & 0.78 & 4.05 & -14.18 & 0.29 & 1.18 & -31.72 & 2.15 & 5.09 & -14.50 & 0.29 & 1.18 & -32.49 & 3.28 & 6.13 \\ (15,45) & -7.48 & 0.12 & 0.33 & -18.60 & 0.34 & 1.01 & -12.02 & 0.17 & 0.42 & -30.73 & 1.92 & 5.59 & -12.80 & 0.17 & 0.42 & -32.44 & 3.16 & 6.04 \\ (15,60) & -4.82 & 0.09 & 0.17 & -12.73 & 0.24 & 0.66 & -9.84 & 0.17 & 0.50 & -28.41 & 1.00 & 5.81 & -11.00 & 0.17 & 0.50 & -31.65 & 3.37 & 7.56 \\ (15,75) & -3.22 & 0.10 & 0.20 & -9.99 & 0.19 & 0.55 & -6.98 & 0.12 & 0.28 & -23.74 & 0.41 & 3.63 & -8.02 & 0.12 & 0.28 & -28.63 & 3.08 & 8.39 \\ (15,90) & -2.07 & 0.07 & 0.13 & -6.33 & 0.12 & 0.38 & -4.86 & 0.12 & 0.25 & -18.58 & 0.29 & 0.58 & -6.08 & 0.12 & 0.25 & -25.48 & 2.61 & 9.02 \\\addlinespace (20,30) & -11.78 & 0.46 & 4.81 & -26.11 & 2.09 & 8.49 & -15.34 & 1.08 & 3.58 & -33.11 & 4.49 & 7.99 & -15.55 & 1.08 & 3.58 & -33.65 & 5.44 & 9.76 \\ (20,45) & -9.11 & 0.17 & 0.47 & -21.80 & 1.13 & 6.78 & -13.85 & 0.57 & 4.81 & -33.09 & 4.86 & 16.42 & -14.52 & 0.57 & 4.81 & -34.40 & 6.55 & 16.95 \\ (20,60) & -7.71 & 0.18 & 0.66 & -19.10 & 0.74 & 4.52 & -12.68 & 0.29 & 0.82 & -32.32 & 3.57 & 9.36 & -13.33 & 0.29 & 0.82 & -34.14 & 6.23 & 12.20 \\ (20,75) & -5.15 & 0.15 & 0.30 & -13.87 & 0.75 & 4.37 & -10.16 & 0.22 & 0.66 & -29.27 & 2.30 & 9.15 & -11.03 & 0.22 & 0.66 & -32.26 & 5.76 & 15.64 \\ (20,90) & -3.98 & 0.14 & 0.37 & -11.00 & 0.30 & 0.56 & -8.03 & 0.23 & 0.45 & -25.50 & 1.47 & 6.16 & -9.17 & 0.23 & 0.45 & -30.44 & 5.09 & 16.37 \\\addlinespace (25,30) & -11.69 & 0.83 & 4.99 & -25.91 & 3.46 & 12.47 & -14.92 & 1.73 & 7.21 & -32.17 & 6.35 & 18.22 & -15.19 & 1.73 & 7.21 & -32.73 & 7.74 & 
13.89 \\ (25,45) & -10.87 & 0.32 & 2.69 & -24.38 & 2.55 & 15.51 & -15.11 & 0.74 & 2.64 & -33.76 & 7.01 & 14.90 & -15.58 & 0.74 & 2.64 & -34.65 & 9.75 & 20.71 \\ (25,60) & -9.18 & 0.25 & 0.51 & -21.57 & 2.13 & 8.09 & -14.12 & 0.45 & 1.82 & -33.82 & 6.46 & 20.65 & -14.75 & 0.45 & 1.82 & -35.23 & 9.11 & 26.48 \\ (25,75) & -7.32 & 0.23 & 0.48 & -18.44 & 1.69 & 5.01 & -12.50 & 0.35 & 0.92 & -32.56 & 8.07 & 27.64 & -13.34 & 0.35 & 0.92 & -34.55 & 8.80 & 19.91 \\ (25,90) & -6.22 & 0.22 & 0.41 & -15.82 & 0.71 & 3.38 & -11.02 & 0.35 & 0.81 & -30.33 & 4.66 & 26.77 & -12.15 & 0.35 & 0.81 & -33.83 & 10.51 & 26.92 \\\addlinespace (30,30) & -12.51 & 2.18 & 8.97 & -26.66 & 7.53 & 29.84 & -15.54 & 3.31 & 11.85 & -32.30 & 11.95 & 34.28 & -15.73 & 3.31 & 11.85 & -32.81 & 13.30 & 34.73 \\ (30,45) & -11.26 & 0.41 & 2.07 & -25.20 & 3.93 & 29.24 & -15.55 & 1.08 & 7.44 & -34.21 & 11.48 & 28.04 & -15.81 & 1.08 & 7.44 & -34.80 & 14.30 & 37.37 \\ (30,60) & -10.57 & 0.43 & 1.24 & -24.26 & 3.36 & 10.33 & -15.50 & 1.08 & 4.66 & -35.18 & 9.84 & 24.00 & -15.85 & 1.08 & 4.66 & -36.01 & 16.53 & 49.18 \\ (30,75) & -8.36 & 0.43 & 1.55 & -20.66 & 4.33 & 14.04 & -13.94 & 0.75 & 2.88 & -33.98 & 8.92 & 28.41 & -14.65 & 0.75 & 2.88 & -35.54 & 11.87 & 26.38 \\ (30,90) & -7.62 & 0.27 & 0.59 & -18.57 & 2.80 & 7.31 & -12.78 & 0.55 & 1.73 & -32.59 & 12.17 & 50.15 & -13.68 & 0.55 & 1.73 & -35.16 & 18.77 & 35.48 \\ \midrule \textbf{Avg.} & \textbf{-6.11} & \textbf{0.25} & & \textbf{-14.73} & \textbf{1.32} & & \textbf{-9.85} & \textbf{0.47} & & \textbf{-25.32} & \textbf{3.70} & & \textbf{-10.70} & \textbf{0.47} & & \textbf{-28.38} & \textbf{5.66} & \\\bottomrule \end{tabular}
\end{footnotesize} \rmfamily
\caption{Results for the single-block SPRP with decoupling for different values of travel time factor $\beta$ and picker capacity $C$.}\label{tab:decouple} \end{table}
\end{landscape}
} Concerning the performance of our formulation, we observe the tendency that average and maximum runtimes increase if either the time required by the picker traveling alone decreases, i.e., $c_p$ decreases, or if the capacity $C$ increases. Nevertheless, all instances can be solved to optimality within very short runtimes of at most 50 seconds. The average runtimes over all instances of a certain combination of picker speed and capacity range between 0.25 and 5.66 seconds. We find that decoupling results in considerable cost savings, which grow with increasing picker speed and capacity. Even assuming the conservative values $C=2$ and $\beta=0.75$, the objective value is notably reduced by around 6\% on average. For the most optimistic setting with $C=6$ and $\beta=0.5$, savings rise to more than 28\%.
\subsection{Results for the Single-Block SPRP with Multiple End Depots} \label{sec:multdepot} To study the effect of multiple end depots, we generate $3 \cdot 900 = 2700$ new instances from the standard SPRP instances of \citet{Scholz:2016} by locating end depots at the top or the bottom (independent of each other) of each aisle of the respective instance with a probability of $\sigma \in \{0.1, 0.5,1.0\}$. Thus, the instances with $\sigma=1$ refer to the single-block SPRP with decentralized depositing introduced in \citet{DeKoster:1998}. Table~\ref{tab:multEndDepot} presents aggregate results for each instance group. Columns $\Delta$ report the average gap between the optimal solution with multiple end depots and the optimum of the standard setting in which the picker must return to the start depot. Columns \runtime{avg} and \runtime{max} again report average and maximum runtime for each instance group.
Our formulation is able to solve all instances to optimality within a maximum runtime of approximately seven seconds. We note that, in general, the runtimes slightly increase with $\sigma$, on average from 1.13 seconds to 2.09 seconds. With respect to solution quality, we see that only moderate average savings between 2\% for $\sigma=0.1$ and 3.4\% for $\sigma=1.0$ can be realized. This suggests that, in a single-block rectangular warehouse, the overall benefits of multiple end depots are rather limited. This result might be different if the batching decision already incorporates the availability of multiple end depots. The results also indicate that even a few additional end depots already achieve a large portion of the possible benefits.
\begin{table}[htbp]
\centering
\begin{footnotesize} \sffamily \ra{1.1} \setlength{\tabcolsep}{3.0pt} \begin{tabular}{@{}l@{\hspace{0.3cm}}rrr@{\hspace{0.3cm}}rrr@{\hspace{0.3cm}}rrr@{}} \toprule & \multicolumn{3}{c}{$\sigma = 0.1$} & \multicolumn{3}{c}{$\sigma = 0.5$} & \multicolumn{3}{c}{$\sigma = 1.0$} \\ \cmidrule(l{.1cm}r{.1cm}){2-4} \cmidrule(l{.1cm}r{.1cm}){5-7} \cmidrule(l{.1cm}r{.0cm}){8-10} $(m,a)$ & $\Delta$ & \runtime{avg} & \runtime{max} & $\Delta$ & \runtime{avg} & \runtime{max} & $\Delta$ & \runtime{avg} & \runtime{max} \\ \midrule (5,30) & -1.25 & 0.02 & 0.05 & -2.78 & 0.03 & 0.09 & -3.24 & 0.03 & 0.08 \\ (5,45) & -1.68 & 0.03 & 0.09 & -3.82 & 0.03 & 0.08 & -4.22 & 0.03 & 0.09 \\ (5,60) & -3.21 & 0.03 & 0.09 & -6.85 & 0.02 & 0.05 & -7.26 & 0.02 & 0.06 \\ (5,75) & -3.75 & 0.04 & 0.12 & -6.47 & 0.03 & 0.13 & -7.48 & 0.03 & 0.05 \\ (5,90) & -3.87 & 0.04 & 0.12 & -7.46 & 0.03 & 0.05 & -7.67 & 0.03 & 0.05 \\\addlinespace (10,30) & -1.25 & 0.11 & 0.38 & -2.51 & 0.26 & 0.51 & -2.83 & 0.28 & 0.53 \\ (10,45) & -1.55 & 0.11 & 0.31 & -2.50 & 0.17 & 0.46 & -2.76 & 0.21 & 0.42 \\ (10,60) & -1.31 & 0.10 & 0.25 & -2.39 & 0.17 & 0.47 & -2.55 & 0.17 & 0.36 \\ (10,75) & -1.27 & 0.08 & 0.16 & -2.47 & 0.13 & 0.45 & -2.59 & 0.18 & 0.58 \\ (10,90) & -1.86 & 0.08 & 0.19 & -3.06 & 0.12 & 0.30 & -3.22 & 0.11 & 0.36 \\\addlinespace (15,30) & -1.96 & 0.32 & 1.53 & -2.71 & 0.70 & 3.27 & -2.97 & 0.80 & 2.62 \\ (15,45) & -1.32 & 0.29 & 0.75 & -2.45 & 0.73 & 2.90 & -2.58 & 0.98 & 3.68 \\ (15,60) & -1.65 & 0.29 & 0.93 & -2.40 & 0.59 & 2.14 & -2.48 & 0.76 & 3.54 \\ (15,75) & -1.68 & 0.22 & 0.60 & -2.56 & 0.30 & 0.79 & -2.68 & 0.39 & 1.36 \\ (15,90) & -1.98 & 0.19 & 0.63 & -2.68 & 0.20 & 0.39 & -2.74 & 0.22 & 0.53 \\\addlinespace (20,30) & -2.53 & 0.75 & 2.70 & -3.42 & 1.69 & 4.55 & -3.57 & 2.10 & 4.94 \\ (20,45) & -2.27 & 0.70 & 2.66 & -2.92 & 1.73 & 3.81 & -3.08 & 1.98 & 5.08 \\ (20,60) & -1.45 & 1.18 & 3.15 & -2.33 & 1.81 & 4.31 & -2.37 & 2.68 & 6.07 \\ (20,75) & -1.49 & 0.76 & 3.37 & -2.16 & 1.51 & 4.73 & 
-2.27 & 1.69 & 4.97 \\ (20,90) & -1.74 & 0.72 & 1.96 & -2.52 & 1.12 & 3.96 & -2.65 & 1.30 & 5.47 \\\addlinespace (25,30) & -3.23 & 1.31 & 4.36 & -4.31 & 2.95 & 6.46 & -4.48 & 3.77 & 7.49 \\ (25,45) & -2.44 & 1.97 & 5.48 & -3.06 & 3.57 & 9.39 & -3.24 & 4.10 & 8.78 \\ (25,60) & -1.77 & 2.09 & 5.88 & -2.44 & 3.81 & 8.29 & -2.60 & 3.62 & 7.23 \\ (25,75) & -1.79 & 1.73 & 6.34 & -2.40 & 3.26 & 8.60 & -2.53 & 3.08 & 7.40 \\ (25,90) & -2.02 & 1.56 & 5.23 & -2.63 & 2.43 & 5.67 & -2.69 & 3.06 & 9.80 \\\addlinespace (30,30) & -3.16 & 3.65 & 8.22 & -4.20 & 5.01 & 11.12 & -4.43 & 5.50 & 12.12 \\ (30,45) & -2.53 & 3.74 & 8.93 & -3.04 & 5.54 & 12.90 & -3.19 & 6.63 & 17.93 \\ (30,60) & -2.13 & 4.30 & 9.40 & -2.55 & 5.92 & 10.22 & -2.62 & 7.05 & 11.18 \\ (30,75) & -2.09 & 3.67 & 10.50 & -2.48 & 5.32 & 13.70 & -2.62 & 5.90 & 14.82 \\ (30,90) & -1.77 & 3.77 & 12.93 & -2.16 & 5.36 & 14.52 & -2.24 & 5.89 & 14.15 \\ \midrule \textbf{Avg.} & \textbf{-2.07} & \textbf{1.13} & & \textbf{-3.19} & \textbf{1.82} & & \textbf{-3.39} & \textbf{2.09} & \\\bottomrule \end{tabular}
\end{footnotesize} \rmfamily
\caption{Results for the single-block SPRP with multiple end depots for different probabilities $\sigma$ that an aisle contains an end depot at the top or bottom.}
\label{tab:multEndDepot} \end{table}
\section{Conclusion} \label{sec:conclusion} In this paper, we present a compact formulation of the standard SPRP that directly exploits two properties of an optimal picking tour used in the algorithm of \citet{Ratliff:1983} and thus does not require classical subtour elimination constraints. Our formulation outperforms existing standard SPRP formulations from the literature and is able to solve large problem instances within short runtimes. The extensions of our formulation to scattered storage, decoupling of picker and cart, and multiple end depots are also able to solve realistically sized instances with low computational effort. Large savings are possible by allowing the decoupling of picker and cart: assuming a picker capacity of only two items and a reduction of travel time of $25\%$ when the picker travels alone, cost savings of $6\%$ are possible, and up to $28\%$ are achieved with a picker capacity of six items and a doubling of picker speed. Contrary to this, the cost savings of multiple end depots are rather limited. An interesting topic for future research is the extension or utilization of our formulation for integrated warehousing problems with a picker routing component, as outlined in Section 1.
\end{document} | arXiv | {
"id": "1909.13344.tex",
"language_detection_score": 0.6348218321800232,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title{The Second Variational Formula For the Functional $\int v^{(6)}(g)dV_g$} \author{Bin Guo \and Haizhong Li\thanks{Supported by grants of NSFC-10971110.}} \date{}
\maketitle \begin{abstract} \noindent In this note, we compute the second variational formula for the functional $\int_M v^{(6)}(g)dv_g$, which was introduced by Graham-Juhl [GJ] and whose first variational formula was obtained by Chang-Fang (\cite{CF08}). We also prove that an Einstein manifold (of dimension $\ge 7$) with positive scalar curvature is a strict local maximum within its conformal class, unless the manifold is isometric to the round sphere with the standard metric up to a constant multiple. Note that when $(M,g)$ is locally conformally flat, this functional reduces to the well-studied $\int_M \sigma_3(g)dv_g$. Hence, our result generalizes a previous result of Jeff Viaclovsky (\cite{V}) without the locally conformally flat restriction. \end{abstract} {\bf Key words and phrases:} second variation, renormalized volume coefficients, Bach tensor, Einstein metric.
\section{Introduction} In the following, we let $(M^n,g)$ denote a compact, connected, smooth Riemannian manifold without boundary. We denote the Ricci curvature and scalar curvature by $Ric$ and $R$, respectively. Recall that the Schouten tensor $P_{ij}$ is defined by \begin{equation*} P_{ij}=\frac{1}{n-2}\Big(R_{ij}-\frac{R}{2(n-1)}g_{ij}\Big), \end{equation*} and the Riemann curvature tensor can be written as \begin{equation*} Riem = W + P\odot g, \end{equation*} where $W$ is the Weyl curvature and $\odot$ is the Kulkarni-Nomizu product, which is defined by \begin{equation*} (\alpha\odot\beta)_{ijkl}=\alpha_{ik}\beta_{jl}+\alpha_{jl}\beta_{ik}-\alpha_{il}\beta_{jk}-\alpha_{jk}\beta_{il}, \quad \forall\text{ symmetric 2-tensors }\alpha, \beta. \end{equation*} The $\sigma_k (g)$ curvature is defined to be the $k$-th elementary symmetric polynomial of the eigenvalues of the Schouten tensor $P$. In [V], Viaclovsky started the study of the variational problems of the functional $\int_M \sigma_k(g)dv_g$; he proved that a critical point of the functional $\int_M \sigma_k(g)dv_g$ $(k=1,2)$ within a conformal class subject to the constraint $Vol(M,g)=1$ is a metric satisfying $\sigma_k(g)\equiv \text{const}$, and if $k\ge 3$ and the Riemannian manifold is locally conformally flat, the same result holds.
However, for $k\ge 3$, when the manifold is not locally conformally flat, $\sigma_k(g)\equiv \text{const}$ is not the Euler-Lagrange equation of the functional $\int_M \sigma_k dv$ within a conformal class subject to the constraint $Vol(M,g)=1$.
The renormalized volume coefficients of $g$, denoted here by $v^{(2k)}(g)$, arose in the late 90s in the physics literature. They are defined in terms of the expansion of the ambient or Poincare metric associated to $g$. If the Riemannian manifold is locally conformally flat, these quantities coincide with the $\sigma_k(g)$ up to a constant. More precisely, it is known that (see [GJ], [CF], or [GHL]) \begin{equation*} \begin{split}
v^{(2)}(g)=-\frac{1}{2}\sigma_1(g),\qquad
v^{(4)}(g)=\frac{1}{4}\sigma_2(g),\\
v^{(6)}(g)=-\frac{1}{8}\big[\sigma_3(g)+\frac{1}{3(n-4)}(P_g)^{ij}(B_g)_{ij}\big], \end{split} \end{equation*}
where \begin{equation}\label{eqn:ba}
(B_g)_{ij}:=\frac{1}{n-3}\nabla^k\nabla^lW_{likj}+\frac{1}{n-2}R^{kl}W_{likj} \end{equation}
is the {\it Bach} tensor of the metric.
Just as $\int_M \sigma_k(g^{-1}\circ A_g)\,dv_g$ is conformally invariant when $2k=n$ and $(M,g)$ is locally conformally flat, Graham showed in \cite{G00} that $\int_M v^{(2k)}(g) \,dv_g$ is also conformally invariant on a general manifold when $2k=n$. Chang and Fang showed in \cite{CF08} that, for $n\neq 2k$, the Euler-Lagrange equation for the functional $\int_M v^{(2k)}(g) \,dv_g$ under conformal variations subject to the constraint $Vol_g(M)=1$ is $v^{(2k)}(g)=$ const., which generalizes the characterization of the curvatures $\sigma_k(g^{-1}\circ A_g)$ when $(M,g)$ is locally conformally flat, as given by Viaclovsky \cite{V}.
We note that Graham \cite{G00} also gives an explicit expression of $v^{(8)}(g)$, but the explicit expression of $v^{(2k)}(g)$ for general $k$ is not known because they are algebraically complicated (see page 1958 of \cite{G00}). Thus the study of the $v^{(2k)}(g)$ curvatures involves significant challenges not shared by that of $\sigma_k(g)$: firstly, for $k\geq 3$, $v^{(2k)}(g)$ depends on derivatives of curvature of $g$--- in fact, for $k\geq 3$, $v^{(2k)}(g)$ depends on derivatives of curvatures of order up to $2k-4$; secondly, the $v^{(2k)}(g)$ are defined via an indirect highly nonlinear inductive algorithm (see \cite{G00}).
We aim to study the stability of the critical metric of the functional \begin{equation*} \mathcal{F}_3[g]=\frac{\int _M v^{(6)}(g)dv_g}{\big(\int_M dv_g\big)^{(n-6)/n}}, \end{equation*} within a conformal class. First we recall the theorem of Chang-Fang [CF] (also see Graham [G]). \begin{thm}\label{thm:thm2}(\cite{CF08}) Let $(M^n,g)$ be an $n$-dimensional $(n\geq 7)$ compact Riemannian manifold, then the functional $\mathcal{F}_3(g)$ is variational within the conformal class, i.e. the critical metric in $[g]$ satisfies the equation \begin{equation}\label{eqn:first} v^{(6)}\equiv \text{const}. \end{equation} If $n=6$, $\mathcal{F}_3[g]$ is a constant in the conformal class $[g]$. \end{thm} In this note, we compute the second variational formula of $\mathcal{F}_3[g]$ within its conformal class $[g]$. Our results are \begin{thm}\label{thm:thm1} Let $(M^n,g)$ be an $n$-dimensional $(n\geq 7)$ compact Riemannian manifold with $v^{(6)}(g)=$const, then the second variational formula of the functional $\mathcal{F}_3{[g]}$ within its conformal class at $g$ is \begin{equation*}
\frac{d^2}{dt^2}\Big|_{t=0} \mathcal{F}_3[g_t]= (n-6)V^{-(n-6)/n}\Bigg\{\int \Big[-6v^{(6)}(g)\bar{\phi}^2+\Big( \frac{B_{ij}\bar{\phi}_{ij}}{24(n-4)}+\frac{1}{8}T_{2ij}\bar{\phi}_{ij}-\frac{1}{12}P_{ij}C_{ijk}\bar{\phi}_k\Big)\bar{\phi} \Big]dv\Bigg\} \end{equation*}
where $g_t=e^{2u_t}g$, $\frac{\partial}{\partial t}\big|_{t=0}u_t=\phi$, and $\bar\phi=\phi-\frac{\int_M\phi dv_g}{\int_M dv_g}$, $T_{2ij}$ and $C_{ijk}$ are defined in section 2. \end{thm}
\begin{thm}\label{thm:main} Let $(M^n,g)$ be an $n$-dimensional $(n\geq 7)$ compact Einstein manifold with positive scalar curvature. Then $g$ is a strict local maximum within its conformal class $[g]$, unless $(M^n,g)$ is isometric to $S^n$ with the standard metric up to a constant multiple. \end{thm}
\begin{rem} When $(M^n,g)$ is locally conformally flat, $v^{(6)}(g)=-\frac{1}{8}\sigma_3(g)$. For the functional $\int_M \sigma_3(g)dv_g$, J.~Viaclovsky (\cite{V}) proved that a positive constant sectional curvature metric is a strict local minimum, unless the manifold is isometric to $S^n$ with the standard metric. Our result coincides with his at locally conformally flat Einstein metrics; however, ours does not need the locally conformally flat assumption. \end{rem}
\section{Preliminaries} Let $(M^n,g)$ be an $n$-dimensional compact Riemannian manifold. Throughout this note, we make the convention that repeated index means summation over $1$ to $n$. First we recall the transformation law of various curvatures under conformal change of metrics. Let $\tilde{g}=e^{2u}g$, $u\in C^\infty (M)$, then the Riemannian curvature tensors satisfy \begin{equation*} Riem(\tilde{g})=e^{2u}(Riem (g) - \alpha\odot g), \end{equation*}
where $\alpha_{ij}=u_{ij}-u_iu_j+\frac{|\nabla u|^2}{2}g_{ij}$ (note that $u_{ij}$ means the covariant derivative with respect to the fixed metric $g$). By contracting, we see that the Ricci curvature and scalar curvature satisfy \begin{equation}\label{eqn:trans} R_{ij}(\tilde{g})= R_{ij}-(n-2)\alpha_{ij}-(\sum_k\alpha_{kk})g_{ij}, \quad R(\tilde{g})= e^{-2u}R - 2(n-1)e^{-2u}\sum_k\alpha_{kk}. \end{equation} From \eqref{eqn:trans} and the definition of the Schouten tensor, we see that \begin{equation}\label{eqn:schouten} \tilde{P}_{ij}=P_{ij}-\alpha_{ij}, \end{equation} where we denote $P(\tilde{g})$ by $\tilde{P}$ for notational convenience.
\begin{lem}\label{lem:lem1} We have the following formulae (see e.g. [GHL])
(1) $\nabla^i W_{ijkl}=-(n-3)C_{jkl}, \; C_{ijk}\text{ is the {\it{Cotton}} tensor defined by } P_{ij,k}-P_{ik,j}$;
(2) \label{eqn:bach} $\nabla^j B_{ij}=(n-4)\sum_{k,l}P_{kl}C_{kli}$;
(3) $B_{ij}=B_{ji}$, where $B_{ij}$ is defined by \eqref{eqn:ba}.
\end{lem} The proof of Lemma \ref{lem:lem1} is a direct calculation and one can find it in \cite{GHL}.
Let $V$ be a vector space, $A:V\to V$ a linear map. Define the Newton transformation $T_k(A)$ $:V\to V$ by: \begin{equation*} T_{k}(A):=\sigma_k(A)I-\sigma_{k-1}(A)A+\cdots+(-1)^k A^k=\sum_{i=0}^k\sigma_{k-i}(A)(-1)^iA^i, \end{equation*} where $I$ is the identity map and $\sigma_k(A)$ is the $k$-th elementary symmetric polynomial of the eigenvalues of $A$. Under an orthnormal basis of $V$, $T_{k}$ can be written as follows: \begin{equation*} {T_{k}}_{ij}=\frac{1}{k!}\delta^{j_1\ldots j_k j}_{i_1\ldots i_k i}A_{i_1j_1}\ldots A_{i_kj_k}, \end{equation*} where $\delta^{j_1\ldots j_k j}_{i_1\ldots i_k i}$ is the generalized Kronecker notation. We recall some well-known results in this respect, which we will need in our later arguments. \begin{lem}\label{lem:newton} The Newton transformations $T_{k}$ satisfy ([R], [GHL])
(1) Newton's formula: $(k+1)\sigma_{k+1}(A)=\mathrm{tr}(T_kA)$;
(2) $\frac{d}{dt}\sigma_k(A_t)=\mathrm{tr}(T_{k-1}\frac{d}{dt} A_t)$, for any family of transformations $A_t: V\to V$.
(3) $\mathrm{tr}(T_{k})=(n-k)\sigma_k(A)$.
\end{lem}
In the following we denote $T_k(g^{-1}\circ P)$ simply by $T_k$. We have the following formula, which is a direct calculation (see [GHL]) \begin{equation}\label{eqn:tij} \sum_{i}{T_2}_{ij,i}=-\sum_{k,l}P_{kl}C_{klj}. \end{equation}
\section{The first variational formula and proof of Theorem 1.1} In this section, we will compute the Euler-Lagrange equation for the functional $\mathcal{F}_3(g)$ within the conformal class. For convenience we denote the numerator of $\mathcal{F}_3(g)$ by
$$ F(g)=\int_M v^{(6)}(g)dv_g. $$
Under the conformal change of metrics $g_t=e^{2u(t)}g$, by use of \eqref{eqn:schouten}, we see that in local coordinates (see [CF]) \begin{equation*} \tilde{P}_{i}^{\;j}=e^{-2u(t)}(P_{i}^j-\alpha_{i}^{\;j}) \end{equation*} \begin{equation*} \tilde{B}_{i}^{\;j}=e^{-4u(t)}\Big(B_{i}^{\;j}+(n-4)u^k\big(g^{jl}C_{ilk}+g^{jl}C_{lik}\big)+(n-4)u^k u^lg^{pj}W_{ikpl}\Big), \end{equation*}
where we write $\alpha_{ij}=u_{ij}(t)-u_i(t)u_j(t)+\frac{1}{2}|\nabla_g u(t)|^2g_{ij}$ and we make the convention that $\tilde{P}_{ij} = P_{ij}(g_t)$, $\tilde{B}_{ij}=B_{ij}(g_t)$, etc., for notational convenience.
\begin{comment}
First, we define the functional: \begin{equation} \mathscr{F}_3[g]:=\frac{\int_M v^{(6)}d V_g}{(\int_Md V_g)^{(n-6)/n}}. \end{equation} where $v^{(6)}=\sigma_3(P)+\frac{1}{3(n-4)}B_{ij}P_{ij}$. For convenience we denote the denominator of the above functional by $F=\int_M v^{(6)}d V_g$. We use the following notations: \begin{equation*} g=\sum_i w_i^2,\quad g_t=e^{2u(t)}g=\sum_i \tilde{w}_i^2=\sum_i (e^{u(t)}w_i)^2, \end{equation*} \begin{equation*}
g_0=g,\quad\text{i.e.}\; u(0)=0,\; \frac{d}{dt}u(0)\Big|_{t=0}=\phi,
\frac{d^2}{dt^2}u(t)\Big|_{t=0}=\psi. \end{equation*} We also make the following convention, the covariant derivative with a tilde above means differentiation with respect to the coframe $\tilde{w_i}$, otherwise it means that with respect to $w_i$. Direct calculations show the following two formulas, which are the change of Schouten tensor and Bach tensor under conformal change of metrics. \begin{align*} \tilde{P}_{ij}&=e^{-2u(t)}(P_{ij}-\alpha_{ij});\\ \tilde{B}_{ij}&=e^{-4u(t)}(B_{ij}+(n-4)u_k(C_{ijk}+C_{jik})+(n-4)u_k u_lW_{ikjl}). \end{align*} \end{comment}
For notational convenience we denote $\frac{d}{dt}$ by $\delta$. Denote
$\frac{\partial}{\partial t}\Big|_{t=0}u=\phi$, and $\frac{\partial^2}{\partial t^2}\Big|_{t=0}u=\psi$. With the above preparations, we have \begin{equation*} \delta \tilde{P}_{i}^{\;j}=-2(\delta u)\tilde{P}_{i}^{\;j}-e^{-2u}\delta\alpha_{i}^{\;j} \end{equation*} \begin{equation*} \begin{split} \delta \tilde{B}_{i}^{\;j}=-4(\delta u)\tilde{B}_{i}^{\;j}+(n-4)e^{-4u}\Big(&(\delta u)^k\big(g^{jl}C_{ilk}+g^{jl}C_{lik}\big)\\
& +(\delta u)^k u^lg^{pj}W_{ikpl}+u^k(\delta u)^lg^{pj}W_{ikpl}\Big) \end{split}\end{equation*}
\begin{equation*} \delta (dv_{g_t})= n(\delta u) dv_{g_t}. \end{equation*} Now we derive the first variation formula for $\mathcal{F}_3$. First we have
\begin{equation}\label{eqn:new1st} -8\delta F=\int \delta(\sigma_3(g_t)dv_{g_t})+\frac{1}{3(n-4)}\delta(\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\;i}dv_{g_t}). \end{equation} By use of Lemma \ref{lem:newton}, we have \begin{align*} \delta(\sigma_3(g_t))=\tilde{T}_{2j}^{\;i}\delta\tilde{P}_{i}^{\;j}&=\tilde{T}_{2j}^{\;i}
(-2(\delta u)\tilde{P}_{i}^{\;j}-e^{-2u}\delta\alpha_{i}^{\;j})\\
&=-6(\delta u)\sigma_3(g_t)-e^{-2u}{\tilde{T}_{2j}}^{\;i}\delta\alpha_{i}^{\;j}. \end{align*} Hence \begin{equation}\label{eqn:11}\begin{split} \int \delta(\sigma_3(g_t)dv_{g_t})&=\int \Big[-6(\delta u)\sigma_3(g_t)
-e^{-2u}\tilde{T}_{2i}^{\;j}\delta\alpha_{j}^{\;i}+n(\delta u)\sigma_3(g_t)\Big]dv_{g_t}\\
&=\int \Big[(n-6)(\delta u)\sigma_3(g_t)-e^{-2u}\tilde{T}_{2i}^{\;j}\delta\alpha_{j}^{\;i}\Big]dv_{g_t}. \end{split}\end{equation} On the other hand, the second term of \eqref{eqn:new1st} is \begin{align} \label{eqn:22} &\int \frac{1}{3(n-4)}\delta(\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\;i}dv_{g_t})\\ =& \int \frac{1}{3(n-4)}\bigg[\delta\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\;i}
+\tilde{P}_{i}^{\;j}\delta\tilde{B}_{j}^{\;i}+n(\delta u)\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\;i}\bigg]dv_{g_t}\notag\\
= &\int \frac{1}{3(n-4)}\bigg[\tilde{B}_{j}^{\;i} \Big((-2\delta u)\tilde{P}_{i}^{\;j}-e^{-2u}(\delta\alpha_{i}^{\;j})\Big)+
\tilde{P}_{i}^{\;j}\Big(-4(\delta u)\tilde{B}_{j}^{\;i} + (n-4)e^{-4u}\big((\delta u)^k(g^{il}C_{ljk}+C_{jlk})\notag \\
&\qquad\quad+2(\delta u)^k\ u^lg^{ip}W_{pkjl}\big)\Big)
+n(\delta u)\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\; i}
\bigg]dv_{g_t}\notag\\
&=\int \frac{1}{3(n-4)}\bigg[(n-6)(\delta u)\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\;i}
-e^{-2u}\tilde{B}_{j}^{\;i}
(\delta\alpha_{i}^{\;j})\notag \\ &\qquad
+2(n-4)e^{-4u}\tilde{P}_{i}^{\;j}\Big((\delta u)^kg^{il}C_{ljk}+(\delta u)^k
u^lg^{ip}W_{pkjl}\Big)\bigg]dv_{g_t}\notag \end{align}
From calculations in \eqref{eqn:11} and \eqref{eqn:22}, we have the following formula, which will be used in section 4
\begin{align}\label{eqn:vv} &\delta \big(v^{(6)}(g_t)dv_{g_t}\big)\\ =&-\frac{1}{8}\Big[(n-6)(\delta u)\sigma_3(g_t)-e^{-2u}{\tilde{T}}_{2i}^{\;j}(\delta\alpha)_{j}^{\;i}\Big]dv_{g_t}
-\frac{1}{24(n-4)}\Big[(n-6)(\delta u)\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\;i}\notag\\ &\;\quad
-e^{-2u}\tilde{B}_{i}^{\;j}(\delta\alpha)_{j}^{\;i}
+2(n-4)e^{-4u}\tilde{P}_{i}^{\;j}\Big((\delta u)^kg^{il}C_{ljk} +
(\delta u)^k
u^lg^{ip}W_{pkjl}\Big)\Big]dv_{g_t}.\notag \end{align}
Thus we have
\begin{align}\label{eqn:1st} \delta F=&\int \delta (v^{(6)}(g_t)dv_{g_t})\\
=&-\frac{1}{8}\int\Bigg\{(n-6)(\delta u)\sigma_3(g_t)-e^{-2u}\tilde{T}_{2i}^{\;j}\delta\alpha_{j}^{\;i}
+ \frac{1}{3(n-4)}\Big[(n-6)(\delta u)\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\;i}\notag\\ &\;
-e^{-2u}\tilde{B}_{j}^{\;i}(\delta\alpha)_{i}^{\;j}
+2(n-4)e^{-4u}\tilde{P}_{i}^{\;j}\big((\delta u)^kg^{il}C_{ljk}+(\delta u)^k u^lg^{ip}W_{pkjl}\big)\Big]\Bigg\}dv_{g_t}\notag\\
=&\int \Bigg\{(n-6)(\delta u)v^{(6)}(g_t)-\frac{1}{8}\bigg[ -e^{-2u}\tilde{T}_{2i}^{\;j}\delta\alpha_{j}^{\;i} -
\frac{1}{3(n-4)}e^{-2u}\tilde{B}_{j}^{\;i}
\delta\alpha_{i}^{\;j}\notag\\
& +\frac{2}{3}e^{-4u}\tilde{P}_{i}^{\;j}\Big((\delta u)^k g^{il}C_{ljk}+
(\delta u)^k u^lg^{ip} W_{pkjl}\Big)\bigg]\Bigg\}dv_{g_t}.\notag \end{align}
\noindent {\bf Proof of Theorem 1.1} Noting that $u(0)=0$, we conclude the first variational formula of $\mathcal{F}_3[g_t]$ within the conformal class $[g]$ is (see [CF] or [G]) \begin{align}\label{eqn:main1}
\frac{d}{dt}\Big|_{t=0}\mathcal{F}_3(g_t)&=\frac{d}{dt}\Big|_{t=0}F\cdot V^{-\frac{n-6}{n}}- \frac{n-6}{n}V^{-\frac{n-6}{n}}\Big(\int v^{(6)}d v\Big)\int n \phi d v\\ =&V^{-\frac{n-6}{n}}\bigg\{(n-6)\int \phi v^{(6)}(g)+\frac{1}{8}\int T_{2ij}\phi_{ij}+\frac{1}{8}\int \frac{B_{ij}\phi_{ij}}{3(n-4)}
-\frac{1}{12}
\int P_{ij}\phi_kC_{ijk}\notag \\ &\qquad \; \qquad-(n-6)V^{-1}\Big(\int v^{(6)}(g)\Big)\int \phi d v\bigg\}\notag\\ =&(n-6)V^{-\frac{n-6}{n}}\bigg\{\int \phi \Big(v^{(6)}-V^{-1}\int v^{(6)}\Big)d v\bigg\}.\notag \end{align} where we have used \eqref{eqn:tij} and (2) of Lemma \ref{lem:lem1} and the integration by parts. Here $V=\int d v_g$. Hence, we see that the Euler--Lagrange equation of the functional $\mathcal{F}_{3}(g)$ within the conformal class $[g]$ is \begin{equation*} v^{(6)}(g)=V^{-1}\int v^{(6)}(g)dv_g\equiv \text{const}, \end{equation*} and we get Theorem \ref{thm:thm2}.\qed
\section{The Second Variational Formula and proofs of Theorems 1.2--1.3} In this section, we will calculate the second variational formula for the functional $\mathcal{F}_3$ within the conformal class $[g]$. The computation is direct and routine. For convenience, we separate each term in the first variational equation \eqref{eqn:1st} and compute them respectively.
For derivative of the first term in \eqref{eqn:1st}, by use of \eqref{eqn:vv}, we have \begin{align}\label{eqn:m1}
&(n-6)\frac{d}{dt}\Big|_{t=0}\int (\delta u) v^{(6)}(g_t)dv_{g_t}\\ =& (n-6)\int\Bigg\{ \psi v^{(6)}(g)-\frac{1}{8}(\delta u) \Big[(n-6)(\delta u)\sigma_3(g_t)-
e^{-2u} \tilde{T}_{2i}^{\;j}\delta\alpha_{j}^{\;i}\Big]dv_{g_t}\Big|_{t=0}\notag\\
&\quad\qquad\qquad-\frac{(\delta u)}{24(n-4)}\Big[(n-6)(\delta u)\tilde{P}_{i}^{\;j}\tilde{B}_{j}^{\;i}
- e^{-2u}\tilde{B}_{j}^{\;i}\delta\alpha_{i}^{\;j}\notag
\\ &\quad\qquad\qquad +2(n-4)e^{-4u}\tilde{P}_{i}^{\;j}\bigg((\delta u)^kg^{il}C_{ljk}
+(\delta u)^k u^lg^{ip}W_{pkjl}\bigg)\Big]dv_{g_t}\Big|_{t=0}\Bigg\}\notag\\
=& (n-6)\int \bigg\{\psi v^{(6)}(g)+(n-6)\phi^2v^{(6)}(g)-\frac{1}{8} \Big[-\phi T_{2ij}\phi_{ij}
\notag \\ &\;\qquad -\frac{\phi B_{ij}\phi_{ij}}{3(n-4)}
+\frac{2}{3}
\phi\phi_k P_{ij} C_{ijk}\Big]\bigg\}dv.\notag \end{align} For derivative of the second term in \eqref{eqn:1st}, we need the following formula of the variation of the Newton transformation:
\begin{align}
\frac{d}{dt}\Big|_{t=0}(\tilde{T}_{2i}^{\;j})&=\frac{d}{dt}\Big|_{t=0}\Big(\frac{1}{2!}\delta^{j_1j_2j}_{i_1i_2i} \tilde{P}_{j_1}^{\;i_1}\tilde{P}_{j_2}^{\;i_2}\Big)
=\delta^{j_1j_2j}_{i_1i_2i}P_{j_1}^{i_1}\frac{d}{dt}\Big|_{t=0}\tilde{P}_{j_2}^{i_2}\\
&=\delta^{j_1j_2j}_{i_1i_2i}P_{i_1j_1}(-2\phi P_{i_2j_2}-\phi_{i_2j_2})=
-4\phi {T_{2}}_{ij}-\delta^{j_1j_2j}_{i_1i_2i}P_{i_1j_1}\phi_{i_2j_2}.\notag \end{align} Therefore, the variation of the second term of \eqref{eqn:1st} is given by \begin{align}\label{eqn:m2}
\frac{1}{8}&\frac{d}{dt}\Big|_{t=0}\int e^{-2u}\tilde{T}_{2i}^{\;j}\delta\alpha_{j}^{i}dv_{g_t}=\frac{1}{8}\int \Bigg\{
-2\phi
T_{2ij}\phi_{ij}+\frac{d}{dt}\Big|_{t=0}\tilde{T}_{2i}^{\;j}\phi_{j}^{\;i}\\
&\quad\qquad +T_{2ij}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij}
+ n\phi T_{2ij}\phi_{ij}\Bigg\}d v\notag\\
=&\frac{1}{8}\int \bigg\{(n-2)\phi T_{2ij}\phi_{ij}+T_{2ij}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij}
+\phi_{ij}\Big(-4\phi {T_{2}}_{ij}
-\delta^{j_1j_2j}_{i_1i_2i}P_{i_1j_1}\phi_{i_2j_2}\Big)\bigg\}dv\notag\\
=&\int\bigg\{ \frac{(n-6)}{8}\phi T_{2ij}\phi_{ij}+\frac{1}{8}T_{2ij}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij}
-\frac{1}{8}\delta^{j_1j_2j}_{i_1i_2i}P_{i_1j_1}\phi_{i_2j_2}\phi_{ij}\bigg\}.\notag \end{align} The variation of the third term of \eqref{eqn:1st} is \begin{align}\label{eqn:m3}
&\frac{1}{24(n-4)}\frac{d}{dt}\Big|_{t=0}\int \Big(e^{-2u}\tilde{B}_{i}^{\;j}\delta\alpha_{j}^{\;i}\Big)dv_{g_t}\\
=&\frac{1}{24(n-4)}\int\Bigg\{-2\phi B_{ij}\phi_{ij}+\frac{d}{dt}\Big|_{t=0}(\tilde{B}_{i}^{\;j})\phi_{ij}
+B_{ij}\frac{d}{dt}\Big|_{t=0}\delta\alpha_{ij}+n\phi B_{ij}\phi_{ij}\Bigg\}d v\notag\\
=&\frac{1}{24(n-4)}\int\Bigg\{(n-2)\phi B_{ij}\phi_{ij}+B_{ij}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij} +\phi_{ij}\Big(-4\phi B_{ij}+2(n-4)\phi_kC_{ijk}\Big)\Bigg\}d v\notag\\
=&\int \Bigg\{\frac{n-6}{24(n-4)}\phi B_{ij}\phi_{ij}+\frac{B_{ij}}{24(n-4)}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij} +\frac{1}{12}\phi_k\phi_{ij}C_{ijk}\Bigg\}dv.\notag \end{align}
The variation of the fourth term of \eqref{eqn:1st} is \begin{align}\label{eqn:m4}
&-\frac{1}{12}\frac{d}{dt}\Big|_{t=0}\int \bigg[e^{-4u}\tilde{P}_{j}^{\;i}\Big((\delta u)^kg^{jl}C_{ilk}+
(\delta u)^k u^lg^{jp}W_{ikpl}\Big)\bigg]dv_{g_t}\\ &=-\frac{1}{12}\int \Bigg\{-4\phi P_{ij}\phi_kC_{ijk} +
\frac{d}{dt}\Big|_{t=0}\tilde{P}_{i}^{j}\phi^kg^{il}C_{ljk}
+P_{ij}\psi_k C_{ijk}+P_{ij}\phi_k \phi_l W_{ikjl}+n\phi P_{ij}\phi_kC_{ijk}\Bigg\}dv\notag\\ &=-\frac{1}{12}\int \Bigg\{(n-4)\phi P_{ij}\phi_kC_{ijk}+P_{ij}\phi_k \phi_lW_{ikjl} +\phi_kC_{ijk}\Big(-2\phi P_{ij}-\phi_{ij}\Big)+P_{ij}\psi_kC_{ijk}\Bigg\}dv\notag\\ &=\int \Bigg\{-\frac{(n-6)}{12}\phi P_{ij}\phi_kC_{ijk}-\frac{1}{12}P_{ij}\phi_k\phi_lW_{ikjl} +\frac{1}{12}\phi_k\phi_{ij}C_{ijk}-\frac{1}{12}P_{ij}\psi_kC_{ijk}\Bigg\}dv.\notag \end{align} Combining \eqref{eqn:m1}, \eqref{eqn:m2}, \eqref{eqn:m3} and \eqref{eqn:m4}, we have \begin{align}\label{eqn:m2nd}
& \frac{d^2}{dt^2}\Big|_{t=0} F(g_t) \\
&=\int \Bigg\{(n-6)\psi v^{(6)}(g)+(n-6)^2\phi^2 v^{(6)}(g) +\frac{n-6}{4}\phi T_{2ij}\phi_{ij} -\frac{1}{8}\Big[ - \frac{2(n-6)\phi B_{ij}\phi_{ij}}{3(n-4)}\notag\\
& \qquad+ \frac{4(n-6)}{3}\phi\phi_k P_{ij}C_{ijk}- T_{2ij}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij}-
\frac{B_{ij}}{3(n-4)}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij}\notag\\
&\qquad +\delta^{j_1j_2j}_{i_1i_2i}P_{i_1j_1}\phi_{i_2j_2}\phi_{ij}-\frac{4}{3}\phi_k\phi_{ij}C_{ijk}
+\frac{2}{3}P_{ij}\phi_k\phi_l W_{ikjl}+\frac{2}{3}P_{ij}\psi_kC_{ijk}\Big]\Bigg\}dv\notag. \end{align}
Since $\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij}=\psi_{ij}-2\phi_i\phi_j+|\nabla
\phi|^2g_{ij}$,
by use of divergence theorem we obtain \begin{align}\label{eqn:alp}
&-\int T_{2ij}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij}-\int \frac{B_{ij}}{3(n-4)}\frac{d^2}{dt^2}\Big|_{t=0}\alpha_{ij}\\
&=-\int T_{2ij}(\psi_{ij}-2\phi_i\phi_j+|\nabla \phi|^2g_{ij})
-\int \frac{B_{ij}(\psi_{ij}-2\phi_i\phi_j+|\nabla \phi|^2g_{ij})}{3(n-4)}\notag\\ &=\int -\frac{2}{3}P_{kl}C_{kli}\psi_i+2T_{2ij}\phi_i\phi_j+\frac{2}{3(n-4)}B_{ij}\phi_i\phi_j
-|\nabla \phi|^2T_{2kk}\notag\\
&=\int -\frac{2}{3}P_{kl}C_{kli}\psi_i-|\nabla \phi|^2T_{2kk}-2T_{2ij,j}\phi_i\phi
-2T_{2ij}\phi_{ij}\phi-\frac{2}{3(n-4)}B_{ij,j}\phi_i\phi-\frac{2}{3(n-4)}B_{ij}\phi_{ij}\phi\notag\\
&=\int -\frac{2}{3}P_{kl}C_{kli}\psi_i-|\nabla \phi|^2T_{2kk}+\frac{4}{3}\phi \phi_i P_{kl}C_{kli}-2T_{2ij}\phi_{ij}\phi
-\frac{2}{3(n-4)}B_{ij}\phi_{ij}\phi,\notag
\end{align} where we have used the following identity in the second equality $$ \int T_{2ij}\psi_{ij}dv+\frac{1}{3(n-4)}\int B_{ij}\psi_{ij}dv=\int \frac{2}{3}P_{kl}C_{kli}\psi_i, $$ which can be checked by use of (2.3), (2) of Lemma 2.1 and integration by parts.
Substituting \eqref{eqn:alp} into \eqref{eqn:m2nd} and making some cancelations, we conclude that \begin{align}\label{eqn:2nd}
\frac{d^2}{dt^2}\Big|_{t=0} F(g_t)=&\int \Bigg\{(n-6) \psi v^{(6)}+(n-6)^2\phi^2v^{(6)}(g)-\frac{1}{8}\Big[-2(n-5)\phi T_{2ij}\phi_{ij} \\ &-\frac{2(n-5)}{3(n-4)}\phi B_{ij}\phi_{ij}+\frac{4(n-5)}{3}\phi P_{ij}\phi_kC_{ijk}\notag\\
& -\frac{4}{3}\phi_k\phi_{ij}C_{ijk}+\delta^{mnj}_{kli}P_{km}\phi_{ln}\phi_{ij}
+\frac{2}{3}P_{ij}\phi_k\phi_l W_{ikjl}
-(n-2)|\nabla \phi|^2\sigma_2(g)\Big]\Bigg\}dv\notag, \end{align} where we have used the identity that $T_{2kk}=(n-2)\sigma_2(g)$ (see Lemma 2.2). It remains to study the last four terms on the right hand side of \eqref{eqn:2nd}. By definition, \begin{align*} \delta^{mnj}_{kli}&=\det\begin{pmatrix}\delta_{km}&\delta_{kn}&\delta_{kj}\\
\delta_{lm}&\delta_{ln}&\delta_{lj}\\
\delta_{im}&\delta_{in}&\delta_{ij}\end{pmatrix}\\
=&\delta_{km}\delta_{ln}\delta_{ij}-\delta_{km}\delta_{lj}\delta_{in}-\delta_{lm}\delta_{kn}\delta_{in}+
\delta_{lm}\delta_{in}\delta_{kj}+\delta_{im}\delta_{kn}\delta_{lj}-\delta_{im}\delta_{ln}\delta_{kj}. \end{align*} We compute by use of divergence theorem \begin{equation}\label{eqn:mid}\begin{split} &\int \delta^{mnj}_{kli}P_{km}\phi_{ln}\phi_{ij}=\int \phi\Big(\delta^{mnj}_{kli}(P_{mk}\phi_{nl})_{,ij}\Big)\\ =&\int \phi\delta^{mnj}_{kli}\Big(P_{km,ij}\phi_{nl}+2P_{km,i}\phi_{nl,j}+P_{km}\phi_{nl,ij}\Big). \end{split}\end{equation} Now we compute integrands of the right hand side of \eqref{eqn:mid} respectively. The first term is \begin{align}\label{eqn:mid1} &\phi\delta^{mnj}_{kli}P_{km,ij}\phi_{nl}\\ =&\phi P_{km,ij}\phi_{nl}(\delta_{km}\delta_{ln}\delta_{ij}-\delta_{km}\delta_{lj}\delta_{in}-\delta_{lm}\delta_{kn}\delta_{in}+
\delta_{lm}\delta_{in}\delta_{kj}+\delta_{im}\delta_{kn}\delta_{lj}-\delta_{im}\delta_{ln}\delta_{kj})\notag \\
=&\phi P_{kk,ii}\phi_{nn}-\phi P_{kk,ij}\phi_{ij}-\phi P_{kl,ii}\phi_{kl}+\phi P_{kl,ik}\phi_{il}+\phi P_{ki,il}\phi_{kl}-\phi P_{ki,ik}\phi_{ll}\notag\\
=&\phi \phi_{nn}C_{iik,k}+\phi\phi_{kl}C_{lik,i}+\phi\phi_{kl}C_{iki,l}
=\phi \phi_{kl}C_{lik,i}.\notag \end{align} The second one is \begin{align}\label{eqn:mid2} &2\phi\delta^{mnj}_{kli}P_{km,i}\phi_{nlj}\\ =&2\phi P_{kk,i}\phi_{nni}-2\phi P_{kk,i}\phi_{ijj}-2\phi P_{km,i}\phi_{kmi}+2\phi P_{km,i}\phi_{imk}+2\phi P_{km,m}\phi_{kll}-2\phi P_{km,m}\phi_{llk}\notag\\ =&2\phi P_{km,i}(\phi_{mik}-\phi_{mki})=2\phi P_{km,i}\phi_{j}R_{jmik}.\notag \end{align} The third one is \begin{align}\label{eqn:mid3} &\phi\delta^{mnj}_{kli}P_{km}\phi_{nl,ij}\\ =&\phi \phi_{kk,ii}P_{nn}-\phi\phi_{iikl}P_{kl}-\phi\phi_{klii}P_{kl}+\phi\phi_{ilki}P_{kl}+\phi\phi_{kiil}P_{kl}-\phi\phi_{kiik}P_{ll}\notag \\ =&\phi P_{nn}(\phi_{kkii}-\phi_{kiik})+\phi P_{kl}(\phi_{ilki}-\phi_{klii})+\phi P_{kl}(\phi_{kiil}-\phi_{iikl})\notag\\ =&\phi P_{nn}(-\phi_{mk}R_{mk}-\phi_m R_{mk,k})+\phi P_{kl}(\phi_{mi}R_{mlik}+\phi_m R_{ikml,i})+\phi P_{kl}{(\phi_{ml}R_{mk}+\phi_mR_{mk,l})}\notag \end{align} where we have used the Ricci identity in the last equality.
Substituting the following identities into \eqref{eqn:mid2} and \eqref{eqn:mid3}, \begin{equation*} R_{ij}=(n-2)P_{ij}+\frac{R}{2(n-1)}g_{ij},\quad R_{ij,i}=\frac{R_j}{2},\quad P_{kk}=\frac{R}{2(n-1)}; \end{equation*} \begin{equation*} R_{ijkl}=W_{ijkl}+P_{ik}g_{jl}+P_{jl}g_{ik}-P_{il}g_{jk}-P_{jk}g_{ik}, \; P_{kk,i}=P_{ik,k}; \end{equation*} \begin{equation*}\begin{split} R_{ikml,i}&=R_{kl,m}-R_{km,l} =(n-2)C_{klm}+\frac{\nabla_m R g_{kl}-\nabla_l R g_{km}}{2(n-1)}. \end{split}\end{equation*} after making some cancelations we see that the left hand side of \eqref{eqn:mid} becomes \begin{align}\label{eqn:1} &\int \phi\delta^{mnj}_{kli}P_{km}\phi_{nl}\phi_{ij}\\
\notag =&\int \Big[-\phi\phi_{kl}C_{lki,i}+\frac{4-n}{2(n-1)}\phi R\phi_{mk}P_{mk}-\frac{R^2}{4(n-1)^2}\phi\Delta\phi-\frac{n-2}{4(n-1)^2}\phi R \phi_m R_{,m}\\
&\quad +\phi P_{kl}\phi_{mi}W_{mlik}+\phi |P_{kl}|^2\Delta \phi+(n-4)\phi P_{kl}\phi_{ml}P_{mk}+n\phi P_{kl}\phi_m P_{kl,m}\notag\\
&\quad +2\phi P_{km,i}\phi_j W_{jmik}-2\phi P_{km,i}\phi_k P_{mi}\Big].\notag \end{align} On the other hand, by divergence theorem, we see that the other three terms on the last of \eqref{eqn:2nd} are \begin{align}\label{eqn:2} \frac{2}{3}\int P_{ij}\phi_k\phi_l W_{ikjl}=&
\frac{2}{3}\int -\phi P_{ij,k}\phi_l W_{ikjl}-\phi P_{ij}\phi_{kl}W_{ikjl}-\phi P_{ij}\phi_l W_{ikjl,k}\\
=&\int -\frac{2}{3}\phi P_{ij,k}\phi_l W_{ikjl}-\frac{2}{3}\phi P_{ij}\phi_{kl}W_{ikjl}
-\frac{2(n-3)}{3}\phi P_{ij}\phi_l C_{ijl},\notag \end{align}
\begin{equation}\label{eqn:3}\begin{split} -\frac{4}{3}\int \phi_k\phi_{ij}C_{ijk}
=\int
\frac{4}{3}\phi\phi_{ijk}C_{ijk}+\frac{4}{3}\phi\phi_{ij}C_{ijk,k}, \end{split}\end{equation}
\begin{align}\label{eqn:4}
&-\int (n-2)|\nabla\phi|^2\sigma_2(g)\\ =&\int (n-2)\phi\Delta\phi\sigma_2(g) +(n-2)\phi \phi_i(\sigma_2(g))_{,i}\notag\\
=&\int \frac{(n-2)}{2}\phi\Delta\phi \Big(\frac{R^2}{4(n-1)^2}-|P_{kl}|^2\Big) +(n-2)\phi\phi_{i}\Big(\frac{RR_{,i}}{4(n-1)^2}-P_{kl}P_{kl,i}\Big)\notag\\
=&\int \frac{(n-2)\phi\Delta\phi R^2}{8(n-1)^2}-\frac{n-2}{2}\phi\Delta\phi |P_{kl}|^2 + \frac{(n-2)\phi R\phi_i R_{,i}}{4{(n-1)}^2}
-(n-2)\phi\phi_i P_{kl}P_{kl,i}.\notag \end{align} By combining equations \eqref{eqn:1}, \eqref{eqn:2}, \eqref{eqn:3} and \eqref{eqn:4} and doing some cancelations,
we conclude that the last four terms on the right hand side of \eqref{eqn:2nd} are equal to
\begin{align}\label{eqn:a} -\frac{1}{8}&\int\Big[\frac{1}{3}\phi\phi_{kl}B_{kl}+(n-4)\phi T_{2ij}\phi_{ij}-\frac{2(n-6)}{3}\phi P_{ij}\phi_k C_{ijk}\\
&\qquad\qquad +\frac{4}{3}\phi C_{kmi}\phi_j W_{jmik}+\frac{4}{3}\phi \phi_{ijk}C_{ijk}\Big]dv,\notag \end{align} where we have used $T_{2ij}=\sigma_2(g)\delta_{ij}-\sigma_1(g)P_{ij}+P_{ik}P_{kj}$ and $B_{ij}=C_{ijk,k}+P_{kl}W_{ikjl}$. Moreover, \begin{equation*}\begin{split} &\frac{4}{3}\phi C_{ijk}\phi_{jik}\\ =&\frac{4}{3}\phi C_{ijk}(\phi_{jki}+\phi_m R_{mjik})\\ =&\frac{4}{3}\phi C_{ijk}\phi_m(W_{mjik}+P_{mi}g_{jk}+P_{jk}g_{mi}-P_{mk}g_{ij}-P_{ij}g_{mk})\\ =&\frac{4}{3}\phi C_{ijk}\phi_m W_{mjik}+\frac{4}{3}\phi C_{ijj}\phi_m P_{mi}+\frac{4}{3}\phi C_{ijk}\phi_i P_{jk}-\frac{4}{3}\phi C_{iik}\phi_m P_{mk}-\frac{4}{3}\phi C_{ijk}P_{ij}\phi_k\\ =&-\frac{4}{3}\phi \phi_m C_{ijk}W_{mjki}-\frac{4}{3}\phi C_{ijk}P_{ij}\phi_k, \end{split} \end{equation*} where we used $\sum_i C_{iik}=0$ and $C_{ijk}=-C_{ikj}$.
Thus it follows that \eqref{eqn:a} is equal to \begin{equation}\label{eqn:mf}\begin{split} -\frac{1}{8}\int\Big[\frac{1}{3}\phi\phi_{kl}B_{kl}+(n-4)\phi \phi_{ij}T_{2ij}-\frac{2(n-6)}{3}\phi\phi_k P_{ij} C_{ijk}
-\frac{4}{3}\phi\phi_k C_{ijk}P_{ij}\Big]dv. \end{split}\end{equation} Substituting \eqref{eqn:mf} into \eqref{eqn:2nd}, we conclude that \begin{align}\label{eqn:2nd1}
&\frac{d^2}{dt^2}\Big|_{t=0} F(g_t) \\ =&\int \Bigg\{(n-6)\psi v^{(6)}+(n-6)^2\phi^2v^{(6)}(g)-\frac{1}{8}\bigg[-2(n-5)\phi\phi_{ij} T_{2ij} -\frac{2(n-5)}{3(n-4)}\phi \phi_{ij}B_{ij}\notag \\ & +\frac{4(n-5)}{3}\phi\phi_k P_{ij}C_{ijk}+ \frac{1}{3}\phi\phi_{kl}B_{kl}+ (n-4)\phi \phi_{ij}T_{2ij}-\frac{2(n-6)}{3}\phi\phi_k P_{ij} C_{ijk}
-\frac{4}{3}\phi\phi_k C_{ijk}P_{ij}\bigg]\Bigg\}\notag \\ =&\int\Bigg\{ (n-6)\psi v^{(6)}+(n-6)^2\phi^2v^{(6)}+\frac{1}{8}\bigg[(n-6)\phi \phi_{ij}T_{2ij}+\frac{n-6}{3(n-4)}\phi\phi_{ij} B_{ij}\notag\\ & \qquad
- \frac{2(n-6)}{3}\phi\phi_k P_{ij} C_{ijk}\bigg]\Bigg\}\notag. \end{align}
\noindent{\bf Proof of Theorem 1.2} By Theorem \ref{thm:thm2}, at the critical metric of the functional $\mathcal{F}_3(g)$, it holds that $v^{(6)}(g)$ should be constant, and it follows that $F(g)= Vv^{(6)}(g)$. By our notations
$\mathcal{F}_3[g_t]=\frac{F(g_t)}{(\int dv_{g_t})^{(n-6)/n}}$. By use of \eqref{eqn:2nd1}
and \eqref{eqn:main1}, at
the critical metric $g$, we have \begin{align}\label{eqn:final}
&\frac{d^2}{dt^2}\Big|_{t=0} \mathcal{F}_3[g_t]\\
=& \frac{d^2}{dt^2}\Big|_{t=0} F(g_t)\cdot V^{-\frac{n-6}{n}} + 2 \frac{d}{dt}\Big|_{t=0} F(g_t)\cdot
\frac{d}{dt}\Big|_{t=0}V(g_t)^{-\frac{n-6}{n}}+F(g)\frac{d^2}{dt^2}\Big|_{t=0}V(g_t) ^{-\frac{n-6}{n}}\notag\\
=& \frac{d^2}{dt^2}\Big|_{t=0} F(g_t)\cdot V^{-\frac{n-6}{n}}
-\frac{2(n-6)}{n}\bigg(\frac{d}{dt}\Big|_{t=0}F(g_t)\bigg)\cdot V^{-\frac{(2n-6)}{n}}\int n\phi \notag\\ &\quad + F\Bigg\{
(n-6)(2n-6)V^{-\frac{(3n-6)}{n}}\Big(\int \phi\Big)^2 -n(n-6)V^{-\frac{(2n-6)}{n}}\int \phi^2-
(n-6)V^{-\frac{(2n-6)}{n}}\int \psi\Bigg\}\notag\\
=&V^{-\frac{n-6}{n}}\Bigg\{\frac{d^2}{dt^2}\Big|_{t=0} F(g_t)-2(n-6)^2v^{(6)}(g)V^{-1}\Big(\int \phi\Big)^2
+(n-6)(2n-6)v^{(6)}(g)V^{-1}\Big(\int \phi\Big)^2\notag\\ &\qquad -n(n-6)v^{(6)}(g)\int \phi^2-
(n-6)v^{(6)}(g)\int \psi \Bigg\}\notag\\
=&V^{-\frac{n-6}{n}}\Bigg\{\frac{d^2}{dt^2}\Big|_{t=0} F(g_t) + 6(n-6)v^{(6)}(g)V^{-1}\Big(\int \phi\Big)^2 - n(n-6)v^{(6)}(g)\int \phi^2
-(n-6)v^{(6)}(g)\int \psi\Bigg\}\notag\\
=&V^{-\frac{n-6}{n}}\Bigg\{\int \bigg[-6v^{(6)}(g)\Big(\phi-V^{-1}\int \phi\Big)^2 +\frac{\phi\phi_{kl}}{24(n-4)}B_{kl}+\frac{1}{8}
\phi\phi_{mk}T_{2mk}\notag\\
&\qquad\qquad - \frac{1}{12}\phi C_{ijk}P_{ij}\phi_k\bigg]dv \Bigg\}\notag.
\end{align} If we define an operator $\mathcal L$ by \begin{equation*} \mathcal{L}(f):=\frac{B_{ij}f_{ij}}{24(n-4)}+\frac{1}{8}T_{2ij}f_{ij}- \frac{1}{12}P_{ij}C_{ijk}f_k, \end{equation*} for $f\in C^\infty(M)$. It is easy to see that $\mathcal{L}$ is self-adjoint with respect to the $L^2$ inner product of the Riemannian manifold. Indeed, for any two smooth functions $f$ and $h$, we have \begin{equation*}\begin{split} \langle\mathcal{L}(f),h\big\rangle&=\int_M \mathcal{L}(f)h dv\\ &=\int_M \Big[\frac{B_{ij}f_{ij}h}{24(n-4)}+\frac{1}{8}T_{2ij}hf_{ij}- \frac{1}{12}P_{ij}C_{ijk}f_kh\Big]dv\\ &=\int_M \Big[-\frac{B_{ij}f_ih_j}{24(n-4)}-\frac{1}{8}T_{2ij}f_i h_j-\frac{B_{ij,j}f_i h}{24(n-4)}-\frac{1}{8}T_{2ij,j}f_i h-\frac{1}{12}P_{ij}C_{ijk}f_k h\Big]dv\\
&=\int_M \Big[-\frac{B_{ij}f_ih_j}{24(n-4)}-\frac{T_{2ij}f_ih_j}{8}\Big] dv\\ &=\langle f,\mathcal{L}(h)\rangle, \end{split}\end{equation*} where we have used (2) and (3) in Lemma \ref{lem:lem1}, \eqref{eqn:tij} and integration by parts. Denote $\phi-V^{-1}\int \phi$ by $\bar{\phi}$. From \eqref{eqn:final}, we see that \begin{align}
&\frac{d^2}{dt^2}\Big|_{t=0} \mathcal{F}_3(g_t)\\ &=(n-6)V^{-\frac{n-6}{n}}\Bigg\{\int \Big[-6v^{(6)}(g)\bar{\phi}^2+\mathcal{L}(\phi)\phi \Big]dv\Bigg\}\notag\\ &=(n-6)V^{-\frac{n-6}{n}}\Bigg\{\int \Big[-6v^{(6)}(g)\bar{\phi}^2+\mathcal{L}(\bar{\phi})\bar{\phi} \Big]dv\Bigg\}.\notag \end{align} Thus we complete the proof of Theorem \ref{thm:thm1}.\qed
To prove Theorem \ref{thm:main}, we need the following famous theorem. \begin{thm}[Lichnerowicz and Obata, see e.g. \cite{LI}]\label{thm:thm3} Let $M$ be an $n$-dimensional compact manifold. Suppose the Ricci curvature of $M$ is bounded from below by \begin{equation*}Ric\ge (n-1)K\end{equation*} for some positive constant $K$, then the first nonzero eigenvalue of the Laplacian on $M$ must satisfy $$\lambda_1\ge nK.$$ Moreover, equality holds if and only if $M$ is isometric to a standard sphere of radius $\frac{1}{\sqrt{K}}$. \end{thm} By the min-max principle, for the first nonzero eigenvalue $\lambda_1$ of Laplacian, it holds that \begin{equation}\label{eqn:cha}
\lambda_1\int_M f^2dv\le \int_M|\nabla f|^2dv, \end{equation} for any $f\in C^\infty(M)$ satisfying $\int_M f dv=0$.
\noindent
{\bf Proof of Theorem 1.3}. Note that an Einstein manifold $(M^n,g)$ is a critical metric in $[g]$, i.e., it satisfies (1.2).
Now let $(M^n,g)$ be an Einstein manifold with positive scalar curvature, then it follows from Theorem \ref{thm:thm3} and \eqref{eqn:cha} that \begin{equation}\label{eqn:res}
\frac{R}{n-1}\int_M \bar{\phi}^2dv\le \int_M|\nabla \bar{\phi}|^2dv. \end{equation} Note that for an Einstein manifold, $v^{(6)}(g)=-\frac{(n-2)R^3}{384n^2(n-1)^2}$, $\mathcal{L}(\phi)= \frac{(n-2)R^2}{64n^2(n-1)}\Delta \phi$. Hence, we see that \begin{align}\label{eqn:424}
\frac{d^2}{dt^2}\Big|_{t=0}\mathcal{F}_3[g_t]&=(n-6)V^{-\frac{n-6}{n}}\int_M \Big[\frac{(n-2)R^3}{64n^2(n-1)^2}\bar{\phi}^2 + \frac{(n-2)R^2\bar{\phi}}{64n^2(n-1)}\Delta\bar{\phi}\Big]dv ,\notag\\ &=\frac{(n-2)(n-6)R^2}{64n^2(n-1)}V^{-\frac{n-6}{n}}\int_M\Big[\frac{R\bar{\phi}^2}{n-1}
-|\nabla\bar{\phi}|^2\Big]dv\notag\\ &\le\frac{(n-2)(n-6)R^2}{64n^2(n-1)}V^{-\frac{n-6}{n}} \Big(\frac{R}{n-1}-\lambda_1\Big)\int_M \bar{\phi}^2 dv\\ &\le 0,\notag \end{align} with equality if and only if $\lambda_1=\frac{R}{n-1}$. Hence, by Theorem \ref{thm:thm3}, in this case $(M,g)$ is isometric to the standard sphere $S^n$.
Therefore, we prove that an Einstein manifold with positive scalar curvature must be a strict local maximum ``point'' within its conformal class $[g]$ unless $(M,g)$ is isometric to $S^n$ with a multiple of the standard metric. This completes the proof of Theorem 1.3. \qed
\noindent
\begin{rem} Let $(M^n,g)$ be an $n$-dimensional Einstein manifold with nonpositive scalar curvature, then we have from the proof of Theorem \ref{thm:main} (see \eqref{eqn:424}) $$
\frac{d^2}{dt^2}\Big|_{t=0}\mathcal{F}_3(g_t)\le 0, $$ that is, it is stable. \end{rem} \begin{rem} When $M^n$ is an Einstein manifold with positive scalar curvature with dimension $n=5$, we see from \eqref{eqn:424} that \begin{equation}\label{eqn:test}
\frac{d^2}{dt^2}\Big|_{t=0}\mathcal{F}_3[g_t]\ge 0, \end{equation} with equality if and only if $\lambda_1=\frac{R}{4}$. Theorem \ref{thm:thm3} shows that in this case $(M^5,g)$ is isometric to the sphere $S^5$ with the standard metric up to a multiple of constant. And we see that this Einstein metric is a strict local minimum of the functional $\mathcal{F}_3$ within its conformal class if the equality does not hold in \eqref{eqn:test}. \end{rem}
\begin{rem} Let $\mathcal{T}_{ij}(g)=T_{2ij}(g)+\frac{1}{n-4}(B_g)_{ij}$, we have \begin{equation*} \sum_j\nabla^{j}\mathcal{T}_{ij}=0, \end{equation*} that is, $\mathcal{T}_{ij}$ is a divergence-free tensor. We observe that $v^{(6)}(g)=-24\sum_{ij}\mathcal{T}_{ij}(g)(P_g)_{ij}$. \end{rem}
\noindent Bin Guo: {\sc Department of Mathematical Sciences, Tsinghua University, Beijing 100084, People's Republic of China}\ \ Email: guob07@mails.tsinghua.edu.cn
\noindent Haizhong Li: {\sc Department of Mathematical Sciences, Tsinghua University, Beijing 100084, People's Republic of China} \ \ E-mail: hli@math.tsinghua.edu.cn
\end{document} | arXiv | {
"id": "1006.0156.tex",
"language_detection_score": 0.4587038457393646,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
\begin{document}
\title[Identities of the multi-variate independence polynomials from heaps]{Identities of the multi-variate independence polynomials from heaps theory}
\author{Deniz Kus} \address{University of Bochum, Faculty of Mathematics, Universit{\"a}tsstr. 150, Bochum 44801, Germany} \email{deniz.kus@rub.de} \author{Kartik Singh} \address{Department of Combinatorics and Optimization, University of Waterloo Waterloo, Ontario N2L 3G1, Canada} \email{k266singh@uwaterloo.ca} \author{R. Venkatesh} \address{Department of Mathematics, Indian Institute of Science, Bangalore 560012, India} \email{rvenkat@iisc.ac.in} \thanks{}
\begin{abstract} We study and derive identities for the multi-variate independence polynomials from the perspective of heaps theory. Using the inversion formula and the combinatorics of partially commutative algebras we show how the multi-variate version of Godsil type identity as well as the fundamental identity can be obtained from weight preserving bijections. Finally, we obtain a new multi-variate identity involving connected bipartite subgraphs similar to the Christoffel-Darboux type identities obtained by Bencs. \end{abstract}
\maketitle
\section{Introduction}
Let $G$ be a finite simple connected graph with vertex set $V(G).$ A subset of $V(G)$ is said to be \textit{independent} if it does not include two adjacent vertices and by convention we allow the empty subset to be independent. The \textit{multi-variate independence polynomial} of $G$ is defined as
$$I(G, \mathbf{x}):=\sum_{S}(-1)^{|S|} \prod_{v\in S}x_v$$ where the sum runs over all independent subsets $S$ of $V(G)$. The aim of this article is to approach certain identities for multi-variate independence polynomials using the inversion formula from heaps theory.
To explain our motivations and results we need some terminologies. One can associate a monoid called the \textit{Cartier–Foata monoid} to the graph $G$ (see \cite{CF69}). This monoid is generated by the vertices of $G$ and the defining relations are given by $uv=vu$ if $u, v\in V(G)$ and there is no edge between them. One can prove that the Cartier–Foata monoid of $G$ is equivalent to the monoid of heaps with pieces in $V(G)$ and the concurrency relation is determined by $G$ (see \cite{V85}). The fundamental result of Viennot's general theory of heaps is the inversion lemma (see for example \cite{V85} and \cite[Theorem 2.1]{V92}) which gives a closed formula for the generating function of heaps with all maximal pieces in some fixed subset.
Even though heaps give a geometric interpretation of the elements of the Cartier--Foata monoid, we prefer to work with the Cartier--Foata monoid itself in this paper. Fix a subset $K$ of $V(G)$, and consider the set $\mathcal{P}^{\emptyset}_K(G)$ that consists of all elements in the monoid that can only end with one of the $v$'s from $K$ (see Section \ref{sect2.1} for more details). We can assign a weight to each element of $\mathcal{P}^{\emptyset}_K(G)$ as follows: given $\mathbf{w}=u_1\cdots u_r\in \mathcal{P}^{\emptyset}_K(G)$, define $\mathrm{wt}(\mathbf{w})=\prod\limits_{i=1}^{r}x_{u_i}\in \mathbb{C}[x_v : v\in V(G)]$. Then the generating function of $\mathcal{P}^{\emptyset}_K(G)$ is simply given by $$\sum\limits_{\mathbf{w}\in \mathcal{P}^{\emptyset}_K(G)} \mathrm{wt}(\mathbf{w})= \frac{I(G-K, \mathbf{x})}{I(G, \mathbf{x})}$$ where $G-K$ is the graph obtained from $G$ by removing the vertices in $K.$ The motivation of this work comes from a Godsil-type identity that has been proved in \cite{Bencs} for one-variable independence polynomials; recall that the one variable independence polynomial is obtained by evaluating $x_v=-x$ for all $v\in V(G)$ in the multi-variate independence polynomial. Given a vertex $u\in G$, Bencs constructed a rooted (stable path) tree $(T, u')$ such that \begin{equation}\label{Godsil}
\frac{I(G-u,x)}{I(G,x)}=\frac{I(T-u', x)}{I(T,x)}. \end{equation} Godsil's original identity was stated for matching polynomials \cite{Godsil1} and was one of the key ingredients in proving that the matching polynomial is real rooted. Furthermore, the importance of this identity is also highlighted in \cite{MSS} where the authors prove the existence of infinite families of regular bipartite Ramanujan graphs of every degree greater than $2$. It is not hard to prove the multi-variate version of Equation~\ref{Godsil} (the proof goes along the same lines as the proof of \cite[Theorem 2.3]{Bencs}). However, both sides of the multi-variate version of Equation~\ref{Godsil} are the generating functions of certain words from the Cartier--Foata monoid of $G$. More precisely, the left hand side of Equation~\ref{Godsil} corresponds to the generating function of $\mathcal{P}^{\emptyset}_{u}(G)$ and the right hand side corresponds to the generating function of $\mathcal{P}^{\emptyset}_{u'}(T)$. So, we have the following natural questions: \begin{itemize}
\item Is there any natural weight preserving bijective map from $\mathcal{P}^{\emptyset}_{u}(G)$ onto $\mathcal{P}^{\emptyset}_{u'}(T)$ that gives the
multi-variate version of Equation~\ref{Godsil}?
\item Using the method of finding weight preserving bijections, is one able to give new proofs of existing identities, generalize them to the multi-variate case and obtain new identities? \end{itemize} We answer the first question affirmatively in this paper. We will also use our approach to get more identities and prove existing identities for the multi-variate independence polynomial of $G$. In particular we prove a new multi-variate identity, Equation~\ref{identity2}, involving connected bipartite subgraphs similar to the Christoffel-Darboux type identities obtained by Bencs \cite{Bencs2}. This identity seems to be new in the literature.
\section{Independence polynomials and word decompositions}
\subsection{}\label{sect2.1} Let $G$ be a finite simple connected graph, i.e., $G$ contains no loops and multiple edges. The vertex set and edge set of $G$ are denoted as $V(G)$ and $E(G)$ respectively. We denote by $e(u, v)$ the edge between the vertices $u$ and $v$ of $G$. For $u\in V(G)$, we denote by $N_G(u)$ the neighbourhood of $u$ in $G$, $d_G(u):=|N_G(u)|$ the degree of $u$ in $G$ and set $N_G[u]=N_G(u)\cup \{u\}.$ For a subset $S\subseteq V(G)$ we denote by $G[S]$ the subgraph of $G$ spanned by the vertices in $S$. Let $\mathcal{P}^{\emptyset}(G)$ denote the partially commutative monoid of $G$ which is generated by the elements of $V(G)$ modulo the relations $$uv=vu\iff e(u, v)\notin E(G).$$ If $\mathcal{C}^{\emptyset}(G)$ denotes the commutative monoid generated by $V(G)$, we have a canonical monoid morphism $\pi_G: \mathcal{P}^{\emptyset}(G)\to \mathcal{C}^{\emptyset}(G)$. We set $\mathcal{P}(G):=\mathcal{P}^{\emptyset}(G)\backslash\{\mathrm{pt}\}$ where we think of the extra point in $\mathcal{P}^{\emptyset}(G)$ as the empty word and introduce further
$$\mathcal{P}_{v_1,\dots,v_r}(G)=\{\mathbf{w} \in \mathcal{P}(G): \mathrm{IA}(\mathbf{w})\subseteq\{v_1,\dots,v_r\}\},$$
$$\mathcal{P}^c_{v_1,\dots,v_r}(G)=\{\mathbf{w} \in \mathcal{P}(G): \mathrm{IA}(\mathbf{w})=\{v_1,\dots,v_r\}\},$$
$$\mathcal{P}^{\emptyset}_{v_1,\dots,v_r}(G)=\mathcal{P}_{v_1,\dots,v_r}(G)\sqcup \{\mathrm{pt}\},$$
i.e., $\mathcal{P}_{v_1,\dots,v_r}(G)$ consists of all words that can only end with one of the $v_i$'s. For a word $\mathbf{w}=v_1\cdots v_r\in \mathcal{P}(G)$ we write $|\mathbf{w}|=r$ for the length of $\mathbf{w}$ and set $v(\mathbf{w})=|\{1\leq j\leq r: v_j=v\}|$ for a vertex $v\in V(G)$. The \textit{initial alphabet} of $\mathbf{w}$ is the multiset denoted by $\mathrm{IA_m}(\mathbf{w})$ and defined by $v\in \mathrm{IA_m}(\mathbf{w})$ (counted with multiplicities) if and only if $\mathbf{w}=\mathbf{u}v$ for some $\mathbf{u}\in \mathcal{P}(G)$. We denote the underlying set by $\mathrm{IA}(\mathbf{w})$. \begin{example} Let us take $G$ to be the path graph $P_4$:\ \
\begin{center} \begin{tikzpicture}
\draw (1,2)-- (2,2);
\draw (2,2)-- (3,2);
\draw (3,2)-- (4,2);
\fill (1,2) circle (1.5pt);
\draw (1,2.3) node {$1$};
\fill (2,2) circle (1.5pt);
\draw (2,2.3) node {$2$};
\fill (3,2) circle (1.5pt);
\draw (3,2.3) node {$3$};
\fill (4,2) circle (1.5pt);
\draw (4,2.3) node {$4$};
\end{tikzpicture} \end{center}
Take $\mathbf{w}=342111\in \mathcal{P}(G)$, then $$\text{ $|\mathbf{w}|=6$, $1(\mathbf{w})=3$, $2(\mathbf{w})=3(\mathbf{w})=4(\mathbf{w})=1$, $\mathrm{IA_m}(\mathbf{w})=\{1, 1, 1, 4\}$ and $\mathrm{IA}(\mathbf{w})=\{1, 4\}$.} $$ \end{example}
\subsection{} Given $\mathbf{w}\in\mathcal{P}_u(G)$, it has been shown in \cite[Proposition 4.3]{AKV17} that there exist unique words
$\mathbf{w}_1, \dots, \mathbf{w}_{u(\mathbf{w})}\in \mathcal{P}(G)$ such that
\begin{equation}\label{w12}\mathbf{w}=\mathbf{w}_1\cdots\mathbf{w}_{u(\mathbf{w})},\ \ \mathrm{IA}_m(\mathbf{w}_i)=\{u\} \ \text{for all}\ 1\le i\leq u(\mathbf{w}).\end{equation} If $u(\mathbf{w})>1$, we refer to the decomposition above as the \emph{initial alphabet}-decomposition or simply \emph{ia}-decomposition of $\mathbf{w}$. We shall now define the so-called neighbourhood decomposition. We write
$$N_G(u,\mathbf{w})=\{v\in N_G(u): v(\mathbf{w})>0\},\ \ d_G(u, \mathbf{w})=|N_G(u,\mathbf{w})|.$$
\begin{prop}\label{mainlemma2}
Let $\mathbf{w}\in\mathcal{P}_u(G)$ with $u(\mathbf{w})=1$ and write $N_G(u,\mathbf{w})=\{u_1<\cdots<u_d\}$ where $d=d_G(u, \mathbf{w})$.
Then there exist unique $\mathbf{w}_1, \dots, \mathbf{w}_{d}\in \mathcal{P}^{\emptyset}(G)$ such that:
\begin{enumerate}[(i)]
\item $\mathbf{w}=\mathbf{w}_1\cdots\mathbf{w}_{d}u$
\item If $\mathbf{w}_i\in \mathcal{P}(G)$, then $\mathrm{IA}(\mathbf{w}_i)=\{u_i\}\ \text{for all}\ 1\le i\leq d$
\item $u_i\notin \mathbf{w}_j \ \text{for all}\ i<j$.
\end{enumerate}
\end{prop}
\begin{proof} We proceed by induction on $d$ where the $d=1$ case is obviously true. So we can assume that $d>1$. We choose $\mathbf{u}_1, \mathbf{u}_2$ such that $\mathbf{w} =\mathbf{u}_1\mathbf{u}_2$
and $|\mathbf{u}_2|$ is maximal with the property that $u_1\notin \mathbf{u}_2$. This forces $\mathrm{IA}(\mathbf{u}_1)=\{u_1\}$. Since $d_G(u,\mathbf{u}_2)<d_G(u,\mathbf{w})$ we can use induction to get the required decomposition for $\mathbf{u}_2$. This gives the decomposition for $\mathbf{w}$ with the properties $(i)-(iii)$ once we set $\mathbf{w}_1= \mathbf{u}_1$.
The rest of the proof is concerned with the uniqueness. Assume that $\mathbf{w} = \mathbf{w}_1'\cdots\mathbf{w}_{d}'u$ is another decomposition satisfying the conditions $(i)-(iii)$ of the lemma. Write $\mathbf{w}=\mathbf{w}_1'\mathbf{u}'$ then we have $u_1\notin \mathbf{u}'$. However, the choice of $\mathbf{w}_1$ implies $|\mathbf{w}_1|\leq |\mathbf{w}_1'|$ and $\mathbf{u}'$ is a subword of $\mathbf{u}_2$.
This forces $|\mathbf{w}_1|=|\mathbf{w}_1'|$, since $\mathrm{IA}(\mathbf{w}_1')=\{u_1\}$. Hence $\mathbf{u}'=\mathbf{u}_2$ and $\mathbf{w}_1=\mathbf{w}_1'$. Now a simple induction argument completes the proof. \end{proof} For $\mathbf{w}\in\mathcal{P}_u(G)$ with $u(\mathbf{w})=1$, we refer to the decomposition of Proposition~\ref{mainlemma2} as the \emph{neighbourhood}-decomposition or simply \emph{nbd}-decomposition of $\mathbf{w}$.
\subsection{} A subset $S$ of $V(G)$ is said to be \emph{independent} if there is no edge between the elements of $S$ in the graph $G$. We denote by $\mathcal{I}_G$ the set of independent subsets of $G$ and note that we have $\emptyset, \{v\}\in \mathcal{I}_G$ for each $v\in V(G).$ The \textit{multi-variate independence polynomial} of $G$ is defined as
$$I(G, \mathbf{x}):=\sum_{S\in \mathcal{I}_G}(-1)^{|S|} \prod_{v\in S}x_v$$ and we view it as an element in $\mathbb{C}[x_v : v\in V(G)]$, the polynomial algebra over $\mathbb{C}$ generated by the commuting variables $\{x_v : v\in V(G)\}$. The aim of this article is to approach certain identities for multi-variate independence polynomials using the inversion formula from heap theory. We need the following trivial identifications. \begin{lem}\label{verytriv} Let $S\subseteq V(G)$ and $\{K_1,\dots, K_s\}$ be the set of non-empty independent subsets of the graph $G[S]$. \begin{enumerate} \item We have a bijection \begin{equation}\label{bijdis}\mathcal{P}^c_{K_1}(G)\ \dot\sqcup\cdots \dot\sqcup \ \mathcal{P}^c_{K_s}(G)\rightarrow \mathcal{P}_{S}(G)\end{equation} \item For any independent subset $K\neq \emptyset$ of $S$ we have a bijection $$\varphi_{K}:\mathcal{P}^c_{K}(G)\rightarrow \mathcal{P}^{\emptyset}_{N_G[K]}(G),\ \ \mathbf{w}\mapsto \frac{\mathbf{w}}{\prod_{y\in K}y}$$
\end{enumerate} \begin{proof} We first show that the left hand side of \eqref{bijdis} is a disjoint union. Let $\mathbf{w}\in \mathcal{P}^c_{K_1}(G)\cap \mathcal{P}^c_{K_2}(G)$ and $u\in K_1\backslash K_2$. Then we have $\mathbf{w}=\mathbf{w}'u$ and thus $u\in \mathrm{IA}(\mathbf{w})=K_2$, which is a contradiction. So the left hand side is disjoint. The identity map $$\mathrm{Id}_{K_i}:\mathcal{P}^c_{K_i}(G)\rightarrow \mathcal{P}_{S}(G) $$ for all $i\in\{1,\dots,s\}$ induces the desired map \eqref{bijdis}, which is clearly bijective. In order to show the second part we first note that the map is well-defined. If $z\in \mathrm{IA}(\varphi_K(\mathbf{w}))$ but $z\notin N_G(K)$, then we would also have $z\in \mathrm{IA}(\mathbf{w})=K$. Hence $z\in N_G[K]$. The map is bijective because the inverse map is simply given by multiplication with $\prod_{y\in K}y$. \end{proof} \end{lem}
\iffalse \begin{center} \begin{tikzpicture}[node distance={15mm}, thick, main/.style = {draw, circle}] \node[main] (1) {$u'$}; \node[main] (2) [above right of=1] {$r_1$}; \node[main] (3) [below right of=1] {$r_2$}; \node[main] (4) [below of=3] {$r_d$}; \draw (1) -- (2); \draw (1) -- (3); \draw (1) -- (4); \end{tikzpicture} \end{center} \fi \subsection{}\label{inver} The inversion lemma from heap theory \cite[Proposition 5.3]{V85} states that $$\frac{I(G-S,\mathbf{x})}{I(G,\mathbf{x})}=\sum_{\mathbf{w}=v_1\cdots v_r\in \mathcal{P}_S^{\emptyset}(G)}x_{v_1}\cdots x_{v_r},\ \ S\subseteq V(G).$$ Using the inversion lemma one can derive certain well-known and possibly new identities of independence polynomials and extend them to the multi-variate version. For example, Lemma~\ref{verytriv} simply implies that (keeping the same notation) \begin{equation}\label{fi1}I(G-S,\mathbf{x})-I(G,\mathbf{x})=\sum_{i=1}^s (\prod\limits_{v\in K_i} x_v) \ I(G-N_G[K_i],\mathbf{x}),\end{equation} which is known as the fundamental identity if $S$ is a singleton. The importance of this identity can be seen for example in \cite{CS07}, where the authors proved that independence polynomials of claw-free graphs are real-rooted by using \eqref{fi1} when $S$ is a clique. The single-variable version of the above identity is the main result of \cite{XH94}. \section{Weight preserving bijection and Godsil's identity} \subsection{} Here we recall the construction of a rooted tree associated with $(G, u)$, where $u\in V(G)$, which is important in the Godsil-type identity (originally stated for the matching polynomial; see \cite{Godsil93} and also \cite{Bencs}) which relates the independence polynomial of $G$ to that of the tree. The constructed tree is called a stable-path tree of $G$; for more details we refer the reader to \cite{Bencs}, and for an example see Figure~\ref{fig:pt}. 
Let $V(G)=\{1, \dots, n\}$ be an enumeration of the vertices of $G$ and let $N_G(u)=\{u_{1}<\cdots <u_{d}\}$ where $u\in V(G)$ and $d:=d_G(u)$. For each vertex $u\in V(G)$ we will recursively associate a rooted tree $(T_{G}, u')$ and a surjective graph homomorphism $$\ell_G: V(T_G) \rightarrow V(G),\ u'\mapsto u$$ as follows. If $d=0$ then $G$ is a single vertex and we set $T_{G}=\{u'\}$ as the tree with one vertex $u'$. If $d\ge 1$, we let $G_i$ be the connected component of $G[V(G)\backslash\{u,u_1,u_2, \dots,u_{i-1}\}]$ containing $u_i$ and we take the induced total ordering on $V(G_i)$ that comes from $V(G)$. Now we have by induction the family of rooted trees $(T_{G_i}, u'_i)$ and the graph homomorphisms $$\ell_{G_i}:V(T_{G_i})\rightarrow V(G_i),\ \ u_i'\mapsto u_i.$$ Finally we take the disjoint union of rooted trees $(T_{G_i}, u'_i)$ and a new vertex $u'$, and join the vertex $u'$ with the vertices $u'_i$, $1\le i\le d$. Clearly the graph $(T_{G}, u')$ obtained in this way is a rooted tree. Define the map $\ell_G: V(T_{G}) \rightarrow V(G)$ by $\ell_G(u')=u$ and $\ell_G(v)=\ell_{G_i}(v)$ if $v\in V(T_{G_i}).$ This is clearly a surjective graph homomorphism and the map $\ell_G$ induces a partial ordering on $V(T_{G})$ as follows: for $v_1, v_2\in V(T_{G})$, we have $$v_1\ge v_2 \iff \ell_G(v_1)\ge \ell_G(v_2).$$ We extend this partial order to a total ordering on $V(T_{G})$. The extension of $\ell_G$ to $\mathcal{C}(T_{G})$ is again denoted as $\ell_G.$
\begin{figure}
\caption{A graph $G$ with labeled vertices.}
\caption{The graph $T_{G,1}$.}
\caption{A graph with its stable-path tree.}
\label{fig:pt}
\end{figure}
\subsection{}We freely use the notations that were developed in the earlier sections. We now state and prove the following result.
\begin{thm}\label{thmmain}
Let $G$ be a finite, simple and connected graph. Then there exists a bijection $\varphi_G:\mathcal{P}^{\emptyset}_{u}(G)\rightarrow \mathcal{P}^{\emptyset}_{u'}(T_{G})$ such that $|\varphi_G(\mathbf{w})|=|\mathbf{w}|$ and $$ \begin{tikzcd} \mathcal{P}^{\emptyset}_{u}(G) \arrow{r}{\varphi_G} \arrow[swap]{d}{\pi_G} & \mathcal{P}^{\emptyset} _{u'}(T_{G}) \arrow{d}{\pi_{T_{G}}} \\ \mathcal{C}^{\emptyset}(G) & \arrow{l}{\ell_G} \mathcal{C}^{\emptyset}(T_G) \end{tikzcd} $$
is a commutative diagram.
\end{thm}
\begin{proof}
We recursively construct the map $\varphi_G$ and its inverse $\psi_G$. If $|V(G)|=1$, then we set $\varphi_G(u)=u'$ and $\psi_G(u')=u$. So assume that $|V(G)|>1$ and let $\varphi_H$ be the required map for all finite, connected graphs with $|V(H)|<|V(G)|$.
We first consider the case $\mathbf{w}\in \mathcal{P}_{u}(G)$ with $u(\mathbf{w})=1$ and recall that we have the \emph{nbd}-decomposition $\mathbf{w}=\mathbf{w}_1\cdots\mathbf{w}_{d}u$ by Proposition~\ref{mainlemma2} where we abbreviate $d=d(u,\mathbf{w})$ in the rest of the proof. From the conditions $(ii)$ and $(iii)$ of Proposition~\ref{mainlemma2}, it is clear that $\mathbf{w}_i\in \mathcal{P}^{\emptyset}_{u_i}(G_i)$ for all $1\le i\le d$.
Now since $|V(G_i)|<|V(G)|$, we obtain by induction a family of bijective maps $\varphi_{G_i}:\mathcal{P}^{\emptyset}_{u_i}(G_i)\to \mathcal{P}^{\emptyset}_{u'_i}(T_{G_i})$ satisfying the required properties for all $1\le i\leq d$. We define
\begin{equation}\label{firstdefn}
\varphi_G(\mathbf{w})=\varphi_{G_1}(\mathbf{w}_1)\varphi_{G_2}(\mathbf{w}_2)\cdots\varphi_{G_{d}}(\mathbf{w}_{d})u'
\end{equation}
Since the decomposition $\mathbf{w}=\mathbf{w}_1\cdots\mathbf{w}_{d}u$ is unique, the above map is well-defined. Clearly the map $\varphi_G$ preserves the \emph{nbd}-decomposition, i.e., the decomposition in \eqref{firstdefn} is the \emph{nbd}-decomposition of $ \varphi_G(\mathbf{w})$.
Now we extend this map using the \textit{ia}-decomposition of $\mathbf{w}\in \mathcal{P}_{u}(G)$ with $u(\mathbf{w})>1$. We have $\mathbf{w} = \mathbf{w}_1\cdots\mathbf{w}_{u(\mathbf{w})}$ satisfying $\mathbf{w}_i \in \mathcal{P}_{u}(G)$ and $u(\mathbf{w}_i)=1$ for all
$1\le i\le u(\mathbf{w})$. We extend $\varphi_G$ as follows: $$ \varphi_G(\mathbf{w})=\varphi_G(\mathbf{w}_1)\varphi_G(\mathbf{w}_2)\cdots\varphi_G(\mathbf{w}_{u(\mathbf{w})})$$ Again $\varphi_G$ is well-defined by the uniqueness of the decomposition and $\varphi_G$ preserves the \emph{ia}-decomposition.
The fact that $|\varphi_G(\mathbf{w})|=|\mathbf{w}|$ holds and that the above diagram commutes follows from the fact that $\ell_G, \pi_G,\pi_{T_G}$ are all homomorphisms and the maps $\varphi_{G_i}$ also satisfy these properties. So it remains to construct the inverse map.
In a similar way, we now define the inverse map $\psi_G$ using the maps $\psi_{G_i}=\varphi_{G_i}^{-1}$. Let $\mathbf{w}'\in \mathcal{P}_{u'}(T_{G})$ be such that $u'(\mathbf{w}')=1$. Again we have the \emph{nbd}-decomposition $\mathbf{w}' = \mathbf{w}_1'\cdots\mathbf{w}'_{d(u',\mathbf{w}')}u'$. We define $$\psi_G(\mathbf{w}')=\psi_{G_1}(\mathbf{w}_1')\psi_{G_2}(\mathbf{w}_2')\cdots\psi_{G_{d(u', \mathbf{w}')}}(\mathbf{w}'_{d(u',\mathbf{w}')})u$$ As before this is a well-defined map and preserves the \emph{nbd}-decomposition. Using this, it is easy to see that $\varphi_G\circ \psi_G(\mathbf{w})=\mathbf{w}$ and $\psi_G\circ\varphi_G(\mathbf{w}')=\mathbf{w}'$ for $\mathbf{w}\in \mathcal{P}_{u}(G), \mathbf{w}'\in \mathcal{P}_{u'}(T_{G})$ with $u(\mathbf{w})=u'(\mathbf{w}')=1$.
If $\mathbf{w}'\in \mathcal{P}_{u'}(T_{G})$ with $u'(\mathbf{w}')>1$, we extend the map using the \emph{ia}-decomposition of $\mathbf{w}' = \mathbf{w}_1'\cdots\mathbf{w}'_{u'(\mathbf{w}')}$, namely we set \begin{equation}
\psi_G(\mathbf{w}')=\psi_G(\mathbf{w}_1')\cdots \psi_G(\mathbf{w}'_{u'(\mathbf{w}')})
\end{equation} As before this is a well-defined map and preserves the \emph{ia}-decomposition. Again we have $\varphi_G\circ\psi_G=\mathrm{Id}_{\mathcal{P}_{u'}(T_{G})}$ and $\psi_G\circ\varphi_G=\mathrm{Id}_{ \mathcal{P}_{u}(G)}$, proving that $\varphi_G$ is a bijection.
\end{proof}
\subsection{} The observation in Section~\ref{inver} together with Theorem~\ref{thmmain} immediately imply the multi-variate Godsil identity
$$\frac{I(G-u,\mathbf{x})}{I(G,\mathbf{x})}=\frac{\ell_G(I(T_G-u',\mathbf{x}))}{\ell_G(I(T_G,\mathbf{x}))}.$$ We refer also to \cite{LR06} for different generalizations of this identity.
\section{Bipartite graphs and positive sum identities}
\subsection{}Motivated by the Christoffel-Darboux type identities for the independence polynomial obtained in \cite{Bencs2}, we would like to obtain a similar type of identity, or a refined version of it, without the alternating sign and in a multi-variate version. Our approach will be the same, by observing the underlying indexing sets.
Let $u, v$ be two distinct vertices of $G.$ Given a pair $(\mathbf{w}u,\mathbf{w}'v)\in \mathcal{P}_u(G)\times \mathcal{P}_v(G)$ and a shortest path $\mathbf{p}=v_1v_2\cdots v_k$ connecting $u=v_1$ with $v=v_k$ we define a bipartite graph $H=H_1\sqcup H_2$ by $$H_1=\mathrm{IA}(\mathbf{w}\cdot v_2\cdot v_4\cdots ),\ \ H_2=\mathrm{IA}(\mathbf{w}'\cdot v_1\cdot v_3\cdots )$$
Note that $v\in H_1$ and $u\in H_2$ if $k$ is even and $u,v\in H_2$ otherwise. We consider the map \begin{equation}\label{map1}\mathcal{P}_u(G)\times \mathcal{P}_v(G)\rightarrow \dot\bigsqcup_{H}\mathcal{P}^{\emptyset}_{Z_1(H)}(G)\times \mathcal{P}^{\emptyset}_{Z_2(H)}(G)\end{equation} $$(\mathbf{w}u,\mathbf{w}'v)\rightarrow \left(\frac{\mathbf{w}\cdot v_2\cdot v_4\cdots }{\prod_{y\in H_1}y},\frac{\mathbf{w}'\cdot v_1\cdot v_3\cdots }{\prod_{y\in H_2}y}\right)$$ where the disjoint union runs over all connected bipartite subgraphs $H$ of $G$ containing the path $\mathbf{p}$ and satisfying \begin{equation}\label{prop1}
\begin{split}
H_1\backslash\{v_2,v_4,\dots\}\subseteq N_G[u],\ \ H_2\backslash\{v_1,v_3,\dots\}\subseteq N_G[v], \\
Z_1(H)=N_G[H_1\backslash\{v_2,v_4,\dots\}]\cup (N_G[H_1]\cap N_G[u]),\, \\ Z_2(H)=N_G[H_2\backslash\{v_1,v_3,\dots\}]\cup (N_G[H_2]\cap N_G[v]). \end{split} \end{equation}
\begin{prop}\label{pr12} The map defined in \eqref{map1} is a bijection. \begin{proof} We first show that the map is well-defined. Set $\mathbf{u}=\frac{\mathbf{w}\cdot v_2\cdot v_4\cdots }{\prod_{z\in H_1}z}$; then we have $$\mathbf{w}\cdot v_2\cdot v_4\cdots=\mathbf{u}\prod_{z\in H_1}z \,\, \text{and} \,\, \mathbf{w}=\mathbf{u}\prod_{z\in H_1\backslash \{v_2, v_4, \cdots\}}z.$$ Assume that a letter $y$ is in the initial alphabet of the word $\mathbf{u}$, which we assume to be non-empty. Suppose $y\in N_G[H_1\backslash \{v_2, v_4, \cdots\}]$; then we have $y\in Z_1(H)$. Otherwise $y\notin N_G[H_1\backslash \{v_2, v_4, \cdots\}]$, which implies $y\in \mathrm{IA}(\mathbf{w})$, hence $y\in N_G[u].$ Suppose $y\in N_G(H_1)$; then we have $y\in Z_1(H)$. Otherwise $y\notin N_G(H_1)$; then $y\in \mathrm{IA}(\mathbf{w}\cdot v_2\cdot v_4\cdots )=H_1$, and again in this case we have $y\in Z_1(H)$.
A similar calculation shows that the initial alphabet of the second component lies in $Z_2(H)$. This shows that the map is well-defined. For the bijectivity we construct the inverse map.
Given a bipartite connected graph $H$ containing $\mathbf{p}$ (say $v_1,v_3,\dots \in H_2$) and satisfying \eqref{prop1}, we define $$\mathcal{P}^{\emptyset}_{Z_1(H)}(G)\times \mathcal{P}^{\emptyset}_{Z_2(H)}(G)\mapsto \mathcal{P}_u(G)\times \mathcal{P}_v(G)$$ \begin{equation}\label{map2}(\mathbf{\tilde{w}},\mathbf{\tilde{w}}')\rightarrow \left(\mathbf{\tilde{w}}\prod_{y\in H_1\backslash\{v_2,v_4,\dots\}}y\ u,\mathbf{\tilde{w}}'\prod_{y\in H_2\backslash\{v_1,v_3,\dots\}} y\ v\right)\end{equation} From \eqref{prop1} and the definition of $Z_i(H)$, $i=1,2$, we know that the above map is well defined. This map induces the inverse of \eqref{map1} since $$\mathrm{IA}(\tilde{\mathbf{w}}\prod_{y\in H_1}y)=H_1,\ \ \mathrm{IA}(\tilde{\mathbf{w}}'\prod_{y\in H_2}y)=H_2$$ \end{proof} \end{prop} \subsection{} As an immediate consequence of Proposition~\ref{pr12} we obtain the following identity \begin{align} \notag &\left(\frac{I(G-u,\mathbf{x})}{I(G,\mathbf{x})}-1\right)\left(\frac{I(G-v,\mathbf{y})}{I(G,\mathbf{y})}-1\right)&\\&\label{identity2}= \sum_{H} \prod_{\substack{w\in H_1\backslash\{v_2,v_4,\dots\}\\ w'\in H_2\backslash\{v_1,v_3,\dots\}}} x_w y_{w'} x_u y_v \left(\frac{I(G-Z_1(H),\mathbf{x})}{I(G,\mathbf{x})}\right)\left(\frac{I(G-Z_2(H),\mathbf{y})}{I(G,\mathbf{y})}\right)
\end{align} where the sum runs over all connected bipartite subgraphs $H$ of $G$ containing the path $\mathbf{p}$ and satisfying \eqref{prop1} (by convention we denote always by $H_2$ the part which contains $v_1,v_3,\dots$). Using $$I(G-u,\mathbf{x})-I(G,\mathbf{x})=-x_u\frac{\partial I(G,\mathbf{x})}{\partial{x_u}}$$ we can rewrite \eqref{identity2} as follows \begin{align} \notag \frac{\partial I(G,\mathbf{x})}{\partial x_u}\frac{\partial I(G,\mathbf{y})}{\partial y_v} = \sum_{H} \prod_{\substack{w\in H_1\backslash\{v_2,v_4,\dots\}\\ w'\in H_2\backslash\{v_1,v_3,\dots\}}} x_w y_{w'} \, I(G-Z_1(H),\mathbf{x})I(G-Z_2(H),\mathbf{y})
\end{align} where the sum runs over the same index set as before.
\begin{rem} If there is an edge between $u$ and $v$, then the left hand side of the above identity becomes (after evaluating $\mathbf{x}=\mathbf{y}$) $$\frac{I(G-u,\mathbf{x})}{I(G,\mathbf{x})}\frac{I(G-v,\mathbf{x})}{I(G,\mathbf{x})}-\frac{I(G-\{u, v\}, \mathbf{x})}{I(G,\mathbf{x})}.$$ This part also appeared for example in Gutman's identity for trees (see \cite{Gutman}) and for general graphs in \cite{Bencs2}. \end{rem}
\iffalse \begin{example} {\color{red} fill, interpretation claw free graphs, zeros?} \end{example} \fi
\subsection{} We will now see some examples that explain our results.
\begin{example} Let us consider the path graph $P_4$ (see Figure~\ref{fig:path}) and take $u=2$ and $v=3.$ The connected bipartite subgraphs of $P_4$ containing $u, v$ are given in Figure~\ref{fig:path2}. \begin{figure}
\caption{Path graph $P_4$}
\label{fig:path}
\end{figure}
\begin{figure}
\caption{$H^1$}
\caption{$H^2$}
\caption{$H^3$}
\caption{$H^4$}
\caption{Connected bipartite subgraphs of $P_4$ containing $2$ and $3$}
\label{fig:path2}
\end{figure}
In this case we can rewrite Equation~\eqref{identity2} as follows: \begin{align}\notag &\left(I(G-u,\mathbf{x})-I(G,\mathbf{x})\right)\left(I(G-v,\mathbf{y})-I(G,\mathbf{y})\right)&\\&\label{identity3}=\sum_{H} \prod_{\substack{w\in H_1\\ w'\in H_2}} x_w y_{w'} I(G-Z_1(H),\mathbf{x})I(G-Z_2(H),\mathbf{y}).\end{align} It is easy to see that $$I(G,\mathbf{x})=1-x_1-x_2-x_3-x_4+x_1x_3+x_1x_4+x_2x_4,$$ $$I(G-u,\mathbf{x})=1-x_1-x_3-x_4+x_1x_3+x_1x_4, \text{and}$$ $$I(G-v,\mathbf{y})=1-y_1-y_2-y_4+y_1y_4+y_2y_4.$$ This gives $$(I(G-u,\mathbf{x})-I(G,\mathbf{x}))(I(G-v,\mathbf{y})-I(G,\mathbf{y}))=x_2y_3(1-x_4)(1-y_1).$$ On the other hand, we have the parts arising from the bipartite subgraphs, which we list now: \begin{enumerate}
\item[(a)] In this case we have $$H_1^1=\{3\},\ H_2^1=\{2\},\ Z_1(H^1)=\{2, 3\}=Z_2(H^1)$$ and
$$I(G-\{2, 3\},\mathbf{x})=1-x_1-x_4+x_1x_4$$
\item[(b)] In this case we have
$$H_1^2=\{1, 3\},\ H_2^2=\{2\},\ Z_1(H^2)=\{1, 2, 3\},\ Z_2(H^2)=\{2, 3\}$$
and
$$I(G-\{1, 2, 3\},\mathbf{x})=1-x_4, \ \ I(G-\{2, 3\},\mathbf{y})=1-y_1-y_4+y_1y_4;$$
\item[(c)] In this case we have
$$H_1^3=\{3\},\ H_2^3=\{2, 4\},\ Z_1(H^3)=\{2, 3\},\ Z_2(H^3)=\{2, 3, 4\}$$
and
$$I(G-\{2, 3\},\mathbf{x})=1-x_1-x_4+x_1x_4, \ \ I(G-\{2, 3, 4\},\mathbf{y})=1-y_1$$
\item[(d)] In this case we have
$$H_1^4=\{1, 3\},\ H_2^4=\{2, 4\},\ Z_1(H^4)=\{1, 2, 3\},\ Z_2(H^4)=\{2, 3, 4\}$$
and
$$I(G-\{1, 2, 3\},\mathbf{x})=1-x_4, \ \ I(G-\{2, 3, 4\},\mathbf{y})=1-y_1.$$ \end{enumerate} After simplification, the RHS of Equation~\eqref{identity3} becomes $x_2y_3(1-x_4)(1-y_1)$, which is the same as the LHS of Equation~\eqref{identity3}.
\end{example}
\begin{example} Let us consider the path graph $P_4$ (see Figure~\ref{fig:path}) and take $u=1$ and $v=4.$ The only connected bipartite subgraph of $P_4$ containing $u$ and $v$ is $P_4$ itself. In this case, we have $$I(G-u,\mathbf{x})=1-x_2-x_3-x_4+x_2x_4\ \text{and}\ I(G-v,\mathbf{y})=1-y_1-y_2-y_3+y_1y_3.$$ The LHS of Equation~\eqref{identity3} is equal to $$x_1y_4(1-x_3-x_4)(1-y_1-y_2).$$ On the other hand, we have $H_1=\{2, 4\}$, $H_2=\{1, 3\}$, $Z_1(H)=\{1, 2\}$ and $Z_2(H)=\{3, 4\}$. This implies that the RHS of Equation~\eqref{identity3} is equal to $$x_1y_4(1-x_3-x_4)(1-y_1-y_2),$$ which is the same as the LHS of Equation~\eqref{identity3}. \end{example}
\iffalse \section{Connection with BKM algebras}
\subsection{Borcherds--Kac--Moody algebra of a graph}\label{BKM subsection} We refer to \cite{Ej96} for the definition of BKM algebras and their basic properties, see also \cite{Bor88, K90}. Let $G$ be a finite connected simple graph with the vertex set $V(G)$ and the edge set $E(G).$ Given two vertices $u, v\in V(G)$, we denote by $e(u, v)\in E(G)$ the edge between $u$ and $v$. Let $\lie g$ be an indecomposable BKM algebra whose simple roots are imaginary and $G$ is the simple graph underlying Dynkin diagram of $\lie g$. i.e., the \textit{Borcherds--Cartan\ matrix} $A=(a_{ij})_{i, j\in V(G)}$ of $\lie g$ satisfies: \begin{enumerate} \item\label{BKM first condition} $A$ is symmetrizable; \item $a_{ij}\le 0$ for all $i, j \in V(G)$; \item\label{BKM last condition} $a_{ij}=0$ if and only if $e(i, j)\notin E(G)$.
\end{enumerate} We fix the \textit{Borcherds--Cartan\ matrix} $A$ satisfying the above conditions. Recall that a matrix $A$ is called symmetrizable if $DA$ is symmetric for some diagonal matrix $D=\mathrm{diag}(d_1, \ldots, d_n)$ with positive entries.
Very explicitly, the BKM algebra $\lie g=\mathfrak{g}(A)$ associated to the Borcherds--Cartan matrix $A$ is the Lie algebra generated by $e_i, f_i, h_i$, $i\in V(G)$ with the following defining relations: \begin{enumerate}
\item[(R1)] $[h_i, h_j]=0$ for $i,j\in V(G)$
\item[(R2)] $[h_i, e_k]=a_{i,k}e_i$, $[h_i, f_k]=-a_{i,k}f_i$ for $i,k\in V(G)$
\item[(R3)] $[e_i, f_j]=\delta_{ij}h_i$ for $i, j\in V(G)$
\item[(R4)] $[e_i, e_j]=0$ and $[f_i, f_j]=0$ if $a_{ij}=0$. \end{enumerate}
\subsection{Root space decomposition}
The BKM algebra $\lie g$ is $\mathbb{Z}^{n}$--graded by defining $$\text{ $\mathrm{deg}\ h_i=(0, \dots,0)$, $\mathrm{deg}\ e_i=(0,\dots,0,1,0,\dots,0)$ and $\text{deg}\ f_i=$ $(0,\dots,0,-1, 0,\dots,0)$}$$ where $\pm 1$ appears at the $i$--th position. For a sequence $(k_1, \dots, k_n)$, we denote by $\lie g(k_1, \dots, k_n)$ the corresponding graded piece. Let $\mathfrak{h}=\text{Span}_\mathbb{C}\{h_i: i\in I\}$ be the abelian subalgebra and let $\mathfrak{E}=\text{Span}_{\mathbb{C}}\{D_i: i\in I\}$, where $D_i$ denotes the derivation that acts on $\lie g(k_1, \dots, k_{n})$ by multiplication by the scalar $k_i$ and zero on the other graded components. Note that $D_i, i\in I$ are commuting derivations of $\lie g$. The abelian subalgebra $\mathfrak{E}\ltimes \mathfrak{h}$ of $\mathfrak{E}\ltimes \mathfrak{g}$ acts by scalars on $\lie g(k_1,\dots, k_n)$ and giving a root space decomposition: \begin{equation}\label{rootdec}\mathfrak{g}=\bigoplus _{\alpha \in (\mathfrak{E}\ltimes \mathfrak{h})^*} \mathfrak{g}_{\alpha }, \ \mathrm{where} \ \mathfrak{g}_{\alpha }
:=\{ x\in \mathfrak{g}\ |\ [h, x]=\alpha(h) x \ \mathrm{for\ all}\ h\in \mathfrak{E}\ltimes \mathfrak{h} \}.\end{equation} Define $\Pi=\{\alpha_i : i\in I\}\subseteq (\mathfrak{E}\ltimes\lie h)^{*}$ by $\alpha_j((D_k,h_i))=\delta_{k,j}+a_{i,j}$. The elements of $\Pi$ are called the simple roots of $\lie g$. Set $$Q:=\bigoplus _{\alpha\in \Pi}\mathbb{Z}\alpha,\ \ Q_+ :=\sum _{\alpha\in \Pi}\mathbb{Z}_{+}\alpha \ \ \text{and}\ \ Q_+^{\mathrm{im}} :=\sum _{\alpha\in \Pi_{\mathrm{im}}}\mathbb{Z}_{+}\alpha.$$ The coroot associated with $\alpha\in \Pi$ is denoted by $h_\alpha$, namely $h_\alpha=h_i$ for $\alpha=\alpha_i$. The set of roots is denoted by $\Delta :=\{ \alpha \in (\mathfrak{E}\ltimes\lie h)^*\backslash \{0\} \mid \mathfrak{g}_{\alpha }\neq 0\}$ and the set of positive roots is denoted by $\Delta_+:=\Delta\cap Q_+$. The elements in $\Pi^\mathrm{re}:=\{\alpha_i: i\in I^{\mathrm{re}}\}$ and $\Pi^\mathrm{im}:=\Pi\backslash \Pi^\mathrm{re}$ are called the set of real simple roots and the set of imaginary simple roots. We have $\Delta =\Delta_+ \sqcup - \Delta_+$ and $$\mathfrak{g}_0=\mathfrak{h},\ \ \lie g_\alpha=\lie g(k_1, \dots,k_n),\ \text{ if }\ \alpha=\sum_{i\in I} k_i\alpha_i\in \Delta.$$ \noindent Moreover, we have a triangular decomposition $\lie g\cong \lie n^{-}\oplus \lie h \oplus \lie n^+,$ where $\lie n^{\pm}=\bigoplus_{\alpha \in \pm\Delta_{+}} \mathfrak{g}_{\alpha}.$ Given $\gamma=\sum_{i\in I}k_i\alpha_i\in Q_+$, we set $\text{ht}(\gamma)=\sum_{i\in I}k_i.$ Finally, for $\lambda, \mu\in (\mathfrak{E}\ltimes \mathfrak{h})^*$ we say that $\lambda\ge \mu$ if $\lambda-\mu\in Q_+.$
\subsection{Characters}\label{subsec characters} Denote by $\mathcal{O}^{\rm{int}}$ the category consisting of integrable $\lie g-$modules from the category $\mathcal{O}$ of $\lie g$. Let $P_+=\{\lambda\in (\mathfrak{E}\ltimes \mathfrak{h})^*: \lambda(h_{\alpha})\in\mathbb{Z}_+, \ \alpha\in \Pi\}$ be the set of all dominant integral weights of $\lie g$ and let $L(\lambda)$ be the irreducible highest weight module of $\lie g$ associated to $\lambda\in P_+.$ Then it is well-known that there exists a bijective correspondence between the irreducible objects in $\mathcal{O}^{\rm{int}}$ and $\{L(\lambda): \lambda\in P_+\}$ and the category $\mathcal{O}^{\rm{int}}$ is semi-simple (see \cite{Bor88}). Given $\lambda \in P_+$, the module $L(\lambda)$ has a weight space decomposition $L(\lambda) = \bigoplus_{\mu \in \mathfrak{h}^*} L(\lambda)_\mu$. The formal character of $L(\lambda)$ is defined to be $\text{ch} L(\lambda) := \sum_{\mu\in \mathfrak{h}^*} \dim(L(\lambda)_\mu)\, e^\mu$. Let $\Omega(\lambda)$ be the set of all $\gamma\in Q_+$ such that $\gamma$ is the sum of mutually orthogonal distinct imaginary simple roots which are orthogonal to $\lambda$.
Note that $0\in \Omega(\lambda)$ and that an imaginary simple root $\alpha$ is in $\Omega(\lambda)$ if $(\lambda, \alpha)=0.$ The Weyl-Kac character formula gives:
\begin{equation}\label{WeylKac} \text{ch} L(\lambda)e^{-\lambda}= \frac{\sum\limits_{\gamma \in \Omega(\lambda) } (-1)^{\mathrm{ht}(\gamma)} e^{-\gamma}}{\sum\limits_{\gamma\in \Omega(0) } (-1)^{\mathrm{ht}(\gamma)} e^{-\gamma}} \end{equation}
\fi
\end{document} | arXiv | {
"id": "2209.08029.tex",
"language_detection_score": 0.6351544857025146,
"char_num_after_normalized": null,
"contain_at_least_two_stop_words": null,
"ellipsis_line_ratio": null,
"idx": null,
"lines_start_with_bullet_point_ratio": null,
"mean_length_of_alpha_words": null,
"non_alphabetical_char_ratio": null,
"path": null,
"symbols_to_words_ratio": null,
"uppercase_word_ratio": null,
"type": null,
"book_name": null,
"mimetype": null,
"page_index": null,
"page_path": null,
"page_title": null
} | arXiv/math_arXiv_v0.2.jsonl | null | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.